aerospike 2.29.0 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -193,7 +193,7 @@ module Aerospike
  # Exp.val(0))
  # end</pre>
  #
- # @param return_type metadata attributes to return. See {@link CDT::ListReturnTypeend
+ # @param return_type metadata attributes to return. See {@link CDT::ListReturnType
  # @param value search expression
  # @param bin list bin or list value expression
  # @param ctx optional context path for nested CDT
@@ -210,7 +210,7 @@ module Aerospike
  # ListExp.getByValueRange(CDT::ListReturnType::VALUE, Exp.val(10), Exp.val(20), Exp.listBin("a"))
  # end</pre>
  #
- # @param return_type metadata attributes to return. See {@link CDT::ListReturnTypeend
+ # @param return_type metadata attributes to return. See {@link CDT::ListReturnType}
  # @param value_begin begin expression inclusive. If nil, range is less than value_end.
  # @param value_end end expression exclusive. If nil, range is greater than equal to value_begin.
  # @param bin bin or list value expression
@@ -228,7 +228,7 @@ module Aerospike
  end

  # Create expression that selects list items nearest to value and greater by relative rank with a count limit
- # and returns selected data specified by return_type (See {@link CDT::ListReturnTypeend).
+ # and returns selected data specified by return_type (See {@link CDT::ListReturnType).
  #
  # Examples for ordered list [0,4,5,9,11,15]:
  #
@@ -365,11 +365,17 @@ module Aerospike
  end

  def self.get_value_type(return_type)
- if (return_type & ~CDT::ListReturnType::INVERTED) == CDT::ListReturnType::VALUE
- Exp::Type::LIST
- else
- Exp::Type::INT
- end
+ t = return_type & ~CDT::ListReturnType::INVERTED
+ case t
+ when CDT::ListReturnType::INDEX, CDT::ListReturnType::REVERSE_INDEX, CDT::ListReturnType::RANK, CDT::ListReturnType::REVERSE_RANK, CDT::ListReturnType::VALUE
+ Exp::Type::LIST
+ when CDT::ListReturnType::COUNT
+ Exp::Type::INT
+ when CDT::ListReturnType::EXISTS
+ Exp::Type::BOOL
+ else
+ raise Aerospike::Exceptions::Aerospike.new(Aerospike::ResultCode::SERVER_ERROR, "Invalid MapReturnType: #{return_type}")
+ end
  end

  def self.pack_range_operation(command, return_type, value_begin, value_end, ctx)
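
The rewrite above widens get_value_type from a two-way VALUE/INT choice to an explicit case over every list return type: the index- and rank-based modes and VALUE map to Exp::Type::LIST, COUNT maps to INT, the new EXISTS mode maps to BOOL, and anything unrecognized now raises instead of silently falling back to INT. A minimal sketch of the resulting mapping, assuming the helper is reachable as ListExp.get_value_type (the receiver name follows the doc comments in this file and is not verified against the released 3.0.0 API):

    # Illustrative only; the ListExp receiver is an assumption, the constants are from this diff.
    ListExp.get_value_type(CDT::ListReturnType::VALUE)    # => Exp::Type::LIST
    ListExp.get_value_type(CDT::ListReturnType::COUNT)    # => Exp::Type::INT
    ListExp.get_value_type(CDT::ListReturnType::EXISTS)   # => Exp::Type::BOOL
    # The INVERTED bit is masked off before the lookup:
    ListExp.get_value_type(CDT::ListReturnType::INDEX | CDT::ListReturnType::INVERTED)  # => Exp::Type::LIST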
@@ -479,15 +479,26 @@ module Aerospike

  def self.get_value_type(return_type)
  t = return_type & ~CDT::MapReturnType::INVERTED
-
- if t <= CDT::MapReturnType::COUNT
- return Exp::Type::INT
- end
-
- if t == CDT::MapReturnType::KEY_VALUE
- return Exp::Type::MAP
+ case t
+ when CDT::MapReturnType::INDEX, CDT::MapReturnType::REVERSE_INDEX, CDT::MapReturnType::RANK, CDT::MapReturnType::REVERSE_RANK, CDT::MapReturnType::KEY, CDT::MapReturnType::VALUE
+ Exp::Type::LIST
+ when CDT::MapReturnType::COUNT
+ Aerospike::Exp::Type::INT
+ when CDT::MapReturnType::KEY_VALUE
+ Exp::Type::MAP
+ when CDT::MapReturnType::EXISTS
+ Exp::Type::BOOL
+ else
+ raise Aerospike::Exceptions::Aerospike.new(Aerospike::ResultCode::SERVER_ERROR, "Invalid MapReturnType: #{return_type}")
  end
- return Exp::Type::LIST
+ # if t <= CDT::MapReturnType::COUNT
+ # return Exp::Type::INT
+ # end
+ #
+ # if t == CDT::MapReturnType::KEY_VALUE
+ # return Exp::Type::MAP
+ # end
+ # return Exp::Type::LIST
  end
  end # class MapExp
  end # module Aerospike
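
MapExp gets the same treatment, with KEY_VALUE mapping to Exp::Type::MAP and the pre-3.0 logic left behind as a commented-out block for reference. One behavioral difference worth noting: a return type the case does not recognize now raises, whereas the commented-out 2.29.0 code fell through to Exp::Type::LIST. A hedged sketch of that error path, assuming the helper is reachable as MapExp.get_value_type and that the exception exposes its result code:

    # Illustrative only; the MapExp receiver and result-code accessor are assumptions,
    # the raise itself is taken from the hunk above.
    begin
      MapExp.get_value_type(0xFF)   # hypothetical value outside the defined MapReturnType constants
    rescue Aerospike::Exceptions::Aerospike => e
      # expected: result code SERVER_ERROR, message "Invalid MapReturnType: 255"
    end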
@@ -26,6 +26,11 @@ module Aerospike

  PARTITIONS = 4096
  FULL_HEALTH = 100
+ HAS_PARTITION_SCAN = 1 << 0
+ HAS_QUERY_SHOW = 1 << 1
+ HAS_BATCH_ANY = 1 << 2
+ HAS_PARTITION_QUERY = 1 << 3
+

  # Initialize server node with connection parameters.
  def initialize(cluster, nv)
@@ -58,6 +63,14 @@ module Aerospike
  @connections = ::Aerospike::ConnectionPool.new(cluster, host)
  end

+ def partition_query?
+ (@features & HAS_PARTITION_QUERY) != 0
+ end
+
+ def query_show?
+ (@features & HAS_QUERY_SHOW) != 0
+ end
+
  def update_racks(parser)
  new_racks = parser.update_racks
  @racks.value = new_racks if new_racks
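
The HAS_* constants added above are single-bit feature flags, and the new predicates test them against the node's @features bitmask to decide whether the server supports the partition-query and query-show protocols (the @features value itself is presumably populated from the server's feature list elsewhere in the client). The bit arithmetic in isolation, using only values copied from this hunk:

    # Standalone illustration of the flag arithmetic behind partition_query? / query_show?.
    HAS_PARTITION_SCAN  = 1 << 0  # 1
    HAS_QUERY_SHOW      = 1 << 1  # 2
    HAS_BATCH_ANY       = 1 << 2  # 4
    HAS_PARTITION_QUERY = 1 << 3  # 8

    features = HAS_PARTITION_SCAN | HAS_PARTITION_QUERY  # => 9
    (features & HAS_PARTITION_QUERY) != 0                # => true  (partition_query?)
    (features & HAS_QUERY_SHOW) != 0                     # => false (query_show?)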
@@ -23,9 +23,9 @@ module Aerospike

  class QueryCommand < StreamCommand #:nodoc:

- def initialize(node, policy, statement, recordset, partitions)
+ def initialize(cluster, node, policy, statement, recordset, partitions)
  super(node)
-
+ @cluster = cluster
  @policy = policy
  @statement = statement
  @recordset = recordset
@@ -33,209 +33,7 @@ module Aerospike
  end

  def write_buffer
- fieldCount = 0
- filterSize = 0
- binNameSize = 0
- predSize = 0
-
- begin_cmd
-
- if @statement.namespace
- @data_offset += @statement.namespace.bytesize + FIELD_HEADER_SIZE
- fieldCount+=1
- end
-
- if @statement.index_name
- @data_offset += @statement.index_name.bytesize + FIELD_HEADER_SIZE
- fieldCount+=1
- end
-
- if @statement.set_name
- @data_offset += @statement.set_name.bytesize + FIELD_HEADER_SIZE
- fieldCount+=1
- end
-
- if !is_scan?
- col_type = @statement.filters[0].collection_type
- if col_type > 0
- @data_offset += FIELD_HEADER_SIZE + 1
- fieldCount += 1
- end
-
- @data_offset += FIELD_HEADER_SIZE
- filterSize+=1 # num filters
-
- @statement.filters.each do |filter|
- sz = filter.estimate_size
- filterSize += sz
- end
- @data_offset += filterSize
- fieldCount+=1
-
- if @statement.bin_names && @statement.bin_names.length > 0
- @data_offset += FIELD_HEADER_SIZE
- binNameSize+=1 # num bin names
-
- @statement.bin_names.each do |bin_name|
- binNameSize += bin_name.bytesize + 1
- end
- @data_offset += binNameSize
- fieldCount+=1
- end
- else
- @data_offset += @partitions.length * 2 + FIELD_HEADER_SIZE
- fieldCount += 1
-
- if @policy.records_per_second > 0
- @data_offset += 4 + FIELD_HEADER_SIZE
- fieldCount += 1
- end
-
- # Calling query with no filters is more efficiently handled by a primary index scan.
- # Estimate scan options size.
- # @data_offset += (2 + FIELD_HEADER_SIZE)
- # fieldCount+=1
- end
-
- @statement.set_task_id
-
- @data_offset += 8 + FIELD_HEADER_SIZE
- fieldCount+=1
-
- predexp = @policy.predexp || @statement.predexp
-
- if predexp
- @data_offset += FIELD_HEADER_SIZE
- predSize = Aerospike::PredExp.estimate_size(predexp)
- @data_offset += predSize
- fieldCount += 1
- end
-
- if @statement.function_name
- @data_offset += FIELD_HEADER_SIZE + 1 # udf type
- @data_offset += @statement.package_name.bytesize + FIELD_HEADER_SIZE
- @data_offset += @statement.function_name.bytesize + FIELD_HEADER_SIZE
-
- if @statement.function_args && @statement.function_args.length > 0
- functionArgBuffer = Value.of(@statement.function_args).to_bytes
- else
- functionArgBuffer = ''
- end
- @data_offset += FIELD_HEADER_SIZE + functionArgBuffer.bytesize
- fieldCount += 4
- end
-
- if @statement.filters.nil? || @statement.filters.empty?
- if @statement.bin_names && @statement.bin_names.length > 0
- @statement.bin_names.each do |bin_name|
- estimate_operation_size_for_bin_name(bin_name)
- end
- end
- end
-
- size_buffer
-
- readAttr = @policy.include_bin_data ? INFO1_READ : INFO1_READ | INFO1_NOBINDATA
- operation_count = (is_scan? && !@statement.bin_names.nil?) ? @statement.bin_names.length : 0
-
- write_header(@policy, readAttr, 0, fieldCount, operation_count)
-
- if @statement.namespace
- write_field_string(@statement.namespace, Aerospike::FieldType::NAMESPACE)
- end
-
- unless @statement.index_name.nil?
- write_field_string(@statement.index_name, Aerospike::FieldType::INDEX_NAME)
- end
-
- if @statement.set_name
- write_field_string(@statement.set_name, Aerospike::FieldType::TABLE)
- end
-
- if !is_scan?
- col_type = @statement.filters[0].collection_type
- if col_type > 0
- write_field_header(1, Aerospike::FieldType::INDEX_TYPE)
- @data_buffer.write_byte(col_type, @data_offset)
- @data_offset+=1
- end
-
- write_field_header(filterSize, Aerospike::FieldType::INDEX_RANGE)
- @data_buffer.write_byte(@statement.filters.length, @data_offset)
- @data_offset+=1
-
- @statement.filters.each do |filter|
- @data_offset = filter.write(@data_buffer, @data_offset)
- end
-
- # Query bin names are specified as a field (Scan bin names are specified later as operations)
- if @statement.bin_names && @statement.bin_names.length > 0
- write_field_header(binNameSize, Aerospike::FieldType::QUERY_BINLIST)
- @data_buffer.write_byte(@statement.bin_names.length, @data_offset)
- @data_offset += 1
-
- @statement.bin_names.each do |bin_name|
- len = @data_buffer.write_binary(bin_name, @data_offset + 1)
- @data_buffer.write_byte(len, @data_offset)
- @data_offset += len + 1;
- end
- end
- else
- write_field_header(@partitions.length * 2, Aerospike::FieldType::PID_ARRAY)
- for pid in @partitions
- @data_buffer.write_uint16_little_endian(pid, @data_offset)
- @data_offset += 2
- end
-
- if @policy.records_per_second > 0
- write_field_int(@policy.records_per_second, Aerospike::FieldType::RECORDS_PER_SECOND)
- end
-
- # Calling query with no filters is more efficiently handled by a primary index scan.
- # write_field_header(2, Aerospike::FieldType::SCAN_OPTIONS)
- # priority = @policy.priority.ord
- # priority = priority << 4
- # @data_buffer.write_byte(priority, @data_offset)
- # @data_offset+=1
- # @data_buffer.write_byte(100.ord, @data_offset)
- # @data_offset+=1
- end
-
- write_field_header(8, Aerospike::FieldType::TRAN_ID)
- @data_buffer.write_int64(@statement.task_id, @data_offset)
- @data_offset += 8
-
- if predexp
- write_field_header(predSize, Aerospike::FieldType::PREDEXP)
- @data_offset = Aerospike::PredExp.write(
- predexp, @data_buffer, @data_offset
- )
- end
-
- if @statement.function_name
- write_field_header(1, Aerospike::FieldType::UDF_OP)
- if @statement.return_data
- @data_buffer.write_byte(1, @data_offset)
- @data_offset+=1
- else
- @data_buffer.write_byte(2, @data_offset)
- @data_offset+=1
- end
-
- write_field_string(@statement.package_name, Aerospike::FieldType::UDF_PACKAGE_NAME)
- write_field_string(@statement.function_name, Aerospike::FieldType::UDF_FUNCTION)
- write_field_bytes(functionArgBuffer, Aerospike::FieldType::UDF_ARGLIST)
- end
-
- if is_scan? && !@statement.bin_names.nil?
- @statement.bin_names.each do |bin_name|
- write_operation_for_bin_name(bin_name, Aerospike::Operation::READ)
- end
- end
-
- end_cmd
-
- return nil
+ set_query(@cluster, @policy, @statement, false, @partitions)
  end

  def is_scan?
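
The entire hand-rolled wire serialization above is replaced by one call into the shared set_query helper on the command base class. Judging from the 2.29.0 call later in this diff, set_query(@policy, @statement, background, nil), the fourth argument is the background-execution flag, so the two call sites visible in this diff read as follows (both lines appear verbatim in the hunks; the "background flag" reading is inferred from the old signature, not from the 3.0.0 docs):

    set_query(@cluster, @policy, @statement, false, @partitions)   # QueryCommand: stream records back
    set_query(@cluster, @policy, @statement, true, nil)            # background/execute query command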
@@ -34,7 +34,7 @@ module Aerospike
  list.each do |node_partition|
  threads << Thread.new do
  Thread.current.abort_on_exception = true
- command = QueryPartitionCommand.new(node_partition.node, tracker, policy, statement, recordset, node_partition)
+ command = QueryPartitionCommand.new(cluster, node_partition.node, tracker, policy, statement, recordset, node_partition)
  begin
  command.execute
  rescue => e
@@ -48,7 +48,7 @@ module Aerospike
  else
  # Use a single thread for all nodes for all node
  list.each do |node_partition|
- command = QueryPartitionCommand.new(node_partition.node, tracker, policy, statement, recordset, node_partition)
+ command = QueryPartitionCommand.new(cluster, node_partition.node, tracker, policy, statement, recordset, node_partition)
  begin
  command.execute
  rescue => e
@@ -21,243 +21,17 @@ module Aerospike
  private

  class QueryPartitionCommand < QueryCommand #:nodoc:
- def initialize(node, tracker, policy, statement, recordset, node_partitions)
- super(node, policy, statement, recordset, @node_partitions)
+ def initialize(cluster, node, tracker, policy, statement, recordset, node_partitions)
+ super(cluster, node, policy, statement, recordset, @node_partitions)
  @node_partitions = node_partitions
  @tracker = tracker
  end

  def write_buffer
- function_arg_buffer = nil
- field_count = 0
- filter_size = 0
- bin_name_size = 0
-
- begin_cmd
-
- if @statement.namespace
- @data_offset += @statement.namespace.bytesize + FIELD_HEADER_SIZE
- field_count += 1
- end
-
- if @statement.set_name
- @data_offset += @statement.set_name.bytesize + FIELD_HEADER_SIZE
- field_count += 1
- end
-
- # Estimate recordsPerSecond field size. This field is used in new servers and not used
- # (but harmless to add) in old servers.
- if @policy.records_per_second > 0
- @data_offset += 4 + FIELD_HEADER_SIZE
- field_count += 1
- end
-
- # Estimate socket timeout field size. This field is used in new servers and not used
- # (but harmless to add) in old servers.
- @data_offset += 4 + FIELD_HEADER_SIZE
- field_count += 1
-
- # Estimate task_id field.
- @data_offset += 8 + FIELD_HEADER_SIZE
- field_count += 1
-
- filter = @statement.filters[0]
- bin_names = @statement.bin_names
- packed_ctx = nil
-
- if filter
- col_type = filter.collection_type
-
- # Estimate INDEX_TYPE field.
- if col_type > 0
- @data_offset += FIELD_HEADER_SIZE + 1
- field_count += 1
- end
-
- # Estimate INDEX_RANGE field.
- @data_offset += FIELD_HEADER_SIZE
- filter_size += 1 # num filters
- filter_size += filter.estimate_size
-
- @data_offset += filter_size
- field_count += 1
-
- packed_ctx = filter.packed_ctx
- if packed_ctx
- @data_offset += FIELD_HEADER_SIZE + packed_ctx.length
- field_count += 1
- end
- end
-
- @statement.set_task_id
- predexp = @policy.predexp || @statement.predexp
-
- if predexp
- @data_offset += FIELD_HEADER_SIZE
- pred_size = Aerospike::PredExp.estimate_size(predexp)
- @data_offset += pred_size
- field_count += 1
- end
-
- unless @policy.filter_exp.nil?
- exp_size = estimate_expression_size(@policy.filter_exp)
- field_count += 1 if exp_size > 0
- end
-
- # Estimate aggregation/background function size.
- if @statement.function_name
- @data_offset += FIELD_HEADER_SIZE + 1 # udf type
- @data_offset += @statement.package_name.bytesize + FIELD_HEADER_SIZE
- @data_offset += @statement.function_name.bytesize + FIELD_HEADER_SIZE
-
- function_arg_buffer = ""
- if @statement.function_args && @statement.function_args.length > 0
- function_arg_buffer = Value.of(@statement.function_args).to_bytes
- end
- @data_offset += FIELD_HEADER_SIZE + function_arg_buffer.bytesize
- field_count += 4
- end
-
- max_records = 0
- parts_full_size = 0
- parts_partial_digest_size = 0
- parts_partial_bval_size = 0
-
- unless @node_partitions.nil?
- parts_full_size = @node_partitions.parts_full.length * 2
- parts_partial_digest_size = @node_partitions.parts_partial.length * 20
-
- unless filter.nil?
- parts_partial_bval_size = @node_partitions.parts_partial.length * 8
- end
- max_records = @node_partitions.record_max
- end
-
- if parts_full_size > 0
- @data_offset += parts_full_size + FIELD_HEADER_SIZE
- field_count += 1
- end
-
- if parts_partial_digest_size > 0
- @data_offset += parts_partial_digest_size + FIELD_HEADER_SIZE
- field_count += 1
- end
-
- if parts_partial_bval_size > 0
- @data_offset += parts_partial_bval_size + FIELD_HEADER_SIZE
- field_count += 1
- end
-
- # Estimate max records field size. This field is used in new servers and not used
- # (but harmless to add) in old servers.
- if max_records > 0
- @data_offset += 8 + FIELD_HEADER_SIZE
- field_count += 1
- end
-
- operation_count = 0
- unless bin_names.empty?
- # Estimate size for selected bin names (query bin names already handled for old servers).
- bin_names.each do |bin_name|
- estimate_operation_size_for_bin_name(bin_name)
- end
- operation_count = bin_names.length
- end
-
- projected_offset = @data_offset
-
- size_buffer
-
- read_attr = INFO1_READ
- read_attr |= INFO1_NOBINDATA if !@policy.include_bin_data
- read_attr |= INFO1_SHORT_QUERY if @policy.short_query
-
- infoAttr = INFO3_PARTITION_DONE
-
- write_header(@policy, read_attr, 0, field_count, operation_count)
-
- write_field_string(@statement.namespace, FieldType::NAMESPACE) if @statement.namespace
- write_field_string(@statement.set_name, FieldType::TABLE) if @statement.set_name
-
- # Write records per second.
- write_field_int(@policy.records_per_second, FieldType::RECORDS_PER_SECOND) if @policy.records_per_second > 0
-
- write_filter_exp(@policy.filter_exp, exp_size)
-
- # Write socket idle timeout.
- write_field_int(@policy.socket_timeout, FieldType::SOCKET_TIMEOUT)
-
- # Write task_id field
- write_field_int64(@statement.task_id, FieldType::TRAN_ID)
-
- unless predexp.nil?
- write_field_header(pred_size, Aerospike::FieldType::PREDEXP)
- @data_offset = Aerospike::PredExp.write(
- predexp, @data_buffer, @data_offset
- )
- end
-
- if filter
- type = filter.collection_type
-
- if type > 0
- write_field_header(1, FieldType::INDEX_TYPE)
- @data_offset += @data_buffer.write_byte(type, @data_offset)
- end
-
- write_field_header(filter_size, FieldType::INDEX_RANGE)
- @data_offset += @data_buffer.write_byte(1, @data_offset)
- @data_offset = filter.write(@data_buffer, @data_offset)
-
- if packed_ctx
- write_field_header(packed_ctx.length, FieldType::INDEX_CONTEXT)
- @data_offset += @data_buffer.write_binary(packed_ctx, @data_offset)
- end
- end
-
- if @statement.function_name
- write_field_header(1, FieldType::UDF_OP)
- @data_offset += @data_buffer.write_byte(1, @data_offset)
- write_field_string(@statement.package_name, FieldType::UDF_PACKAGE_NAME)
- write_field_string(@statement.function_name, FieldType::UDF_FUNCTION)
- write_field_string(function_arg_buffer, FieldType::UDF_ARGLIST)
- end
-
- if parts_full_size > 0
- write_field_header(parts_full_size, FieldType::PID_ARRAY)
- @node_partitions.parts_full.each do |part|
- @data_offset += @data_buffer.write_uint16_little_endian(part.id, @data_offset)
- end
- end
-
- if parts_partial_digest_size > 0
- write_field_header(parts_partial_digest_size, FieldType::DIGEST_ARRAY)
- @node_partitions.parts_partial.each do |part|
- @data_offset += @data_buffer.write_binary(part.digest, @data_offset)
- end
- end
-
- if parts_partial_bval_size > 0
- write_field_header(parts_partial_bval_size, FieldType::BVAL_ARRAY)
- @node_partitions.parts_partial.each do |part|
- @data_offset += @data_buffer.write_uint64_little_endian(part.bval, @data_offset)
- end
- end
-
- if max_records > 0
- write_field(max_records, FieldType::MAX_RECORDS)
- end
-
- unless bin_names.empty?
- bin_names.each do |bin_name|
- write_operation_for_bin_name(bin_name, Operation::READ)
- end
- end
+ set_query(@cluster, @policy, @statement, false, @node_partitions)
+ end

- end_cmd

- nil
- end

  def should_retry(e)
  # !! converts nil to false
@@ -34,7 +34,7 @@ module Aerospike
  list.each do |node_partition|
  threads << Thread.new do
  Thread.current.abort_on_exception = true
- command = ScanPartitionCommand.new(policy, tracker, node_partition, namespace, set_name, bin_names, recordset)
+ command = ScanPartitionCommand.new(cluster, policy, tracker, node_partition, namespace, set_name, bin_names, recordset)
  begin
  command.execute
  rescue => e
@@ -48,7 +48,7 @@ module Aerospike
  else
  # Use a single thread for all nodes for all node
  list.each do |node_partition|
- command = ScanPartitionCommand.new(policy, tracker, node_partition, namespace, set_name, bin_names, recordset)
+ command = ScanPartitionCommand.new(cluster, policy, tracker, node_partition, namespace, set_name, bin_names, recordset)
  begin
  command.execute
  rescue => e
@@ -23,9 +23,9 @@ module Aerospike

  class ScanPartitionCommand < StreamCommand #:nodoc:

- def initialize(policy, tracker, node_partitions, namespace, set_name, bin_names, recordset)
+ def initialize(cluster, policy, tracker, node_partitions, namespace, set_name, bin_names, recordset)
  super(node_partitions.node)
-
+ @cluster = cluster
  @policy = policy
  @namespace = namespace
  @set_name = set_name
@@ -36,7 +36,7 @@ module Aerospike
  end

  def write_buffer
- set_scan(@policy, @namespace, @set_name, @bin_names, @node_partitions)
+ set_scan(@cluster, @policy, @namespace, @set_name, @bin_names, @node_partitions)
  end

  def should_retry(e)
@@ -33,12 +33,11 @@ module Aerospike
  end

  def write_buffer
- set_query(@policy, @statement, background, nil)
+ set_query(@cluster, @policy, @statement, true, nil)
  end

- def parse_row
+ def parse_row(result_code)
  field_count = @data_buffer.read_int16(18)
- result_code = @data_buffer.read(5).ord & 0xFF
  skip_key(field_count)

  if result_code != 0
@@ -1,4 +1,4 @@
- # Copyright 2013-2020 Aerospike, Inc.
+ # Copyright 2013-2023 Aerospike, Inc.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -47,7 +47,7 @@ module Aerospike
  end

  conn = node.get_connection(0)
- responseMap, _ = Info.request(conn, command)
+ responseMap, = Info.request(conn, command)
  node.put_connection(conn)

  response = responseMap[command]
@@ -1,4 +1,4 @@
  # encoding: utf-8
  module Aerospike
- VERSION = "2.29.0"
+ VERSION = "3.0.0"
  end
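
The version constant moves from 2.29.0 to 3.0.0, consistent with the signature changes to the internal command classes shown above (the added cluster parameter, the new parse_row arity). For applications that pin the gem with a pessimistic constraint, opting in would look something like the following Gemfile line (illustrative only, standard Bundler syntax, not taken from the package):

    # Gemfile
    gem "aerospike", "~> 3.0"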