aerospike 2.22.0 → 2.24.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +307 -262
  3. data/lib/aerospike/atomic/atomic.rb +1 -1
  4. data/lib/aerospike/cdt/context.rb +7 -7
  5. data/lib/aerospike/cdt/list_return_type.rb +4 -0
  6. data/lib/aerospike/cdt/map_operation.rb +6 -6
  7. data/lib/aerospike/cdt/map_return_type.rb +4 -0
  8. data/lib/aerospike/client.rb +59 -84
  9. data/lib/aerospike/command/admin_command.rb +1 -1
  10. data/lib/aerospike/command/batch_index_node.rb +1 -1
  11. data/lib/aerospike/command/batch_item.rb +1 -1
  12. data/lib/aerospike/command/command.rb +65 -25
  13. data/lib/aerospike/command/field_type.rb +25 -25
  14. data/lib/aerospike/command/login_command.rb +4 -4
  15. data/lib/aerospike/command/multi_command.rb +8 -2
  16. data/lib/aerospike/command/read_command.rb +2 -2
  17. data/lib/aerospike/connection/authenticate.rb +3 -3
  18. data/lib/aerospike/exp/exp.rb +1262 -0
  19. data/lib/aerospike/features.rb +9 -9
  20. data/lib/aerospike/host/parse.rb +2 -2
  21. data/lib/aerospike/key.rb +10 -1
  22. data/lib/aerospike/node/refresh/info.rb +1 -1
  23. data/lib/aerospike/node/verify/name.rb +1 -1
  24. data/lib/aerospike/node/verify/partition_generation.rb +1 -1
  25. data/lib/aerospike/node/verify/peers_generation.rb +1 -1
  26. data/lib/aerospike/node/verify/rebalance_generation.rb +1 -1
  27. data/lib/aerospike/policy/policy.rb +4 -1
  28. data/lib/aerospike/policy/query_policy.rb +35 -2
  29. data/lib/aerospike/policy/scan_policy.rb +19 -2
  30. data/lib/aerospike/privilege.rb +1 -1
  31. data/lib/aerospike/query/node_partitions.rb +39 -0
  32. data/lib/aerospike/query/partition_filter.rb +66 -0
  33. data/lib/aerospike/query/partition_status.rb +36 -0
  34. data/lib/aerospike/query/partition_tracker.rb +347 -0
  35. data/lib/aerospike/query/query_command.rb +1 -1
  36. data/lib/aerospike/query/query_executor.rb +73 -0
  37. data/lib/aerospike/query/query_partition_command.rb +266 -0
  38. data/lib/aerospike/query/scan_command.rb +3 -3
  39. data/lib/aerospike/query/scan_executor.rb +69 -0
  40. data/lib/aerospike/query/scan_partition_command.rb +49 -0
  41. data/lib/aerospike/query/statement.rb +8 -1
  42. data/lib/aerospike/query/stream_command.rb +15 -1
  43. data/lib/aerospike/result_code.rb +79 -4
  44. data/lib/aerospike/role.rb +2 -2
  45. data/lib/aerospike/task/execute_task.rb +2 -2
  46. data/lib/aerospike/task/index_task.rb +1 -1
  47. data/lib/aerospike/user_role.rb +1 -1
  48. data/lib/aerospike/utils/buffer.rb +32 -7
  49. data/lib/aerospike/utils/pool.rb +1 -1
  50. data/lib/aerospike/value/value.rb +6 -6
  51. data/lib/aerospike/version.rb +1 -1
  52. data/lib/aerospike.rb +8 -0
  53. metadata +14 -5
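The bulk of this release is new partition-aware scan/query plumbing (PartitionTracker, PartitionFilter, PartitionStatus, NodePartitions, QueryExecutor, QueryPartitionCommand, ScanExecutor, ScanPartitionCommand) plus the new filter-expression module (exp.rb). For orientation, a caller-side sketch of how these pieces are typically driven is shown below; it assumes the client exposes scan_partitions / query_partitions and a PartitionFilter.by_range constructor, as Aerospike's other clients do, so the method names are assumptions rather than quotes from this diff.

    # Hypothetical usage sketch (not taken from the diff): driving the new
    # partition-aware scan and query entry points. Names such as
    # scan_partitions, query_partitions and PartitionFilter.by_range follow
    # Aerospike's other clients and may differ slightly in this gem.
    require 'aerospike'

    client = Aerospike::Client.new('127.0.0.1')

    # Scan only partitions 0..1023 of a namespace/set.
    filter = Aerospike::PartitionFilter.by_range(0, 1024)
    recordset = client.scan_partitions(filter, 'test', 'demo-set')
    recordset.each { |rec| p rec.bins }

    # Query the same partition range with a statement; the tracker added in
    # this release retries any partitions that were unavailable during a pass.
    statement = Aerospike::Statement.new('test', 'demo-set')
    recordset = client.query_partitions(filter, statement)
    recordset.each { |rec| p rec.bins }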
data/lib/aerospike/query/partition_tracker.rb
@@ -0,0 +1,347 @@
+ # frozen_string_literal: true
+
+ # Copyright 2014-2020 Aerospike, Inc.
+ #
+ # Portions may be licensed to Aerospike, Inc. under one or more contributor
+ # license agreements.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ # use this file except in compliance with the License. You may obtain a copy of
+ # the License at http:#www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations under
+ # the License.
+
+ module Aerospike
+   class PartitionTracker
+     attr_reader :partitions, :partitions_capacity, :partition_begin, :node_capacity,
+                 :node_filter, :partition_filter, :node_partitions_list, :max_records,
+                 :sleep_between_retries, :socket_timeout, :total_timeout, :iteration, :deadline
+
+     def initialize(policy, nodes, partition_filter = nil)
+       if partition_filter.nil?
+         return init_for_node(policy, nodes[0]) if nodes.length == 1
+         return init_for_nodes(policy, nodes)
+       end
+
+       # Validate here instead of in the PartitionFilter constructor because the total number
+       # of cluster partitions may change on the server and PartitionFilter will never have
+       # access to the Cluster instance. Use a fixed number of partitions for now.
+       unless partition_filter.partition_begin.between?(0, Node::PARTITIONS - 1)
+         raise Aerospike::Exceptions::Aerospike.new(
+           Aerospike::ResultCode::PARAMETER_ERROR,
+           "Invalid partition begin #{partition_filter.partition_begin}. Valid range: 0-#{Aerospike::Node::PARTITIONS - 1}"
+         )
+       end
+
+       if partition_filter.count <= 0
+         raise Aerospike::Exceptions::Aerospike.new(
+           Aerospike::ResultCode::PARAMETER_ERROR,
+           "Invalid partition count #{partition_filter.count}"
+         )
+       end
+
+       if partition_filter.partition_begin + partition_filter.count > Node::PARTITIONS
+         raise Aerospike::Exceptions::Aerospike.new(
+           Aerospike::ResultCode::PARAMETER_ERROR,
+           "Invalid partition range (#{partition_filter.partition_begin}, #{partition_filter.partition_begin + partition_filter.count})"
+         )
+       end
+
+       @partition_begin = partition_filter.partition_begin
+       @node_capacity = nodes.length
+       @node_filter = nil
+       @partitions_capacity = partition_filter.count
+       @max_records = policy.max_records
+       @iteration = 1
+
+       if partition_filter.partitions.nil?
+         partition_filter.partitions = init_partitions(policy, partition_filter.count, partition_filter.digest)
+       elsif policy.max_records <= 0
+         # Retry all partitions when max_records is not specified.
+         partition_filter.partitions.each do |ps|
+           ps.retry = true
+         end
+       end
+
+       @partitions = partition_filter.partitions
+       @partition_filter = partition_filter
+       init_timeout(policy)
+     end
+
+     def assign_partitions_to_nodes(cluster, namespace)
+       list = []
+
+       pmap = cluster.partitions
+       replica_array = pmap[namespace]
+       raise Aerospike::Exceptions::InvalidNamespace.new("namespace not found in the partition map") if !replica_array
+
+       master = (replica_array.get)[0]
+       master = master.get
+
+       @partitions.each do |part|
+         if part&.retry
+           node = master[part.id]
+
+           unless node
+             raise Exceptions::Aerospike.new(Aerospike::ResultCode::INVALID_NAMESPACE, "Invalid Partition Id #{part.id} for namespace `#{namespace}` in Partition Scan")
+           end
+
+           part.retry = false
+
+
+           # Use the node name to check for single-node equality because the
+           # partition map may be in a transitional state between the old and
+           # new node with the same name.
+           next if @node_filter && @node_filter.name != node.name
+
+           np = find_node(list, node)
+
+           unless np
+             # If the partition map is in a transitional state, multiple
+             # node_partitions instances (each with different partitions)
+             # may be created for a single node.
+             np = NodePartitions.new(node)
+             list << np
+           end
+           np.add_partition(part)
+         end
+       end
+
+       if @max_records.positive?
+         # Distribute max_records across nodes.
+         node_size = list.length
+
+         if @max_records < node_size
+           # Only include nodes that have at least 1 record requested.
+           node_size = @max_records
+           list = list[0...node_size]
+         end
+
+         max = 0
+         max = @max_records / node_size if node_size.positive?
+         rem = @max_records - (max * node_size)
+
+         list[0...node_size].each_with_index do |np, i|
+           np.record_max = (i < rem ? max + 1 : max)
+         end
+       end
+
+       @node_partitions_list = list
+       list
+     end
+
+     def init_timeout(policy)
+       @sleep_between_retries = policy.sleep_between_retries
+       @socket_timeout = policy.socket_timeout
+       @total_timeout = policy.timeout
+       if @total_timeout.positive?
+         @deadline = Time.now + @total_timeout
+         if !@socket_timeout || @socket_timeout > @total_timeout
+           @socket_timeout = @total_timeout
+         end
+       end
+     end
+
+     def init_partitions(policy, partition_count, digest)
+       parts_all = Array.new(partition_count)
+
+       (0...partition_count).each do |i|
+         parts_all[i] = Aerospike::PartitionStatus.new(@partition_begin + i)
+       end
+
+       parts_all[0].digest = digest if digest
+
+       @sleep_between_retries = policy.sleep_between_retries
+       @socket_timeout = policy.socket_timeout
+       @total_timeout = policy.timeout
+
+       if @total_timeout.positive?
+         @deadline = Time.now + @total_timeout
+
+         if @socket_timeout == 0 || @socket_timeout > @total_timeout
+           @socket_timeout = @total_timeout
+         end
+       end
+
+       parts_all
+     end
+
+     attr_writer :sleep_between_retries
+
+
+     def find_node(list, node)
+       list.each do |node_partition|
+         # Use pointer equality for performance.
+         return node_partition if node_partition.node == node
+       end
+       nil
+     end
+
+     def partition_unavailable(node_partitions, partition_id)
+       @partitions[partition_id - @partition_begin].retry = true
+       node_partitions.parts_unavailable += 1
+     end
+
+     def set_digest(node_partitions, key)
+       partition_id = key.partition_id
+       @partitions[partition_id - @partition_begin].digest = key.digest
+       node_partitions.record_count += 1
+     end
+
+     def set_last(node_partitions, key, bval)
+       partition_id = key.partition_id
+       if partition_id - @partition_begin < 0
+         raise "key.partition_id: #{partition_id}, partition_begin: #{@partition_begin}"
+       end
+       ps = @partitions[partition_id - @partition_begin]
+       ps.digest = key.digest
+       ps.bval = bval
+       node_partitions.record_count += 1
+     end
+
+     def complete?(cluster, policy)
+       record_count = 0
+       parts_unavailable = 0
+
+       @node_partitions_list.each do |np|
+         record_count += np.record_count
+         parts_unavailable += np.parts_unavailable
+       end
+
+       if parts_unavailable == 0
+         if @max_records <= 0
+           @partition_filter&.done = true
+         else
+           if cluster.supports_partition_query.get
+             done = true
+
+             @node_partitions_list.each do |np|
+               if np.record_count >= np.record_max
+                 mark_retry(np)
+                 done = false
+               end
+             end
+
+             @partition_filter&.done = done
+           else
+             # Server version >= 6.0 will return all records for each node up to
+             # that node's max. If a node's record count reached its max, there
+             # still may be records available for that node.
+             @node_partitions_list.each do |np|
+               mark_retry(np) if np.record_count > 0
+             end
+             # Servers before version 6.0 can return fewer records than max and
+             # still have more records for each node, so the node is only done
+             # if no records were retrieved for that node.
+
+             @partition_filter&.done = (record_count == 0)
+           end
+         end
+         return true
+       end
+
+       return true if @max_records&.positive? && record_count >= @max_records
+
+       # Check if limits have been reached.
+       if policy.max_retries.positive? && @iteration > policy.max_retries
+         raise Aerospike::Exceptions::Aerospike.new(Aerospike::ResultCode::MAX_RETRIES_EXCEEDED, "Max retries exceeded: #{policy.max_retries}")
+       end
+
+       if policy.total_timeout > 0
+         # Check for total timeout.
+         remaining = @deadline - Time.now - @sleep_between_retries
+
+         raise Aerospike::Exceptions::Timeout.new(policy.total_timeout, @iteration) if remaining <= 0
+
+         if remaining < @total_timeout
+           @total_timeout = remaining
+
+           if @socket_timeout > @total_timeout
+             @socket_timeout = @total_timeout
+           end
+         end
+       end
+
+       # Prepare for next iteration.
+       if @max_records > 0
+         @max_records -= record_count
+       end
+       @iteration += 1
+       false
+     end
+
+     def should_retry(node_partitions, err)
+       case err
+       when Aerospike::Exceptions::Aerospike
+         case err.result_code
+         when Aerospike::ResultCode::TIMEOUT,
+              Aerospike::ResultCode::NETWORK_ERROR,
+              Aerospike::ResultCode::SERVER_NOT_AVAILABLE,
+              Aerospike::ResultCode::INDEX_NOTFOUND
+           mark_retry(node_partitions)
+           node_partitions.parts_unavailable = node_partitions.parts_full.length + node_partitions.parts_partial.length
+           true
+         end
+       else
+         false
+       end
+     end
+
+     def mark_retry(node_partitions)
+       node_partitions.parts_full.each do |ps|
+         ps.retry = true
+       end
+
+       node_partitions.parts_partial.each do |ps|
+         ps.retry = true
+       end
+     end
+
+     def to_s
+       sb = StringIO.new
+       @partitions.each_with_index do |ps, i|
+         sb << ps.to_s
+         sb << if (i + 1) % 16 == 0
+                 "\n"
+               else
+                 "\t"
+               end
+       end
+       sb.string
+     end
+
+     private
+
+     def init_for_nodes(policy, nodes)
+       ppn = Aerospike::Node::PARTITIONS / nodes.length
+       ppn += ppn / 4
+
+       @partition_begin = 0
+       @node_capacity = nodes.length
+       @node_filter = nil
+       @partitions_capacity = ppn
+       @max_records = policy.max_records
+       @iteration = 1
+
+       @partitions = init_partitions(policy, Aerospike::Node::PARTITIONS, nil)
+       init_timeout(policy)
+     end
+
+     def init_for_node(policy, node)
+       @partition_begin = 0
+       @node_capacity = 1
+       @node_filter = node
+       @partitions_capacity = Aerospike::Node::PARTITIONS
+       @max_records = policy.max_records
+       @iteration = 1
+
+       @partitions = init_partitions(policy, Aerospike::Node::PARTITIONS, nil)
+       init_timeout(policy)
+     end
+
+   end
+ end
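The max_records handling in assign_partitions_to_nodes splits the overall record budget across the nodes that still own retryable partitions: each node gets the integer share, and the first rem nodes get one extra record. A minimal standalone sketch of that arithmetic (plain Ruby, independent of the client classes):

    # Minimal sketch of the max_records split used in assign_partitions_to_nodes:
    # each node gets floor(max_records / nodes) and the first
    # (max_records % nodes) nodes receive one extra record.
    def split_record_budget(max_records, node_count)
      return [] if node_count.zero?
      base = max_records / node_count
      rem  = max_records - (base * node_count)
      Array.new(node_count) { |i| i < rem ? base + 1 : base }
    end

    split_record_budget(10, 3) # => [4, 3, 3]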
@@ -82,7 +82,7 @@ module Aerospike
  @data_offset += binNameSize
  fieldCount+=1
  end
- else
+ else
  @data_offset += @partitions.length * 2 + FIELD_HEADER_SIZE
  fieldCount += 1
 
data/lib/aerospike/query/query_executor.rb
@@ -0,0 +1,73 @@
+ # frozen_string_literal: true
+
+ # Copyright 2014-2020 Aerospike, Inc.
+ #
+ # Portions may be licensed to Aerospike, Inc. under one or more contributor
+ # license agreements.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ # use this file except in compliance with the License. You may obtain a copy of
+ # the License at http:#www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations under
+ # the License.
+
+ module Aerospike
+   class QueryExecutor # :nodoc:
+
+     def self.query_partitions(cluster, policy, tracker, statement, recordset)
+       interval = policy.sleep_between_retries
+
+       should_retry = false
+
+       loop do
+         list = tracker.assign_partitions_to_nodes(cluster, statement.namespace)
+
+         if policy.concurrent_nodes
+           threads = []
+           # Use a thread per node.
+           list.each do |node_partition|
+
+             threads << Thread.new do
+               Thread.current.abort_on_exception = true
+               command = QueryPartitionCommand.new(node_partition.node, tracker, policy, statement, recordset, node_partition)
+               begin
+                 command.execute
+               rescue => e
+                 should_retry ||= command.should_retry(e)
+                 # puts "should retry: #{should_retry}"
+                 Aerospike.logger.error(e.backtrace.join("\n")) unless e == QUERY_TERMINATED_EXCEPTION
+               end
+             end
+           end
+           threads.each(&:join)
+         else
+           # Use a single thread for all nodes.
+           list.each do |node_partition|
+             command = QueryPartitionCommand.new(node_partition.node, tracker, policy, statement, recordset, node_partition)
+             begin
+               command.execute
+             rescue => e
+               should_retry ||= command.should_retry(e)
+               Aerospike.logger.error(e.backtrace.join("\n")) unless e == QUERY_TERMINATED_EXCEPTION
+             end
+           end
+         end
+
+         complete = tracker.complete?(cluster, policy)
+
+         if complete || !should_retry
+           recordset.thread_finished
+           return
+         end
+         sleep(interval) if policy.sleep_between_retries > 0
+         statement.reset_task_id
+       end
+     end
+
+   end
+
+ end
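QueryExecutor.query_partitions fans out one QueryPartitionCommand per NodePartitions entry, either on a thread per node (policy.concurrent_nodes) or serially, then asks the tracker whether the pass completed and loops otherwise. A generic sketch of that fan-out-and-retry shape, with illustrative names only (not the gem's API):

    # Generic fan-out-and-retry loop in the spirit of QueryExecutor.query_partitions.
    # `node_work` is an array of callables (one per node); `tracker` only needs to
    # answer complete?. All names here are illustrative.
    def run_until_complete(node_work, tracker, concurrent: true, interval: 0.1)
      loop do
        should_retry = false

        run_one = lambda do |work|
          begin
            work.call
          rescue StandardError => e
            should_retry = true # retry the whole pass if any node raised
            warn(e.message)
          end
        end

        if concurrent
          node_work.map { |w| Thread.new { run_one.call(w) } }.each(&:join)
        else
          node_work.each { |w| run_one.call(w) }
        end

        break if tracker.complete? || !should_retry

        sleep(interval)
      end
    end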
data/lib/aerospike/query/query_partition_command.rb
@@ -0,0 +1,266 @@
+ # encoding: utf-8
+ # Copyright 2014-2020 Aerospike, Inc.
+ #
+ # Portions may be licensed to Aerospike, Inc. under one or more contributor
+ # license agreements.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ # use this file except in compliance with the License. You may obtain a copy of
+ # the License at http:#www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations under
+ # the License.
+
+ require 'aerospike/query/stream_command'
+ require 'aerospike/query/recordset'
+
+ module Aerospike
+
+   private
+
+   class QueryPartitionCommand < QueryCommand #:nodoc:
+
+     def initialize(node, tracker, policy, statement, recordset, node_partitions)
+       super(node, policy, statement, recordset, @node_partitions)
+       @node_partitions = node_partitions
+       @tracker = tracker
+     end
+
+     def write_buffer
+       function_arg_buffer = nil
+       field_count = 0
+       filter_size = 0
+       bin_name_size = 0
+
+       begin_cmd
+
+       if @statement.namespace
+         @data_offset += @statement.namespace.bytesize + FIELD_HEADER_SIZE
+         field_count += 1
+       end
+
+       if @statement.set_name
+         @data_offset += @statement.set_name.bytesize + FIELD_HEADER_SIZE
+         field_count += 1
+       end
+
+       # Estimate recordsPerSecond field size. This field is used in new servers and not used
+       # (but harmless to add) in old servers.
+       if @policy.records_per_second > 0
+         @data_offset += 4 + FIELD_HEADER_SIZE
+         field_count += 1
+       end
+
+       # Estimate socket timeout field size. This field is used in new servers and not used
+       # (but harmless to add) in old servers.
+       @data_offset += 4 + FIELD_HEADER_SIZE
+       field_count += 1
+
+       # Estimate task_id field.
+       @data_offset += 8 + FIELD_HEADER_SIZE
+       field_count += 1
+
+       filter = @statement.filters[0]
+       bin_names = @statement.bin_names
+       packed_ctx = nil
+
+       if filter
+         col_type = filter.collection_type
+
+         # Estimate INDEX_TYPE field.
+         if col_type > 0
+           @data_offset += FIELD_HEADER_SIZE + 1
+           field_count += 1
+         end
+
+         # Estimate INDEX_RANGE field.
+         @data_offset += FIELD_HEADER_SIZE
+         filter_size += 1 # num filters
+         filter_size += filter.estimate_size
+
+         @data_offset += filter_size
+         field_count += 1
+
+         # TODO: Implement
+         # packed_ctx = filter.packed_ctx
+         # if packed_ctx
+         #   @data_offset += FIELD_HEADER_SIZE + packed_ctx.length
+         #   field_count += 1
+         # end
+       end
+
+       @statement.set_task_id
+       predexp = @policy.predexp || @statement.predexp
+
+       if predexp
+         @data_offset += FIELD_HEADER_SIZE
+         pred_size = Aerospike::PredExp.estimate_size(predexp)
+         @data_offset += pred_size
+         field_count += 1
+       end
+
+       # Estimate aggregation/background function size.
+       if @statement.function_name
+         @data_offset += FIELD_HEADER_SIZE + 1 # udf type
+         @data_offset += @statement.package_name.bytesize + FIELD_HEADER_SIZE
+         @data_offset += @statement.function_name.bytesize + FIELD_HEADER_SIZE
+
+         function_arg_buffer = ''
+         if @statement.function_args && @statement.function_args.length > 0
+           function_arg_buffer = Value.of(@statement.function_args).to_bytes
+         end
+         @data_offset += FIELD_HEADER_SIZE + function_arg_buffer.bytesize
+         field_count += 4
+       end
+
+       max_records = 0
+       parts_full_size = 0
+       parts_partial_digest_size = 0
+       parts_partial_bval_size = 0
+
+       unless @node_partitions.nil?
+         parts_full_size = @node_partitions.parts_full.length * 2
+         parts_partial_digest_size = @node_partitions.parts_partial.length * 20
+
+         unless filter.nil?
+           parts_partial_bval_size = @node_partitions.parts_partial.length * 8
+         end
+         max_records = @node_partitions.record_max
+       end
+
+       if parts_full_size > 0
+         @data_offset += parts_full_size + FIELD_HEADER_SIZE
+         field_count += 1
+       end
+
+       if parts_partial_digest_size > 0
+         @data_offset += parts_partial_digest_size + FIELD_HEADER_SIZE
+         field_count += 1
+       end
+
+       if parts_partial_bval_size > 0
+         @data_offset += parts_partial_bval_size + FIELD_HEADER_SIZE
+         field_count += 1
+       end
+
+       # Estimate max records field size. This field is used in new servers and not used
+       # (but harmless to add) in old servers.
+       if max_records > 0
+         @data_offset += 8 + FIELD_HEADER_SIZE
+         field_count += 1
+       end
+
+       operation_count = 0
+       unless bin_names.empty?
+         # Estimate size for selected bin names (query bin names already handled for old servers).
+         bin_names.each do |bin_name|
+           estimate_operation_size_for_bin_name(bin_name)
+         end
+         operation_count = bin_names.length
+       end
+
+       projected_offset = @data_offset
+
+       size_buffer
+
+       read_attr = INFO1_READ
+       read_attr |= INFO1_NOBINDATA if !@policy.include_bin_data
+       read_attr |= INFO1_SHORT_QUERY if @policy.short_query
+
+       infoAttr = INFO3_PARTITION_DONE
+
+       write_header(@policy, read_attr, 0, field_count, operation_count)
+
+       write_field_string(@statement.namespace, FieldType::NAMESPACE) if @statement.namespace
+       write_field_string(@statement.set_name, FieldType::TABLE) if @statement.set_name
+
+       # Write records per second.
+       write_field_int(@policy.records_per_second, FieldType::RECORDS_PER_SECOND) if @policy.records_per_second > 0
+
+       # Write socket idle timeout.
+       write_field_int(@policy.socket_timeout, FieldType::SOCKET_TIMEOUT)
+
+       # Write task_id field.
+       write_field_int64(@statement.task_id, FieldType::TRAN_ID)
+
+       unless predexp.nil?
+         write_field_header(pred_size, Aerospike::FieldType::PREDEXP)
+         @data_offset = Aerospike::PredExp.write(
+           predexp, @data_buffer, @data_offset
+         )
+       end
+
+       if filter
+         type = filter.collection_type
+
+         if type > 0
+           write_field_header(1, FieldType::INDEX_TYPE)
+           @data_offset += @data_buffer.write_byte(type, @data_offset)
+         end
+
+         write_field_header(filter_size, FieldType::INDEX_RANGE)
+         @data_offset += @data_buffer.write_byte(1, @data_offset)
+         @data_offset = filter.write(@data_buffer, @data_offset)
+
+         # TODO: Implement
+         # if packed_ctx
+         #   write_field_header(packed_ctx.length, FieldType::INDEX_CONTEXT)
+         #   @data_buffer.write_binary(packed_ctx, @data_offset)
+         # end
+       end
+
+       if @statement.function_name
+         write_field_header(1, FieldType::UDF_OP)
+         @data_offset += @data_buffer.write_byte(1, @data_offset)
+         write_field_string(@statement.package_name, FieldType::UDF_PACKAGE_NAME)
+         write_field_string(@statement.function_name, FieldType::UDF_FUNCTION)
+         write_field_string(function_arg_buffer, FieldType::UDF_ARGLIST)
+       end
+
+       if parts_full_size > 0
+         write_field_header(parts_full_size, FieldType::PID_ARRAY)
+         @node_partitions.parts_full.each do |part|
+           @data_offset += @data_buffer.write_uint16_little_endian(part.id, @data_offset)
+         end
+       end
+
+       if parts_partial_digest_size > 0
+         write_field_header(parts_partial_digest_size, FieldType::DIGEST_ARRAY)
+         @node_partitions.parts_partial.each do |part|
+           @data_offset += @data_buffer.write_binary(part.digest, @data_offset)
+         end
+       end
+
+       if parts_partial_bval_size > 0
+         write_field_header(parts_partial_bval_size, FieldType::BVAL_ARRAY)
+         @node_partitions.parts_partial.each do |part|
+           @data_offset += @data_buffer.write_uint64_little_endian(part.bval, @data_offset)
+         end
+       end
+
+       if max_records > 0
+         write_field(max_records, FieldType::MAX_RECORDS)
+       end
+
+       unless bin_names.empty?
+         bin_names.each do |bin_name|
+           write_operation_for_bin_name(bin_name, Operation::READ)
+         end
+       end
+
+       end_cmd
+
+       nil
+     end
+
+     def should_retry(e)
+       # !! converts nil to false
+       !!@tracker&.should_retry(@node_partitions, e)
+     end
+
+   end # class
+
+ end # module
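QueryPartitionCommand#write_buffer follows the client's usual two-pass wire format: it first walks every field to accumulate @data_offset and field_count, then sizes the buffer, then writes the header and each field for real. A simplified, self-contained illustration of that estimate-then-write pattern (the type codes and buffer handling here are invented for the example, not the gem's Buffer/FieldType API):

    # Simplified two-pass field writer: pass 1 estimates the total size,
    # pass 2 writes length-prefixed fields into a pre-sized buffer.
    # FIELD_HEADER_SIZE mirrors a 4-byte length plus 1-byte type header.
    FIELD_HEADER_SIZE = 5

    fields = {
      1 => 'test',               # illustrative type code for a namespace-like field
      2 => 'demo-set',           # illustrative type code for a set-name-like field
      3 => [12_345].pack('q>')   # illustrative 8-byte task-id-like field
    }

    # Pass 1: estimate the buffer size.
    total = fields.values.sum { |payload| FIELD_HEADER_SIZE + payload.bytesize }

    # Pass 2: write. Each field is <4-byte length><1-byte type><payload>,
    # where the length counts the type byte plus the payload.
    buffer = String.new(capacity: total, encoding: Encoding::BINARY)
    fields.each do |type, payload|
      buffer << [payload.bytesize + 1].pack('N') << type.chr << payload
    end

    puts "estimated #{total} bytes, wrote #{buffer.bytesize} bytes" # the two match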