aerospike 2.29.0 → 4.0.0

Files changed (58)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +30 -0
  3. data/README.md +13 -9
  4. data/lib/aerospike/batch_attr.rb +292 -0
  5. data/lib/aerospike/batch_delete.rb +48 -0
  6. data/lib/aerospike/batch_read.rb +97 -0
  7. data/lib/aerospike/batch_record.rb +83 -0
  8. data/lib/aerospike/batch_results.rb +38 -0
  9. data/lib/aerospike/batch_udf.rb +76 -0
  10. data/lib/aerospike/batch_write.rb +79 -0
  11. data/lib/aerospike/cdt/bit_operation.rb +4 -5
  12. data/lib/aerospike/cdt/map_operation.rb +24 -10
  13. data/lib/aerospike/cdt/map_policy.rb +6 -3
  14. data/lib/aerospike/cdt/map_return_type.rb +8 -0
  15. data/lib/aerospike/client.rb +39 -56
  16. data/lib/aerospike/cluster.rb +50 -46
  17. data/lib/aerospike/command/batch_index_command.rb +7 -11
  18. data/lib/aerospike/command/batch_index_node.rb +3 -4
  19. data/lib/aerospike/command/batch_operate_command.rb +151 -0
  20. data/lib/aerospike/command/batch_operate_node.rb +51 -0
  21. data/lib/aerospike/command/command.rb +231 -128
  22. data/lib/aerospike/exp/exp.rb +54 -27
  23. data/lib/aerospike/exp/exp_bit.rb +24 -24
  24. data/lib/aerospike/exp/exp_hll.rb +12 -12
  25. data/lib/aerospike/exp/exp_list.rb +101 -86
  26. data/lib/aerospike/exp/exp_map.rb +118 -110
  27. data/lib/aerospike/exp/operation.rb +2 -2
  28. data/lib/aerospike/info.rb +2 -4
  29. data/lib/aerospike/node.rb +20 -3
  30. data/lib/aerospike/operation.rb +38 -0
  31. data/lib/aerospike/policy/batch_delete_policy.rb +71 -0
  32. data/lib/aerospike/policy/batch_policy.rb +53 -4
  33. data/lib/aerospike/{command/batch_direct_node.rb → policy/batch_read_policy.rb} +17 -19
  34. data/lib/aerospike/policy/batch_udf_policy.rb +75 -0
  35. data/lib/aerospike/policy/batch_write_policy.rb +105 -0
  36. data/lib/aerospike/policy/policy.rb +3 -40
  37. data/lib/aerospike/query/query_command.rb +3 -205
  38. data/lib/aerospike/query/query_executor.rb +2 -2
  39. data/lib/aerospike/query/query_partition_command.rb +4 -230
  40. data/lib/aerospike/query/scan_executor.rb +2 -2
  41. data/lib/aerospike/query/scan_partition_command.rb +3 -3
  42. data/lib/aerospike/query/server_command.rb +2 -2
  43. data/lib/aerospike/query/statement.rb +5 -21
  44. data/lib/aerospike/task/execute_task.rb +2 -2
  45. data/lib/aerospike/utils/buffer.rb +15 -15
  46. data/lib/aerospike/version.rb +1 -1
  47. data/lib/aerospike.rb +13 -12
  48. metadata +16 -14
  49. data/lib/aerospike/command/batch_direct_command.rb +0 -105
  50. data/lib/aerospike/command/batch_direct_exists_command.rb +0 -51
  51. data/lib/aerospike/query/pred_exp/and_or.rb +0 -32
  52. data/lib/aerospike/query/pred_exp/geo_json_value.rb +0 -41
  53. data/lib/aerospike/query/pred_exp/integer_value.rb +0 -32
  54. data/lib/aerospike/query/pred_exp/op.rb +0 -27
  55. data/lib/aerospike/query/pred_exp/regex.rb +0 -32
  56. data/lib/aerospike/query/pred_exp/regex_flags.rb +0 -23
  57. data/lib/aerospike/query/pred_exp/string_value.rb +0 -29
  58. data/lib/aerospike/query/pred_exp.rb +0 -192
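
The headline change in 4.0.0 is the new batch-operate API (BatchRead, BatchWrite, BatchDelete, BatchUDF, BatchRecord, BatchResults and the per-type batch policies added above), which replaces the old batch-direct protocol and the PredExp query filters removed below. A rough usage sketch follows; the Client#batch_operate entry point and the BatchWrite/BatchDelete constructor signatures are assumptions based on the new command/batch_operate_command.rb, so check the gem's README and CHANGELOG for the exact API.

require 'aerospike'

client = Aerospike::Client.new('127.0.0.1')

key1 = Aerospike::Key.new('test', 'demo', 1)
key2 = Aerospike::Key.new('test', 'demo', 2)

# Hypothetical batch: one write and one delete in a single round trip.
records = [
  Aerospike::BatchWrite.new(key1, [Aerospike::Operation.put(Aerospike::Bin.new('a', 1))]),
  Aerospike::BatchDelete.new(key2)
]

client.batch_operate(records)            # assumed method name
records.each { |r| puts r.result_code }  # per-key result codes on each BatchRecord
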
data/lib/aerospike/policy/batch_delete_policy.rb
@@ -0,0 +1,71 @@
+ # encoding: utf-8
+ # Copyright 2014-2024 Aerospike, Inc.
+ #
+ # Portions may be licensed to Aerospike, Inc. under one or more contributor
+ # license agreements.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License") you may not
+ # use this file except in compliance with the License. You may obtain a copy of
+ # the License at http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations under
+ # the License.
+
+ module Aerospike
+
+ # Policy attributes used in batch delete commands.
+ class BatchDeletePolicy
+ attr_accessor :filter_exp, :commit_level, :generation_policy, :generation, :durable_delete, :send_key
+
+ def initialize(opt = {})
+ # Optional expression filter. If filter_exp exists and evaluates to false, the specific batch key
+ # request is not performed and {BatchRecord#result_code} is set to
+ # {ResultCode#FILTERED_OUT}.
+ #
+ # If exists, this filter overrides the batch parent filter {Policy#filter_exp}
+ # for the specific key in batch commands that allow a different policy per key.
+ # Otherwise, this filter is ignored.
+ #
+ # Default: nil
+ @filter_exp = opt[:filter_exp]
+
+ # Desired consistency guarantee when committing a transaction on the server. The default
+ # (COMMIT_ALL) indicates that the server should wait for master and all replica commits to
+ # be successful before returning success to the client.
+ #
+ # Default: CommitLevel.COMMIT_ALL
+ @commit_level = opt[:commit_level] || CommitLevel::COMMIT_ALL
+
+ # Qualify how to handle record deletes based on record generation. The default (NONE)
+ # indicates that the generation is not used to restrict deletes.
+ #
+ # Default: GenerationPolicy.NONE
+ @generation_policy = opt[:generation_policy] || GenerationPolicy::NONE
+
+ # Expected generation. Generation is the number of times a record has been modified
+ # (including creation) on the server. This field is only relevant when generationPolicy
+ # is not NONE.
+ #
+ # Default: 0
+ @generation = opt[:generation] || 0
+
+ # If the transaction results in a record deletion, leave a tombstone for the record.
+ # This prevents deleted records from reappearing after node failures.
+ # Valid for Aerospike Server Enterprise Edition only.
+ #
+ # Default: false (do not tombstone deleted records).
+ @durable_delete = opt[:durable_delete] || false
+
+ # Send user defined key in addition to hash digest.
+ # If true, the key will be stored with the tombstone record on the server.
+ #
+ # Default: false (do not send the user defined key)
+ @send_key = opt[:send_key] || false
+
+ self
+ end
+ end
+ end
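
For reference, a minimal construction sketch for the class above. The option names are taken straight from #initialize; the GenerationPolicy constant is an assumption that it matches the existing write-policy constants in the gem.

delete_policy = Aerospike::BatchDeletePolicy.new(
  generation_policy: Aerospike::GenerationPolicy::EXPECT_GEN_EQUAL, # assumed constant: only delete on matching generation
  generation: 3,
  durable_delete: true, # Enterprise Edition: leave a tombstone
  send_key: true
)
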
data/lib/aerospike/policy/batch_policy.rb
@@ -21,12 +21,14 @@ module Aerospike
 
  # Container object for batch policy command.
  class BatchPolicy < Policy
-
- attr_accessor :use_batch_direct
+ attr_accessor :allow_inline_ssd, :respond_all_keys, :send_key
 
  def initialize(opt={})
- super(opt)
+ super
 
+ # [:nodoc:]
+ # DEPRECATED
+ # This setting does not have any effect anymore.
  # Use old batch direct protocol where batch reads are handled by direct
  # low-level batch server database routines. The batch direct protocol can
  # be faster when there is a single namespace. But there is one important
@@ -38,11 +40,58 @@ module Aerospike
  # index protocol will perform this record proxy when necessary.
  #
  # Default: false (use new batch index protocol if server supports it)
- @use_batch_direct = opt.fetch(:use_batch_direct) { false }
+ @use_batch_direct = opt.fetch(:use_batch_direct, false)
+
+
+ # Allow batch to be processed immediately in the server's receiving thread for SSD
+ # namespaces. If false, the batch will always be processed in separate service threads.
+ # Server versions < 6.0 ignore this field.
+ #
+ # Inline processing can introduce the possibility of unfairness because the server
+ # can process the entire batch before moving onto the next command.
+ #
+ # Default: false
+ @allow_inline_ssd = opt.fetch(:allow_inline_ssd, false)
+
+
+ # Should all batch keys be attempted regardless of errors. This field is used on both
+ # the client and server. The client handles node specific errors and the server handles
+ # key specific errors.
+ #
+ # If true, every batch key is attempted regardless of previous key specific errors.
+ # Node specific errors such as timeouts stop keys to that node, but keys directed at
+ # other nodes will continue to be processed.
+ #
+ # If false, the server will stop the batch to its node on most key specific errors.
+ # The exceptions are {ResultCode#KEY_NOT_FOUND_ERROR} and
+ # {ResultCode#FILTERED_OUT} which never stop the batch.
+ # The client will stop the entire batch on node specific errors. The client will
+ # not stop the entire batch commands run in parallel.
+ #
+ # Server versions < 6.0 do not support this field and treat this value as false
+ # for key specific errors.
+ #
+ # Default: true
+ @respond_all_keys = opt.fetch(:respond_all_keys, true)
+
+
+ # Send user defined key in addition to hash digest on a record put.
+ # The default is to _not_ send the user defined key.
+ @send_key = opt.fetch(:send_key, false)
 
  self
  end
 
+ def self.read_default
+ BatchPolicy.new
+ end
+
+ def self.write_default
+ bp = BatchPolicy.new
+ bp.max_retries = 0
+ bp
+ end
+
  end # class
 
  end # module
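
A short sketch of the new defaults and options above: read_default keeps the retry behaviour inherited from Policy, while write_default disables retries for batch writes.

reads  = Aerospike::BatchPolicy.read_default   # plain BatchPolicy.new
writes = Aerospike::BatchPolicy.write_default  # same, but max_retries = 0

custom = Aerospike::BatchPolicy.new(
  respond_all_keys: false, # stop a node's batch on the first key-specific error
  allow_inline_ssd: true,  # ignored by servers < 6.0
  send_key: true
)
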
data/lib/aerospike/{command/batch_direct_node.rb → policy/batch_read_policy.rb}
@@ -17,24 +17,22 @@
 
  module Aerospike
 
- BatchNamespace = Struct.new :namespace, :keys
-
- class BatchDirectNode #:nodoc:
-
- attr_accessor :node
- attr_accessor :batch_namespaces
-
- def self.generate_list(cluster, replica_policy, keys)
- keys.group_by { |key| cluster.get_node_for_key(replica_policy, key) }
- .map { |node, keys_for_node| BatchDirectNode.new(node, keys_for_node) }
+ # Policy attributes used in batch read commands.
+ class BatchReadPolicy
+
+ attr_accessor :filter_exp
+
+ def initialize(opt={})
+ # Optional expression filter. If filter_exp exists and evaluates to false, the specific batch key
+ # request is not performed and {BatchRecord#result_code} is set to
+ # {ResultCode#FILTERED_OUT}.
+ #
+ # If exists, this filter overrides the batch parent filter {Policy#filter_exp}
+ # for the specific key in batch commands that allow a different policy per key.
+ # Otherwise, this filter is ignored.
+ #
+ # Default: nil
+ @filter_exp = opt[:filter_exp]
  end
-
- def initialize(node, keys)
- @node = node
- @batch_namespaces = keys.group_by(&:namespace)
- .map { |ns, keys_for_ns| BatchNamespace.new(ns, keys_for_ns) }
- end
-
  end
-
- end
+ end
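
A per-key read-policy sketch; the expression builder calls are assumptions about the Exp helpers in data/lib/aerospike/exp/exp.rb and may need adjusting to the actual helper names.

read_policy = Aerospike::BatchReadPolicy.new(
  filter_exp: Aerospike::Exp.gt(     # assumed Exp helper names
    Aerospike::Exp.int_bin('count'),
    Aerospike::Exp.int_val(10)
  )
)
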
data/lib/aerospike/policy/batch_udf_policy.rb
@@ -0,0 +1,75 @@
+ # Copyright 2014-2023 Aerospike, Inc.
+ #
+ # Portions may be licensed to Aerospike, Inc. under one or more contributor
+ # license agreements.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ # use this file except in compliance with the License. You may obtain a copy of
+ # the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations under
+ # the License.
+
+ module Aerospike
+
+ # Policy attributes used in batch UDF execute commands.
+ class BatchUDFPolicy
+
+ attr_accessor :filter_exp, :commit_level, :ttl, :durable_delete, :send_key
+
+ alias expiration ttl
+ alias expiration= ttl=
+
+ def initialize(opt={})
+ # Optional expression filter. If filter_exp exists and evaluates to false, the specific batch key
+ # request is not performed and {BatchRecord#resultCode} is set to
+ # {ResultCode#FILTERED_OUT}.
+ #
+ # If exists, this filter overrides the batch parent filter {Policy#filter_exp}
+ # for the specific key in batch commands that allow a different policy per key.
+ # Otherwise, this filter is ignored.
+ #
+ # Default: nil
+ @filter_exp = opt[:filter_exp]
+
+ # Desired consistency guarantee when committing a transaction on the server. The default
+ # (COMMIT_ALL) indicates that the server should wait for master and all replica commits to
+ # be successful before returning success to the client.
+ #
+ # Default: CommitLevel::COMMIT_ALL
+ @commit_level = opt.fetch(:commit_level, CommitLevel::COMMIT_ALL)
+
+ # Record expiration; also known as time-to-live (TTL).
+ # Seconds record will live before being removed by the server.
+ #
+ # Supported values:
+ # - `Aerospike::TTL::NEVER_EXPIRE`: Never expire record; requires Aerospike 2
+ # server versions >= 2.7.2 or Aerospike 3 server versions >= 3.1.4. Do
+ # not use for older servers.
+ # - `Aerospike::TTL::NAMESPACE_DEFAULT`: Default to namespace configuration
+ # variable "default-ttl" on the server.
+ # - `Aerospike::TTL::DONT_UPDATE`: Do not change a record's expiration date
+ # when updating the record. Requires Aerospike server v3.10.1 or later.
+ # - Any value > 0: Actual time-to-live in seconds.
+ @ttl = opt[:ttl] || opt[:expiration] || 0
+
+ # If the transaction results in a record deletion, leave a tombstone for the record.
+ # This prevents deleted records from reappearing after node failures.
+ # Valid for Aerospike Server Enterprise Edition only.
+ #
+ # Default: false (do not tombstone deleted records).
+ @durable_delete = opt.fetch(:durable_delete, false)
+
+ # Send user defined key in addition to hash digest.
+ # If true, the key will be stored with the record on the server.
+ #
+ # Default: false (do not send the user defined key)
+ @send_key = opt.fetch(:send_key, false)
+ end
+ end
+ end
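
A sketch of a UDF batch policy built from the options read above; note the expiration/ttl aliases. The CommitLevel constant is assumed to match the existing write-policy constants.

udf_policy = Aerospike::BatchUDFPolicy.new(
  ttl: 3600,                                           # records written by the UDF live one hour
  commit_level: Aerospike::CommitLevel::COMMIT_MASTER, # assumed constant
  send_key: true
)
udf_policy.expiration # => 3600, via the alias declared above
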
data/lib/aerospike/policy/batch_write_policy.rb
@@ -0,0 +1,105 @@
+ # Copyright 2014-2023 Aerospike, Inc.
+ #
+ # Portions may be licensed to Aerospike, Inc. under one or more contributor
+ # license agreements.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ # use this file except in compliance with the License. You may obtain a copy of
+ # the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations under
+ # the License.
+
+ module Aerospike
+
+
+ # Policy attributes used in batch write commands.
+ class BatchWritePolicy
+
+ attr_accessor :filter_exp, :record_exists_action, :commit_level,
+ :generation_policy, :generation, :ttl, :durable_delete,
+ :send_key
+
+ alias expiration ttl
+ alias expiration= ttl=
+
+ def initialize(opt={})
+ # Optional expression filter. If filter_exp exists and evaluates to false, the specific batch key
+ # request is not performed and {BatchRecord#result_code} is set to
+ # {ResultCode#FILTERED_OUT}.
+ #
+ # If exists, this filter overrides the batch parent filter {Policy#filter_exp}
+ # for the specific key in batch commands that allow a different policy per key.
+ # Otherwise, this filter is ignored.
+ #
+ # Default: nil
+ @filter_exp = opt[:filter_exp]
+
+ # Qualify how to handle writes where the record already exists.
+ #
+ # Default: RecordExistsAction::UPDATE
+ @record_exists_action = opt.fetch(:record_exists_action, RecordExistsAction::UPDATE)
+
+ # Desired consistency guarantee when committing a transaction on the server. The default
+ # (COMMIT_ALL) indicates that the server should wait for master and all replica commits to
+ # be successful before returning success to the client.
+ #
+ # Default: CommitLevel::COMMIT_ALL
+ @commit_level = opt.fetch(:commit_level, CommitLevel::COMMIT_ALL)
+
+ # Qualify how to handle record writes based on record generation. The default (NONE)
+ # indicates that the generation is not used to restrict writes.
+ #
+ # The server does not support this field for UDF execute() calls. The read-modify-write
+ # usage model can still be enforced inside the UDF code itself.
+ #
+ # Default: GenerationPolicy::NONE
+ @generation_policy = opt.fetch(:generation_policy, GenerationPolicy::NONE)
+
+ # Expected generation. Generation is the number of times a record has been modified
+ # (including creation) on the server. If a write operation is creating a record,
+ # the expected generation would be <code>0</code>. This field is only relevant when
+ # generationPolicy is not NONE.
+ #
+ # The server does not support this field for UDF execute() calls. The read-modify-write
+ # usage model can still be enforced inside the UDF code itself.
+ #
+ # Default: 0
+ @generation = opt.fetch(:generation, 0)
+
+ # Record expiration; also known as time-to-live (TTL).
+ # Seconds record will live before being removed by the server.
+ #
+ # Supported values:
+ # - `Aerospike::TTL::NEVER_EXPIRE`: Never expire record; requires Aerospike 2
+ # server versions >= 2.7.2 or Aerospike 3 server versions >= 3.1.4. Do
+ # not use for older servers.
+ # - `Aerospike::TTL::NAMESPACE_DEFAULT`: Default to namespace configuration
+ # variable "default-ttl" on the server.
+ # - `Aerospike::TTL::DONT_UPDATE`: Do not change a record's expiration date
+ # when updating the record. Requires Aerospike server v3.10.1 or later.
+ # - Any value > 0: Actual time-to-live in seconds.
+ @ttl = opt[:ttl] || opt[:expiration] || 0
+
+ # If the transaction results in a record deletion, leave a tombstone for the record.
+ # This prevents deleted records from reappearing after node failures.
+ # Valid for Aerospike Server Enterprise Edition only.
+ #
+ # Default: false (do not tombstone deleted records).
+ @durable_delete = opt.fetch(:durable_delete, false)
+
+ # Send user defined key in addition to hash digest.
+ # If true, the key will be stored with the record on the server.
+ #
+ # Default: false (do not send the user defined key)
+ @send_key = opt.fetch(:send_key, false)
+
+ self
+ end
+ end
+ end
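
A sketch of a create-only batch write policy built from the options above; the RecordExistsAction constant is assumed to match the existing write-policy constants, while the TTL constant is the one named in the comments.

write_policy = Aerospike::BatchWritePolicy.new(
  record_exists_action: Aerospike::RecordExistsAction::CREATE_ONLY, # assumed constant: fail if the record exists
  ttl: Aerospike::TTL::NEVER_EXPIRE,
  generation_policy: Aerospike::GenerationPolicy::NONE
)
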
data/lib/aerospike/policy/policy.rb
@@ -22,7 +22,7 @@ module Aerospike
  # Container object for client policy command.
  class Policy
  attr_accessor :filter_exp, :priority, :timeout, :max_retries, :sleep_between_retries, :consistency_level,
- :predexp, :fail_on_filtered_out, :replica, :use_compression, :socket_timeout
+ :fail_on_filtered_out, :replica, :use_compression, :socket_timeout
 
  alias total_timeout timeout
  alias total_timeout= timeout=
@@ -31,7 +31,7 @@ module Aerospike
  # Container object for transaction policy attributes used in all database
  # operation calls.
 
- # Optional expression filter. If filterExp exists and evaluates to false, the
+ # Optional expression filter. If filter_exp exists and evaluates to false, the
  # transaction is ignored.
  #
  # Default: nil
@@ -57,44 +57,7 @@ module Aerospike
  # TODO: Remove for next major release
  @priority = opt[:priority] || Priority::DEFAULT
 
- # Set optional predicate expression filters in postfix notation.
- # Predicate expression filters are applied on the query results on the server.
- # Predicate expression filters may occur on any bin in the record.
- # Requires Aerospike Server versions >= 3.12
- #
- # Postfix notation is described here: http://wiki.c2.com/?PostfixNotation
- #
- # Example:
- #
- # (c >= 11 and c <= 20) or (d > 3 and (d < 5)
- # policy.predexp = [
- # PredExp.integer_bin("c"),
- # PredExp.integer_value(11),
- # PredExp.integer_greater_eq(),
- # PredExp.integer_bin("c"),
- # PredExp.integer_value(20),
- # PredExp.integer_less_eq(),
- # PredExp.and(2),
- # PredExp.integer_bin("d"),
- # PredExp.integer_value(3),
- # PredExp.integer_greater(),
- # PredExp.integer_bin("d"),
- # PredExp.integer_value(5),
- # PredExp.integer_less(),
- # PredExp.and(2),
- # PredExp.or(2)
- # ]
- #
- # # Record last update time > 2017-01-15
- # policy.predexp = [
- # PredExp.rec_last_update(),
- # PredExp.integer_value(Time.new(2017, 1, 15).to_i),
- # PredExp.integer_greater(),
- # PredExp.integer_greater()
- # ]
- @predexp = opt[:predexp] || nil
-
- # Throw exception if @predexp is defined and that filter evaluates
+ # Throw exception if @filter_exp is defined and that filter evaluates
  # to false (transaction ignored). The Aerospike::Exceptions::Aerospike
  # will contain result code Aerospike::ResultCode::FILTERED_OUT.
  # This field is not applicable to batch, scan or query commands.
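
Since @predexp is gone, the removed PredExp example "(c >= 11 and c <= 20) or (d > 3 and d < 5)" now has to be written as a filter expression. A migration sketch, assuming the Exp builder exposes int_bin/int_val/ge/le/gt/lt/and/or as in the other Aerospike clients:

policy = Aerospike::Policy.new
policy.filter_exp = Aerospike::Exp.or(
  Aerospike::Exp.and(
    Aerospike::Exp.ge(Aerospike::Exp.int_bin('c'), Aerospike::Exp.int_val(11)),
    Aerospike::Exp.le(Aerospike::Exp.int_bin('c'), Aerospike::Exp.int_val(20))
  ),
  Aerospike::Exp.and(
    Aerospike::Exp.gt(Aerospike::Exp.int_bin('d'), Aerospike::Exp.int_val(3)),
    Aerospike::Exp.lt(Aerospike::Exp.int_bin('d'), Aerospike::Exp.int_val(5))
  )
)
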
data/lib/aerospike/query/query_command.rb
@@ -23,9 +23,9 @@ module Aerospike
 
  class QueryCommand < StreamCommand #:nodoc:
 
- def initialize(node, policy, statement, recordset, partitions)
+ def initialize(cluster, node, policy, statement, recordset, partitions)
  super(node)
-
+ @cluster = cluster
  @policy = policy
  @statement = statement
  @recordset = recordset
@@ -33,209 +33,7 @@ module Aerospike
  end
 
  def write_buffer
- fieldCount = 0
- filterSize = 0
- binNameSize = 0
- predSize = 0
-
- begin_cmd
-
- if @statement.namespace
- @data_offset += @statement.namespace.bytesize + FIELD_HEADER_SIZE
- fieldCount+=1
- end
-
- if @statement.index_name
- @data_offset += @statement.index_name.bytesize + FIELD_HEADER_SIZE
- fieldCount+=1
- end
-
- if @statement.set_name
- @data_offset += @statement.set_name.bytesize + FIELD_HEADER_SIZE
- fieldCount+=1
- end
-
- if !is_scan?
- col_type = @statement.filters[0].collection_type
- if col_type > 0
- @data_offset += FIELD_HEADER_SIZE + 1
- fieldCount += 1
- end
-
- @data_offset += FIELD_HEADER_SIZE
- filterSize+=1 # num filters
-
- @statement.filters.each do |filter|
- sz = filter.estimate_size
- filterSize += sz
- end
- @data_offset += filterSize
- fieldCount+=1
-
- if @statement.bin_names && @statement.bin_names.length > 0
- @data_offset += FIELD_HEADER_SIZE
- binNameSize+=1 # num bin names
-
- @statement.bin_names.each do |bin_name|
- binNameSize += bin_name.bytesize + 1
- end
- @data_offset += binNameSize
- fieldCount+=1
- end
- else
- @data_offset += @partitions.length * 2 + FIELD_HEADER_SIZE
- fieldCount += 1
-
- if @policy.records_per_second > 0
- @data_offset += 4 + FIELD_HEADER_SIZE
- fieldCount += 1
- end
-
- # Calling query with no filters is more efficiently handled by a primary index scan.
- # Estimate scan options size.
- # @data_offset += (2 + FIELD_HEADER_SIZE)
- # fieldCount+=1
- end
-
- @statement.set_task_id
-
- @data_offset += 8 + FIELD_HEADER_SIZE
- fieldCount+=1
-
- predexp = @policy.predexp || @statement.predexp
-
- if predexp
- @data_offset += FIELD_HEADER_SIZE
- predSize = Aerospike::PredExp.estimate_size(predexp)
- @data_offset += predSize
- fieldCount += 1
- end
-
- if @statement.function_name
- @data_offset += FIELD_HEADER_SIZE + 1 # udf type
- @data_offset += @statement.package_name.bytesize + FIELD_HEADER_SIZE
- @data_offset += @statement.function_name.bytesize + FIELD_HEADER_SIZE
-
- if @statement.function_args && @statement.function_args.length > 0
- functionArgBuffer = Value.of(@statement.function_args).to_bytes
- else
- functionArgBuffer = ''
- end
- @data_offset += FIELD_HEADER_SIZE + functionArgBuffer.bytesize
- fieldCount += 4
- end
-
- if @statement.filters.nil? || @statement.filters.empty?
- if @statement.bin_names && @statement.bin_names.length > 0
- @statement.bin_names.each do |bin_name|
- estimate_operation_size_for_bin_name(bin_name)
- end
- end
- end
-
- size_buffer
-
- readAttr = @policy.include_bin_data ? INFO1_READ : INFO1_READ | INFO1_NOBINDATA
- operation_count = (is_scan? && !@statement.bin_names.nil?) ? @statement.bin_names.length : 0
-
- write_header(@policy, readAttr, 0, fieldCount, operation_count)
-
- if @statement.namespace
- write_field_string(@statement.namespace, Aerospike::FieldType::NAMESPACE)
- end
-
- unless @statement.index_name.nil?
- write_field_string(@statement.index_name, Aerospike::FieldType::INDEX_NAME)
- end
-
- if @statement.set_name
- write_field_string(@statement.set_name, Aerospike::FieldType::TABLE)
- end
-
- if !is_scan?
- col_type = @statement.filters[0].collection_type
- if col_type > 0
- write_field_header(1, Aerospike::FieldType::INDEX_TYPE)
- @data_buffer.write_byte(col_type, @data_offset)
- @data_offset+=1
- end
-
- write_field_header(filterSize, Aerospike::FieldType::INDEX_RANGE)
- @data_buffer.write_byte(@statement.filters.length, @data_offset)
- @data_offset+=1
-
- @statement.filters.each do |filter|
- @data_offset = filter.write(@data_buffer, @data_offset)
- end
-
- # Query bin names are specified as a field (Scan bin names are specified later as operations)
- if @statement.bin_names && @statement.bin_names.length > 0
- write_field_header(binNameSize, Aerospike::FieldType::QUERY_BINLIST)
- @data_buffer.write_byte(@statement.bin_names.length, @data_offset)
- @data_offset += 1
-
- @statement.bin_names.each do |bin_name|
- len = @data_buffer.write_binary(bin_name, @data_offset + 1)
- @data_buffer.write_byte(len, @data_offset)
- @data_offset += len + 1;
- end
- end
- else
- write_field_header(@partitions.length * 2, Aerospike::FieldType::PID_ARRAY)
- for pid in @partitions
- @data_buffer.write_uint16_little_endian(pid, @data_offset)
- @data_offset += 2
- end
-
- if @policy.records_per_second > 0
- write_field_int(@policy.records_per_second, Aerospike::FieldType::RECORDS_PER_SECOND)
- end
-
- # Calling query with no filters is more efficiently handled by a primary index scan.
- # write_field_header(2, Aerospike::FieldType::SCAN_OPTIONS)
- # priority = @policy.priority.ord
- # priority = priority << 4
- # @data_buffer.write_byte(priority, @data_offset)
- # @data_offset+=1
- # @data_buffer.write_byte(100.ord, @data_offset)
- # @data_offset+=1
- end
-
- write_field_header(8, Aerospike::FieldType::TRAN_ID)
- @data_buffer.write_int64(@statement.task_id, @data_offset)
- @data_offset += 8
-
- if predexp
- write_field_header(predSize, Aerospike::FieldType::PREDEXP)
- @data_offset = Aerospike::PredExp.write(
- predexp, @data_buffer, @data_offset
- )
- end
-
- if @statement.function_name
- write_field_header(1, Aerospike::FieldType::UDF_OP)
- if @statement.return_data
- @data_buffer.write_byte(1, @data_offset)
- @data_offset+=1
- else
- @data_buffer.write_byte(2, @data_offset)
- @data_offset+=1
- end
-
- write_field_string(@statement.package_name, Aerospike::FieldType::UDF_PACKAGE_NAME)
- write_field_string(@statement.function_name, Aerospike::FieldType::UDF_FUNCTION)
- write_field_bytes(functionArgBuffer, Aerospike::FieldType::UDF_ARGLIST)
- end
-
- if is_scan? && !@statement.bin_names.nil?
- @statement.bin_names.each do |bin_name|
- write_operation_for_bin_name(bin_name, Aerospike::Operation::READ)
- end
- end
-
- end_cmd
-
- return nil
+ set_query(@cluster, @policy, @statement, false, @partitions)
  end
 
  def is_scan?
data/lib/aerospike/query/query_executor.rb
@@ -34,7 +34,7 @@ module Aerospike
  list.each do |node_partition|
  threads << Thread.new do
  Thread.current.abort_on_exception = true
- command = QueryPartitionCommand.new(node_partition.node, tracker, policy, statement, recordset, node_partition)
+ command = QueryPartitionCommand.new(cluster, node_partition.node, tracker, policy, statement, recordset, node_partition)
  begin
  command.execute
  rescue => e
@@ -48,7 +48,7 @@ module Aerospike
  else
  # Use a single thread for all nodes for all node
  list.each do |node_partition|
- command = QueryPartitionCommand.new(node_partition.node, tracker, policy, statement, recordset, node_partition)
+ command = QueryPartitionCommand.new(cluster, node_partition.node, tracker, policy, statement, recordset, node_partition)
  begin
  command.execute
  rescue => e