extendi-cassandra_object 1.0.17 → 1.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +4 -0
- data/.travis.yml +23 -11
- data/CHANGELOG +9 -0
- data/README.md +22 -23
- data/extendi-cassandra_object.gemspec +2 -2
- data/lib/cassandra_object/adapters/cassandra_adapter.rb +55 -48
- data/lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb +61 -56
- data/lib/cassandra_object/attribute_methods.rb +2 -2
- data/lib/cassandra_object/persistence.rb +1 -1
- data/lib/cassandra_object/scope/finder_methods.rb +36 -24
- data/lib/cassandra_object/scoping.rb +2 -0
- data/lib/cassandra_object/timestamps.rb +5 -1
- data/test/support/cassandra.rb +7 -9
- data/test/unit/connections/connections_test.rb +0 -2
- data/test/unit/persistence_schema_ck_test.rb +1 -5
- data/test/unit/scope/finder_methods_test.rb +32 -3
- data/test/unit/scope/query_methods_test.rb +0 -1
- data/test/unit/timestamps_test.rb +24 -1
- metadata +13 -8
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c5523ad43210b62402fbfb637042415f14a7fe174fa3f3646556b1b1e3701df9
+  data.tar.gz: a9ed0b2512e264395a1340c2fbccbaf9259d893105c738c9724d2ee0eba2a944
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f038a28315870cef3b0bc1366e3baa46d4098b7a7c3a4733ec02a2a71281861f87a111e9dfbf1d5c9a0c167b8e1e72198662dded4c7a7b90acbd154b5131e305
+  data.tar.gz: 58ab0a6d6347d97533334ee017ef8d7f9aaf9890e770d1b2abf3629f7e1b80f0a372241985c7c38e47dc4cebd8a2efa100a5c98384ff6ca17dea6927f976ea76
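These digests are what RubyGems records for the released files. A minimal Ruby sketch (not part of the gem) of how a downloaded artifact can be checked against the SHA256 value above; the local file path is only illustrative:

```ruby
require 'digest'

# Compare a local copy of the artifact against the SHA256 recorded above.
expected = 'a9ed0b2512e264395a1340c2fbccbaf9259d893105c738c9724d2ee0eba2a944'
actual   = Digest::SHA256.hexdigest(File.binread('data.tar.gz')) # illustrative path
puts(actual == expected ? 'checksum OK' : 'checksum mismatch')
```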
data/.gitignore
CHANGED
data/.travis.yml
CHANGED
@@ -1,20 +1,21 @@
+dist: trusty
+
 language: ruby
 rvm:
-  - 2.4.0
-  - 2.4.1
-  - 2.4.2
   - 2.5.1
+  - 2.6.5
+  - 2.7.0
 env:
-  -
+  - SCYLLA_VERSION=3.0.2
   - CASSANDRA_VERSION=3.0.10
   - CASSANDRA_VERSION=3.9
-  - CASSANDRA_VERSION=2.1.2 ACTIVEMODEL_VERSION='< 5'
-  - CASSANDRA_VERSION=3.0.10 ACTIVEMODEL_VERSION='< 5'
-  - CASSANDRA_VERSION=3.9 ACTIVEMODEL_VERSION='< 5'
 
 jdk:
   - oraclejdk8
 
+services:
+  - docker
+
 before_install:
   - sudo apt-get install libjna-java
   - sudo apt-get install python-support
@@ -23,7 +24,18 @@ before_install:
   - sudo pip install ccm
 
 install:
-  -
-
-
-
+  - |
+    if [ -n "$CASSANDRA_VERSION" ];then
+      ccm create -n 1 -v $CASSANDRA_VERSION -i 127.0.0. -s -b test-cluster;
+      ccm start;
+    fi
+    if [ -n "$SCYLLA_VERSION" ];then
+      SCYLLA_IMAGE=scylladb/scylla:$SCYLLA_VERSION
+      docker pull $SCYLLA_IMAGE
+      docker run --name cassandra_test -d -p "9042:9042" -p "9160:9160" $SCYLLA_IMAGE
+      function check_scylla(){ docker exec -it cassandra_test nodetool status | grep UN; }
+      until check_scylla; do
+        echo "waiting..."
+      done
+    fi
+    bundle install
data/CHANGELOG
CHANGED
data/README.md
CHANGED
@@ -1,7 +1,7 @@
 # Cassandra Object
 [](http://travis-ci.org/giovannelli/cassandra_object) [](https://codeclimate.com/github/giovannelli/cassandra_object)
 
-Cassandra Object uses ActiveModel to mimic much of the behavior in ActiveRecord.
+Cassandra Object uses ActiveModel to mimic much of the behavior in ActiveRecord.
 Use cql3 provided by ruby-driver gem and uses the old thrift structure with the possible option at [this link](https://docs.datastax.com/en/cql/3.1/cql/cql_reference/create_table_r.html?hl=create%2Ctable):
 
 ```shell
@@ -11,22 +11,20 @@ CREATE TABLE keyspace.table (
   column1 text,
   value blob,
   PRIMARY KEY (key, column1)
-) WITH
-
-  AND CLUSTERING ORDER BY (column1 ASC)
-  AND bloom_filter_fp_chance = 0.001
-  AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
+) WITH bloom_filter_fp_chance = 0.01
+  AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}
   AND comment = ''
-  AND compaction = {'
-  AND compression = {'
-  AND
+  AND compaction = {'class': 'SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
+  AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
+  AND crc_check_chance = 1.0
+  AND dclocal_read_repair_chance = 0.1
   AND default_time_to_live = 0
   AND gc_grace_seconds = 864000
   AND max_index_interval = 2048
   AND memtable_flush_period_in_ms = 0
   AND min_index_interval = 128
-  AND read_repair_chance =
-  AND speculative_retry = '
+  AND read_repair_chance = 0.0
+  AND speculative_retry = '99.0PERCENTILE';
 ```
 
 You can also use the a custom schema structure with the possible options at [this link](https://docs.datastax.com/en/cql/3.3/cql/cql_reference/cqlCreateTable.html#tabProp):
@@ -39,20 +37,20 @@ CREATE TABLE keyspace.table (
   field2 varchar,
   field3 float,
   PRIMARY KEY (key)
-) WITH
-
-  AND caching = {'keys':'ALL', 'rows_per_partition':'NONE'}'
+) WITH bloom_filter_fp_chance = 0.01
+  AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}
   AND comment = ''
-  AND compaction = {'
-  AND compression = {'
-  AND
+  AND compaction = {'class': 'SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
+  AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
+  AND crc_check_chance = 1.0
+  AND dclocal_read_repair_chance = 0.1
   AND default_time_to_live = 0
   AND gc_grace_seconds = 864000
   AND max_index_interval = 2048
   AND memtable_flush_period_in_ms = 0
   AND min_index_interval = 128
-  AND read_repair_chance =
-  AND speculative_retry = '
+  AND read_repair_chance = 0.0
+  AND speculative_retry = '99.0PERCENTILE';
 ```
 
 ## Installation
@@ -120,16 +118,16 @@ You can define a custom configuration for the cassandra connection, allowing you
 ```ruby
 class Widget < CassandraObject::BaseSchema
   string :name
-
+
   def self.custom_config
-    #return custom cassandra configuration
+    #return custom cassandra configuration
     { }
   end
 end
 ```
-
+
 ## Using with Cassandra
-
+
 Add a config/cassandra.yml:
 
 ```yaml
@@ -140,6 +138,7 @@ development:
   connect_timeout: 0.1,
   request_timeout: 0.1,
   consistency: :any/:one/:two/:three/:quorum/:all/:local_quorum/:each_quorum/:serial/:local_serial/:local_one,
+  write_consistency: :any/:one/:two/:three/:quorum/:all/:local_quorum/:each_quorum/:serial/:local_serial/:local_one,
   protocol_version: 3,
   page_size: 10000,
   trace: true/false
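The new `write_consistency` key lets reads and writes run at different consistency levels; when it is omitted the adapters fall back to `consistency`. A minimal sketch of the same settings expressed as a Ruby config hash (the style used in `test/support/cassandra.rb`); the host and level values here are only illustrative:

```ruby
# Illustrative values; the :consistency / :write_consistency keys are the ones added in this release.
CassandraObject::Base.config = {
  hosts: ['127.0.0.1'],
  connect_timeout: 0.1,
  request_timeout: 0.1,
  consistency: :local_one,     # used for reads
  write_consistency: :quorum,  # used for writes and batches; falls back to :consistency when absent
  protocol_version: 3,
  page_size: 10_000
}
```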
data/extendi-cassandra_object.gemspec
CHANGED
@@ -2,7 +2,7 @@
 
 Gem::Specification.new do |s|
   s.name = 'extendi-cassandra_object'
-  s.version = '1.0.17'
+  s.version = '1.1.1'
   s.description = 'Cassandra ActiveModel'
   s.summary = 'Cassandra ActiveModel'
   s.authors = ['Duccio Giovannelli', 'gotime']
@@ -17,7 +17,7 @@ Gem::Specification.new do |s|
   s.test_files = `git ls-files -- {test}/*`.split("\n")
   s.require_paths = ['lib']
 
-  s.add_runtime_dependency('activemodel', '
+  s.add_runtime_dependency('activemodel', '>= 4.2.0', '< 7.0.0')
   s.add_runtime_dependency('cassandra-driver', '>= 3.2.3')
   s.add_runtime_dependency('lz4-ruby', '>= 0.3.3')
 
data/lib/cassandra_object/adapters/cassandra_adapter.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 gem 'cassandra-driver'
 require 'cassandra'
 require 'logger'
@@ -6,7 +8,6 @@ module CassandraObject
   module Adapters
     class CassandraAdapter < AbstractAdapter
       class QueryBuilder
-
         def initialize(adapter, scope)
           @adapter = adapter
           @scope = scope
@@ -27,22 +28,27 @@ module CassandraObject
             "SELECT #{select_string} FROM #{@scope.klass.column_family}",
             where_string_async(nil)
           ]
-          str <<
+          str << 'ALLOW FILTERING' if @scope.klass.allow_filtering
           return [] << str.delete_if(&:blank?) * ' '
         end
-
-
-
-
-
-
-
-        end
+
+        str = [
+          "SELECT #{select_string} FROM #{@scope.klass.column_family}",
+          where_string_async(@scope.id_values)
+        ]
+        str << 'ALLOW FILTERING' if @scope.klass.allow_filtering
+        [str.delete_if(&:blank?) * ' ']
       end
 
-      def where_string_async(
+      def where_string_async(ids)
         wheres = @scope.where_values.dup.select.each_with_index { |_, i| i.even? }
-
+        if ids.present?
+          wheres << if ids.size > 1
+                      "#{@scope._key} IN (#{ids.map { |id| "'#{id}'" }.join(',')})"
+                    else
+                      "#{@scope._key} = '#{ids.first}'"
+                    end
+        end
         "WHERE #{wheres * ' AND '}" if wheres.any?
       end
     end
@@ -57,6 +63,7 @@ module CassandraObject
         :connections_per_local_node,
         :connections_per_remote_node,
         :consistency,
+        :write_consistency,
         :credentials,
         :futures_factory,
         :hosts,
@@ -82,14 +89,14 @@ module CassandraObject
         ])
 
         {
-          load_balancing_policy: 'Cassandra::LoadBalancing::Policies::%s',
+          # load_balancing_policy: 'Cassandra::LoadBalancing::Policies::%s',
           reconnection_policy: 'Cassandra::Reconnection::Policies::%s',
           retry_policy: 'Cassandra::Retry::Policies::%s'
         }.each do |policy_key, class_template|
           params = cluster_options[policy_key]
           if params
             if params.is_a?(Hash)
-              cluster_options[policy_key] = (class_template % [params[:policy].classify]).constantize.new(*params[:params]||[])
+              cluster_options[policy_key] = (class_template % [params[:policy].classify]).constantize.new(*params[:params] || [])
             else
               cluster_options[policy_key] = (class_template % [params.classify]).constantize.new
             end
@@ -101,7 +108,8 @@ module CassandraObject
           heartbeat_interval: cluster_options.keys.include?(:heartbeat_interval) ? cluster_options[:heartbeat_interval] : 30,
           idle_timeout: cluster_options[:idle_timeout] || 60,
           max_schema_agreement_wait: 1,
-          consistency: cluster_options[:consistency] || :
+          consistency: cluster_options[:consistency] || :local_one,
+          write_consistency: cluster_options[:write_consistency] || cluster_options[:consistency] || :local_one,
           protocol_version: cluster_options[:protocol_version] || 3,
           page_size: cluster_options[:page_size] || 10000
         })
@@ -116,6 +124,8 @@ module CassandraObject
       end
 
       def execute(statement, arguments = [])
+        consistency = config[:write_consistency] || config[:consistency]
+        # puts "cassandra adapter: #{consistency}"
         ActiveSupport::Notifications.instrument('cql.cassandra_object', cql: statement) do
           type_hints = []
           arguments.each { |a| type_hints << CassandraObject::Types::TypeHelper.guess_type(a) } unless arguments.nil?
@@ -124,6 +134,8 @@ module CassandraObject
       end
 
       def execute_async(queries, arguments = [])
+        consistency = config[:consistency]
+        # puts "execute_async adapter: #{consistency}"
         retries = 0
         futures = queries.map do |q|
           ActiveSupport::Notifications.instrument('cql.cassandra_object', cql: q) do
@@ -146,8 +158,8 @@ module CassandraObject
       def select(scope)
         queries = QueryBuilder.new(self, scope).to_query_async
         # todo paginate
-        arguments = scope.where_values.select.each_with_index{ |_, i| i.odd? }.reject{ |c| c.blank? }
-        cql_rows = execute_async(queries, arguments).map{|item| item.rows.map{|x| x}}.flatten!
+        arguments = scope.where_values.select.each_with_index { |_, i| i.odd? }.reject { |c| c.blank? }
+        cql_rows = execute_async(queries, arguments).map { |item| item.rows.map { |x| x } }.flatten!
         cql_rows.each do |cql_row|
           attributes = cql_row.to_hash
           key = attributes.delete(scope._key)
@@ -165,31 +177,31 @@ module CassandraObject
 
       def write(table, id, attributes, ttl = nil)
         statement = "INSERT INTO #{table} (#{(attributes.keys).join(',')}) VALUES (#{(['?'] * attributes.size).join(',')})"
-        statement += " USING TTL #{ttl
+        statement += " USING TTL #{ttl}" if ttl.present?
         arguments = attributes.values
         execute(statement, arguments)
       end
 
       def write_update(table, id, attributes)
-        queries =[]
+        queries = []
         # id here is the name of the key of the model
         id_value = attributes[id]
         if (not_nil_attributes = attributes.reject { |key, value| value.nil? }).any?
           statement = "INSERT INTO #{table} (#{(not_nil_attributes.keys).join(',')}) VALUES (#{(['?'] * not_nil_attributes.size).join(',')})"
-          queries << {query: statement, arguments: not_nil_attributes.values}
+          queries << { query: statement, arguments: not_nil_attributes.values }
         end
         if (nil_attributes = attributes.select { |key, value| value.nil? }).any?
-          queries << {query: "DELETE #{nil_attributes.keys.join(',')} FROM #{table} WHERE #{id} = ?", arguments: [id_value.to_s]}
+          queries << { query: "DELETE #{nil_attributes.keys.join(',')} FROM #{table} WHERE #{id} = ?", arguments: [id_value.to_s] }
         end
         execute_batchable(queries)
       end
 
       def delete(scope, ids, attributes = {})
         ids = [ids] if !ids.is_a?(Array)
-        statement = "DELETE FROM #{scope.column_family} WHERE #{scope._key} IN (#{ids.map{|id| '?'}.join(',')})"
+        statement = "DELETE FROM #{scope.column_family} WHERE #{scope._key} IN (#{ids.map { |id| '?' }.join(',')})"
         arguments = ids
         unless attributes.blank?
-          statement += " AND #{attributes.keys.map{ |k| "#{k} = ?" }.join(' AND ')}"
+          statement += " AND #{attributes.keys.map { |k| "#{k} = ?" }.join(' AND ')}"
           arguments += attributes.values
         end
         execute(statement, arguments)
@@ -197,20 +209,23 @@ module CassandraObject
 
       def delete_single(obj)
         keys = obj.class._keys
-        wheres = keys.map{ |k| "#{k} = ?" }.join(' AND ')
-        arguments = keys.map{ |k| obj.attributes[k] }
+        wheres = keys.map { |k| "#{k} = ?" }.join(' AND ')
+        arguments = keys.map { |k| obj.attributes[k] }
         statement = "DELETE FROM #{obj.class.column_family} WHERE #{wheres}"
         execute(statement, arguments)
       end
 
       def execute_batch(statements)
-        raise '
+        raise 'Statements is empty!' if statements.empty?
+        consistency = config[:write_consistency] || config[:consistency]
+        # puts "cassandra adapter execute batch #{consistency}"
+
         batch = connection.batch do |b|
           statements.each do |statement|
             b.add(statement[:query], arguments: statement[:arguments])
           end
         end
-        connection.execute(batch, page_size: config[:page_size])
+        connection.execute(batch, consistency: consistency, page_size: config[:page_size])
       end
 
       # SCHEMA
@@ -239,7 +254,7 @@ module CassandraObject
       def schema_execute(cql, keyspace)
         schema_db = Cassandra.cluster cassandra_cluster_options
         connection = schema_db.connect keyspace
-        connection.execute cql, consistency: consistency
+        connection.execute cql, consistency: config[:write_consistency] || config[:consistency]
       end
 
       def cassandra_version
@@ -248,14 +263,6 @@ module CassandraObject
 
       # /SCHEMA
 
-      def consistency
-        defined?(@consistency) ? @consistency : nil
-      end
-
-      def consistency=(val)
-        @consistency = val
-      end
-
       def statement_create_with_options(stmt, options = '')
         if !options.nil?
           statement_with_options stmt, options
@@ -266,7 +273,7 @@ module CassandraObject
           AND caching = '{\"keys\":\"ALL\", \"rows_per_partition\":\"NONE\"}'
           AND comment = ''
           AND compaction = {'min_sstable_size': '52428800', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}
-          AND compression = {'chunk_length_kb': '64', '
+          AND compression = {'chunk_length_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
           AND dclocal_read_repair_chance = 0.0
           AND default_time_to_live = 0
           AND gc_grace_seconds = 864000
@@ -276,24 +283,24 @@ module CassandraObject
           AND read_repair_chance = 1.0
           AND speculative_retry = 'NONE';"
         else
-          "#{stmt} WITH
-          AND
-          AND gc_grace_seconds = 864000
-          AND bloom_filter_fp_chance = 0.01
-          AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' }
+          "#{stmt} WITH bloom_filter_fp_chance = 0.01
+          AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}
           AND comment = ''
-          AND compaction = {
-          AND compression = {
+          AND compaction = {'class': 'SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
+          AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
+          AND crc_check_chance = 1.0
+          AND dclocal_read_repair_chance = 0.1
           AND default_time_to_live = 0
-          AND
-          AND min_index_interval = 128
+          AND gc_grace_seconds = 864000
           AND max_index_interval = 2048
-          AND
+          AND memtable_flush_period_in_ms = 0
+          AND min_index_interval = 128
+          AND read_repair_chance = 0.0
+          AND speculative_retry = '99.0PERCENTILE';
           "
         end
       end
     end
-
   end
  end
 end
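Both adapters now resolve a separate consistency level for writes, falling back to the read level when `write_consistency` is not set. A minimal sketch of that fallback, using only the config keys shown in the diff (the values are hypothetical):

```ruby
# Hypothetical config hash; only the keys come from the adapter code above.
config = { consistency: :local_one, write_consistency: :quorum }

read_consistency  = config[:consistency]                               # => :local_one (used by execute_async)
write_consistency = config[:write_consistency] || config[:consistency] # => :quorum (used by execute / execute_batch)
```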
data/lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb
CHANGED
@@ -26,34 +26,39 @@ module CassandraObject
 
         if @scope.id_values.empty?
           str = [
-
-
+            "SELECT #{select_string} FROM #{@scope.klass.column_family}",
+            where_string_async(nil)
           ]
           str << 'ALLOW FILTERING' if @scope.klass.allow_filtering
           return [] << str.delete_if(&:blank?) * ' '
         end
-
-
-
-
-
-
-          str.delete_if(&:blank?) * ' '
-        end
+        str = [
+          "SELECT #{select_string} FROM #{@scope.klass.column_family}",
+          where_string_async(@scope.id_values)
+        ]
+        str << 'ALLOW FILTERING' if @scope.klass.allow_filtering
+        [str.delete_if(&:blank?) * ' ']
       end
 
-      def where_string_async(
+      def where_string_async(ids)
         conditions = []
-
+
+        if ids.present?
+          conditions << if ids.size > 1
+                          "#{@adapter.primary_key_column} IN (#{ids.map { |id| "'#{id}'" }.join(',')})"
+                        else
+                          "#{@adapter.primary_key_column} = '#{ids.first}'"
+                        end
+        end
+
         select_values = @scope.select_values.select { |sv| sv != :column1 }
         if select_values.size > 0
           select_str = select_values.size > 1 ? "column1 IN (#{select_values.map { |sv| '?' }.join(',')})" : 'column1 = ?'
           conditions << select_str
         end
         conditions += @scope.where_values.select.each_with_index { |_, i| i.even? }
-
+        conditions.any? ? "WHERE #{conditions.join(' AND ')}" : nil
       end
-
     end
 
     def primary_key_column
@@ -70,6 +75,7 @@ module CassandraObject
         :connections_per_local_node,
         :connections_per_remote_node,
         :consistency,
+        :write_consistency,
         :credentials,
         :futures_factory,
         :hosts,
@@ -95,26 +101,27 @@ module CassandraObject
         ])
 
         {
-          load_balancing_policy: 'Cassandra::LoadBalancing::Policies::%s',
+          # load_balancing_policy: 'Cassandra::LoadBalancing::Policies::%s',
           reconnection_policy: 'Cassandra::Reconnection::Policies::%s',
           retry_policy: 'Cassandra::Retry::Policies::%s'
         }.each do |policy_key, class_template|
           params = cluster_options[policy_key]
           if params
             if params.is_a?(Hash)
-              cluster_options[policy_key] = (class_template % [params[:policy].classify]).constantize.new(*params[:params]||[])
+              cluster_options[policy_key] = (class_template % [params[:policy].classify]).constantize.new(*params[:params] || [])
             else
               cluster_options[policy_key] = (class_template % [params.classify]).constantize.new
             end
           end
         end
-
+
         # Setting defaults
         cluster_options.merge!({
           heartbeat_interval: cluster_options.keys.include?(:heartbeat_interval) ? cluster_options[:heartbeat_interval] : 30,
           idle_timeout: cluster_options[:idle_timeout] || 60,
           max_schema_agreement_wait: 1,
-          consistency: cluster_options[:consistency] || :
+          consistency: cluster_options[:consistency] || :local_one,
+          write_consistency: cluster_options[:write_consistency] || cluster_options[:consistency] || :local_one,
           protocol_version: cluster_options[:protocol_version] || 3,
           page_size: cluster_options[:page_size] || 10000
         })
@@ -129,12 +136,17 @@ module CassandraObject
       end
 
       def execute(statement, arguments = [])
+        consistency = config[:write_consistency] || config[:consistency]
+        # puts "schemaless adapter: #{consistency}"
         ActiveSupport::Notifications.instrument('cql.cassandra_object', cql: statement) do
           connection.execute statement, arguments: arguments, consistency: consistency, page_size: config[:page_size]
         end
       end
 
       def execute_async(queries, arguments = [], per_page = nil, next_cursor = nil)
+        consistency = config[:consistency]
+        # puts "schemaless adapter async: #{consistency}"
+
         retries = 0
         per_page ||= config[:page_size]
         futures = queries.map { |q|
@@ -164,7 +176,7 @@ module CassandraObject
           item.rows.each { |x| ids << x[primary_key_column] }
           new_next_cursor = item.paging_state unless item.last_page?
         end
-
+        { ids: ids, new_next_cursor: new_next_cursor }
       end
 
       def select(scope)
@@ -172,19 +184,19 @@ module CassandraObject
         queries.compact! if queries.present?
         raise CassandraObject::RecordNotFound if !queries.present?
 
-        arguments = scope.select_values.select{ |sv| sv != :column1 }.map(&:to_s)
-        arguments += scope.where_values.select.each_with_index{ |_, i| i.odd? }.reject{ |c| c.empty? }.map(&:to_s)
+        arguments = scope.select_values.select { |sv| sv != :column1 }.map(&:to_s)
+        arguments += scope.where_values.select.each_with_index { |_, i| i.odd? }.reject { |c| c.empty? }.map(&:to_s)
         records = execute_async(queries, arguments).map do |item|
           # pagination
           elems = []
           loop do
-            item.rows.each{ |x| elems << x }
+            item.rows.each { |x| elems << x }
             break if item.last_page?
             item = item.next_page
           end
           elems
         end
-        {results: records.flatten!}
+        { results: records.flatten! }
       end
 
       def select_paginated(scope)
@@ -192,15 +204,15 @@ module CassandraObject
         queries.compact! if queries.present?
         raise CassandraObject::RecordNotFound if !queries.present?
 
-        arguments = scope.select_values.select{ |sv| sv != :column1 }.map(&:to_s)
-        arguments += scope.where_values.select.each_with_index{ |_, i| i.odd? }.reject{ |c| c.empty? }.map(&:to_s)
+        arguments = scope.select_values.select { |sv| sv != :column1 }.map(&:to_s)
+        arguments += scope.where_values.select.each_with_index { |_, i| i.odd? }.reject { |c| c.empty? }.map(&:to_s)
         new_next_cursor = nil
         records = []
         execute_async(queries, arguments, scope.limit_value, scope.next_cursor).each do |item|
           new_next_cursor = item.paging_state unless item.last_page?
-          item.rows.each{ |x| records << x }
+          item.rows.each { |x| records << x }
         end
-        {results: records, new_next_cursor: new_next_cursor}
+        { results: records, new_next_cursor: new_next_cursor }
       end
 
       def insert(table, id, attributes, ttl = nil)
@@ -213,16 +225,15 @@ module CassandraObject
 
       def write(table, id, attributes, ttl)
         queries = []
-        # puts attributes
         attributes.each do |column, value|
           if !value.nil?
             query = "INSERT INTO #{table} (#{primary_key_column},column1,value) VALUES (?,?,?)"
-            query += " USING TTL #{ttl
+            query += " USING TTL #{ttl}" if !ttl.nil?
             args = [id.to_s, column.to_s, value.to_s]
 
-            queries << {query: query, arguments: args}
+            queries << { query: query, arguments: args }
           else
-            queries << {query: "DELETE FROM #{table} WHERE #{primary_key_column} = ? AND column1= ?", arguments: [id.to_s, column.to_s]}
+            queries << { query: "DELETE FROM #{table} WHERE #{primary_key_column} = ? AND column1= ?", arguments: [id.to_s, column.to_s] }
           end
         end
         execute_batchable(queries)
@@ -232,18 +243,20 @@ module CassandraObject
         ids = [ids] if !ids.is_a?(Array)
         arguments = nil
         arguments = ids if ids.size == 1
-        statement = "DELETE FROM #{table} WHERE #{create_ids_where_clause(ids)}"
+        statement = "DELETE FROM #{table} WHERE #{create_ids_where_clause(ids)}" # .gsub('?', ids.map { |id| "'#{id}'" }.join(','))
         execute(statement, arguments)
       end
 
       def execute_batch(statements)
-
+        consistency = config[:write_consistency] || config[:consistency]
+        # puts "schemaless execute batch #{consistency}"
+        raise 'Statements is empty!' if statements.empty?
         batch = connection.batch do |b|
           statements.each do |statement|
             b.add(statement[:query], arguments: statement[:arguments])
           end
         end
-        connection.execute(batch, page_size: config[:page_size])
+        connection.execute(batch, consistency: consistency, page_size: config[:page_size])
       end
 
       # SCHEMA
@@ -270,7 +283,7 @@ module CassandraObject
       def schema_execute(cql, keyspace)
         schema_db = Cassandra.cluster cassandra_cluster_options
         connection = schema_db.connect keyspace
-        connection.execute cql, consistency: consistency
+        connection.execute cql, consistency: config[:write_consistency] || config[:consistency]
       end
 
       def cassandra_version
@@ -279,14 +292,6 @@ module CassandraObject
 
       # /SCHEMA
 
-      def consistency
-        defined?(@consistency) ? @consistency : nil
-      end
-
-      def consistency=(val)
-        @consistency = val
-      end
-
       def statement_create_with_options(stmt, options)
         if !options.nil?
           statement_with_options stmt, options
@@ -299,7 +304,7 @@ module CassandraObject
           AND caching = '{\"keys\":\"ALL\", \"rows_per_partition\":\"NONE\"}'
           AND comment = ''
           AND compaction = {'min_sstable_size': '52428800', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}
-          AND compression = {'chunk_length_kb': '64', '
+          AND compression = {'chunk_length_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
           AND dclocal_read_repair_chance = 0.0
           AND default_time_to_live = 0
           AND gc_grace_seconds = 864000
@@ -309,19 +314,20 @@ module CassandraObject
           AND read_repair_chance = 1.0
           AND speculative_retry = 'NONE';"
         else
-          "#{stmt} WITH
-          AND
-          AND gc_grace_seconds = 864000
-          AND bloom_filter_fp_chance = 0.01
-          AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' }
+          "#{stmt} WITH bloom_filter_fp_chance = 0.01
+          AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}
           AND comment = ''
-          AND compaction = {
-          AND compression = {
+          AND compaction = {'class': 'SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
+          AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
+          AND crc_check_chance = 1.0
+          AND dclocal_read_repair_chance = 0.1
           AND default_time_to_live = 0
-          AND
-          AND min_index_interval = 128
+          AND gc_grace_seconds = 864000
           AND max_index_interval = 2048
-          AND
+          AND memtable_flush_period_in_ms = 0
+          AND min_index_interval = 128
+          AND read_repair_chance = 0.0
+          AND speculative_retry = '99.0PERCENTILE';
           "
 
         end
@@ -332,9 +338,8 @@ module CassandraObject
         return ids if ids.empty?
         ids = ids.first if ids.is_a?(Array) && ids.one?
         sql = ids.is_a?(Array) ? "#{primary_key_column} IN (#{ids.map { |id| "'#{id}'" }.join(',')})" : "#{primary_key_column} = ?"
-
+        sql
       end
-
     end
   end
 end
data/lib/cassandra_object/attribute_methods.rb
CHANGED
@@ -5,9 +5,9 @@ module CassandraObject
 
     included do
       if ActiveModel::VERSION::STRING < '3.2'
-        attribute_method_suffix(
+        attribute_method_suffix('', '=')
       else
-        attribute_method_suffix(
+        attribute_method_suffix('=')
       end
 
       # (Alias for the protected read_attribute method).
data/lib/cassandra_object/scope/finder_methods.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module CassandraObject
   class Scope
     module FinderMethods
@@ -16,50 +18,60 @@ module CassandraObject
       end
 
       def find_in_batches(id, next_cursor = nil)
-        obj =
+        obj = clone
         obj.is_all = true
         obj.next_cursor = next_cursor
         obj.where_ids(id).execute_paged
       end
 
       def find_all_in_batches(next_cursor = nil)
-        obj =
+        obj = clone
         obj.is_all = true
         obj.next_cursor = next_cursor
         obj.execute
       end
 
       def first
-        return limit(1).find_all_in_batches[:results].first if
+        return limit(1).find_all_in_batches[:results].first if schema_type == :dynamic_attributes || schema_type == :schemaless
         limit(1).execute.first
       end
 
       private
 
-
-
-
-
-
-
-
-
-
-
-
+      def find_one(id)
+        if id.blank?
+          not_found(id)
+        elsif schema_type == :dynamic_attributes
+          record = where_ids(id).execute
+          not_found(id) if record.empty?
+          record
+        elsif record = where_ids(id)[0]
+          record
+        else
+          not_found(id)
+        end
       end
-      end
 
-
-
-
-        ids = ids.compact.map(&:to_s).uniq
-        where_ids(ids).execute
-      end
+      def find_some(pids)
+        ids = pids.flatten.compact.uniq.map(&:to_s)
+        return [] if ids.empty?
 
-
-
-
+        qr = where_ids(ids).execute
+        is_dymnamic = qr.is_a?(Hash)
+
+        results = qr.sort_by do |r|
+          id = r.keys.first if r.is_a?(Hash)
+          id = r[0] if r.is_a?(Array)
+          id = r.id if id.nil?
+          ids.index(id)
+        end
+
+        is_dymnamic ? Hash[results] : results
+      end
+
+      def not_found(id)
+        raise CassandraObject::RecordNotFound, "Couldn't find #{name} with key #{id.inspect}"
+      end
     end
   end
 end
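With the new `find_some`, multi-id lookups now come back ordered by the position of each id in the argument list, which is the behaviour the new finder tests later in this diff assert. A small illustrative usage, assuming an `Issue` model as in the test suite (the ids are made up):

```ruby
# Records are returned in the order the ids were requested,
# regardless of the order Cassandra yields the rows in.
ids = ['42', '7', '13']
issues = Issue.find(ids)
issues.map(&:id) # => ['42', '7', '13']
```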
data/lib/cassandra_object/timestamps.rb
CHANGED
@@ -15,7 +15,11 @@ module CassandraObject
 
     before_update if: :changed? do
       if self.class.timestamps
-
+        if store_updated_at.present?
+          self.updated_at = store_updated_at
+        else
+          self.updated_at = Time.current
+        end
       end
     end
   end
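On update, `updated_at` is now taken from a `store_updated_at` attribute when one is supplied, and only falls back to `Time.current` otherwise (see the new timestamp tests further down). A short illustrative usage mirroring those tests:

```ruby
issue = Issue.create(description: 'foo')

# Default behaviour: updated_at is bumped to the current time.
issue.update_attributes(description: 'bar')

# Pin updated_at to an explicit value instead of Time.current.
pinned = issue.updated_at + 5.days
issue.update_attributes(description: 'baz', store_updated_at: pinned)
issue.updated_at # => pinned
```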
data/test/support/cassandra.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 Bundler.require :cassandra
 
 CassandraObject::Base.config = {
@@ -13,7 +15,7 @@ CassandraObject::Base.config = {
   connections_per_local_node: 4,
   schema_refresh_delay: 0.1,
   schema_refresh_timeout: 0.1,
-  load_balancing_policy:
+  load_balancing_policy: Cassandra::LoadBalancing::Policies::RoundRobin.new,
   reconnection_policy: { policy: 'Constant', params: [5] },
   retry_policy: 'Default',
   # connections_per_remote_node: nil,
@@ -30,15 +32,12 @@ end
 sleep 1
 CassandraObject::Schema.create_keyspace 'cassandra_object_test'
 CassandraObject::Schemaless.create_column_family 'Issues'
-CassandraObject::Schema.create_column_family 'IssueSchemas', {attributes: 'id text, title text, description text, field float, intero int, created_at timestamp, updated_at timestamp, PRIMARY KEY (id)', options: {}}
-CassandraObject::Schema.create_column_family 'IssueSchemaCks', {attributes: 'id text, type text, date timestamp, value float, PRIMARY KEY (id, type, date)', options: {}}
+CassandraObject::Schema.create_column_family 'IssueSchemas', { attributes: 'id text, title text, description text, field float, intero int, created_at timestamp, updated_at timestamp, PRIMARY KEY (id)', options: {} }
+CassandraObject::Schema.create_column_family 'IssueSchemaCks', { attributes: 'id text, type text, date timestamp, value float, PRIMARY KEY (id, type, date)', options: {} }
 CassandraObject::Schemaless.create_column_family 'IssueDynamics'
 CassandraObject::Schemaless.create_column_family 'IssuesCustomConfig'
-CassandraObject::Schema.create_column_family 'IssueSchemaFathers', {attributes: 'id text, title text, field float, created_at timestamp, updated_at timestamp, PRIMARY KEY (id)', options: {}}
-CassandraObject::Schema.create_column_family 'IssueSchemaChildren', {attributes: 'id text, title text, description text, field float, created_at timestamp, updated_at timestamp, issue_schema_father_id text, PRIMARY KEY (id)', options: {}}
-CassandraObject::BaseSchemaless.adapter.consistency = :quorum
-CassandraObject::BaseSchemalessDynamic.adapter.consistency = :quorum
-CassandraObject::BaseSchema.adapter.consistency = :quorum
+CassandraObject::Schema.create_column_family 'IssueSchemaFathers', { attributes: 'id text, title text, field float, created_at timestamp, updated_at timestamp, PRIMARY KEY (id)', options: {} }
+CassandraObject::Schema.create_column_family 'IssueSchemaChildren', { attributes: 'id text, title text, description text, field float, created_at timestamp, updated_at timestamp, issue_schema_father_id text, PRIMARY KEY (id)', options: {} }
 
 CassandraObject::Base.class_eval do
   class_attribute :created_records
@@ -60,7 +59,6 @@ end
 
 module ActiveSupport
   class TestCase
-
     self.test_order = :random
 
     def after_setup
data/test/unit/persistence_schema_ck_test.rb
CHANGED
@@ -4,7 +4,6 @@
 require 'test_helper'
 
 class CassandraObject::PersistenceSchemaCkTest < CassandraObject::TestCase
-
   test 'composite key' do
     time1 = Time.now
     time2 = time1 + 1.second
@@ -25,19 +24,17 @@ class CassandraObject::PersistenceSchemaCkTest < CassandraObject::TestCase
     item = res[1]
     assert_equal '1', item.id
     assert_equal time2.to_i, item.date.to_i
-
   end
 
   test 'delete' do
     IssueSchemaCk.create(id: '1', type: 'first', date: Time.now, value: 1.to_f)
     IssueSchemaCk.create(id: '1', type: 'second', date: Time.now, value: 1.to_f)
-
     IssueSchemaCk.delete('1')
     assert_equal 0, IssueSchemaCk.find_by_id([1]).size
   end
 
   test 'delete with attributes' do
-    time = Time.now
+    time = Time.now - 10.days
     IssueSchemaCk.create(id: '1', type: 'first', date: time, value: 1.to_f)
     IssueSchemaCk.create(id: '1', type: 'first', date: Time.now, value: 1.to_f)
     IssueSchemaCk.create(id: '2', type: 'first', date: time, value: 1.to_f)
@@ -65,5 +62,4 @@ class CassandraObject::PersistenceSchemaCkTest < CassandraObject::TestCase
     IssueSchemaCk.find_by_id(['1']).first.destroy
     assert_equal 1, IssueSchemaCk.find_by_id([1]).size
   end
-
 end
data/test/unit/scope/finder_methods_test.rb
CHANGED
@@ -26,6 +26,38 @@ class CassandraObject::FinderMethodsTest < CassandraObject::TestCase
     assert_equal [first_issue, second_issue].to_set, Issue.find([first_issue.id, second_issue.id]).to_set
   end
 
+  test 'IssueDynamic: find with ids sorted' do
+    ids = (0..999).to_a.map(&:to_s)
+    ids.each do |i|
+      IssueDynamic.create(key: i, title: "foo_title_#{i}")
+    end
+    ids_to_find = ids.sample(10)
+    assert_equal ids_to_find, IssueDynamic.find(ids_to_find).keys
+    IssueDynamic.delete_all
+  end
+
+  test 'Issue: find with ids sorted' do
+    ids = (0..999).to_a.map(&:to_s)
+    ids.each do |i|
+      Issue.create(id: i, title: "foo_title_#{i}")
+    end
+    ids_to_find = ids.sample(10)
+    assert_equal ids_to_find, Issue.find(ids_to_find).map(&:id)
+    Issue.delete_all
+  end
+
+  test 'IssueSchemaCk: find with ids sorted' do
+    ids = (0..999).to_a.map(&:to_s)
+    ids.each do |i|
+      IssueSchemaCk.create(id: i, type: 'first', date: Date.yesterday.to_time, value: 1.0)
+      IssueSchemaCk.create(id: i, type: 'first', date: Date.today.to_time, value: 2.0)
+    end
+    ids_to_find = ids.sample(10)
+    assert_equal ids_to_find.size * 2, IssueSchemaCk.find(ids_to_find).size
+    assert_equal ids_to_find, IssueSchemaCk.find(ids_to_find).map(&:id).uniq
+    IssueSchemaCk.delete_all
+  end
+
   test 'find_by_id' do
     Issue.create.tap do |issue|
       assert_equal issue, Issue.find_by_id(issue.id)
@@ -64,9 +96,7 @@ class CassandraObject::FinderMethodsTest < CassandraObject::TestCase
     IssueDynamic.delete(['1', '2'])
   end
 
-
   test 'find all in batches dynamic paged' do
-
     issues = []
     100.times.each do |i|
       issues << IssueDynamic.create(key: i, title: 'tit', dynamic_field1: 'one', dynamic_field2: 'two')
@@ -135,5 +165,4 @@ class CassandraObject::FinderMethodsTest < CassandraObject::TestCase
   # first_issue = IssueDynamic.create(key: '1', title: 'tit', dynamic_field1: 'one', dynamic_field2: 'two')
   # f = IssueDynamic.first
   # end
-
 end
data/test/unit/timestamps_test.rb
CHANGED
@@ -21,7 +21,30 @@ class CassandraObject::TimestampsTest < CassandraObject::TestCase
   test 'created_at sets only if nil' do
     time = 5.days.ago
     issue = Issue.create created_at: time
-
     assert_equal time, issue.created_at
   end
+
+  test 'set updated_at to now when not passed as an attribute' do
+    udate = 1.year.ago
+    issue = Issue.create(description: 'foo', updated_at: udate)
+    assert_equal udate, issue.updated_at
+    issue.update_attributes(description: 'test')
+    assert_not_equal udate, issue.updated_at
+  end
+
+  test 'set updated_at to passed value' do
+    issue = Issue.create(description: 'foo')
+    updated_at = issue.updated_at
+    new_updated_at = updated_at + 5.days
+    issue.update_attributes(description: 'bar', store_updated_at: new_updated_at)
+    assert_equal new_updated_at, issue.updated_at
+  end
+
+  test 'set updated_at to passed value even if is equal to the stored value' do
+    udate = 1.year.ago
+    issue = Issue.create(description: 'foo', updated_at: udate)
+    assert_equal udate, issue.updated_at
+    issue.update_attributes(description: 'bar', store_updated_at: issue.updated_at)
+    assert_equal udate, issue.updated_at
+  end
 end
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: extendi-cassandra_object
 version: !ruby/object:Gem::Version
-  version: 1.0.17
+  version: 1.1.1
 platform: ruby
 authors:
 - Duccio Giovannelli
@@ -9,22 +9,28 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2020-06-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activemodel
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 4.2.0
+    - - "<"
       - !ruby/object:Gem::Version
-        version:
+        version: 7.0.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 4.2.0
+    - - "<"
       - !ruby/object:Gem::Version
-        version:
+        version: 7.0.0
 - !ruby/object:Gem::Dependency
   name: cassandra-driver
   requirement: !ruby/object:Gem::Requirement
@@ -198,8 +204,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: 1.3.5
 requirements: []
-
-rubygems_version: 2.7.6
+rubygems_version: 3.1.2
 signing_key:
 specification_version: 4
 summary: Cassandra ActiveModel