dynamoid 3.0.0 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,144 @@
+ module Dynamoid
+   module AdapterPlugin
+     class Query
+       OPTIONS_KEYS = [
+         :limit, :hash_key, :hash_value, :range_key, :consistent_read, :scan_index_forward,
+         :select, :index_name, :batch_size, :exclusive_start_key, :record_limit, :scan_limit
+       ]
+
+       attr_reader :client, :table, :options, :conditions
+
+       def initialize(client, table, opts = {})
+         @client = client
+         @table = table
+
+         opts = opts.symbolize_keys
+         @options = opts.slice(*OPTIONS_KEYS)
+         @conditions = opts.except(*OPTIONS_KEYS)
+       end
+
+       def call
+         request = build_request
+
+         Enumerator.new do |yielder|
+           record_count = 0
+           scan_count = 0
+
+           backoff = Dynamoid.config.backoff ? Dynamoid.config.build_backoff : nil
+
+           loop do
+             # Adjust the limit down if the remaining record and/or scan limits are
+             # lower, so the limits are obeyed. We can assume the difference won't be
+             # negative due to the break statements below, but we choose the smaller
+             # limit, which is why there are 2 separate if statements.
+             # NOTE: Adjusting based on record_limit can cause many HTTP requests
+             # to be made. We may want to change this behavior, but it affects
+             # filtering on data with potentially large gaps.
+             # Example:
+             #   User.where('created_at.gte' => 1.day.ago).record_limit(1000)
+             #   Records 1-999 are Users that fit the criteria
+             #   Records 1000-2000 are Users that do not fit the criteria
+             #   Record 2001 fits the criteria
+             # The underlying implementation will have 1 page for records 1-999,
+             # then will request with limit 1 for records 1000-2000 (making 1000
+             # requests of limit 1) until it hits record 2001.
+             if request[:limit] && record_limit && record_limit - record_count < request[:limit]
+               request[:limit] = record_limit - record_count
+             end
+             if request[:limit] && scan_limit && scan_limit - scan_count < request[:limit]
+               request[:limit] = scan_limit - scan_count
+             end
+
+             response = client.query(request)
+
+             yielder << response
+
+             record_count += response.count
+             break if record_limit && record_count >= record_limit
+
+             scan_count += response.scanned_count
+             break if scan_limit && scan_count >= scan_limit
+
+             if response.last_evaluated_key
+               request[:exclusive_start_key] = response.last_evaluated_key
+             else
+               break
+             end
+
+             backoff.call if backoff
+           end
+         end
+       end
+
+       private
+
+       def build_request
+         request = options.slice(
+           :consistent_read,
+           :scan_index_forward,
+           :select,
+           :index_name,
+           :exclusive_start_key
+         ).compact
+
+         # Deal with various limits and batching
+         batch_size = options[:batch_size]
+         limit = [record_limit, scan_limit, batch_size].compact.min
+
+         request[:limit] = limit if limit
+         request[:table_name] = table.name
+         request[:key_conditions] = key_conditions
+         request[:query_filter] = query_filter
+
+         request
+       end
+
+       def record_limit
+         options[:record_limit]
+       end
+
+       def scan_limit
+         options[:scan_limit]
+       end
+
+       def hash_key_name
+         (options[:hash_key] || table.hash_key)
+       end
+
+       def range_key_name
+         (options[:range_key] || table.range_key)
+       end
+
+       def key_conditions
+         result = {
+           hash_key_name => {
+             comparison_operator: AwsSdkV3::EQ,
+             attribute_value_list: AwsSdkV3.attribute_value_list(AwsSdkV3::EQ, options[:hash_value].freeze)
+           }
+         }
+
+         conditions.slice(*AwsSdkV3::RANGE_MAP.keys).each do |k, _v|
+           op = AwsSdkV3::RANGE_MAP[k]
+
+           result[range_key_name] = {
+             comparison_operator: op,
+             attribute_value_list: AwsSdkV3.attribute_value_list(op, conditions[k].freeze)
+           }
+         end
+
+         result
+       end
+
+       def query_filter
+         conditions.except(*AwsSdkV3::RANGE_MAP.keys).reduce({}) do |result, (attr, cond)|
+           condition = {
+             comparison_operator: AwsSdkV3::FIELD_MAP[cond.keys[0]],
+             attribute_value_list: AwsSdkV3.attribute_value_list(AwsSdkV3::FIELD_MAP[cond.keys[0]], cond.values[0].freeze)
+           }
+           result[attr] = condition
+           result
+         end
+       end
+     end
+   end
+ end
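Note: the Query class above is internal code for the AwsSdkV3 adapter, but its interface is visible from initialize and call: keys listed in OPTIONS_KEYS are treated as query options, everything else becomes a filter condition, and call returns an Enumerator of response pages. A rough sketch of how it might be driven — client, table and process below are illustrative placeholders, not part of the diff:

    query = Dynamoid::AdapterPlugin::Query.new(client, table, hash_value: 'some-id', record_limit: 100)

    # each iteration yields one DynamoDB response page
    query.call.each do |page|
      page.items.each { |item| process(item) }
    end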
@@ -0,0 +1,107 @@
+ module Dynamoid
+   module AdapterPlugin
+     class Scan
+       attr_reader :client, :table, :conditions, :options
+
+       def initialize(client, table, conditions = {}, options = {})
+         @client = client
+         @table = table
+         @conditions = conditions
+         @options = options
+       end
+
+       def call
+         request = build_request
+
+         Enumerator.new do |yielder|
+           record_count = 0
+           scan_count = 0
+
+           backoff = Dynamoid.config.backoff ? Dynamoid.config.build_backoff : nil
+
+           loop do
+             # Adjust the limit down if the remaining record and/or scan limits are
+             # lower, so the limits are obeyed. We can assume the difference won't be
+             # negative due to the break statements below, but we choose the smaller
+             # limit, which is why there are 2 separate if statements.
+             # NOTE: Adjusting based on record_limit can cause many HTTP requests
+             # to be made. We may want to change this behavior, but it affects
+             # filtering on data with potentially large gaps.
+             # Example:
+             #   User.where('created_at.gte' => 1.day.ago).record_limit(1000)
+             #   Records 1-999 are Users that fit the criteria
+             #   Records 1000-2000 are Users that do not fit the criteria
+             #   Record 2001 fits the criteria
+             # The underlying implementation will have 1 page for records 1-999,
+             # then will request with limit 1 for records 1000-2000 (making 1000
+             # requests of limit 1) until it hits record 2001.
+             if request[:limit] && record_limit && record_limit - record_count < request[:limit]
+               request[:limit] = record_limit - record_count
+             end
+             if request[:limit] && scan_limit && scan_limit - scan_count < request[:limit]
+               request[:limit] = scan_limit - scan_count
+             end
+
+             response = client.scan(request)
+
+             yielder << response
+
+             record_count += response.count
+             break if record_limit && record_count >= record_limit
+
+             scan_count += response.scanned_count
+             break if scan_limit && scan_count >= scan_limit
+
+             # Keep pulling if we haven't finished paging in all data
+             if response.last_evaluated_key
+               request[:exclusive_start_key] = response.last_evaluated_key
+             else
+               break
+             end
+
+             backoff.call if backoff
+           end
+         end
+       end
+
+       private
+
+       def build_request
+         request = options.slice(
+           :consistent_read,
+           :exclusive_start_key,
+           :select
+         ).compact
+
+         # Deal with various limits and batching
+         batch_size = options[:batch_size]
+         limit = [record_limit, scan_limit, batch_size].compact.min
+
+         request[:limit] = limit if limit
+         request[:table_name] = table.name
+         request[:scan_filter] = scan_filter
+
+         request
+       end
+
+       def record_limit
+         options[:record_limit]
+       end
+
+       def scan_limit
+         options[:scan_limit]
+       end
+
+       def scan_filter
+         conditions.reduce({}) do |result, (attr, cond)|
+           condition = {
+             comparison_operator: AwsSdkV3::FIELD_MAP[cond.keys[0]],
+             attribute_value_list: AwsSdkV3.attribute_value_list(AwsSdkV3::FIELD_MAP[cond.keys[0]], cond.values[0].freeze)
+           }
+           result[attr] = condition
+           result
+         end
+       end
+     end
+   end
+ end
@@ -14,7 +14,7 @@ module Dynamoid
 
    before_create :set_created_at
    before_save :set_updated_at
-  after_initialize :set_type
+  after_initialize :set_inheritance_field
  end
 
  include ActiveModel::AttributeMethods
@@ -2,6 +2,7 @@
 
  require 'uri'
  require 'logger'
+ require 'null_logger'
  require 'dynamoid/config/options'
  require 'dynamoid/config/backoff_strategies/constant_backoff'
  require 'dynamoid/config/backoff_strategies/exponential_backoff'
@@ -41,6 +42,10 @@ module Dynamoid
    constant: BackoffStrategies::ConstantBackoff,
    exponential: BackoffStrategies::ExponentialBackoff
  }
+ option :http_continue_timeout, default: nil # set if you'd like to override the AWS SDK value - default: 1
+ option :http_idle_timeout, default: nil # - default: 5
+ option :http_open_timeout, default: nil # - default: 15
+ option :http_read_timeout, default: nil # - default: 60
 
  # The default logger for Dynamoid: either the Rails logger or just stdout.
  #
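The four new http_* options default to nil, which keeps the AWS SDK's own defaults (the values noted in the comments above). A minimal configuration sketch, with illustrative values:

    Dynamoid.configure do |config|
      config.http_open_timeout = 5    # seconds allowed for opening a connection
      config.http_read_timeout = 30   # seconds allowed while waiting for a response
    end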
@@ -61,7 +66,7 @@ module Dynamoid
  # @since 0.2.0
  def logger=(logger)
    case logger
-   when false, nil then @logger = Logger.new('/dev/null')
+   when false, nil then @logger = NullLogger.new
    when true then @logger = default_logger
    else
      @logger = logger if logger.respond_to?(:info)
@@ -18,8 +18,9 @@ module Dynamoid #:nodoc:
    @scan_index_forward = true
 
    # Honor STI and :type field if it presents
-   if @source.attributes.key?(:type)
-     @query[:'type.in'] = @source.deep_subclasses.map(&:name) << @source.name
+   type = @source.inheritance_field
+   if @source.attributes.key?(type)
+     @query[:"#{type}.in"] = @source.deep_subclasses.map(&:name) << @source.name
    end
  end
 
@@ -51,6 +52,14 @@ module Dynamoid #:nodoc:
    records
  end
 
+ def count
+   if key_present?
+     count_via_query
+   else
+     count_via_scan
+   end
+ end
+
  # Returns the last fetched record matched the criteria
  # Enumerable doesn't implement `last`, only `first`
  # So we have to implement it ourselves
@@ -163,6 +172,14 @@ module Dynamoid #:nodoc:
      end
    end
 
+ def count_via_query
+   Dynamoid.adapter.query_count(source.table_name, range_query)
+ end
+
+ def count_via_scan
+   Dynamoid.adapter.scan_count(source.table_name, scan_query, scan_opts)
+ end
+
  def range_hash(key)
    name, operation = key.to_s.split('.')
    val = type_cast_condition_parameter(name, query[key])
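With count, count_via_query and count_via_scan in place, a criteria chain can now be counted by delegating to the adapter instead of loading the matching records. An illustrative sketch (User is a hypothetical model whose hash key is id):

    User.where(id: 'abc').count        # key condition present, counted via query_count
    User.where(name: 'Alice').count    # no key condition, counted via scan_count (full table scan)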
@@ -50,6 +50,11 @@ module Dynamoid #:nodoc:
    options[:write_capacity] || Dynamoid::Config.write_capacity
  end
 
+ # Returns the field name used to support STI for this table.
+ def inheritance_field
+   options[:inheritance_field] || :type
+ end
+
  # Returns the id field for this class.
  #
  # @since 0.4.0
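inheritance_field makes the STI discriminator field configurable per model (it was previously hard-coded to :type). A hypothetical model using the new table option:

    class Vehicle
      include Dynamoid::Document
      table inheritance_field: :kind

      field :kind   # the declared field that stores the subclass name
    end

    class Car < Vehicle
    end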
@@ -102,7 +107,7 @@ module Dynamoid #:nodoc:
  #
  # @since 0.2.0
  def build(attrs = {})
-   attrs[:type] ? attrs[:type].constantize.new(attrs) : new(attrs)
+   choose_right_class(attrs).new(attrs)
  end
 
  # Does this object exist?
@@ -251,18 +256,43 @@ module Dynamoid #:nodoc:
    attrs.each do |k, v|
      value_casted = TypeCasting.cast_field(v, attributes[k])
      value_dumped = Dumping.dump_field(value_casted, attributes[k])
+
      t.set(k => value_dumped)
    end
  end
+
  attrs_undumped = Undumping.undump_attributes(new_attrs, attributes)
  new(attrs_undumped)
 rescue Dynamoid::Errors::ConditionalCheckFailedException
 end
 end
 
+ def inc(hash_key_value, range_key_value=nil, counters)
+   options = if range_key
+     value_casted = TypeCasting.cast_field(range_key_value, attributes[range_key])
+     value_dumped = Dumping.dump_field(value_casted, attributes[range_key])
+     { range_key: value_dumped }
+   else
+     {}
+   end
+
+   Dynamoid.adapter.update_item(table_name, hash_key_value, options) do |t|
+     counters.each do |k, v|
+       value_casted = TypeCasting.cast_field(v, attributes[k])
+       value_dumped = Dumping.dump_field(value_casted, attributes[k])
+
+       t.add(k => value_dumped)
+     end
+   end
+ end
+
  def deep_subclasses
    subclasses + subclasses.map(&:deep_subclasses).flatten
  end
+
+ def choose_right_class(attrs)
+   attrs[inheritance_field] ? attrs[inheritance_field].constantize : self
+ end
 end
 
 # Initialize a new object.
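The new class-level inc performs an atomic ADD on numeric attributes via Dynamoid.adapter.update_item, without reading the item first. Note the signature: the counters hash is the required trailing argument, so the range key value can be omitted. A usage sketch (Page is a hypothetical model with a numeric views field):

    Page.inc('page-id', views: 1)          # increment views by 1, hash key only
    Page.inc('page-id', 'en', views: 2)    # same, for a table that also has a range key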
@@ -281,6 +311,7 @@ module Dynamoid #:nodoc:
  @new_record = true
  @attributes ||= {}
  @associations ||= {}
+ @attributes_before_type_cast ||= {}
 
  self.class.attributes.each do |_, options|
    if options[:type].is_a?(Class) && options[:default]
@@ -11,7 +11,9 @@ module Dynamoid
  end
 
  def self.dump_field(value, options)
-   dumper = field_dumper(options)
+   return nil if value.nil?
+
+   dumper = find_dumper(options)
 
    if dumper.nil?
      raise ArgumentError, "Unknown type #{options[:type]}"
@@ -20,7 +22,7 @@ module Dynamoid
    dumper.process(value)
  end
 
- def self.field_dumper(options)
+ def self.find_dumper(options)
    dumper_class = case options[:type]
    when :string then StringDumper
    when :integer then IntegerDumper
@@ -64,10 +66,106 @@ module Dynamoid
 
  # set -> set
  class SetDumper < Base
+   ALLOWED_TYPES = [:string, :integer, :number, :date, :datetime, :serialized]
+
+   def process(set)
+     if @options.key?(:of)
+       process_typed_collection(set)
+     else
+       set
+     end
+   end
+
+   private
+
+   def process_typed_collection(set)
+     if allowed_type?
+       dumper = Dumping.find_dumper(element_options)
+       result = set.map { |el| dumper.process(el) }
+
+       if element_type == :string
+         result.reject!(&:empty?)
+       end
+
+       result.to_set
+     else
+       raise ArgumentError, "Set element type #{element_type} isn't supported"
+     end
+   end
+
+   def allowed_type?
+     ALLOWED_TYPES.include?(element_type) || element_type.is_a?(Class)
+   end
+
+   def element_type
+     unless @options[:of].is_a?(Hash)
+       @options[:of]
+     else
+       @options[:of].keys.first
+     end
+   end
+
+   def element_options
+     unless @options[:of].is_a?(Hash)
+       { type: element_type }
+     else
+       @options[:of][element_type].dup.tap do |options|
+         options[:type] = element_type
+       end
+     end
+   end
  end
 
  # array -> array
  class ArrayDumper < Base
+   ALLOWED_TYPES = [:string, :integer, :number, :date, :datetime, :serialized]
+
+   def process(array)
+     if @options.key?(:of)
+       process_typed_collection(array)
+     else
+       array
+     end
+   end
+
+   private
+
+   def process_typed_collection(array)
+     if allowed_type?
+       dumper = Dumping.find_dumper(element_options)
+       result = array.map { |el| dumper.process(el) }
+
+       if element_type == :string
+         result.reject!(&:empty?)
+       end
+
+       result
+     else
+       raise ArgumentError, "Array element type #{element_type} isn't supported"
+     end
+   end
+
+   def allowed_type?
+     ALLOWED_TYPES.include?(element_type) || element_type.is_a?(Class)
+   end
+
+   def element_type
+     unless @options[:of].is_a?(Hash)
+       @options[:of]
+     else
+       @options[:of].keys.first
+     end
+   end
+
+   def element_options
+     unless @options[:of].is_a?(Hash)
+       { type: element_type }
+     else
+       @options[:of][element_type].dup.tap do |options|
+         options[:type] = element_type
+       end
+     end
+   end
  end
 
  # datetime -> integer/string
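SetDumper and ArrayDumper now honor an of: field option so collection elements are dumped with a type-specific dumper (one of ALLOWED_TYPES above, or an arbitrary class); empty strings are additionally dropped from string collections. A hypothetical model using typed collections:

    class Article
      include Dynamoid::Document

      field :tags,   :set,   of: :string   # empty strings are rejected when dumping
      field :scores, :array, of: :number
    end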
@@ -122,6 +220,34 @@ module Dynamoid
 
  # any standard Ruby object -> self
  class RawDumper < Base
+   def process(value)
+     deep_sanitize(value)
+   end
+
+   private
+
+   def deep_sanitize(el)
+     case el
+     when Hash
+       sanitize_hash(el).transform_values { |v| deep_sanitize(v) }
+     when Array
+       sanitize_array(el).map { |v| deep_sanitize(v) }
+     else
+       el
+     end
+   end
+
+   def sanitize_hash(h)
+     h.transform_values { |v| invalid_value?(v) ? nil : v }
+   end
+
+   def sanitize_array(a)
+     a.map { |v| invalid_value?(v) ? nil : v }
+   end
+
+   def invalid_value?(v)
+     (v.is_a?(Set) || v.is_a?(String)) && v.empty?
+   end
  end
 
  # object -> string
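RawDumper now deep-sanitizes raw values: empty strings and empty sets nested anywhere inside a Hash or Array are replaced with nil, since DynamoDB does not accept empty string or set attribute values. An illustrative example with a hypothetical model and a :raw field:

    class Event
      include Dynamoid::Document
      field :payload, :raw
    end

    Event.create(payload: { source: '', tags: Set.new, retries: 3 })
    # the payload is stored as { source: nil, tags: nil, retries: 3 }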