aws-sdk-rails 3.1.0 → 3.6.0
- checksums.yaml +4 -4
- data/VERSION +1 -0
- data/bin/aws_sqs_active_job +5 -0
- data/lib/action_dispatch/session/dynamodb_store.rb +32 -0
- data/lib/active_job/queue_adapters/amazon_sqs_adapter.rb +61 -0
- data/lib/active_job/queue_adapters/amazon_sqs_async_adapter.rb +38 -0
- data/lib/aws-sdk-rails.rb +14 -43
- data/lib/aws/rails/middleware/ebs_sqs_active_job_middleware.rb +92 -0
- data/lib/aws/rails/notifications.rb +33 -0
- data/lib/aws/rails/railtie.rb +88 -0
- data/lib/aws/rails/sqs_active_job/configuration.rb +163 -0
- data/lib/aws/rails/sqs_active_job/executor.rb +58 -0
- data/lib/aws/rails/sqs_active_job/job_runner.rb +22 -0
- data/lib/aws/rails/sqs_active_job/lambda_handler.rb +66 -0
- data/lib/aws/rails/sqs_active_job/poller.rb +136 -0
- data/lib/generators/aws_record/base.rb +217 -0
- data/lib/generators/aws_record/generated_attribute.rb +129 -0
- data/lib/generators/aws_record/model/USAGE +24 -0
- data/lib/generators/aws_record/model/model_generator.rb +21 -0
- data/lib/generators/aws_record/model/templates/model.rb +48 -0
- data/lib/generators/aws_record/model/templates/table_config.rb +18 -0
- data/lib/generators/aws_record/secondary_index.rb +60 -0
- data/lib/generators/dynamo_db/session_store_migration/USAGE +13 -0
- data/lib/generators/dynamo_db/session_store_migration/session_store_migration_generator.rb +46 -0
- data/lib/generators/dynamo_db/session_store_migration/templates/dynamo_db_session_store.yml +70 -0
- data/lib/generators/dynamo_db/session_store_migration/templates/session_store_migration.rb +9 -0
- data/lib/tasks/aws_record/migrate.rake +12 -0
- data/lib/tasks/dynamo_db/session_store.rake +8 -0
- metadata +90 -7
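The headline additions in this range are the SQS-backed ActiveJob queue adapters (`amazon_sqs_adapter.rb` and its async variant), the Elastic Beanstalk worker middleware, a Lambda job handler, the `aws_sqs_active_job` poller executable, the `aws_record` model generators, and a DynamoDB session-store generator. As orientation for the diffs below, wiring a Rails app to the new adapter would look roughly like this; this is a hedged sketch, and the adapter symbol and the `config/aws_sqs_active_job.yml` file name are inferred from the adapter and railtie files listed above rather than shown in this diff:

# config/application.rb -- sketch: pick the synchronous or async adapter
config.active_job.queue_adapter = :amazon_sqs        # or :amazon_sqs_async

# config/aws_sqs_active_job.yml -- illustrative queue-name => queue-URL mapping
# queues:
#   default: 'https://sqs.us-east-1.amazonaws.com/123456789012/my-default-queue'

Jobs are then enqueued with the usual `MyJob.perform_later(args)` API and drained by the poller CLI, the Lambda handler, or the Elastic Beanstalk middleware shown below.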
data/lib/aws/rails/sqs_active_job/executor.rb
@@ -0,0 +1,58 @@
# frozen_string_literal: true

require 'concurrent'

module Aws
  module Rails
    module SqsActiveJob
      # Executes jobs from SQS messages on a thread pool
      class Executor

        DEFAULTS = {
          min_threads: 0,
          max_threads: Concurrent.processor_count,
          auto_terminate: true,
          idletime: 60, # 1 minute
          fallback_policy: :caller_runs # slow down the producer thread
        }.freeze

        def initialize(options = {})
          @executor = Concurrent::ThreadPoolExecutor.new(DEFAULTS.merge(options))
          @logger = options[:logger] || ActiveSupport::Logger.new(STDOUT)
        end

        # TODO: Consider catching the exception and sleeping instead of using :caller_runs
        def execute(message)
          @executor.post(message) do |message|
            begin
              job = JobRunner.new(message)
              @logger.info("Running job: #{job.id}[#{job.class_name}]")
              job.run
              message.delete
            rescue Aws::Json::ParseError => e
              @logger.error "Unable to parse message body: #{message.data.body}. Error: #{e}."
            rescue StandardError => e
              # message will not be deleted and will be retried
              job_msg = job ? "#{job.id}[#{job.class_name}]" : 'unknown job'
              @logger.info "Error processing job #{job_msg}: #{e}"
              @logger.debug e.backtrace.join("\n")
            end
          end
        end

        def shutdown(timeout = nil)
          @executor.shutdown
          clean_shutdown = @executor.wait_for_termination(timeout)
          if clean_shutdown
            @logger.info 'Clean shutdown complete. All executing jobs finished.'
          else
            @logger.info "Timeout (#{timeout}) exceeded. Some jobs may not have"\
              " finished cleanly. Unfinished jobs will not be removed from"\
              " the queue and can be re-run once their visibility timeout"\
              " passes."
          end
        end
      end
    end
  end
end
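The Executor's backpressure comes entirely from concurrent-ruby: with a bounded `max_queue` and `fallback_policy: :caller_runs`, an overflowing `post` runs the task on the calling (polling) thread, which naturally throttles how fast new SQS messages are fetched. A minimal standalone sketch of that behavior, using only concurrent-ruby and the same pool options as above (no SQS involved):

require 'concurrent'

# Tiny pool with a bounded queue; once the queue is full, :caller_runs makes
# the producer execute the overflow task itself instead of raising or dropping.
pool = Concurrent::ThreadPoolExecutor.new(
  min_threads: 0,
  max_threads: 2,
  max_queue: 1,
  fallback_policy: :caller_runs
)

10.times do |i|
  pool.post { sleep(0.1); puts "task #{i} ran on thread #{Thread.current.object_id}" }
end

pool.shutdown
pool.wait_for_termination # analogous to Executor#shutdown(timeout)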
data/lib/aws/rails/sqs_active_job/job_runner.rb
@@ -0,0 +1,22 @@
# frozen_string_literal: true

module Aws
  module Rails
    module SqsActiveJob

      class JobRunner
        attr_reader :id, :class_name

        def initialize(message)
          @job_data = Aws::Json.load(message.data.body)
          @class_name = @job_data['job_class'].constantize
          @id = @job_data['job_id']
        end

        def run
          ActiveJob::Base.execute @job_data
        end
      end
    end
  end
end
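JobRunner assumes the message body is ActiveJob's standard serialized payload (the hash produced when a job is enqueued), so `job_class` and `job_id` are always present and `ActiveJob::Base.execute` can re-hydrate and perform the job. A hedged sketch of that contract, with a Struct standing in for the SQS message object (JobRunner only touches `message.data.body`); the job class and id are illustrative:

require 'json'

# Roughly what the adapter serializes onto the queue; other ActiveJob keys
# (queue_name, arguments, executions, ...) ride along untouched.
body = {
  'job_class' => 'HardWorkerJob',
  'job_id'    => 'example-job-id',
  'arguments' => [123]
}.to_json

FakeBody    = Struct.new(:body)
FakeMessage = Struct.new(:data)

runner = Aws::Rails::SqsActiveJob::JobRunner.new(FakeMessage.new(FakeBody.new(body)))
runner.id  # => "example-job-id"
runner.run # invokes ActiveJob::Base.execute with the parsed hash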
data/lib/aws/rails/sqs_active_job/lambda_handler.rb
@@ -0,0 +1,66 @@
# frozen_string_literal: true

require 'aws-sdk-sqs'

module Aws
  module Rails
    module SqsActiveJob

      # A lambda event handler to run jobs from an SQS queue trigger.
      # Trigger the lambda from your SQS queue and configure the entrypoint to:
      # +config/environment.Aws::Rails::SqsActiveJob.lambda_job_handler+
      # This will load your Rails environment and then use this method as the handler.
      def self.lambda_job_handler(event:, context:)
        return 'no records to process' unless event['Records']

        event['Records'].each do |record|
          sqs_msg = to_sqs_msg(record)
          job = Aws::Rails::SqsActiveJob::JobRunner.new(sqs_msg)
          puts("Running job: #{job.id}[#{job.class_name}]")
          job.run
          sqs_msg.delete
        end
        "Processed #{event['Records'].length} jobs."
      end

      private

      def self.to_sqs_msg(record)
        msg = Aws::SQS::Types::Message.new(
          body: record['body'],
          md5_of_body: record['md5OfBody'],
          message_attributes: self.to_message_attributes(record),
          message_id: record['messageId'],
          receipt_handle: record['receiptHandle']
        )
        Aws::SQS::Message.new(
          queue_url: to_queue_url(record),
          receipt_handle: msg.receipt_handle,
          data: msg,
          client: Aws::Rails::SqsActiveJob.config.client
        )
      end

      def self.to_message_attributes(record)
        record['messageAttributes'].each_with_object({}) do |(key, value), acc|
          acc[key] = {
            string_value: value['stringValue'],
            binary_value: value['binaryValue'],
            string_list_values: value['stringListValues'],
            binary_list_values: value['binaryListValues'],
            data_type: value['dataType']
          }
        end
      end

      def self.to_queue_url(record)
        source_arn = record['eventSourceARN']
        raise ArgumentError, "Invalid queue arn: #{source_arn}" unless Aws::ARNParser.arn?(source_arn)

        arn = Aws::ARNParser.parse(source_arn)
        sfx = Aws::Partitions::EndpointProvider.dns_suffix_for(arn.region)
        "https://sqs.#{arn.region}.#{sfx}/#{arn.account_id}/#{arn.resource}"
      end
    end
  end
end
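The record keys read above (`body`, `md5OfBody`, `messageAttributes`, `messageId`, `receiptHandle`, `eventSourceARN`) match the shape of a standard SQS event source mapping, so the handler can be exercised with a hand-built event. A sketch with illustrative values; note that `sqs_msg.delete` at the end of the loop issues a real DeleteMessage call through the configured client:

event = {
  'Records' => [
    {
      'messageId'         => '11d6ee51-4cc7-4302-9e22-7cd8afdaadf5',
      'receiptHandle'     => 'AQEB-example-receipt-handle',
      'body'              => { 'job_class' => 'HardWorkerJob', 'job_id' => 'example-job-id' }.to_json,
      'md5OfBody'         => 'ignored-by-the-handler',
      'messageAttributes' => {},
      'eventSourceARN'    => 'arn:aws:sqs:us-east-1:123456789012:my-default-queue'
    }
  ]
}

# Lambda calls this through the documented entrypoint:
#   config/environment.Aws::Rails::SqsActiveJob.lambda_job_handler
Aws::Rails::SqsActiveJob.lambda_job_handler(event: event, context: nil)
# => "Processed 1 jobs."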
data/lib/aws/rails/sqs_active_job/poller.rb
@@ -0,0 +1,136 @@
# frozen_string_literal: true

require 'aws-sdk-sqs'
require 'optparse'
require 'concurrent'

module Aws
  module Rails
    module SqsActiveJob

      class Interrupt < Exception; end

      # CLI runner for polling for SQS ActiveJobs
      # Use `aws_sqs_active_job --help` for detailed usage
      class Poller

        DEFAULT_OPTS = {
          threads: 2 * Concurrent.processor_count,
          max_messages: 10,
          visibility_timeout: 60,
          shutdown_timeout: 15,
          backpressure: 10
        }

        def initialize(args = ARGV)
          @options = parse_args(args)
          # set_environment must be run before we boot_rails
          set_environment
        end

        def set_environment
          @environment = @options[:environment] || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
        end

        def run
          boot_rails

          # cannot load config (from file or initializers) until after
          # rails has been booted.
          @options = DEFAULT_OPTS
                     .merge(Aws::Rails::SqsActiveJob.config.to_h)
                     .merge(@options.to_h)
          validate_config
          # ensure we have a logger configured
          @logger = @options[:logger] || ActiveSupport::Logger.new(STDOUT)
          @logger.info("Starting Poller with options=#{@options}")

          Signal.trap('INT') { raise Interrupt }
          Signal.trap('TERM') { raise Interrupt }
          @executor = Executor.new(max_threads: @options[:threads], logger: @logger, max_queue: @options[:backpressure])

          poll
        rescue Interrupt
          @logger.info 'Process Interrupted or killed - attempting to shutdown cleanly.'
          shutdown
          exit
        end

        private

        def shutdown
          @executor.shutdown(@options[:shutdown_timeout])
        end

        def poll
          queue_url = Aws::Rails::SqsActiveJob.config.queue_url_for(@options[:queue])
          @logger.info "Polling on: #{@options[:queue]} => #{queue_url}"
          client = Aws::Rails::SqsActiveJob.config.client
          @poller = Aws::SQS::QueuePoller.new(queue_url, client: client)
          poller_options = {
            skip_delete: true,
            max_number_of_messages: @options[:max_messages],
            visibility_timeout: @options[:visibility_timeout]
          }
          # Limit max_number_of_messages for FIFO queues to 1.
          # This ensures jobs with the same message_group_id are processed
          # in order. Jobs with different message_group_id will be processed
          # in parallel and may be out of order.
          if Aws::Rails::SqsActiveJob.fifo?(queue_url)
            poller_options[:max_number_of_messages] = 1
          end

          single_message = poller_options[:max_number_of_messages] == 1

          @poller.poll(poller_options) do |msgs|
            msgs = [msgs] if single_message
            @logger.info "Processing batch of #{msgs.length} messages"
            msgs.each do |msg|
              @executor.execute(Aws::SQS::Message.new(
                queue_url: queue_url,
                receipt_handle: msg.receipt_handle,
                data: msg,
                client: client
              ))
            end
          end
        end

        def boot_rails
          ENV['RACK_ENV'] = ENV['RAILS_ENV'] = @environment
          require "rails"
          require File.expand_path("config/environment.rb")
        end

        def parse_args(argv)
          out = {}
          parser = ::OptionParser.new { |opts|
            opts.on("-q", "--queue STRING", "[Required] Queue to poll") { |a| out[:queue] = a }
            opts.on("-e", "--environment STRING", "Rails environment (defaults to development). You can also use the APP_ENV or RAILS_ENV environment variables to specify the environment.") { |a| out[:environment] = a }
            opts.on("-t", "--threads INTEGER", Integer, "The maximum number of worker threads to create. Defaults to 2x the number of processors available on this system.") { |a| out[:threads] = a }
            opts.on("-b", "--backpressure INTEGER", Integer, "The maximum number of messages to have waiting in the Executor queue. This should be a low, but non-zero number. Messages in the Executor queue cannot be picked up by other processes and will slow down shutdown.") { |a| out[:backpressure] = a }
            opts.on("-m", "--max_messages INTEGER", Integer, "Max number of messages to receive in a batch from SQS.") { |a| out[:max_messages] = a }
            opts.on("-v", "--visibility_timeout INTEGER", Integer, "The visibility timeout is the number of seconds that a message will not be processable by any other consumers. You should set this value to be longer than your expected job runtime to prevent other processes from picking up a running job. See the SQS Visibility Timeout Documentation at https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html.") { |a| out[:visibility_timeout] = a }
            opts.on("-s", "--shutdown_timeout INTEGER", Integer, "The amount of time to wait for a clean shutdown. Jobs that are unable to complete in this time will not be deleted from the SQS queue and will be retryable after the visibility timeout.") { |a| out[:shutdown_timeout] = a }
          }

          parser.banner = "aws_sqs_active_job [options]"
          parser.on_tail "-h", "--help", "Show help" do
            puts parser
            exit 1
          end

          parser.parse(argv)
          out
        end

        def validate_config
          raise ArgumentError, 'You must specify the name of the queue to process jobs from' unless @options[:queue]
        end
      end
    end
  end
end
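The `aws_sqs_active_job` executable listed at the top of this diff (`data/bin/aws_sqs_active_job`, 5 lines) is not shown here, but given `parse_args` above it presumably just hands ARGV to this Poller. A hedged sketch of both sides; the bin contents are assumed, not the shipped file:

# Typical invocation from the app root, using the flags registered in parse_args:
#   bundle exec aws_sqs_active_job --queue default --threads 8 \
#     --visibility_timeout 120 --shutdown_timeout 30
require 'aws/rails/sqs_active_job/poller'

Aws::Rails::SqsActiveJob::Poller.new(ARGV).run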
data/lib/generators/aws_record/base.rb
@@ -0,0 +1,217 @@
require 'rails/generators'
require_relative 'generated_attribute'
require_relative 'secondary_index'

module AwsRecord
  module Generators
    class Base < Rails::Generators::NamedBase
      argument :attributes, type: :array, default: [], banner: "field[:type][:opts]...", desc: "Describes the fields in the model"
      check_class_collision

      class_option :disable_mutation_tracking, type: :boolean, desc: "Disables dirty tracking"
      class_option :timestamps, type: :boolean, desc: "Adds created, updated timestamps to the model"
      class_option :table_config, type: :hash, default: {}, banner: "primary:R-W [SecondaryIndex1:R-W]...", desc: "Declares the r/w units for the model as well as any secondary indexes", :required => true
      class_option :gsi, type: :array, default: [], banner: "name:hkey{field_name}[,rkey{field_name},proj_type{ALL|KEYS_ONLY|INCLUDE}]...", desc: "Allows for the declaration of secondary indexes"
      class_option :table_name, type: :string, banner: "model_table_name"
      class_option :password_digest, type: :boolean, desc: "Whether to add a password_digest field to the model"

      class_option :required, type: :string, banner: "field1...", desc: "A list of attributes that are required for an instance of the model"
      class_option :length_validations, type: :hash, default: {}, banner: "field1:MIN-MAX...", desc: "Validations on the length of attributes in a model"

      attr_accessor :primary_read_units, :primary_write_units, :gsi_rw_units, :gsis, :required_attrs, :length_validations

      private

      def initialize(args, *options)
        options[0] << "--skip-table-config" if options[1][:behavior] == :revoke
        @parse_errors = []

        super
        ensure_unique_fields
        ensure_hkey
        parse_gsis!
        parse_table_config!
        parse_validations!

        if !@parse_errors.empty?
          STDERR.puts "The following errors were encountered while trying to parse the given attributes"
          STDERR.puts
          STDERR.puts @parse_errors
          STDERR.puts

          abort("Please fix the errors before proceeding.")
        end
      end

      def parse_attributes!
        self.attributes = (attributes || []).map do |attr|
          begin
            GeneratedAttribute.parse(attr)
          rescue ArgumentError => e
            @parse_errors << e
            next
          end
        end
        self.attributes = self.attributes.compact

        if options['password_digest']
          self.attributes << GeneratedAttribute.new("password_digest", :string_attr, :digest => true)
        end

        if options['timestamps']
          self.attributes << GeneratedAttribute.parse("created:datetime:default_value{Time.now}")
          self.attributes << GeneratedAttribute.parse("updated:datetime:default_value{Time.now}")
        end
      end

      def ensure_unique_fields
        used_names = Set.new
        duplicate_fields = []

        self.attributes.each do |attr|
          if used_names.include? attr.name
            duplicate_fields << [:attribute, attr.name]
          end
          used_names.add attr.name

          if attr.options.key? :database_attribute_name
            raw_db_attr_name = attr.options[:database_attribute_name].delete('"') # db attribute names are wrapped with " to make template generation easier

            if used_names.include? raw_db_attr_name
              duplicate_fields << [:database_attribute_name, raw_db_attr_name]
            end

            used_names.add raw_db_attr_name
          end
        end

        if !duplicate_fields.empty?
          duplicate_fields.each do |invalid_attr|
            @parse_errors << ArgumentError.new("Found duplicated field name: #{invalid_attr[1]}, in attribute #{invalid_attr[0]}")
          end
        end
      end

      def ensure_hkey
        uuid_member = nil
        hkey_member = nil
        rkey_member = nil

        self.attributes.each do |attr|
          if attr.options.key? :hash_key
            if hkey_member
              @parse_errors << ArgumentError.new("Redefinition of hash_key attr: #{attr.name}, original declaration of hash_key on: #{hkey_member.name}")
              next
            end

            hkey_member = attr
          elsif attr.options.key? :range_key
            if rkey_member
              @parse_errors << ArgumentError.new("Redefinition of range_key attr: #{attr.name}, original declaration of range_key on: #{rkey_member.name}")
              next
            end

            rkey_member = attr
          end

          if attr.name.include? "uuid"
            uuid_member = attr
          end
        end

        if !hkey_member
          if uuid_member
            uuid_member.options[:hash_key] = true
          else
            self.attributes.unshift GeneratedAttribute.parse("uuid:hkey")
          end
        end
      end

      def mutation_tracking_disabled?
        options['disable_mutation_tracking']
      end

      def has_validations?
        !@required_attrs.empty? || !@length_validations.empty?
      end

      def parse_table_config!
        return unless options['table_config']

        @primary_read_units, @primary_write_units = parse_rw_units("primary")

        @gsi_rw_units = @gsis.map { |idx|
          [idx.name, parse_rw_units(idx.name)]
        }.to_h

        options['table_config'].each do |config, rw_units|
          if config == "primary"
            next
          else
            gsi = @gsis.select { |idx| idx.name == config }

            if gsi.empty?
              @parse_errors << ArgumentError.new("Could not find a gsi declaration for #{config}")
            end
          end
        end
      end

      def parse_rw_units(name)
        if !options['table_config'].key? name
          @parse_errors << ArgumentError.new("Please provide a table_config definition for #{name}")
        else
          rw_units = options['table_config'][name]
          return rw_units.gsub(/[,.-]/, ':').split(':').reject { |s| s.empty? }
        end
      end

      def parse_gsis!
        @gsis = (options['gsi'] || []).map do |raw_idx|
          begin
            idx = SecondaryIndex.parse(raw_idx)

            attributes = self.attributes.select { |attr| attr.name == idx.hash_key }
            if attributes.empty?
              @parse_errors << ArgumentError.new("Could not find attribute #{idx.hash_key} for gsi #{idx.name} hkey")
              next
            end

            if idx.range_key
              attributes = self.attributes.select { |attr| attr.name == idx.range_key }
              if attributes.empty?
                @parse_errors << ArgumentError.new("Could not find attribute #{idx.range_key} for gsi #{idx.name} rkey")
                next
              end
            end

            idx
          rescue ArgumentError => e
            @parse_errors << e
            next
          end
        end

        @gsis = @gsis.compact
      end

      def parse_validations!
        @required_attrs = options['required'] ? options['required'].split(',') : []
        @required_attrs.each do |val_attr|
          @parse_errors << ArgumentError.new("No such field #{val_attr} in required validations") if !self.attributes.any? { |attr| attr.name == val_attr }
        end

        @length_validations = options['length_validations'].map do |val_attr, bounds|
          @parse_errors << ArgumentError.new("No such field #{val_attr} in length validations") if !self.attributes.any? { |attr| attr.name == val_attr }

          bounds = bounds.gsub(/[,.-]/, ':').split(':').reject { |s| s.empty? }
          [val_attr, "#{bounds[0]}..#{bounds[1]}"]
        end
        @length_validations = @length_validations.to_h
      end
    end
  end
end
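Base feeds the `aws_record:model` generator (see `model_generator.rb` and `templates/model.rb` in the file list), turning the `field[:type][:opts]...` arguments and the `--table-config primary:R-W` option into an Aws::Record model plus table config. As a rough illustration of the output side only (a sketch; the shipped templates may differ in detail), a generated model built on the aws-record gem looks along these lines:

# Produced by something like:
#   rails generate aws_record:model Comment <field[:type][:opts]...> --table-config primary:5-2
require 'aws-record'

class Comment
  include Aws::Record

  # hkey/rkey field options map to hash_key/range_key here; ensure_hkey above
  # auto-adds a "uuid:hkey" attribute when no hash key is declared.
  string_attr :uuid, hash_key: true
  string_attr :content
  datetime_attr :created, default_value: Time.now # added by --timestamps
end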