nypl_ruby_util 0.0.7 → 0.0.10
- checksums.yaml +4 -4
- data/lib/deploy_helper.rb +136 -136
- data/lib/kinesis_client.rb +51 -37
- data/lib/kms_client.rb +6 -6
- data/lib/platform_api_client.rb +10 -9
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5c6574f7bf8717173b11a9635daacec8d49be69a7fc35e5584d713f8ef37c673
+  data.tar.gz: 5035a2920d8c8f012c711050604f1ecd12832b4e8df3661eb0a16f707735c9d6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9349fa7cc85b2429daef2ecd358392f58425a107516f1392d8fbbdb8861e460ec4dd48fab3d4163f58e351dc3001a1981efcaa86b76e5758442510e06ccb7ac3
+  data.tar.gz: 7a1e9057927bad0ff9425617f534abf17e63e2cb25cabb651bc2dd10fe234c5ca9c0d80c8d4dc54c51539a8cede92a82ce289f35e10dd64031d0c9c7a34d4332
data/lib/deploy_helper.rb
CHANGED
@@ -1,152 +1,152 @@
-require
-require
-require
+require "aws-sdk-lambda"
+require "aws-sdk-cloudwatchevents"
+require "yaml"
 
 # Utility class for running rake methods
 class DeployHelper
+  attr_reader(
+    :travis_branch,
+    :aws_access_key_id,
+    :aws_secret_access_key,
+    :aws_configuration,
+    :region,
+    :lambda_client,
+    :yaml,
+    :lambda_config,
+    :function_name,
+    :event
+  )
+
+  def initialize
+    @travis_branch = ENV["TRAVIS_BRANCH"].upcase
+    @travis_branch = ["MAIN", "MASTER"].include?(@travis_branch) ? "PRODUCTION" : @travis_branch
+    @aws_access_key_id = ENV["AWS_ACCESS_KEY_ID_#{travis_branch}"]
+    @aws_secret_access_key = ENV["AWS_SECRET_ACCESS_KEY_#{travis_branch}"]
+    @yaml = YAML.safe_load(File.read(".travis.yml"))
+    @lambda_config = yaml["deploy"].find { |conf| name_matches_branch?(conf["function_name"], travis_branch) }
+    @region = @lambda_config["region"]
+    @function_name = @lambda_config["function_name"]
+    @aws_configuration = {
+      region: region,
+      access_key_id: aws_access_key_id,
+      secret_access_key: aws_secret_access_key
+    }
+    p("using configuration: ", aws_configuration)
+    p("lambda config: ", lambda_config)
+    @lambda_client = Aws::Lambda::Client.new(aws_configuration) if configured?
+  end
+
+  def configured?
+    aws_access_key_id && aws_secret_access_key && region
+  end
+
+  def name_matches_branch?(name, branch)
+    downcase_name = name.downcase
+    downcase_branch = branch.downcase
+    variants = [
+      ["dev", "development"],
+      ["qa"],
+      ["main", "master", "production", "prod"],
+    ]
+    variants.any? do |group|
+      group.any? { |variant| downcase_name.include?(variant) }\
+        && group.any? { |variant| downcase_branch.include?(variant) }
     end
+  end
 
+  def update_lambda_configuration
+    unless configured? && lambda_config
+      p("insufficient configuration")
+      return nil
     end
 
+    updated_lambda_configuration = {
+      function_name: function_name,
+      vpc_config: lambda_config["vpc_config"],
+      environment: lambda_config["environment"],
+      layers: lambda_config["layers"]
+    }
+    updated_lambda_configuration[:function_name] = function_name
+    p("updating_function_configuration with: ", updated_lambda_configuration)
+    update_configuration_resp = lambda_client.update_function_configuration(updated_lambda_configuration)
+    p("update_configuration_resp: ", update_configuration_resp)
+  end
+
+  def update_event
+    unless lambda_config["event"]
+      p("no event config")
+      return nil
     end
 
-    updated_lambda_configuration = {
-      function_name: function_name,
-      vpc_config: lambda_config['vpc_config'],
-      environment: lambda_config['environment'],
-      layers: lambda_config['layers']
-    }
-    updated_lambda_configuration[:function_name] = function_name
-    p 'updating_function_configuration with: ', updated_lambda_configuration
-    update_configuration_resp = lambda_client.update_function_configuration(updated_lambda_configuration)
-    p 'update_configuration_resp: ', update_configuration_resp
+    @event = lambda_config["event"]
+    if event["event_source_arn"]
+      add_event_source
+    elsif event["schedule_expression"]
+      add_cron
     end
+  end
 
-  end
-
-    @event = lambda_config['event']
-    if event['event_source_arn']
-      add_event_source
-    elsif event['schedule_expression']
-      add_cron
-    end
-  end
+  def add_event_source
+    existing_events = lambda_client.list_event_source_mappings({
+      function_name: function_name
+    }).event_source_mappings
 
+    existing_events.each do |existing_event|
+      p("deleting event with uuid: ", existing_event.uuid, "and arn: ", existing_event.event_source_arn)
+      lambda_client.delete_event_source_mapping({ uuid: existing_event.uuid })
+    end
+    event_to_create = event.map { |k, v| [k.to_sym, v] }.to_h
+    event_to_create[:function_name] = function_name
+    p("creating event: ", event_to_create)
+    create_resp = lambda_client.create_event_source_mapping(event_to_create)
+    p("created: ", create_resp)
+  end
+
+  def add_cron
+    ## create the event
+    events_client = Aws::CloudWatchEvents::Client.new(aws_configuration)
+    schedule_expression = event["schedule_expression"]
+    rule_name = "#{function_name}-rule"
+    p("rule_name: ", rule_name, "schedule_expression: ", schedule_expression)
+    events_client.put_rule(name: rule_name, schedule_expression: schedule_expression)
+
+    ## next we have to connect the event to the lambda
+    ## the first step is to get the lambda
+
+    return "missing function_name" unless function_name
+
+    p("getting lambda with function name", function_name)
+    lambda_resp = lambda_client.get_function(function_name: function_name).configuration
+    arn = lambda_resp.function_arn
+
+    ## next figure out if the lambda already has granted cloudwatch
+    ## permission to invoke
+    begin
+      policy_resp = lambda_client.get_policy(function_name: function_name)
+      if policy_resp.policy.include?("#{function_name}-permission")
+        p("lambda already has permission")
+      else
+        add_policy = true
+      end
+    rescue Aws::Lambda::Errors::ResourceNotFoundException
+      add_policy = true
+      p("no policy")
     end
 
-    ## the first step is to get the lambda
-
-    return 'missing function_name' unless function_name
-
-    p 'getting lambda with function name', function_name
-    lambda_resp = lambda_client.get_function(function_name: function_name).configuration
-    arn = lambda_resp.function_arn
-
-    ## next figure out if the lambda already has granted cloudwatch
-    ## permission to invoke
-    begin
-      policy_resp = lambda_client.get_policy(function_name: function_name)
-      unless policy_resp.policy.include?("#{function_name}-permission")
-        add_policy = true
-      else
-        p 'lambda already has permission'
-      end
-    rescue Aws::Lambda::Errors::ResourceNotFoundException
-      add_policy = true
-      p 'no policy'
-    end
-
-    ## if not, add permission to invoke
-    if add_policy
-      permission = lambda_client.add_permission({
-        function_name: function_name,
-        principal: 'events.amazonaws.com',
-        statement_id: "#{function_name}-permission",
-        action: 'lambda:InvokeFunction'
-      })
-      p 'permission: ', permission
-    end
-
-    ## finally we can tell the event to invoke the lambda
-    target_id = "#{function_name}-lambda"
-    p 'putting targets ', 'rule: ', rule_name, 'target_id: ', target_id, 'arn: ', arn
-    events_client.put_targets(rule: rule_name, targets: [{ id: target_id, arn: arn }])
+    ## if not, add permission to invoke
+    if add_policy
+      permission = lambda_client.add_permission({
+        function_name: function_name,
+        principal: "events.amazonaws.com",
+        statement_id: "#{function_name}-permission",
+        action: "lambda:InvokeFunction"
+      })
+      p("permission: ", permission)
     end
+
+    ## finally we can tell the event to invoke the lambda
+    target_id = "#{function_name}-lambda"
+    p("putting targets ", "rule: ", rule_name, "target_id: ", target_id, "arn: ", arn)
+    events_client.put_targets(rule: rule_name, targets: [{ id: target_id, arn: arn }])
+  end
 end
data/lib/kinesis_client.rb
CHANGED
@@ -1,7 +1,7 @@
-require
-require
-require_relative
-require_relative
+require "securerandom"
+require "aws-sdk-kinesis"
+require_relative "nypl_avro"
+require_relative "errors"
 # Model representing the result message posted to Kinesis stream about everything that has gone on here -- good, bad, or otherwise.
 
 class KinesisClient
@@ -12,17 +12,25 @@ class KinesisClient
     @stream_name = @config[:stream_name]
     @avro = nil
     @batch_size = @config[:batch_size] || 1
-    @
-    @
-    @
-    @
+    @client_options = set_config(config)
+    @batch_count = 0
+    @records = []
+    @automatically_push = !(@config[:automatically_push] == false)
+    @client = Aws::Kinesis::Client.new(@client_options)
 
-    if config[:schema_string]
-      @avro = NYPLAvro.by_name(config[:schema_string])
-    end
+    @avro = NYPLAvro.by_name(config[:schema_string]) if config[:schema_string]
 
-    @shovel_method = @batch_size > 1 ? :
+    @shovel_method = @batch_size > 1 ? :push_to_records : :push_record
+  end
 
+  def set_config(config)
+    if config[:profile]
+      { profile: config[:profile] }
+    elsif config[:custom_aws_config]
+      config[:custom_aws_config]
+    else
+      {}
+    end
   end
 
   def convert_to_record(json_message)
@@ -37,18 +45,18 @@ class KinesisClient
       data: message,
       partition_key: partition_key
     }
-
   end
 
   def <<(json_message)
     send(@shovel_method, json_message)
   end
 
+  #This method is broken
   def push_record(json_message)
-    record = convert_to_record
+    record = convert_to_record(json_message)
     record[:stream_name] = @stream_name
 
-    @client.put_record
+    @client.put_record(record)
 
     return_hash = {}
 
@@ -57,21 +65,19 @@ class KinesisClient
       return_hash["message"] = json_message, resp
       $logger.info("Message sent to #{config[:stream_name]} #{json_message}, #{resp}") if $logger
     else
-      $logger.error("message" => "FAILED to send message to
-      raise
+      $logger.error("message" => "FAILED to send message to #{@stream_name} #{json_message}, #{resp}.") if $logger
+      raise(NYPLError.new(json_message, resp))
     end
     return_hash
   end
 
-  def
+  def push_to_records(json_message)
     begin
-      @
+      @records << convert_to_record(json_message)
     rescue AvroError => e
      $logger.error("message" => "Avro encoding error #{e.message} for #{json_message}")
     end
-    if @automatically_push && @
-      push_records
-    end
+    push_records if @automatically_push && @records.length >= @batch_size
   end
 
   def push_batch(batch)
@@ -80,23 +86,31 @@ class KinesisClient
       stream_name: @stream_name
     })
 
-    return {
-      "code": "200",
-      "message": return_message.to_json
-    }
+    if resp.failed_record_count > 0
+      failure_message = {
+        failures: resp.failed_record_count,
+        failures_data: filter_failures(resp)
+      }
+      $logger.warn("Batch sent to #{config[:stream_name]} with failures: #{failure_message}")
+    else
+      $logger.info("Batch sent to #{config[:stream_name]} successfully")
+    end
   end
 
   def push_records
-    @
+    if @records.length > 0
+      @records.each_slice(@batch_size) do |slice|
+        push_batch(slice)
+        @batch_count += 1
+      end
+      @records = []
+      @batch_count = 0
+    end
+  end
+
+  def filter_failures(resp)
+    resp.records.filter_map.with_index do |record, i|
+      avro.decode(@records[i + @batch_size * @batch_count]) if record.responds_to?(:error_message)
+    end
   end
 end
data/lib/kms_client.rb
CHANGED
@@ -1,5 +1,5 @@
-require
-require
+require "aws-sdk-kms"
+require "base64"
 
 class KmsClient
   @@kms = nil
@@ -11,15 +11,15 @@ class KmsClient
 
   def decrypt(cipher)
     # Assume value is base64 encoded:
-    decoded = Base64.decode64
-    decrypted = @kms.decrypt
+    decoded = Base64.decode64(cipher)
+    decrypted = @kms.decrypt(ciphertext_blob: decoded)
     decrypted[:plaintext]
   end
 
   def self.aws_kms_client(options)
     params = {
-      region:
-      stub_responses: ENV[
+      region: "us-east-1",
+      stub_responses: ENV["APP_ENV"] == "test"
     }.merge(options)
     @@kms = Aws::KMS::Client.new(params) if @@kms.nil?
     @@kms
data/lib/platform_api_client.rb
CHANGED
@@ -21,7 +21,7 @@ class PlatformApiClient
     @error_options = default_errors.merge(options[:errors] || {})
   end
 
-  def get (path)
+  def get (path, transaction_data = {})
 
     authenticate! if authenticated
 
@@ -38,7 +38,7 @@ class PlatformApiClient
 
     $logger.debug "Got platform api response", { code: response.code, body: response.body }
 
-    parse_json_response response, path
+    parse_json_response response, path, transaction_data
 
   rescue Exception => e
     raise StandardError.new(e), "Failed to retrieve #{path} #{e.message}"
@@ -47,12 +47,12 @@ class PlatformApiClient
 
   private
 
-  def parse_json_response (response, path)
+  def parse_json_response (response, path, transaction_data = {})
     code = response.code.to_i
     if code < 400
       JSON.parse(response.body)
     elsif error_options[code]
-      instance_exec(response, path, &error_options[code])
+      instance_exec(response, path, transaction_data, &error_options[code])
     else
       raise "Error interpretting response for path #{path}: (#{response.code}): #{response.body}"
       {}
@@ -92,12 +92,13 @@ class PlatformApiClient
 
   def default_errors
     {
-      401 => lambda do |response, path|
-
+      401 => lambda do |response, path, transaction_data = {}|
+        transaction_data[:try_count] ||= 0
+        if transaction_data[:try_count] < 1
         # Likely an expired access-token; Wipe it for next run
-
-        access_token = nil
-        get(path)
+          transaction_data[:try_count] += 1
+          self.access_token = nil
+          get(path, transaction_data)
       else
         raise "Error interpretting response for path #{path}: (#{response.code}): #{response.body}"
       end
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: nypl_ruby_util
 version: !ruby/object:Gem::Version
-  version: 0.0.7
+  version: 0.0.10
 platform: ruby
 authors:
 - Daniel Appel
@@ -141,7 +141,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.2
+rubygems_version: 3.1.2
 signing_key:
 specification_version: 4
 summary: A repository of common utilities for NYPL Ruby application