lambda_wrap 0.20.0 → 0.21.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/lambda_wrap/dynamo_db_manager.rb +121 -121
- data/lib/lambda_wrap/lambda_manager.rb +194 -188
- data/lib/lambda_wrap/zip_file_generator.rb +67 -67
- data/lib/lambda_wrap.rb +5 -5
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 538942578f2271e578436d168e5cbc84e7691bb0
+  data.tar.gz: 6f25cf394a08908a0bbb39bf924a75c9dc36d216
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 06931dad00016c68f3b5064b6a368f9833dd4f818e5e3a09eb2f5467353773c6f37cd2194ab00e76647bfc7ec3d20c361801bd1962eac4da7cb7ef31b0203d88
+  data.tar.gz: 6128b14ac5a3cc406b0e604b9a509f00bc7b536c3a350f829e7f7fb85ca3d4c7384bbc4758ddaeaf49265d8ad001210c275d363f76114dc19e297a506993fca2
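For reference, the SHA1/SHA512 values above can be recomputed locally with Ruby's standard Digest library. This is a hedged sketch, not part of the package: it assumes metadata.gz and data.tar.gz have already been extracted from the downloaded .gem archive into the working directory (the paths are hypothetical).

require 'digest'

# Recompute the digests listed in checksums.yaml for the two archives
# extracted from the .gem file (hypothetical local paths).
%w(metadata.gz data.tar.gz).each do |file|
  puts "#{file} SHA1:   #{Digest::SHA1.file(file).hexdigest}"
  puts "#{file} SHA512: #{Digest::SHA512.file(file).hexdigest}"
end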
data/lib/lambda_wrap/dynamo_db_manager.rb
CHANGED
@@ -1,121 +1,121 @@
(This hunk removes and re-adds all 121 lines; as rendered, the removed and re-added content is identical, so it is listed once below.)

require 'aws-sdk'

module LambdaWrap
  # The DynamoDBManager simplifies setting up and destroying a DynamoDB database.
  #
  # Note: In case an environment specific DynamoDB tablename such as +<baseTableName>-production+ should be used, then
  # it has to be injected directly to the methods since not all environments necessarily need separated databases.
  class DynamoDbManager
    ##
    # The constructor does some basic setup
    # * Validating basic AWS configuration
    # * Creating the underlying client to interact with the AWS SDK.
    def initialize
      # AWS dynamodb client
      @client = Aws::DynamoDB::Client.new
    end

    def set_table_capacity(table_name, read_capacity, write_capacity)
      puts "Updating new read/write capacity for table #{table_name}.
        Read #{table_details.provisioned_throughput.read_capacity_units} ==> #{read_capacity}.
        Write #{table_details.provisioned_throughput.write_capacity_units} ==> #{write_capacity}."
      @client.update_table(
        table_name: table_name,
        provisioned_throughput: { read_capacity_units: read_capacity, write_capacity_units: write_capacity }
      )
    end

    ##
    # Publishes the database and awaits until it is fully available. If the table already exists,
    # it only adjusts the read and write
    # capacities upwards (it doesn't downgrade them to avoid a production environment being impacted with
    # a default setting of an automated script).
    #
    # *Arguments*
    # [table_name] The table name of the dynamoDB to be created.
    # [attribute_definitions] The dynamoDB attribute definitions to be used when the table is created.
    # [key_schema] The dynamoDB key definitions to be used when the table is created.
    # [read_capacity] The read capacity to configure for the dynamoDB table.
    # [write_capacity] The write capacity to configure for the dynamoDB table.
    def publish_database(table_name, attribute_definitions, key_schema, read_capacity, write_capacity)
      has_updates = false

      # figure out whether the table exists
      begin
        table_details = @client.describe_table(table_name: table_name).table
      rescue Aws::DynamoDB::Errors::ResourceNotFoundException
        # skip this exception because we are using it for control flow.
        table_details = nil
      end

      if table_details
        wait_until_table_available(table_name) if table_details.table_status != 'ACTIVE'

        if read_capacity > table_details.provisioned_throughput.read_capacity_units ||
           write_capacity > table_details.provisioned_throughput.write_capacity_units

          set_table_capacity read_capacity, write_capacity
          has_updates = true
        else
          puts "Table #{table_name} already exists and the desired read capacity of #{read_capacity} and " \
            "write capacity of #{write_capacity} has at least been configured. Downgrading capacity units is not " \
            'supported. No changes were applied.'
        end
      else
        puts "Creating table #{table_name}."
        ad = attribute_definitions || [{ attribute_name: 'Id', attribute_type: 'S' }]
        ks = key_schema || [{ attribute_name: 'Id', key_type: 'HASH' }]
        @client.create_table(table_name: table_name, key_schema: ks, attribute_definitions: ad,
                             provisioned_throughput:
                             { read_capacity_units: read_capacity, write_capacity_units: write_capacity })
        has_updates = true
      end

      if has_updates
        wait_until_table_available(table_name)
        puts "DynamoDB table #{table_name} is now fully available."
      end
    end

    ##
    # Deletes a DynamoDB table. It does not wait until the table has been deleted.
    #
    # *Arguments*
    # [table_name] The dynamoDB table name to delete.
    def delete_database(table_name)
      table_details = @client.describe_table(table_name: table_name).table
      wait_until_table_available(table_name) if table_details.table_status != 'ACTIVE'
      @client.delete_table(table_name: table_name)
    rescue Aws::DynamoDB::Errors::ResourceNotFoundException
      puts 'Table did not exist. Nothing to delete.'
    end

    ##
    # Awaits a given status of a table.
    #
    # *Arguments*
    # [table_name] The dynamoDB table name to watch until it reaches an active status.
    def wait_until_table_available(table_name)
      max_attempts = 24
      delay_between_attempts = 5

      # wait until the table has updated to being fully available
      # waiting for ~2min at most; an error will be thrown afterwards
      begin
        @client.wait_until(:table_exists, table_name: table_name) do |w|
          w.max_attempts = max_attempts
          w.delay = delay_between_attempts
          w.before_wait do |attempts, _|
            puts "Waiting until table becomes available. Attempt #{attempts}/#{max_attempts} " \
              "with polling interval #{delay_between_attempts}."
          end
        end
      rescue Aws::Waiters::Errors::TooManyAttemptsError => e
        puts "Table #{table_name} did not become available after #{e.attempts} attempts. " \
          'Try again later or inspect the AWS console.'
      end
    end

    private :wait_until_table_available
  end
end
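A minimal usage sketch for the class above, assuming AWS credentials and region are already configured for the aws-sdk gem; the table name and capacity values are illustrative, not taken from the diff. Note that, as shipped, publish_database invokes set_table_capacity with two arguments although the method takes three (and its log message references a table_details variable it never assigns), so the capacity-increase path appears unlikely to work without adjustment.

require 'lambda_wrap'

# Create the table if it is missing, or raise its throughput if it exists
# with lower capacity (hypothetical table name and capacities).
db = LambdaWrap::DynamoDbManager.new
db.publish_database(
  'my-service-table',                               # table name
  [{ attribute_name: 'Id', attribute_type: 'S' }],  # attribute definitions
  [{ attribute_name: 'Id', key_type: 'HASH' }],     # key schema
  5,                                                # read capacity units
  5                                                 # write capacity units
)

# Tear the table down again (does not wait for deletion to finish).
db.delete_database('my-service-table')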
data/lib/lambda_wrap/lambda_manager.rb
CHANGED
@@ -1,188 +1,194 @@
(Lines 1-79 are identical in both versions. Of the remaining removed 0.20.0 lines, only the following are rendered here, some of them truncated; the rest appear blank in this diff.)

- vpc_configuration = { subnet_ids: vpc_subnet_ids, security_group_ids: vpc_security_group_ids } unless (vpc_subnet_ids.empty? && vpc_security_group_ids.empty?)
- func_config = @client.update_function_configuration(function_name: function_name, role: lambda_role, runtime: runtime,
-                                                      handler: handler, timeout:
-                                                      description: lambda_description,
-                                                      vpc_config: vpc_configuration).data
- puts func_config
- #

(Full content of data/lib/lambda_wrap/lambda_manager.rb at 0.21.0:)

require 'aws-sdk'

module LambdaWrap
  ##
  # The LambdaManager simplifies creating a package, publishing to S3, deploying a new version, & setting permissions.
  #
  # Note: The concept of an environment of the LambdaWrap gem matches an alias of AWS Lambda.
  class LambdaManager
    ##
    # The constructor does some basic setup
    # * Validating basic AWS configuration
    # * Creating the underlying client to interace with the AWS SDK
    def initialize
      # AWS lambda client
      @client = Aws::Lambda::Client.new
    end

    ##
    # Packages a set of files and node modules into a deployable package.
    #
    # *Arguments*
    # [directory] A temporary directory to copy all related files before they are packages into a single zip file.
    # [zipfile] A path where the deployable package, a zip file, should be stored.
    # [input_filenames] A list of file names that contain the source code.
    # [node_modules] A list of node modules that need to be included in the package.
    def package(directory, zipfile, input_filenames, node_modules)
      FileUtils.mkdir_p directory
      FileUtils.mkdir_p File.join(directory, 'node_modules')

      input_filenames.each do |filename|
        FileUtils.copy_file(File.join(filename), File.join(directory, File.basename(filename)))
      end

      node_modules.each do |dir|
        FileUtils.cp_r(File.join('node_modules', dir), File.join(directory, 'node_modules'))
      end

      ZipFileGenerator.new(directory, zipfile).write
    end

    ##
    # Publishes a package to S3 so it can be deployed as a lambda function.
    #
    # *Arguments*
    # [local_lambda_file] The location of the package that needs to be deployed.
    # [bucket] The s3 bucket where the file needs to be uploaded to.
    # [key] The S3 path (key) where the package should be stored.
    def publish_lambda_to_s3(local_lambda_file, bucket, key)
      # get s3 object
      s3 = Aws::S3::Resource.new
      obj = s3.bucket(bucket).object(key)

      # upload
      version_id = nil
      File.open(local_lambda_file, 'rb') do |file|
        version_id = obj.put(body: file).version_id
      end
      raise 'Upload to S3 failed' unless version_id

      puts 'Uploaded object to S3 with version ' + version_id
      version_id
    end

    ##
    # Deploys a package that has been uploaded to S3.
    #
    # *Arguments*
    # [bucket] The S3 bucket where the package can be retrieved from.
    # [key] The S3 path (key) where the package can be retrieved from.
    # [version_id] The version of the file on S3 to retrieve.
    # [function_name] The name of the lambda function.
    # [handler] The handler that should be executed for this lambda function.
    # [lambda_role] The arn of the IAM role that should be used when executing the lambda function.
    # [lambda_description] The description of the lambda function.
    # [vpc_subnet_ids] A list of subnet ids for the lambda's VPC configuration. All subnets must be on the same VPC.
    # [vpc_security_group_ids] A list of security group ids for the lambda's VPC configuration. All of the
    # security_group_ids must be on the same VPC.
    # [runtime] The runtime the code is written for.
    # [timeout] The integer value of seconds until the lambda timesout. Minimum 1, Maximum 300
    # [memory_size] The Memory/ProcessingPower allocated for the Lambda. Minimum 128. Maximum 1536. Only accepts
    # integers in multiples of 64.
    def deploy_lambda(
      bucket, key, version_id, function_name, handler, lambda_role, lambda_description = 'Deployed with LambdaWrap',
      vpc_subnet_ids = [], vpc_security_group_ids = [], runtime = 'nodejs4.3', timeout = 5, memory_size = 128
    )
      # create or update function

      begin
        @client.get_function(function_name: function_name)
        vpc_configuration = { subnet_ids: vpc_subnet_ids, security_group_ids: vpc_security_group_ids } unless (vpc_subnet_ids.empty? && vpc_security_group_ids.empty?)
        func_config = @client.update_function_configuration(function_name: function_name, role: lambda_role, runtime: runtime,
                                                            handler: handler, timeout: timeout, memory_size: memory_size,
                                                            description: lambda_description,
                                                            vpc_config: vpc_configuration).data
        puts func_config

        func_config = @client.update_function_code(function_name: function_name, s3_bucket: bucket, s3_key: key,
                                                   s3_object_version: version_id, publish: true).data

        puts func_config
        func_version = func_config.version
        raise 'Error while publishing existing lambda function ' + function_name unless func_version
      rescue Aws::Lambda::Errors::ResourceNotFoundException
        # check if vpc_subnet_ids and vpc_security_group_ids are empty or not and set the vpc_config accordingly.
        vpc_Configuration = nil
        vpc_Configuration = { subnet_ids: vpc_subnet_ids, security_group_ids: vpc_security_group_ids } unless (vpc_subnet_ids.empty? && vpc_security_group_ids.empty?)

        # if we cannot find it, we have to create it instead of updating it
        func_config = @client.create_function(
          function_name: function_name, runtime: runtime, role: lambda_role,
          handler: handler, code: { s3_bucket: bucket, s3_key: key }, timeout: timeout, memory_size: memory_size, publish: true,
          description: lambda_description,
          vpc_config: vpc_Configuration
        ).data
        puts func_config
        func_version = func_config.version
        raise "Error while publishing new lambda function #{function_name}" unless func_version
      end

      add_api_gateway_permissions(function_name, nil)

      # return the version of the new code, not the config.
      func_version
    end

    ##
    # Creates an alias for a given lambda function version.
    #
    # *Arguments*
    # [function_name] The lambda function name for which the alias should be created.
    # [func_version] The lambda function versino to which the alias should point.
    # [alias_name] The name of the alias, matching the LambdaWrap environment concept.
    def create_alias(function_name, func_version, alias_name)
      # create or update alias
      func_alias = @client.list_aliases(function_name: function_name).aliases.select { |a| a.name == alias_name }.first
      if !func_alias
        a = @client.create_alias(
          function_name: function_name, name: alias_name, function_version: func_version,
          description: 'created by an automated script'
        ).data
      else
        a = @client.update_alias(
          function_name: function_name, name: alias_name, function_version: func_version,
          description: 'updated by an automated script'
        ).data
      end
      puts a

      add_api_gateway_permissions(function_name, alias_name)
    end

    ##
    # Removes an alias for a function.
    #
    # *Arguments*
    # [function_name] The lambda function name for which the alias should be removed.
    # [alias_name] The alias to remove.
    def remove_alias(function_name, alias_name)
      @client.delete_alias(function_name: function_name, name: alias_name)
    end

    ##
    # Adds permissions for API gateway to execute this function.
    #
    # *Arguments*
    # [function_name] The function name which needs to be executed from API Gateway.
    # [env] The environment (matching the function's alias) which needs to be executed from API Gateway.
    # => If nil, the permissions are set of the $LATEST version.
    def add_api_gateway_permissions(function_name, env)
      # permissions to execute lambda
      suffix = (':' + env if env) || ''
      func = @client.get_function(function_name: function_name + suffix).data.configuration
      statement_id = func.function_name + (('-' + env if env) || '')
      begin
        existing_policies = @client.get_policy(function_name: func.function_arn).data
        existing_policy = JSON.parse(existing_policies.policy)
        policy_exists = existing_policy['Statement'].select { |s| s['Sid'] == statement_id }.any?
      rescue Aws::Lambda::Errors::ResourceNotFoundException
        # policy does not exist, and that is ok
        policy_exists = false
      end

      unless policy_exists
        perm_add = @client.add_permission(
          function_name: func.function_arn, statement_id: statement_id,
          action: 'lambda:*', principal: 'apigateway.amazonaws.com'
        )
        puts perm_add.data
      end
    end

    private :add_api_gateway_permissions
  end
end
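The substantive change in 0.21.0 is visible in deploy_lambda above: it now accepts explicit timeout and memory_size arguments (defaulting to 5 seconds and 128 MB) and forwards them to update_function_configuration and create_function. Below is a hedged end-to-end sketch of the package → upload → deploy → alias flow; every bucket, key, role ARN, and function name is a placeholder, and AWS credentials are assumed to be configured.

require 'lambda_wrap'

lambda_mgr = LambdaWrap::LambdaManager.new

# 1. Package source files and node modules into a zip (hypothetical paths).
lambda_mgr.package('tmp/package', 'tmp/package.zip', ['index.js'], ['async'])

# 2. Upload the zip to S3 and remember its object version.
version_id = lambda_mgr.publish_lambda_to_s3('tmp/package.zip', 'my-deploy-bucket', 'lambda/package.zip')

# 3. Create or update the function, using the new timeout/memory_size parameters.
func_version = lambda_mgr.deploy_lambda(
  'my-deploy-bucket', 'lambda/package.zip', version_id,
  'my-function', 'index.handler', 'arn:aws:iam::123456789012:role/my-lambda-role',
  'Deployed with LambdaWrap', [], [], 'nodejs4.3',
  30,   # timeout in seconds (new in 0.21.0)
  256   # memory size in MB, a multiple of 64 (new in 0.21.0)
)

# 4. Point an environment alias at the freshly published version.
lambda_mgr.create_alias('my-function', func_version, 'production')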
data/lib/lambda_wrap/zip_file_generator.rb
CHANGED
@@ -1,67 +1,67 @@
(This hunk removes and re-adds all 67 lines; as rendered, the removed and re-added content is identical, so it is listed once below.)

require 'rubygems'
require 'zip'

module LambdaWrap
  ##
  # Allows to easily zip a directory recursively. It's intended for gem internal use only.
  #
  # From the original example:
  # This is a simple example which uses rubyzip to
  # recursively generate a zip file from the contents of
  # a specified directory. The directory itself is not
  # included in the archive, rather just its contents.
  #
  # Usage:
  # require /path/to/the/ZipFileGenerator/Class
  # directoryToZip = "/tmp/input"
  # outputFile = "/tmp/out.zip"
  # zf = ZipFileGenerator.new(directoryToZip, outputFile)
  # zf.write()
  class ZipFileGenerator
    ##
    # Initialize with the directory to zip and the location of the output archive.
    def initialize(input_dir, output_file)
      @input_dir = input_dir
      @output_file = output_file
    end

    ##
    # Zip the input directory.
    def write
      entries = Dir.entries(@input_dir) - %w(. ..)

      Zip::File.open(@output_file, Zip::File::CREATE) do |io|
        write_entries entries, '', io
      end
    end

    private

    # A helper method to make the recursion work.
    def write_entries(entries, path, io)
      entries.each do |e|
        zip_file_path = path == '' ? e : File.join(path, e)
        disk_file_path = File.join(@input_dir, zip_file_path)
        puts "Deflating #{disk_file_path}"

        if File.directory? disk_file_path
          recursively_deflate_directory(disk_file_path, io, zip_file_path)
        else
          put_into_archive(disk_file_path, io, zip_file_path)
        end
      end
    end

    def recursively_deflate_directory(disk_file_path, io, zip_file_path)
      io.mkdir zip_file_path
      subdir = Dir.entries(disk_file_path) - %w(. ..)
      write_entries subdir, zip_file_path, io
    end

    def put_into_archive(disk_file_path, io, zip_file_path)
      io.get_output_stream(zip_file_path) do |f|
        f.puts(File.open(disk_file_path, 'rb').read)
      end
    end
  end
end
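The class comment above already documents standalone usage; inside the gem it is driven by LambdaManager#package. A minimal sketch with hypothetical paths:

require 'lambda_wrap'

# Zip the contents of tmp/package (not the directory itself) into tmp/package.zip.
LambdaWrap::ZipFileGenerator.new('tmp/package', 'tmp/package.zip').write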
data/lib/lambda_wrap.rb
CHANGED
@@ -1,5 +1,5 @@
(This hunk removes and re-adds all five lines; as rendered, the removed and re-added content is identical, so it is listed once below.)

# :nodoc:
Dir["#{File.expand_path(File.dirname(__FILE__))}/**/*.rb"].each { |f| require f }

STDOUT.sync = true
STDERR.sync = true
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: lambda_wrap
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.21.0
 platform: ruby
 authors:
 - Markus Thurner
@@ -10,7 +10,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-10-
+date: 2016-10-13 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk
@@ -73,7 +73,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.2.5
 signing_key:
 specification_version: 4
 summary: Easy deployment of AWS Lambda functions and dependencies.