funktor 0.7.3 → 0.7.6
- checksums.yaml +4 -4
- data/Gemfile.lock +11 -11
- data/README.md +0 -2
- data/funktor-testapp/Dockerfile +63 -0
- data/funktor-testapp/app/workers/audit_worker.rb +5 -1
- data/funktor-testapp/funktor_config/boot.rb +3 -0
- data/funktor-testapp/funktor_config/environment.yml +10 -1
- data/funktor-testapp/funktor_config/function_definitions/default_queue_handler.yml +5 -1
- data/funktor-testapp/funktor_config/function_definitions/incoming_job_handler.yml +5 -1
- data/funktor-testapp/funktor_config/function_definitions/job_activator.yml +5 -1
- data/funktor-testapp/funktor_config/function_definitions/low_concurrency_queue_handler.yml +5 -1
- data/funktor-testapp/funktor_config/funktor.yml +1 -0
- data/funktor-testapp/funktor_config/package.yml +6 -6
- data/funktor-testapp/funktor_config/resources/cloudwatch_dashboard.yml +14 -14
- data/funktor-testapp/funktor_config/ruby_layer.yml +1 -1
- data/funktor-testapp/serverless.yml +13 -3
- data/lib/funktor/activity_tracker.rb +5 -1
- data/lib/funktor/cli/init.rb +7 -1
- data/lib/funktor/cli/templates/Dockerfile +63 -0
- data/lib/funktor/cli/templates/funktor_config/environment.yml +10 -1
- data/lib/funktor/cli/templates/funktor_config/function_definitions/incoming_job_handler.yml +5 -1
- data/lib/funktor/cli/templates/funktor_config/function_definitions/job_activator.yml +5 -1
- data/lib/funktor/cli/templates/funktor_config/function_definitions/work_queue_handler.yml +5 -1
- data/lib/funktor/cli/templates/funktor_config/funktor.yml +1 -0
- data/lib/funktor/cli/templates/funktor_config/package.yml +6 -6
- data/lib/funktor/cli/templates/funktor_config/resources/cloudwatch_dashboard.yml +5 -5
- data/lib/funktor/cli/templates/funktor_config/ruby_layer.yml +1 -1
- data/lib/funktor/cli/templates/serverless.yml +13 -3
- data/lib/funktor/counter.rb +5 -1
- data/lib/funktor/incoming_job_handler.rb +7 -5
- data/lib/funktor/job_activator.rb +32 -15
- data/lib/funktor/middleware/metrics.rb +5 -1
- data/lib/funktor/version.rb +1 -1
- data/lib/funktor/work_queue_handler.rb +19 -4
- data/lib/funktor.rb +10 -1
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 00c5852fd10c7e6ad87b576ff0a1ed13b62f69fc19391202d4c2d289376a3fb2
+  data.tar.gz: e780ae7466a2c4dfc7060abfacd230a9284105c89aa95db376e99b7fe14a2b89
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e7015cc189689fbffb456727899417e1edb7f823436536b6a3f316a96a83291bdaf0ee43b860d2925b53c239be15d685df268bf277c0843efc853b2b07abe0d6
+  data.tar.gz: a8d58b3d8d45b5bd2ecd70c1f18956685af350959cd6d3263e212d611e9e4d7d069b52681866e725cc432af09a434465e0b9bff80dc94d7a444f88d21a87a36d
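These digests can be checked against a downloaded package. Below is a minimal sketch, not part of the gem, that hashes the `data.tar.gz` member inside the `.gem` archive (a `.gem` is a tar file containing `metadata.gz`, `data.tar.gz`, and `checksums.yaml.gz`) and compares it to the SHA256 value above; the filename is assumed.

```ruby
# Hypothetical verification sketch, not part of funktor: compare the
# data.tar.gz member of a downloaded .gem against the published SHA256.
require 'digest'
require 'rubygems/package'

EXPECTED = "e780ae7466a2c4dfc7060abfacd230a9284105c89aa95db376e99b7fe14a2b89"

File.open("funktor-0.7.6.gem", "rb") do |io|
  Gem::Package::TarReader.new(io) do |tar|
    tar.each do |entry|
      next unless entry.full_name == "data.tar.gz"
      actual = Digest::SHA256.hexdigest(entry.read)
      puts(actual == EXPECTED ? "data.tar.gz checksum OK" : "MISMATCH: #{actual}")
    end
  end
end
```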
data/Gemfile.lock
CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    funktor (0.7.
+    funktor (0.7.5)
       activesupport
       aws-sdk-dynamodb (~> 1.62)
       aws-sdk-sqs (~> 1.37)
@@ -22,19 +22,19 @@ GEM
     addressable (2.8.0)
       public_suffix (>= 2.0.2, < 5.0)
     aws-eventstream (1.2.0)
-    aws-partitions (1.
-    aws-sdk-core (3.
+    aws-partitions (1.584.0)
+    aws-sdk-core (3.130.2)
       aws-eventstream (~> 1, >= 1.0.2)
-      aws-partitions (~> 1, >= 1.
+      aws-partitions (~> 1, >= 1.525.0)
       aws-sigv4 (~> 1.1)
       jmespath (~> 1.0)
-    aws-sdk-dynamodb (1.
-      aws-sdk-core (~> 3, >= 3.
+    aws-sdk-dynamodb (1.74.0)
+      aws-sdk-core (~> 3, >= 3.127.0)
       aws-sigv4 (~> 1.1)
-    aws-sdk-sqs (1.
-      aws-sdk-core (~> 3, >= 3.
+    aws-sdk-sqs (1.51.1)
+      aws-sdk-core (~> 3, >= 3.127.0)
       aws-sigv4 (~> 1.1)
-    aws-sigv4 (1.
+    aws-sigv4 (1.5.0)
       aws-eventstream (~> 1, >= 1.0.2)
     byebug (11.1.3)
     coderay (1.1.3)
@@ -48,7 +48,7 @@ GEM
     hashdiff (1.0.1)
     i18n (1.8.10)
       concurrent-ruby (~> 1.0)
-    jmespath (1.
+    jmespath (1.6.1)
     method_source (1.0.0)
     minitest (5.14.4)
     mustermann (1.1.1)
@@ -90,7 +90,7 @@ GEM
       rack (~> 2.2)
       rack-protection (= 2.1.0)
       tilt (~> 2.0)
-    thor (1.1
+    thor (1.2.1)
     tilt (2.0.10)
     timecop (0.9.4)
     tzinfo (2.0.4)
data/README.md
CHANGED
data/funktor-testapp/Dockerfile
ADDED
@@ -0,0 +1,63 @@
+FROM public.ecr.aws/lambda/ruby:2.7 AS build_image
+
+# Uncomment this as a cache buster
+#RUN echo date
+
+# If you supplied buildArgs to the ecr image you can access them here using ARG & ENV
+#ARG BUNDLE_GEM__FURY__IO
+#ENV BUNDLE_GEM__FURY__IO=${BUNDLE_GEM__FURY__IO}
+
+# First we install some stuff that we need for gems that have to compile native extensions
+#RUN yum groupinstall "Development Tools" -y
+#RUN yum install -y amazon-linux-extras
+#RUN amazon-linux-extras enable postgresql11
+#RUN yum install -y postgresql-devel
+
+# Now we copy the Gemfile and Gemfile.lock into the build image so we can install our gems
+COPY Gemfile Gemfile.lock .
+
+# Set a few bundle configuration options
+RUN bundle lock --add-platform x86_64-linux
+#RUN bundle config --local deployment true
+#RUN bundle config --local plugins false
+#RUN bundle config --local frozen true
+#RUN bundle config --local without 'development test'
+RUN bundle config --local path './vendor/bundle'
+
+# Now install our gems
+RUN bundle install --quiet --jobs 4
+
+# Now we start a second stage in the build that is a clean image without build tools
+FROM public.ecr.aws/lambda/ruby:2.7 AS deploy_image
+
+#ENV RAILS_ENV=production
+#ENV RACK_ENV=production
+
+# Install node so that asset related gems have a JS runtime.
+# We ship the node executable to production to make it easier to get an app deployed.
+# TODO: Document steps that could be taken to remove this dependency.
+#RUN curl --silent --location https://rpm.nodesource.com/setup_14.x | bash -
+#RUN yum install -y nodejs
+
+# Then copy some postgres related files
+#COPY --from=build_image /usr/lib64/libpq.so.5 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libldap_r-2.4.so.2 /usr/lib64/
+#COPY --from=build_image /usr/lib64/liblber-2.4.so.2 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libsasl2.so.3 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libssl3.so /usr/lib64/
+#COPY --from=build_image /usr/lib64/libsmime3.so /usr/lib64/
+#COPY --from=build_image /usr/lib64/libnss3.so /usr/lib64/
+
+
+# Next we copy the app from our local directory and we copy
+# the bundled gems from the build image.
+# We do this after copying dependencies because the app will
+# change more frequently, and we can use caching up to here.
+
+COPY --from=build_image /var/task .
+COPY . .
+
+# And finally we have the CMD for the deployed container
+# You can overwrite command in `serverless.yml` template
+CMD [ "app.LambdaFunction::Handler.process" ]
+
data/funktor-testapp/app/workers/audit_worker.rb
CHANGED
@@ -21,13 +21,17 @@ class AuditWorker
     puts "So long from the #{self.class.name}, and thanks for all the fish!"
   end
 
+  def metric_namespace
+    [ENV['FUNKTOR_APP_NAME'], ENV['SERVERLESS_STAGE']].join('-')
+  end
+
   def metric_hash(time_diff)
     {
       "_aws": {
        "Timestamp": Time.now.strftime('%s%3N').to_i,
        "CloudWatchMetrics": [
          {
-           "Namespace":
+           "Namespace": metric_namespace,
            "Dimensions": [["WorkerClassName"]],
            "Metrics": [ # CPU, Memory, Duration, etc...
            {
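The change above derives the CloudWatch namespace from `FUNKTOR_APP_NAME` and `SERVERLESS_STAGE` instead of a hard-coded value. As a rough illustration (field values here are examples, not funktor API), this is the shape of the Embedded Metric Format line such a worker ends up printing:

```ruby
# Illustrative sketch of a CloudWatch Embedded Metric Format (EMF) payload
# like the one built by metric_hash; names and values are examples only.
require 'json'

ENV['FUNKTOR_APP_NAME'] ||= 'funktor-testapp'
ENV['SERVERLESS_STAGE'] ||= 'dev'

namespace = [ENV['FUNKTOR_APP_NAME'], ENV['SERVERLESS_STAGE']].join('-')

payload = {
  "_aws": {
    "Timestamp": Time.now.strftime('%s%3N').to_i, # epoch milliseconds
    "CloudWatchMetrics": [
      {
        "Namespace": namespace, # e.g. "funktor-testapp-dev"
        "Dimensions": [["WorkerClassName"]],
        "Metrics": [{ "Name": "Duration", "Unit": "Seconds" }]
      }
    ]
  },
  "WorkerClassName": "AuditWorker",
  "Duration": 0.42
}

# One JSON object per log line is how CloudWatch ingests EMF metrics.
puts JSON.dump(payload)
```

Printing the hash as a single JSON line is what lets CloudWatch extract it as a metric, which is why the gem writes these through its raw (timestamp-free) logger.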
data/funktor-testapp/funktor_config/environment.yml
CHANGED
@@ -1,7 +1,16 @@
-BUNDLE_WITHOUT: development:test
+BUNDLE_WITHOUT: development:test
 BUNDLE_PLUGINS: false
 BUNDLE_FROZEN: true
+BUNDLE_DEPLOYMENT: true
 SERVERLESS_STAGE: ${self:custom.stage}
+
+FUNKTOR_LOG_LEVEL: INFO
+
+RAILS_LOG_TO_STDOUT: true
+RAILS_ENV: production
+RACK_ENV: production
+RAILS_MAX_THREADS: 1
+
 FUNKTOR_APP_NAME: funktor-testapp
 FUNKTOR_INCOMING_JOB_QUEUE:
   Ref: IncomingJobQueue
data/funktor-testapp/funktor_config/function_definitions/default_queue_handler.yml
CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/default_queue_handler.call
+#handler: lambda_event_handlers/default_queue_handler.call
 timeout: ${self:custom.funktor.DefaultQueueHandler.functionTimeout, 900}
 reservedConcurrency: ${self:custom.funktor.DefaultQueueHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.DefaultQueueHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.DefaultQueueHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/default_queue_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.DefaultQueueHandler.batchSize, 1}
data/funktor-testapp/funktor_config/function_definitions/incoming_job_handler.yml
CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/incoming_job_handler.call
+#handler: lambda_event_handlers/incoming_job_handler.call
 timeout: ${self:custom.funktor.IncomingJobHandler.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.IncomingJobHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.IncomingJobHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.IncomingJobHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/incoming_job_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.IncomingJobHandler.batchSize, 1}
data/funktor-testapp/funktor_config/function_definitions/job_activator.yml
CHANGED
@@ -1,7 +1,11 @@
-handler: lambda_event_handlers/job_activator.call
+#handler: lambda_event_handlers/job_activator.call
 timeout: ${self:custom.funktor.JobActivator.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.JobActivator.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.JobActivator.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.JobActivator.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/job_activator.call
 events:
   - schedule: rate(1 minute)
data/funktor-testapp/funktor_config/function_definitions/low_concurrency_queue_handler.yml
CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/low_concurrency_queue_handler.call
+#handler: lambda_event_handlers/low_concurrency_queue_handler.call
 timeout: ${self:custom.funktor.LowConcurrencyQueueHandler.functionTimeout, 900}
 reservedConcurrency: ${self:custom.funktor.LowConcurrencyQueueHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.LowConcurrencyQueueHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.LowConcurrencyQueueHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/low_concurrency_queue_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.LowConcurrencyQueueHandler.batchSize, 1}
data/funktor-testapp/funktor_config/funktor.yml
CHANGED
@@ -103,6 +103,7 @@ IncomingDeadJobQueueName: ${self:service}-${self:custom.stage}-incoming-dead
 IncomingJobHandlerName: ${self:service}-${self:custom.stage}-IncomingJobHandler
 IncomingJobQueueAccessPolicyName: ${self:service}-${self:custom.stage}-incoming-job-queue-access
 DashboardName: ${self:service}-${self:custom.stage}-dashboard
+DashboardNamespace: ${self:service}-${self:custom.stage}
 DefaultQueueName: ${self:service}-${self:custom.stage}-default
 DefaultDeadJobQueueName: ${self:service}-${self:custom.stage}-default-dead
 DefaultQueueHandlerName: ${self:service}-${self:custom.stage}-DefaultQueueHandler
data/funktor-testapp/funktor_config/package.yml
CHANGED
@@ -1,10 +1,10 @@
 # TODO - Figure out how to allow individual packaging to work out of the box.
-individually: false
-include:
-- Gemfile
-- Gemfile.lock
-- funktor_config/boot.rb
-- app/**
+#individually: false
+#include:
+#- Gemfile
+#- Gemfile.lock
+#- funktor_config/boot.rb
+#- app/**
 # Everything is excluded by default with serverless-ruby-layer, but you could use
 # the lines below to exclude files that are inside an include path.
 #exclude:
data/funktor-testapp/funktor_config/resources/cloudwatch_dashboard.yml
CHANGED
@@ -29,13 +29,13 @@ Resources:
             "type": "metric",
             "properties": {
                 "metrics": [
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "Duration", "WorkerClassName", "AuditWorker" ],
                     [ "...", { "stat": "p99" } ],
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "Duration", "WorkerClassName", "GreetingsWorker" ],
                     [ "...", { "stat": "p99" } ],
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "Duration", "WorkerClassName", "HelloWorker" ],
                     [ "...", { "stat": "p99" } ],
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "Duration", "WorkerClassName", "SingleThreadAuditWorker" ],
                     [ "...", { "stat": "p99" } ]
                 ],
                 "view": "timeSeries",
@@ -55,13 +55,13 @@ Resources:
             "type": "metric",
             "properties": {
                 "metrics": [
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "processed", "WorkerClassName", "AuditWorker" ],
                     [ ".", "failed", ".", "." ],
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "processed", "WorkerClassName", "GreetingsWorker" ],
                     [ ".", "failed", ".", "." ],
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "processed", "WorkerClassName", "HelloWorker" ],
                     [ ".", "failed", ".", "." ],
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "processed", "WorkerClassName", "SingleThreadAuditWorker" ],
                     [ ".", "failed", ".", "." ]
                 ],
                 "view": "timeSeries",
@@ -81,9 +81,9 @@ Resources:
             "type": "metric",
             "properties": {
                 "metrics": [
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "Duration", "Queue", "default" ],
                     [ "...", { "stat": "p99" } ],
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "Duration", "Queue", "low_concurrency" ],
                     [ "...", { "stat": "p99" } ]
                 ],
                 "view": "timeSeries",
@@ -102,9 +102,9 @@ Resources:
             "type": "metric",
             "properties": {
                 "metrics": [
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "processed", "Queue", "default" ],
                     [ ".", "failed", ".", "." ],
-                    [ "funktor
+                    [ "${self:custom.funktor.DashboardNamespace}", "processed", "Queue", "low_concurrency" ],
                     [ ".", "failed", ".", "." ]
                 ],
                 "view": "timeSeries",
@@ -117,17 +117,17 @@ Resources:
             },
 
 
+
             {
                 "height": 3,
                 "width": 24,
-                "y":
+                "y": 12,
                 "x": 0,
                 "type": "text",
                 "properties": {
                     "markdown": "\n# Behind the scenes\n\n The stats below give some insight into the inner workings of the Funktor apparatus."
                 }
             },
-
 
 
 
data/funktor-testapp/serverless.yml
CHANGED
@@ -25,6 +25,16 @@ provider:
   runtime: ruby2.7
   stage: ${opt:stage, 'dev'}
   lambdaHashingVersion: 20201221
+  ecr:
+    scanOnPush: true
+    images:
+      funktorimage:
+        path: ./
+        file: Dockerfile
+        #platform: linux/amd64 # May be needed if you're on an M1.
+        buildArgs:
+          # Here you can specify ENV vars that you need at build time. For instance you may want to install private gems:
+          #BUNDLE_GEM__FURY__IO: ${env:BUNDLE_GEM__FURY__IO}
   environment: ${file(funktor_config/environment.yml)}
   versionFunctions: false # Reduces the amount of storage used since all Lambdas together are limited to 75GB
   logRetentionInDays: 7
@@ -42,9 +52,9 @@ custom:
   # commands. Or falls back to what we have set in the provider section.
   stage: ${self:provider.stage, 'dev'}
   funktor: ${file(funktor_config/funktor.yml)}
-  rubyLayer: ${file(funktor_config/ruby_layer.yml)}
+  #rubyLayer: ${file(funktor_config/ruby_layer.yml)}
 
-package: ${file(funktor_config/package.yml)}
+#package: ${file(funktor_config/package.yml)}
 
 functions:
   IncomingJobHandler: ${file(funktor_config/function_definitions/incoming_job_handler.yml)}
@@ -62,4 +72,4 @@ resources:
   - ${file(funktor_config/resources/jobs_table.yml)}
 
 plugins:
-  - serverless-ruby-layer
+  #- serverless-ruby-layer
data/lib/funktor/activity_tracker.rb
CHANGED
@@ -77,13 +77,17 @@ module Funktor
       @dynamodb_client ||= ::Aws::DynamoDB::Client.new
     end
 
+    def metric_namespace
+      [ENV['FUNKTOR_APP_NAME'], ENV['SERVERLESS_STAGE']].join('-')
+    end
+
     def put_metric_to_stdout(key, value)
       data = {
         "_aws": {
           "Timestamp": Time.now.strftime('%s%3N').to_i,
           "CloudWatchMetrics": [
             {
-              "Namespace":
+              "Namespace": metric_namespace,
               "Dimensions": [["functionVersion"]],
               "Metrics": [ # CPU, Memory, Duration, etc...
               {
data/lib/funktor/cli/init.rb
CHANGED
@@ -46,8 +46,10 @@ module Funktor
           say "serverless-ruby-layer is already installed in package.json"
         else
           if File.exist?("package-lock.json")
-            run "npm install serverless-ruby-layer@1.4.0"
+            run "npm install serverless-ruby-layer@1.4.0 --save-dev"
             # TODO - Add handlers for yarn and what not
+          elsif File.exist?("yarn.lock")
+            run "yarn add --dev serverless-ruby-layer@1.4.0"
           else
             say "You should install serverless-ruby-layer version 1.4.0 using your package manager of choice."
           end
@@ -133,6 +135,10 @@ module Funktor
         template "serverless.yml", File.join("serverless.yml")
       end
 
+      def dockerfile
+        template "Dockerfile", File.join("Dockerfile")
+      end
+
       private
 
       def app_worker_names
data/lib/funktor/cli/templates/Dockerfile
ADDED
@@ -0,0 +1,63 @@
+FROM public.ecr.aws/lambda/ruby:2.7 AS build_image
+
+# Uncomment this as a cache buster
+#RUN echo date
+
+# If you supplied buildArgs to the ecr image you can access them here using ARG & ENV
+#ARG BUNDLE_GEM__FURY__IO
+#ENV BUNDLE_GEM__FURY__IO=${BUNDLE_GEM__FURY__IO}
+
+# First we install some stuff that we need for gems that have to compile native extensions
+#RUN yum groupinstall "Development Tools" -y
+#RUN yum install -y amazon-linux-extras
+#RUN amazon-linux-extras enable postgresql11
+#RUN yum install -y postgresql-devel
+
+# Now we copy the Gemfile and Gemfile.lock into the build image so we can install our gems
+COPY Gemfile Gemfile.lock .
+
+# Set a few bundle configuration options
+RUN bundle lock --add-platform x86_64-linux
+#RUN bundle config --local deployment true
+#RUN bundle config --local plugins false
+#RUN bundle config --local frozen true
+#RUN bundle config --local without 'development test'
+RUN bundle config --local path './vendor/bundle'
+
+# Now install our gems
+RUN bundle install --quiet --jobs 4
+
+# Now we start a second stage in the build that is a clean image without build tools
+FROM public.ecr.aws/lambda/ruby:2.7 AS deploy_image
+
+#ENV RAILS_ENV=production
+#ENV RACK_ENV=production
+
+# Install node so that asset related gems have a JS runtime.
+# We ship the node executable to production to make it easier to get an app deployed.
+# TODO: Document steps that could be taken to remove this dependency.
+#RUN curl --silent --location https://rpm.nodesource.com/setup_14.x | bash -
+#RUN yum install -y nodejs
+
+# Then copy some postgres related files
+#COPY --from=build_image /usr/lib64/libpq.so.5 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libldap_r-2.4.so.2 /usr/lib64/
+#COPY --from=build_image /usr/lib64/liblber-2.4.so.2 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libsasl2.so.3 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libssl3.so /usr/lib64/
+#COPY --from=build_image /usr/lib64/libsmime3.so /usr/lib64/
+#COPY --from=build_image /usr/lib64/libnss3.so /usr/lib64/
+
+
+# Next we copy the app from our local directory and we copy
+# the bundled gems from the build image.
+# We do this after copying dependencies because the app will
+# change more frequently, and we can use caching up to here.
+
+COPY --from=build_image /var/task .
+COPY . .
+
+# And finally we have the CMD for the deployed container
+# You can overwrite command in `serverless.yml` template
+CMD [ "app.LambdaFunction::Handler.process" ]
+
data/lib/funktor/cli/templates/funktor_config/environment.yml
CHANGED
@@ -1,7 +1,16 @@
-BUNDLE_WITHOUT: development:test
+BUNDLE_WITHOUT: development:test
 BUNDLE_PLUGINS: false
 BUNDLE_FROZEN: true
+BUNDLE_DEPLOYMENT: true
 SERVERLESS_STAGE: ${self:custom.stage}
+
+FUNKTOR_LOG_LEVEL: INFO
+
+RAILS_LOG_TO_STDOUT: true
+RAILS_ENV: production
+RACK_ENV: production
+RAILS_MAX_THREADS: 1
+
 FUNKTOR_APP_NAME: <%= app_name %>
 FUNKTOR_INCOMING_JOB_QUEUE:
   Ref: IncomingJobQueue
data/lib/funktor/cli/templates/funktor_config/function_definitions/incoming_job_handler.yml
CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/incoming_job_handler.call
+#handler: lambda_event_handlers/incoming_job_handler.call
 timeout: ${self:custom.funktor.IncomingJobHandler.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.IncomingJobHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.IncomingJobHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.IncomingJobHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/incoming_job_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.IncomingJobHandler.batchSize, 1}
data/lib/funktor/cli/templates/funktor_config/function_definitions/job_activator.yml
CHANGED
@@ -1,7 +1,11 @@
-handler: lambda_event_handlers/job_activator.call
+#handler: lambda_event_handlers/job_activator.call
 timeout: ${self:custom.funktor.JobActivator.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.JobActivator.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.JobActivator.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.JobActivator.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/job_activator.call
 events:
   - schedule: rate(1 minute)
data/lib/funktor/cli/templates/funktor_config/function_definitions/work_queue_handler.yml
CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/<%= work_queue_name.underscore %>_queue_handler.call
+#handler: lambda_event_handlers/<%= work_queue_name.underscore %>_queue_handler.call
 timeout: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.functionTimeout, 900}
 reservedConcurrency: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/<%= work_queue_name.underscore %>_queue_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.batchSize, 1}
data/lib/funktor/cli/templates/funktor_config/funktor.yml
CHANGED
@@ -81,6 +81,7 @@ IncomingDeadJobQueueName: ${self:service}-${self:custom.stage}-incoming-dead
 IncomingJobHandlerName: ${self:service}-${self:custom.stage}-IncomingJobHandler
 IncomingJobQueueAccessPolicyName: ${self:service}-${self:custom.stage}-incoming-job-queue-access
 DashboardName: ${self:service}-${self:custom.stage}-dashboard
+DashboardNamespace: ${self:service}-${self:custom.stage}
 <%- queue_names.each do |queue_name| -%>
 <%= queue_name.camelize %>QueueName: ${self:service}-${self:custom.stage}-<%= queue_name.underscore.dasherize %>
 <%= queue_name.camelize %>DeadJobQueueName: ${self:service}-${self:custom.stage}-<%= queue_name.underscore.dasherize %>-dead
data/lib/funktor/cli/templates/funktor_config/package.yml
CHANGED
@@ -1,10 +1,10 @@
 # TODO - Figure out how to allow individual packaging to work out of the box.
-individually: false
-include:
-- Gemfile
-- Gemfile.lock
-- funktor_config/boot.rb
-- app/**
+#individually: false
+#include:
+#- Gemfile
+#- Gemfile.lock
+#- funktor_config/boot.rb
+#- app/**
 # Everything is excluded by default with serverless-ruby-layer, but you could use
 # the lines below to exclude files that are inside an include path.
 #exclude:
data/lib/funktor/cli/templates/funktor_config/resources/cloudwatch_dashboard.yml
CHANGED
@@ -30,7 +30,7 @@ Resources:
             "properties": {
                 "metrics": [
                     <%- app_worker_names.each do |worker_name| -%>
-                    [ "
+                    [ "${self:custom.funktor.DashboardNamespace}", "Duration", "WorkerClassName", "<%= worker_name %>" ],
                     [ "...", { "stat": "p99" } ]<%= worker_name == app_worker_names.last ? "" : "," %>
                     <%- end -%>
                 ],
@@ -52,7 +52,7 @@ Resources:
             "properties": {
                 "metrics": [
                     <%- app_worker_names.each do |worker_name| -%>
-                    [ "
+                    [ "${self:custom.funktor.DashboardNamespace}", "processed", "WorkerClassName", "<%= worker_name %>" ],
                     [ ".", "failed", ".", "." ]<%= worker_name == app_worker_names.last ? "" : "," %>
                     <%- end -%>
                 ],
@@ -74,7 +74,7 @@ Resources:
             "properties": {
                 "metrics": [
                     <%- queue_names.each do |queue_name| -%>
-                    [ "
+                    [ "${self:custom.funktor.DashboardNamespace}", "Duration", "Queue", "<%= queue_name.underscore %>" ],
                     [ "...", { "stat": "p99" } ]<%= queue_name == queue_names.last ? "" : "," %>
                     <%- end -%>
                 ],
@@ -95,7 +95,7 @@ Resources:
             "properties": {
                 "metrics": [
                     <%- queue_names.each do |queue_name| -%>
-                    [ "
+                    [ "${self:custom.funktor.DashboardNamespace}", "processed", "Queue", "<%= queue_name.underscore %>" ],
                     [ ".", "failed", ".", "." ]<%= queue_name == queue_names.last ? "" : "," %>
                     <%- end -%>
                 ],
@@ -109,6 +109,7 @@ Resources:
             },
 
 
+            <% current_y += 3 %>
             { <% "Funktor Behind the Scenes Banner" %>
                 "height": 3,
                 "width": 24,
@@ -119,7 +120,6 @@ Resources:
                     "markdown": "\n# Behind the scenes\n\n The stats below give some insight into the inner workings of the Funktor apparatus."
                 }
             },
-            <% current_y += 3 %>
 
 
             <% current_y += 6 %>
data/lib/funktor/cli/templates/serverless.yml
CHANGED
@@ -25,6 +25,16 @@ provider:
   runtime: <%= runtime %>
   stage: ${opt:stage, 'dev'}
   lambdaHashingVersion: 20201221
+  ecr:
+    scanOnPush: true
+    images:
+      funktorimage:
+        path: ./
+        file: Dockerfile
+        #platform: linux/amd64 # May be needed if you're on an M1.
+        buildArgs:
+          # Here you can specify ENV vars that you need at build time. For instance you may want to install private gems:
+          #BUNDLE_GEM__FURY__IO: ${env:BUNDLE_GEM__FURY__IO}
   environment: ${file(funktor_config/environment.yml)}
   versionFunctions: false # Reduces the amount of storage used since all Lambdas together are limited to 75GB
   logRetentionInDays: 7
@@ -39,9 +49,9 @@ custom:
   # commands. Or falls back to what we have set in the provider section.
   stage: ${self:provider.stage, 'dev'}
   funktor: ${file(funktor_config/funktor.yml)}
-  rubyLayer: ${file(funktor_config/ruby_layer.yml)}
+  #rubyLayer: ${file(funktor_config/ruby_layer.yml)}
 
-package: ${file(funktor_config/package.yml)}
+#package: ${file(funktor_config/package.yml)}
 
 functions:
   <%- all_function_definitions.each do |function_definition| -%>
@@ -54,4 +64,4 @@ resources:
   <%- end -%>
 
 plugins:
-  - serverless-ruby-layer
+  #- serverless-ruby-layer
data/lib/funktor/counter.rb
CHANGED
@@ -17,13 +17,17 @@ module Funktor
       Funktor.raw_logger.unknown Funktor.dump_json(metric_hash(job))
     end
 
+    def metric_namespace
+      [ENV['FUNKTOR_APP_NAME'], ENV['SERVERLESS_STAGE']].join('-')
+    end
+
     def metric_hash(job)
       {
        "_aws": {
          "Timestamp": Time.now.strftime('%s%3N').to_i,
          "CloudWatchMetrics": [
            {
-             "Namespace":
+             "Namespace": metric_namespace,
              "Dimensions": [["WorkerClassName"], ["Queue"]],
              "Metrics": [ # CPU, Memory, Duration, etc...
              {
data/lib/funktor/incoming_job_handler.rb
CHANGED
@@ -30,11 +30,13 @@ module Funktor
       # TODO : This number should be configurable via ENV var
       if job.delay < 60 # for now we're testing with just one minute * 5 # 5 minutes
         Funktor.logger.debug "pushing to work queue for delay = #{job.delay}"
-        # We push to the jobs table first because the work queue handler will expect to be able
-        # to update the stats of a record that's already in the table.
-        # TODO : For time sensitive jobs this is probably less than optimal. Can we update the
-        # work queue handler to be ok with a job that's not yet in the table?
-        push_to_jobs_table(job, "queued")
+        if Funktor.enable_work_queue_visibility
+          # We push to the jobs table first because the work queue handler will expect to be able
+          # to update the stats of a record that's already in the table.
+          # TODO : For time sensitive jobs this is probably less than optimal. Can we update the
+          # work queue handler to be ok with a job that's not yet in the table?
+          push_to_jobs_table(job, "queued")
+        end
         push_to_work_queue(job)
         if job.is_retry?
           @tracker.track(:retryActivated, job)
data/lib/funktor/job_activator.rb
CHANGED
@@ -61,23 +61,40 @@ module Funktor
     end
 
     def activate_job(job_shard, job_id, current_category, queue_immediately = false)
+
+      # TODO: WorkQueueVisibilityMiddleware to alter what happens here? Maybe we delete by default and then the middleware puts it back in the table?
       # First we conditionally update the item in Dynamo to be sure that another scheduler hasn't gotten
       # to it, and if that works then send to SQS. This is basically how Sidekiq scheduler works.
-      response = dynamodb_client.update_item({
-        key: {
-          "jobShard" => job_shard,
-          "jobId" => job_id
-        },
-        update_expression: "SET category = :category, queueable = :queueable",
-        condition_expression: "category = :current_category",
-        expression_attribute_values: {
-          ":current_category" => current_category,
-          ":queueable" => "false",
-          ":category" => "queued"
-        },
-        table_name: delayed_job_table,
-        return_values: "ALL_OLD"
-      })
+      response = if Funktor.enable_work_queue_visibility
+        dynamodb_client.update_item({
+          key: {
+            "jobShard" => job_shard,
+            "jobId" => job_id
+          },
+          update_expression: "SET category = :category, queueable = :queueable",
+          condition_expression: "category = :current_category",
+          expression_attribute_values: {
+            ":current_category" => current_category,
+            ":queueable" => "false",
+            ":category" => "queued"
+          },
+          table_name: delayed_job_table,
+          return_values: "ALL_OLD"
+        })
+      else
+        dynamodb_client.delete_item({
+          key: {
+            "jobShard" => job_shard,
+            "jobId" => job_id
+          },
+          condition_expression: "category = :current_category",
+          expression_attribute_values: {
+            ":current_category" => current_category
+          },
+          table_name: delayed_job_table,
+          return_values: "ALL_OLD"
+        })
+      end
       if response.attributes # this means the record was still there in the state we expected
         Funktor.logger.debug "response.attributes ====== "
         Funktor.logger.debug response.attributes
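Both branches above rely on DynamoDB conditional writes so that, when several activator runs race, only one of them successfully claims a job. A minimal standalone sketch of that claim pattern (the `claim_job` helper name is hypothetical; the key layout matches the `jobShard`/`jobId` keys used above):

```ruby
# Hypothetical sketch of the conditional-claim pattern used in
# JobActivator#activate_job: the write succeeds only if the item is still in
# the category we last saw, so concurrent activators cannot double-send a job.
require 'aws-sdk-dynamodb'

def claim_job(client, table_name, job_shard, job_id, current_category)
  client.delete_item(
    table_name: table_name,
    key: { "jobShard" => job_shard, "jobId" => job_id },
    condition_expression: "category = :current_category",
    expression_attribute_values: { ":current_category" => current_category },
    return_values: "ALL_OLD"
  )
rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException
  nil # another scheduler claimed the job first; skip it
end
```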
data/lib/funktor/middleware/metrics.rb
CHANGED
@@ -18,13 +18,17 @@ module Funktor
       Funktor.raw_logger.unknown Funktor.dump_json(metric_hash(time_diff, job))
     end
 
+    def metric_namespace
+      [ENV['FUNKTOR_APP_NAME'], ENV['SERVERLESS_STAGE']].join('-')
+    end
+
     def metric_hash(time_diff_in_seconds, job)
       {
         "_aws": {
           "Timestamp": Time.now.strftime('%s%3N').to_i,
           "CloudWatchMetrics": [
             {
-              "Namespace":
+              "Namespace": metric_namespace,
               "Dimensions": [["WorkerClassName"], ["Queue"]],
               "Metrics": [ # CPU, Memory, Duration, etc...
               {
data/lib/funktor/version.rb
CHANGED
data/lib/funktor/work_queue_handler.rb
CHANGED
@@ -30,13 +30,18 @@ module Funktor
    def dispatch(job)
      begin
        @tracker.track(:processingStarted, job)
-       update_job_category(job, "processing")
+       if Funktor.enable_work_queue_visibility
+         update_job_category(job, "processing")
+       end
        Funktor.work_queue_handler_middleware.invoke(job) do
          job.execute
        end
        @processed_counter.incr(job)
        @tracker.track(:processingComplete, job)
-       delete_job_from_dynamodb(job)
+
+       if Funktor.enable_work_queue_visibility
+         delete_job_from_dynamodb(job)
+       end
      # rescue Funktor::Job::InvalidJsonError # TODO Make this work
      rescue Exception => e
        handle_error(e, job)
@@ -44,11 +49,17 @@ module Funktor
        job.error = e
        if job.can_retry
          @tracker.track(:retrying, job)
-         update_job_category(job, "retry")
+
+         if Funktor.enable_work_queue_visibility
+           update_job_category(job, "retry")
+         end
          trigger_retry(job)
        else
          @tracker.track(:bailingOut, job)
-         update_job_category(job, "dead")
+
+         if Funktor.enable_work_queue_visibility
+           update_job_category(job, "dead")
+         end
          Funktor.logger.error "We retried max times. We're bailing on this one."
          Funktor.logger.error job.to_json
        end
@@ -71,6 +82,7 @@ module Funktor
    end
 
    def update_job_category(job, category)
+     puts "starting update_job_category #{category}"
      dynamodb_client.update_item({
        key: {
          "jobShard" => job.shard,
@@ -84,9 +96,11 @@ module Funktor
        },
        return_values: "ALL_OLD"
      })
+     puts "ending update_job_category #{category}"
    end
 
    def delete_job_from_dynamodb(job)
+     puts "starting delete_job_from_dynamodb"
      dynamodb_client.delete_item({
        key: {
          "jobShard" => job.shard,
@@ -95,6 +109,7 @@ module Funktor
        table_name: delayed_job_table,
        return_values: "ALL_OLD"
      })
+     puts "ending delete_job_from_dynamodb"
    end
 
  end
data/lib/funktor.rb
CHANGED
@@ -18,7 +18,8 @@ module Funktor
 
   DEFAULT_OPTIONS = {
     error_handlers: [],
-    log_level: Logger::DEBUG # Set a high log level during early, active development
+    log_level: Logger::DEBUG, # Set a high log level during early, active development
+    enable_work_queue_visibility: true # Enable this by default during early, active development
   }
 
   def self.configure_job_pusher
@@ -110,6 +111,14 @@ module Funktor
     @logger = logger
   end
 
+  def self.enable_work_queue_visibility
+    options[:enable_work_queue_visibility]
+  end
+
+  def self.enable_work_queue_visibility= enabled
+    options[:enable_work_queue_visibility] = enabled
+  end
+
   # We have a raw_logger that doesn't add timestamps and what not. This is used to publish
   # CloudWatch metrics that can be used in dashboards.
   def self.raw_logger
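The new accessors make the jobs-table bookkeeping optional at runtime. A small sketch of turning it off from code that runs at boot (for example `funktor_config/boot.rb`):

```ruby
# Sketch: opt out of the work-queue visibility writes introduced in this
# release, skipping the per-job DynamoDB category updates and deletes done
# by IncomingJobHandler, JobActivator, and WorkQueueHandler.
require 'funktor'

Funktor.enable_work_queue_visibility = false
```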
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: funktor
 version: !ruby/object:Gem::Version
-  version: 0.7.3
+  version: 0.7.6
 platform: ruby
 authors:
 - Jeremy Green
 autorequire:
 bindir: exe
 cert_chain: []
-date:
+date: 2022-05-12 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk-sqs
@@ -176,6 +176,7 @@ files:
 - exe/funktor-deploy
 - funktor-testapp/.envrc
 - funktor-testapp/.gitignore
+- funktor-testapp/Dockerfile
 - funktor-testapp/Gemfile
 - funktor-testapp/Gemfile.lock
 - funktor-testapp/app/services/job_flood.rb
@@ -227,6 +228,7 @@ files:
 - lib/funktor/cli/generate/base.rb
 - lib/funktor/cli/generate/work_queue.rb
 - lib/funktor/cli/init.rb
+- lib/funktor/cli/templates/Dockerfile
 - lib/funktor/cli/templates/Gemfile
 - lib/funktor/cli/templates/app/workers/hello_worker.rb
 - lib/funktor/cli/templates/funktor_config/boot.rb