funktor 0.7.5 → 0.7.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile.lock +1 -1
- data/funktor-testapp/Dockerfile +63 -0
- data/funktor-testapp/funktor_config/boot.rb +3 -0
- data/funktor-testapp/funktor_config/environment.yml +10 -1
- data/funktor-testapp/funktor_config/function_definitions/default_queue_handler.yml +5 -1
- data/funktor-testapp/funktor_config/function_definitions/incoming_job_handler.yml +5 -1
- data/funktor-testapp/funktor_config/function_definitions/job_activator.yml +5 -1
- data/funktor-testapp/funktor_config/function_definitions/low_concurrency_queue_handler.yml +5 -1
- data/funktor-testapp/funktor_config/package.yml +6 -6
- data/funktor-testapp/funktor_config/ruby_layer.yml +1 -1
- data/funktor-testapp/serverless.yml +13 -3
- data/lib/funktor/cli/init.rb +4 -0
- data/lib/funktor/cli/templates/Dockerfile +63 -0
- data/lib/funktor/cli/templates/funktor_config/environment.yml +10 -1
- data/lib/funktor/cli/templates/funktor_config/function_definitions/incoming_job_handler.yml +5 -1
- data/lib/funktor/cli/templates/funktor_config/function_definitions/job_activator.yml +5 -1
- data/lib/funktor/cli/templates/funktor_config/function_definitions/work_queue_handler.yml +5 -1
- data/lib/funktor/cli/templates/funktor_config/package.yml +6 -6
- data/lib/funktor/cli/templates/funktor_config/ruby_layer.yml +1 -1
- data/lib/funktor/cli/templates/serverless.yml +13 -3
- data/lib/funktor/incoming_job_handler.rb +7 -5
- data/lib/funktor/job_activator.rb +32 -15
- data/lib/funktor/version.rb +1 -1
- data/lib/funktor/work_queue_handler.rb +19 -4
- data/lib/funktor.rb +10 -1
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 00c5852fd10c7e6ad87b576ff0a1ed13b62f69fc19391202d4c2d289376a3fb2
+  data.tar.gz: e780ae7466a2c4dfc7060abfacd230a9284105c89aa95db376e99b7fe14a2b89
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e7015cc189689fbffb456727899417e1edb7f823436536b6a3f316a96a83291bdaf0ee43b860d2925b53c239be15d685df268bf277c0843efc853b2b07abe0d6
+  data.tar.gz: a8d58b3d8d45b5bd2ecd70c1f18956685af350959cd6d3263e212d611e9e4d7d069b52681866e725cc432af09a434465e0b9bff80dc94d7a444f88d21a87a36d
data/Gemfile.lock
CHANGED

data/funktor-testapp/Dockerfile
ADDED
@@ -0,0 +1,63 @@
+FROM public.ecr.aws/lambda/ruby:2.7 AS build_image
+
+# Uncomment this as a cache buster
+#RUN echo date
+
+# If you supplied buildArgs to the ecr image you can access them here using ARG & ENV
+#ARG BUNDLE_GEM__FURY__IO
+#ENV BUNDLE_GEM__FURY__IO=${BUNDLE_GEM__FURY__IO}
+
+# First we install some stuff that we need for gems that have to compile native extensions
+#RUN yum groupinstall "Development Tools" -y
+#RUN yum install -y amazon-linux-extras
+#RUN amazon-linux-extras enable postgresql11
+#RUN yum install -y postgresql-devel
+
+# Now we copy the Gemfile and Gemfile.lock into the build image so we can install our gems
+COPY Gemfile Gemfile.lock .
+
+# Set a few bundle configuration options
+RUN bundle lock --add-platform x86_64-linux
+#RUN bundle config --local deployment true
+#RUN bundle config --local plugins false
+#RUN bundle config --local frozen true
+#RUN bundle config --local without 'development test'
+RUN bundle config --local path './vendor/bundle'
+
+# Now install our gems
+RUN bundle install --quiet --jobs 4
+
+# Now we start a second stage in the build that is a clean image without build tools
+FROM public.ecr.aws/lambda/ruby:2.7 AS deploy_image
+
+#ENV RAILS_ENV=production
+#ENV RACK_ENV=production
+
+# Install node so that asset related gems have a JS runtime.
+# We ship the node executeable to production to make it easier to get an app deployed.
+# TODO: Document steps that could be taken to remove this dependency.
+#RUN curl --silent --location https://rpm.nodesource.com/setup_14.x | bash -
+#RUN yum install -y nodejs
+
+# Then copy some postgres related files
+#COPY --from=build_image /usr/lib64/libpq.so.5 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libldap_r-2.4.so.2 /usr/lib64/
+#COPY --from=build_image /usr/lib64/liblber-2.4.so.2 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libsasl2.so.3 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libssl3.so /usr/lib64/
+#COPY --from=build_image /usr/lib64/libsmime3.so /usr/lib64/
+#COPY --from=build_image /usr/lib64/libnss3.so /usr/lib64/
+
+
+# Next we copy the app from our local directory and we copy
+# the bundled gems from the build image.
+# We do this after copying dependencies becuase the app will
+# change more frequently, and we can used caching up to here.
+
+COPY --from=build_image /var/task .
+COPY . .
+
+# And finally we have the CMD for the deployed container
+# You can overwrite command in `serverless.yml` template
+CMD [ "app.LambdaFunction::Handler.process" ]
+
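The image's default CMD, "app.LambdaFunction::Handler.process", is parsed by Lambda as file.Class::Nested.method; every Funktor function overrides it via `command` in its function definition (see the serverless.yml and function definition changes below), so the default only runs if no override is supplied. A minimal sketch of an app.rb that would satisfy that default — this file is not part of the diff, and the body is illustrative:

# app.rb -- illustrative sketch matching the image's default CMD above,
# following the standard AWS Ruby container handler convention.
module LambdaFunction
  class Handler
    def self.process(event:, context:)
      # In a Funktor app this path is never hit, because each function's
      # serverless.yml definition supplies its own `command:` override.
      { statusCode: 200, body: 'default container handler' }
    end
  end
end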
data/funktor-testapp/funktor_config/environment.yml
CHANGED
@@ -1,7 +1,16 @@
-BUNDLE_WITHOUT: development:test
+BUNDLE_WITHOUT: development:test
 BUNDLE_PLUGINS: false
 BUNDLE_FROZEN: true
+BUNDLE_DEPLOYMENT: true
 SERVERLESS_STAGE: ${self:custom.stage}
+
+FUNKTOR_LOG_LEVEL: INFO
+
+RAILS_LOG_TO_STDOUT: true
+RAILS_ENV: production
+RACK_ENV: production
+RAILS_MAX_THREADS: 1
+
 FUNKTOR_APP_NAME: funktor-testapp
 FUNKTOR_INCOMING_JOB_QUEUE:
   Ref: IncomingJobQueue
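The new FUNKTOR_LOG_LEVEL entry implies the logger level is now driven from the environment; the wiring is not shown in this diff, so the mapping below is only an assumption about how an app might apply it at boot:

# Illustrative sketch: translate FUNKTOR_LOG_LEVEL into a Logger severity.
require 'funktor'
require 'logger'

level_name = ENV.fetch('FUNKTOR_LOG_LEVEL', 'DEBUG') # e.g. "INFO" from environment.yml
Funktor.logger.level = Logger.const_get(level_name)  # Logger::INFO, Logger::DEBUG, ...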
data/funktor-testapp/funktor_config/function_definitions/default_queue_handler.yml
CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/default_queue_handler.call
+#handler: lambda_event_handlers/default_queue_handler.call
 timeout: ${self:custom.funktor.DefaultQueueHandler.functionTimeout, 900}
 reservedConcurrency: ${self:custom.funktor.DefaultQueueHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.DefaultQueueHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.DefaultQueueHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/default_queue_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.DefaultQueueHandler.batchSize, 1}
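This pattern repeats in every function definition below: the zip-era `handler:` line is commented out and the container's `command` points at the same lambda_event_handlers file. Those generated handler files are not part of this diff; a plausible sketch of one, using the handler class this release touches (the boot path and top-level `call` convention are assumptions):

# lambda_event_handlers/default_queue_handler.rb -- illustrative sketch only.
require_relative '../funktor_config/boot' # assumed to load Funktor and the app's workers

def call(event:, context:)
  # Drain the SQS batch for the default queue and dispatch each job to its worker.
  Funktor::WorkQueueHandler.new.call(event: event, context: context)
end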
data/funktor-testapp/funktor_config/function_definitions/incoming_job_handler.yml
CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/incoming_job_handler.call
+#handler: lambda_event_handlers/incoming_job_handler.call
 timeout: ${self:custom.funktor.IncomingJobHandler.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.IncomingJobHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.IncomingJobHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.IncomingJobHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/incoming_job_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.IncomingJobHandler.batchSize, 1}
data/funktor-testapp/funktor_config/function_definitions/job_activator.yml
CHANGED
@@ -1,7 +1,11 @@
-handler: lambda_event_handlers/job_activator.call
+#handler: lambda_event_handlers/job_activator.call
 timeout: ${self:custom.funktor.JobActivator.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.JobActivator.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.JobActivator.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.JobActivator.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/job_activator.call
 events:
   - schedule: rate(1 minute)
data/funktor-testapp/funktor_config/function_definitions/low_concurrency_queue_handler.yml
CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/low_concurrency_queue_handler.call
+#handler: lambda_event_handlers/low_concurrency_queue_handler.call
 timeout: ${self:custom.funktor.LowConcurrencyQueueHandler.functionTimeout, 900}
 reservedConcurrency: ${self:custom.funktor.LowConcurrencyQueueHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.LowConcurrencyQueueHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.LowConcurrencyQueueHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/low_concurrency_queue_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.LowConcurrencyQueueHandler.batchSize, 1}
data/funktor-testapp/funktor_config/package.yml
CHANGED
@@ -1,10 +1,10 @@
 # TODO - Figure out how to allow individual packaging to work out of the box.
-individually: false
-include:
-- Gemfile
-- Gemfile.lock
-- funktor_config/boot.rb
-- app/**
+#individually: false
+#include:
+#- Gemfile
+#- Gemfile.lock
+#- funktor_config/boot.rb
+#- app/**
 # Evertyting is excluded by default with serverless-ruby-layer, but you could use
 # the lines below to exlude files that are inside an include path.
 #exclude:
data/funktor-testapp/serverless.yml
CHANGED
@@ -25,6 +25,16 @@ provider:
   runtime: ruby2.7
   stage: ${opt:stage, 'dev'}
   lambdaHashingVersion: 20201221
+  ecr:
+    scanOnPush: true
+    images:
+      funktorimage:
+        path: ./
+        file: Dockerfile
+        #platform: linux/amd64 # May be needed if you're on an M1.
+        buildArgs:
+          # Here you can specify ENV vars that you need at build time. For instance you may want to install private gems:
+          #BUNDLE_GEM__FURY__IO: ${env:BUNDLE_GEM__FURY__IO}
 environment: ${file(funktor_config/environment.yml)}
 versionFunctions: false # Reduces the amount of storage used since all Lambdas together are limited to 75GB
 logRetentionInDays: 7
@@ -42,9 +52,9 @@ custom:
   # commands. Or fallsback to what we have set in the provider section.
   stage: ${self:provider.stage, 'dev'}
   funktor: ${file(funktor_config/funktor.yml)}
-  rubyLayer: ${file(funktor_config/ruby_layer.yml)}
+  #rubyLayer: ${file(funktor_config/ruby_layer.yml)}
 
-package: ${file(funktor_config/package.yml)}
+#package: ${file(funktor_config/package.yml)}
 
 functions:
   IncomingJobHandler: ${file(funktor_config/function_definitions/incoming_job_handler.yml)}
@@ -62,4 +72,4 @@ resources:
   - ${file(funktor_config/resources/jobs_table.yml)}
 
 plugins:
-  - serverless-ruby-layer
+  #- serverless-ruby-layer
data/lib/funktor/cli/init.rb
CHANGED

data/lib/funktor/cli/templates/Dockerfile
ADDED
@@ -0,0 +1,63 @@
+FROM public.ecr.aws/lambda/ruby:2.7 AS build_image
+
+# Uncomment this as a cache buster
+#RUN echo date
+
+# If you supplied buildArgs to the ecr image you can access them here using ARG & ENV
+#ARG BUNDLE_GEM__FURY__IO
+#ENV BUNDLE_GEM__FURY__IO=${BUNDLE_GEM__FURY__IO}
+
+# First we install some stuff that we need for gems that have to compile native extensions
+#RUN yum groupinstall "Development Tools" -y
+#RUN yum install -y amazon-linux-extras
+#RUN amazon-linux-extras enable postgresql11
+#RUN yum install -y postgresql-devel
+
+# Now we copy the Gemfile and Gemfile.lock into the build image so we can install our gems
+COPY Gemfile Gemfile.lock .
+
+# Set a few bundle configuration options
+RUN bundle lock --add-platform x86_64-linux
+#RUN bundle config --local deployment true
+#RUN bundle config --local plugins false
+#RUN bundle config --local frozen true
+#RUN bundle config --local without 'development test'
+RUN bundle config --local path './vendor/bundle'
+
+# Now install our gems
+RUN bundle install --quiet --jobs 4
+
+# Now we start a second stage in the build that is a clean image without build tools
+FROM public.ecr.aws/lambda/ruby:2.7 AS deploy_image
+
+#ENV RAILS_ENV=production
+#ENV RACK_ENV=production
+
+# Install node so that asset related gems have a JS runtime.
+# We ship the node executeable to production to make it easier to get an app deployed.
+# TODO: Document steps that could be taken to remove this dependency.
+#RUN curl --silent --location https://rpm.nodesource.com/setup_14.x | bash -
+#RUN yum install -y nodejs
+
+# Then copy some postgres related files
+#COPY --from=build_image /usr/lib64/libpq.so.5 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libldap_r-2.4.so.2 /usr/lib64/
+#COPY --from=build_image /usr/lib64/liblber-2.4.so.2 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libsasl2.so.3 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libssl3.so /usr/lib64/
+#COPY --from=build_image /usr/lib64/libsmime3.so /usr/lib64/
+#COPY --from=build_image /usr/lib64/libnss3.so /usr/lib64/
+
+
+# Next we copy the app from our local directory and we copy
+# the bundled gems from the build image.
+# We do this after copying dependencies becuase the app will
+# change more frequently, and we can used caching up to here.
+
+COPY --from=build_image /var/task .
+COPY . .
+
+# And finally we have the CMD for the deployed container
+# You can overwrite command in `serverless.yml` template
+CMD [ "app.LambdaFunction::Handler.process" ]
+
data/lib/funktor/cli/templates/funktor_config/environment.yml
CHANGED
@@ -1,7 +1,16 @@
-BUNDLE_WITHOUT: development:test
+BUNDLE_WITHOUT: development:test
 BUNDLE_PLUGINS: false
 BUNDLE_FROZEN: true
+BUNDLE_DEPLOYMENT: true
 SERVERLESS_STAGE: ${self:custom.stage}
+
+FUNKTOR_LOG_LEVEL: INFO
+
+RAILS_LOG_TO_STDOUT: true
+RAILS_ENV: production
+RACK_ENV: production
+RAILS_MAX_THREADS: 1
+
 FUNKTOR_APP_NAME: <%= app_name %>
 FUNKTOR_INCOMING_JOB_QUEUE:
   Ref: IncomingJobQueue
data/lib/funktor/cli/templates/funktor_config/function_definitions/incoming_job_handler.yml
CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/incoming_job_handler.call
+#handler: lambda_event_handlers/incoming_job_handler.call
 timeout: ${self:custom.funktor.IncomingJobHandler.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.IncomingJobHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.IncomingJobHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.IncomingJobHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/incoming_job_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.IncomingJobHandler.batchSize, 1}
data/lib/funktor/cli/templates/funktor_config/function_definitions/job_activator.yml
CHANGED
@@ -1,7 +1,11 @@
-handler: lambda_event_handlers/job_activator.call
+#handler: lambda_event_handlers/job_activator.call
 timeout: ${self:custom.funktor.JobActivator.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.JobActivator.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.JobActivator.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.JobActivator.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/job_activator.call
 events:
   - schedule: rate(1 minute)
data/lib/funktor/cli/templates/funktor_config/function_definitions/work_queue_handler.yml
CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/<%= work_queue_name.underscore %>_queue_handler.call
+#handler: lambda_event_handlers/<%= work_queue_name.underscore %>_queue_handler.call
 timeout: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.functionTimeout, 900}
 reservedConcurrency: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/<%= work_queue_name.underscore %>_queue_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.batchSize, 1}
data/lib/funktor/cli/templates/funktor_config/package.yml
CHANGED
@@ -1,10 +1,10 @@
 # TODO - Figure out how to allow individual packaging to work out of the box.
-individually: false
-include:
-- Gemfile
-- Gemfile.lock
-- funktor_config/boot.rb
-- app/**
+#individually: false
+#include:
+#- Gemfile
+#- Gemfile.lock
+#- funktor_config/boot.rb
+#- app/**
 # Evertyting is excluded by default with serverless-ruby-layer, but you could use
 # the lines below to exlude files that are inside an include path.
 #exclude:
data/lib/funktor/cli/templates/serverless.yml
CHANGED
@@ -25,6 +25,16 @@ provider:
   runtime: <%= runtime %>
   stage: ${opt:stage, 'dev'}
   lambdaHashingVersion: 20201221
+  ecr:
+    scanOnPush: true
+    images:
+      funktorimage:
+        path: ./
+        file: Dockerfile
+        #platform: linux/amd64 # May be needed if you're on an M1.
+        buildArgs:
+          # Here you can specify ENV vars that you need at build time. For instance you may want to install private gems:
+          #BUNDLE_GEM__FURY__IO: ${env:BUNDLE_GEM__FURY__IO}
 environment: ${file(funktor_config/environment.yml)}
 versionFunctions: false # Reduces the amount of storage used since all Lambdas together are limited to 75GB
 logRetentionInDays: 7
@@ -39,9 +49,9 @@ custom:
   # commands. Or fallsback to what we have set in the provider section.
   stage: ${self:provider.stage, 'dev'}
   funktor: ${file(funktor_config/funktor.yml)}
-  rubyLayer: ${file(funktor_config/ruby_layer.yml)}
+  #rubyLayer: ${file(funktor_config/ruby_layer.yml)}
 
-package: ${file(funktor_config/package.yml)}
+#package: ${file(funktor_config/package.yml)}
 
 functions:
   <%- all_function_definitions.each do |function_definition| -%>
@@ -54,4 +64,4 @@ resources:
   <%- end -%>
 
 plugins:
-  - serverless-ruby-layer
+  #- serverless-ruby-layer
data/lib/funktor/incoming_job_handler.rb
CHANGED
@@ -30,11 +30,13 @@ module Funktor
       # TODO : This number should be configurable via ENV var
       if job.delay < 60 # for now we're testing with just one minute * 5 # 5 minutes
         Funktor.logger.debug "pushing to work queue for delay = #{job.delay}"
-        # We push to the jobs table first becauase the work queue handler will expect to be able
-        # to update the stats of a record that's already in the table.
-        # TODO : For time sensitive jobs this is probably less than optimal. Can we update the
-        # work queue handler to be ok with a job that's not yet in the table?
-        push_to_jobs_table(job, "queued")
+        if Funktor.enable_work_queue_visibility
+          # We push to the jobs table first becauase the work queue handler will expect to be able
+          # to update the stats of a record that's already in the table.
+          # TODO : For time sensitive jobs this is probably less than optimal. Can we update the
+          # work queue handler to be ok with a job that's not yet in the table?
+          push_to_jobs_table(job, "queued")
+        end
         push_to_work_queue(job)
         if job.is_retry?
           @tracker.track(:retryActivated, job)
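Condensed, the fast path of the incoming handler now reads as follows (a paraphrase of the hunk above, not verbatim code):

# Jobs with under a minute of delay skip the scheduler entirely.
if job.delay < 60
  # The table write stays ahead of the SQS push because the work queue
  # handler expects the record to already exist -- and is now optional.
  push_to_jobs_table(job, "queued") if Funktor.enable_work_queue_visibility
  push_to_work_queue(job)
  @tracker.track(:retryActivated, job) if job.is_retry?
end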
data/lib/funktor/job_activator.rb
CHANGED
@@ -61,23 +61,40 @@ module Funktor
     end
 
     def activate_job(job_shard, job_id, current_category, queue_immediately = false)
+
+      # TODO: WorkQueueVisibilityMiddleware to alter what happens here? Maybe we delete by default and then the middleware puts it back in the table?
       # First we conditionally update the item in Dynamo to be sure that another scheduler hasn't gotten
       # to it, and if that works then send to SQS. This is basically how Sidekiq scheduler works.
-      response = dynamodb_client.update_item({
-        key: {
-          "jobShard" => job_shard,
-          "jobId" => job_id
-        },
-        update_expression: "SET category = :category, queueable = :queueable",
-        condition_expression: "category = :current_category",
-        expression_attribute_values: {
-          ":current_category" => current_category,
-          ":queueable" => "false",
-          ":category" => "queued"
-        },
-        table_name: delayed_job_table,
-        return_values: "ALL_OLD"
-      })
+      response = if Funktor.enable_work_queue_visibility
+        dynamodb_client.update_item({
+          key: {
+            "jobShard" => job_shard,
+            "jobId" => job_id
+          },
+          update_expression: "SET category = :category, queueable = :queueable",
+          condition_expression: "category = :current_category",
+          expression_attribute_values: {
+            ":current_category" => current_category,
+            ":queueable" => "false",
+            ":category" => "queued"
+          },
+          table_name: delayed_job_table,
+          return_values: "ALL_OLD"
+        })
+      else
+        dynamodb_client.delete_item({
+          key: {
+            "jobShard" => job_shard,
+            "jobId" => job_id
+          },
+          condition_expression: "category = :current_category",
+          expression_attribute_values: {
+            ":current_category" => current_category
+          },
+          table_name: delayed_job_table,
+          return_values: "ALL_OLD"
+        })
+      end
       if response.attributes # this means the record was still there in the state we expected
         Funktor.logger.debug "response.attributes ====== "
         Funktor.logger.debug response.attributes
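Both branches lean on DynamoDB conditional writes for the same guarantee: when several activators race for a job, exactly one write satisfies `category = :current_category`, and only that winner gets `response.attributes` back (via return_values: "ALL_OLD"). The AWS SDK reports the losing side as an exception; Funktor's own rescue path is not shown in this hunk, so this sketch of the losing case is illustrative:

# Illustrative: the losing side of the conditional write in aws-sdk-dynamodb.
begin
  response = dynamodb_client.delete_item({
    key: { "jobShard" => job_shard, "jobId" => job_id },
    condition_expression: "category = :current_category",
    expression_attribute_values: { ":current_category" => current_category },
    table_name: delayed_job_table,
    return_values: "ALL_OLD"
  })
  # Winner: response.attributes holds the old item and the job can be enqueued.
rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException
  # Loser: another scheduler activated this job first; there is nothing to enqueue.
end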
data/lib/funktor/version.rb
CHANGED
@@ -1,3 +1,3 @@
 module Funktor
-  VERSION = "0.7.5"
+  VERSION = "0.7.6"
 end

data/lib/funktor/work_queue_handler.rb
CHANGED
@@ -30,13 +30,18 @@ module Funktor
     def dispatch(job)
       begin
         @tracker.track(:processingStarted, job)
-        update_job_category(job, "processing")
+        if Funktor.enable_work_queue_visibility
+          update_job_category(job, "processing")
+        end
         Funktor.work_queue_handler_middleware.invoke(job) do
           job.execute
         end
         @processed_counter.incr(job)
         @tracker.track(:processingComplete, job)
-        delete_job_from_dynamodb(job)
+
+        if Funktor.enable_work_queue_visibility
+          delete_job_from_dynamodb(job)
+        end
       # rescue Funktor::Job::InvalidJsonError # TODO Make this work
       rescue Exception => e
         handle_error(e, job)
@@ -44,11 +49,17 @@ module Funktor
       job.error = e
       if job.can_retry
         @tracker.track(:retrying, job)
-        update_job_category(job, "retry")
+
+        if Funktor.enable_work_queue_visibility
+          update_job_category(job, "retry")
+        end
         trigger_retry(job)
       else
         @tracker.track(:bailingOut, job)
-        update_job_category(job, "dead")
+
+        if Funktor.enable_work_queue_visibility
+          update_job_category(job, "dead")
+        end
         Funktor.logger.error "We retried max times. We're bailing on this one."
         Funktor.logger.error job.to_json
       end
@@ -71,6 +82,7 @@ module Funktor
     end
 
     def update_job_category(job, category)
+      puts "starting update_job_category #{category}"
       dynamodb_client.update_item({
         key: {
           "jobShard" => job.shard,
@@ -84,9 +96,11 @@ module Funktor
         },
         return_values: "ALL_OLD"
       })
+      puts "ending update_job_category #{category}"
     end
 
     def delete_job_from_dynamodb(job)
+      puts "starting delete_job_from_dynamodb"
       dynamodb_client.delete_item({
         key: {
           "jobShard" => job.shard,
@@ -95,6 +109,7 @@ module Funktor
         table_name: delayed_job_table,
         return_values: "ALL_OLD"
       })
+      puts "ending delete_job_from_dynamodb"
     end
 
   end
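Taken together with the incoming handler change, the visibility writes trace a job's full lifecycle through the jobs table; every call below is now gated on Funktor.enable_work_queue_visibility:

# Lifecycle of a job's DynamoDB record when work queue visibility is enabled:
#   push_to_jobs_table(job, "queued")       # incoming_job_handler.rb, before the SQS push
#   update_job_category(job, "processing")  # work_queue_handler.rb, as dispatch starts
#   delete_job_from_dynamodb(job)           # work_queue_handler.rb, on success
#   update_job_category(job, "retry")       # work_queue_handler.rb, on a retryable failure
#   update_job_category(job, "dead")        # work_queue_handler.rb, when retries are exhausted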
data/lib/funktor.rb
CHANGED
@@ -18,7 +18,8 @@ module Funktor
 
   DEFAULT_OPTIONS = {
     error_handlers: [],
-    log_level: Logger::DEBUG # Set a high log level during early, active development
+    log_level: Logger::DEBUG, # Set a high log level during early, active development
+    enable_work_queue_visibility: true # Enable this by default during early, active development
   }
 
   def self.configure_job_pusher
@@ -110,6 +111,14 @@ module Funktor
     @logger = logger
   end
 
+  def self.enable_work_queue_visibility
+    options[:enable_work_queue_visibility]
+  end
+
+  def self.enable_work_queue_visibility= enabled
+    options[:enable_work_queue_visibility] = enabled
+  end
+
   # We have a raw_logger that doesn't add timestamps and what not. This is used to publish
   # CloudWatch metrics that can be used in dashboards.
   def self.raw_logger
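The accessor pair makes the new behavior a one-line opt-out. A minimal sketch, assuming configuration happens somewhere at boot (for example in the generated funktor_config/boot.rb):

require 'funktor'

# Defaults to true via DEFAULT_OPTIONS above; disabling skips the extra
# DynamoDB round-trips on every job in exchange for less visibility.
Funktor.enable_work_queue_visibility = false

Funktor.enable_work_queue_visibility # => false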
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: funktor
 version: !ruby/object:Gem::Version
-  version: 0.7.5
+  version: 0.7.6
 platform: ruby
 authors:
 - Jeremy Green
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2022-05-
+date: 2022-05-12 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk-sqs
@@ -176,6 +176,7 @@ files:
 - exe/funktor-deploy
 - funktor-testapp/.envrc
 - funktor-testapp/.gitignore
+- funktor-testapp/Dockerfile
 - funktor-testapp/Gemfile
 - funktor-testapp/Gemfile.lock
 - funktor-testapp/app/services/job_flood.rb
@@ -227,6 +228,7 @@ files:
 - lib/funktor/cli/generate/base.rb
 - lib/funktor/cli/generate/work_queue.rb
 - lib/funktor/cli/init.rb
+- lib/funktor/cli/templates/Dockerfile
 - lib/funktor/cli/templates/Gemfile
 - lib/funktor/cli/templates/app/workers/hello_worker.rb
 - lib/funktor/cli/templates/funktor_config/boot.rb