funktor 0.7.5 → 0.7.8
- checksums.yaml +4 -4
- data/Gemfile.lock +3 -3
- data/funktor-testapp/Dockerfile +63 -0
- data/funktor-testapp/app/services/job_flood.rb +1 -1
- data/funktor-testapp/funktor_config/boot.rb +3 -0
- data/funktor-testapp/funktor_config/environment.yml +10 -1
- data/funktor-testapp/funktor_config/function_definitions/default_queue_handler.yml +5 -1
- data/funktor-testapp/funktor_config/function_definitions/incoming_job_handler.yml +5 -1
- data/funktor-testapp/funktor_config/function_definitions/job_activator.yml +5 -1
- data/funktor-testapp/funktor_config/function_definitions/low_concurrency_queue_handler.yml +5 -1
- data/funktor-testapp/funktor_config/package.yml +6 -6
- data/funktor-testapp/funktor_config/resources/cloudwatch_dashboard.yml +645 -37
- data/funktor-testapp/funktor_config/ruby_layer.yml +1 -1
- data/funktor-testapp/serverless.yml +13 -3
- data/lib/funktor/activity_tracker.rb +1 -2
- data/lib/funktor/cli/init.rb +4 -0
- data/lib/funktor/cli/templates/Dockerfile +63 -0
- data/lib/funktor/cli/templates/funktor_config/environment.yml +10 -1
- data/lib/funktor/cli/templates/funktor_config/function_definitions/incoming_job_handler.yml +5 -1
- data/lib/funktor/cli/templates/funktor_config/function_definitions/job_activator.yml +5 -1
- data/lib/funktor/cli/templates/funktor_config/function_definitions/work_queue_handler.yml +5 -1
- data/lib/funktor/cli/templates/funktor_config/package.yml +6 -6
- data/lib/funktor/cli/templates/funktor_config/resources/cloudwatch_dashboard.yml +161 -0
- data/lib/funktor/cli/templates/funktor_config/ruby_layer.yml +1 -1
- data/lib/funktor/cli/templates/serverless.yml +13 -3
- data/lib/funktor/incoming_job_handler.rb +9 -7
- data/lib/funktor/job_activator.rb +33 -17
- data/lib/funktor/job_pusher.rb +1 -1
- data/lib/funktor/version.rb +1 -1
- data/lib/funktor/web/application.rb +1 -1
- data/lib/funktor/work_queue_handler.rb +21 -6
- data/lib/funktor.rb +20 -1
- metadata +4 -2
data/funktor-testapp/serverless.yml CHANGED
@@ -25,6 +25,16 @@ provider:
   runtime: ruby2.7
   stage: ${opt:stage, 'dev'}
   lambdaHashingVersion: 20201221
+  ecr:
+    scanOnPush: true
+    images:
+      funktorimage:
+        path: ./
+        file: Dockerfile
+        #platform: linux/amd64 # May be needed if you're on an M1.
+        buildArgs:
+          # Here you can specify ENV vars that you need at build time. For instance you may want to install private gems:
+          #BUNDLE_GEM__FURY__IO: ${env:BUNDLE_GEM__FURY__IO}
   environment: ${file(funktor_config/environment.yml)}
   versionFunctions: false # Reduces the amount of storage used since all Lambdas together are limited to 75GB
   logRetentionInDays: 7
@@ -42,9 +52,9 @@ custom:
   # commands. Or fallsback to what we have set in the provider section.
   stage: ${self:provider.stage, 'dev'}
   funktor: ${file(funktor_config/funktor.yml)}
-  rubyLayer: ${file(funktor_config/ruby_layer.yml)}
+  #rubyLayer: ${file(funktor_config/ruby_layer.yml)}
 
-package: ${file(funktor_config/package.yml)}
+#package: ${file(funktor_config/package.yml)}
 
 functions:
   IncomingJobHandler: ${file(funktor_config/function_definitions/incoming_job_handler.yml)}
@@ -62,4 +72,4 @@ resources:
   - ${file(funktor_config/resources/jobs_table.yml)}
 
 plugins:
-  - serverless-ruby-layer
+  #- serverless-ruby-layer
data/lib/funktor/activity_tracker.rb CHANGED
@@ -1,5 +1,4 @@
 require 'json'
-require 'aws-sdk-dynamodb'
 
 module Funktor
   class ActivityTracker
@@ -74,7 +73,7 @@ module Funktor
     end
 
     def dynamodb_client
-      Aws::DynamoDB::Client.new
+      Funktor.dynamodb_client
     end
 
     def metric_namespace
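The handler classes in this release stop constructing AWS clients inline and delegate to shared accessors; the file list shows data/lib/funktor.rb +20 -1, which is where Funktor.sqs_client and Funktor.dynamodb_client are defined. A minimal sketch of what such memoized module-level accessors look like (illustrative, not the gem's exact code):

    # Sketch of memoized module-level AWS clients (assumed implementation).
    # Reusing one client per warm Lambda container avoids re-building a
    # client (and its HTTP connections) on every job.
    require 'aws-sdk-sqs'
    require 'aws-sdk-dynamodb'

    module Funktor
      def self.sqs_client
        @sqs_client ||= Aws::SQS::Client.new
      end

      def self.dynamodb_client
        @dynamodb_client ||= Aws::DynamoDB::Client.new
      end
    end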
data/lib/funktor/cli/init.rb CHANGED
data/lib/funktor/cli/templates/Dockerfile ADDED
@@ -0,0 +1,63 @@
+FROM public.ecr.aws/lambda/ruby:2.7 AS build_image
+
+# Uncomment this as a cache buster
+#RUN echo date
+
+# If you supplied buildArgs to the ecr image you can access them here using ARG & ENV
+#ARG BUNDLE_GEM__FURY__IO
+#ENV BUNDLE_GEM__FURY__IO=${BUNDLE_GEM__FURY__IO}
+
+# First we install some stuff that we need for gems that have to compile native extensions
+#RUN yum groupinstall "Development Tools" -y
+#RUN yum install -y amazon-linux-extras
+#RUN amazon-linux-extras enable postgresql11
+#RUN yum install -y postgresql-devel
+
+# Now we copy the Gemfile and Gemfile.lock into the build image so we can install our gems
+COPY Gemfile Gemfile.lock .
+
+# Set a few bundle configuration options
+RUN bundle lock --add-platform x86_64-linux
+#RUN bundle config --local deployment true
+#RUN bundle config --local plugins false
+#RUN bundle config --local frozen true
+#RUN bundle config --local without 'development test'
+RUN bundle config --local path './vendor/bundle'
+
+# Now install our gems
+RUN bundle install --quiet --jobs 4
+
+# Now we start a second stage in the build that is a clean image without build tools
+FROM public.ecr.aws/lambda/ruby:2.7 AS deploy_image
+
+#ENV RAILS_ENV=production
+#ENV RACK_ENV=production
+
+# Install node so that asset related gems have a JS runtime.
+# We ship the node executeable to production to make it easier to get an app deployed.
+# TODO: Document steps that could be taken to remove this dependency.
+#RUN curl --silent --location https://rpm.nodesource.com/setup_14.x | bash -
+#RUN yum install -y nodejs
+
+# Then copy some postgres related files
+#COPY --from=build_image /usr/lib64/libpq.so.5 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libldap_r-2.4.so.2 /usr/lib64/
+#COPY --from=build_image /usr/lib64/liblber-2.4.so.2 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libsasl2.so.3 /usr/lib64/
+#COPY --from=build_image /usr/lib64/libssl3.so /usr/lib64/
+#COPY --from=build_image /usr/lib64/libsmime3.so /usr/lib64/
+#COPY --from=build_image /usr/lib64/libnss3.so /usr/lib64/
+
+
+# Next we copy the app from our local directory and we copy
+# the bundled gems from the build image.
+# We do this after copying dependencies becuase the app will
+# change more frequently, and we can used caching up to here.
+
+COPY --from=build_image /var/task .
+COPY . .
+
+# And finally we have the CMD for the deployed container
+# You can overwrite command in `serverless.yml` template
+CMD [ "app.LambdaFunction::Handler.process" ]
+
data/lib/funktor/cli/templates/funktor_config/environment.yml CHANGED
@@ -1,7 +1,16 @@
-BUNDLE_WITHOUT: development:test
+BUNDLE_WITHOUT: development:test
 BUNDLE_PLUGINS: false
 BUNDLE_FROZEN: true
+BUNDLE_DEPLOYMENT: true
 SERVERLESS_STAGE: ${self:custom.stage}
+
+FUNKTOR_LOG_LEVEL: INFO
+
+RAILS_LOG_TO_STDOUT: true
+RAILS_ENV: production
+RACK_ENV: production
+RAILS_MAX_THREADS: 1
+
 FUNKTOR_APP_NAME: <%= app_name %>
 FUNKTOR_INCOMING_JOB_QUEUE:
   Ref: IncomingJobQueue
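The template also gains a FUNKTOR_LOG_LEVEL variable alongside the Rails-oriented defaults. This diff doesn't show how the gem consumes it, but a plausible boot-time mapping onto the Funktor.logger the handlers already use would be (an assumption, not funktor's documented API):

    # Hypothetical: map the FUNKTOR_LOG_LEVEL env var onto the logger severity.
    require 'logger'
    require 'funktor'

    level_name = ENV.fetch('FUNKTOR_LOG_LEVEL', 'INFO').upcase
    Funktor.logger.level = Logger.const_get(level_name) # e.g. Logger::INFO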
data/lib/funktor/cli/templates/funktor_config/function_definitions/incoming_job_handler.yml CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/incoming_job_handler.call
+#handler: lambda_event_handlers/incoming_job_handler.call
 timeout: ${self:custom.funktor.IncomingJobHandler.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.IncomingJobHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.IncomingJobHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.IncomingJobHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/incoming_job_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.IncomingJobHandler.batchSize, 1}
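With the zip-style handler: line commented out, the function boots from the funktorimage container and overrides the image CMD via command. The string lambda_event_handlers/incoming_job_handler.call still resolves the way Lambda handlers always have: file path, then a top-level call method. A sketch of what such a handler file typically contains (illustrative; the file funktor generates may differ):

    # lambda_event_handlers/incoming_job_handler.rb (hypothetical sketch)
    require_relative '../funktor_config/boot'

    # Lambda invokes this method with each SQS batch from the incoming job queue.
    def call(event:, context:)
      Funktor::IncomingJobHandler.new.call(event: event, context: context)
    end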
data/lib/funktor/cli/templates/funktor_config/function_definitions/job_activator.yml CHANGED
@@ -1,7 +1,11 @@
-handler: lambda_event_handlers/job_activator.call
+#handler: lambda_event_handlers/job_activator.call
 timeout: ${self:custom.funktor.JobActivator.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.JobActivator.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.JobActivator.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.JobActivator.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/job_activator.call
 events:
   - schedule: rate(1 minute)
data/lib/funktor/cli/templates/funktor_config/function_definitions/work_queue_handler.yml CHANGED
@@ -1,8 +1,12 @@
-handler: lambda_event_handlers/<%= work_queue_name.underscore %>_queue_handler.call
+#handler: lambda_event_handlers/<%= work_queue_name.underscore %>_queue_handler.call
 timeout: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.functionTimeout, 900}
 reservedConcurrency: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.memorySize, 256}
+image:
+  name: funktorimage
+  command:
+    - lambda_event_handlers/<%= work_queue_name.underscore %>_queue_handler.call
 events:
   - sqs:
       batchSize: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.batchSize, 1}
data/lib/funktor/cli/templates/funktor_config/package.yml CHANGED
@@ -1,10 +1,10 @@
 # TODO - Figure out how to allow individual packaging to work out of the box.
-individually: false
-include:
-- Gemfile
-- Gemfile.lock
-- funktor_config/boot.rb
-- app/**
+#individually: false
+#include:
+#- Gemfile
+#- Gemfile.lock
+#- funktor_config/boot.rb
+#- app/**
 # Evertyting is excluded by default with serverless-ruby-layer, but you could use
 # the lines below to exlude files that are inside an include path.
 #exclude:
data/lib/funktor/cli/templates/funktor_config/resources/cloudwatch_dashboard.yml CHANGED
@@ -21,6 +21,160 @@ Resources:
 },
 <% current_y += 3 %>
 
+
+<%- app_worker_names.each do |worker_name| -%>
+{ <% "Worker" %>
+  "height": 3,
+  "width": 6,
+  "y": <%= current_y %>,
+  "x": 0,
+  "type": "text",
+  "properties": {
+    "markdown": "\n# <%= worker_name %>\n"
+  }
+},
+{ <% "Worker Jobs per minute" %>
+  "height": 3,
+  "width": 6,
+  "y": <%= current_y %>,
+  "x": 6,
+  "type": "metric",
+  "properties": {
+    "metrics": [
+      [ "${self:custom.funktor.DashboardNamespace}", "processed", "WorkerClassName", "<%= worker_name %>" ],
+      [ "${self:custom.funktor.DashboardNamespace}", "failed", "WorkerClassName", "<%= worker_name %>", { "color": "#d62728" } ]
+    ],
+    "view": "singleValue",
+    "region": "us-east-1",
+    "stat": "Sum",
+    "period": 60,
+    "sparkline": true,
+    "title": "<%= worker_name %> Jobs Per Minute"
+  }
+},
+{ <% "Worker Durations" %>
+  "height": 3,
+  "width": 12,
+  "y": <%= current_y %>,
+  "x": 12,
+  "type": "metric",
+  "properties": {
+    "metrics": [
+      [ "${self:custom.funktor.DashboardNamespace}", "Duration", "WorkerClassName", "<%= worker_name %>", { "label": "p10" } ],
+      [ "...", { "label": "p50", "stat": "p50" } ],
+      [ "...", { "label": "p99", "stat": "p99" } ],
+      [ "...", { "label": "Average", "stat": "Average" } ]
+    ],
+    "view": "singleValue",
+    "region": "us-east-1",
+    "stat": "p10",
+    "period": 60,
+    "sparkline": true,
+    "title": "<%= worker_name %> Duration"
+  }
+},
+<% current_y += 3 %>
+
+
+
+
+{ <% "Processed Jobs By Worker" %>
+  "height": 3,
+  "width": 6,
+  "y": <%= current_y %>,
+  "x": 0,
+  "type": "metric",
+  "properties": {
+    "metrics": [
+      [ "${self:custom.funktor.DashboardNamespace}", "processed", "WorkerClassName", "<%= worker_name %>" ]
+    ],
+    "view": "timeSeries",
+    "stacked": false,
+    "region": "us-east-1",
+    "title": "<%= worker_name %> Processed Jobs",
+    "period": 60,
+    "stat": "Sum"
+  }
+},
+{ <% "Failed Jobs By Worker" %>
+  "height": 3,
+  "width": 6,
+  "y": <%= current_y + 3 %>,
+  "x": 0,
+  "type": "metric",
+  "properties": {
+    "metrics": [
+      [ "${self:custom.funktor.DashboardNamespace}", "failed", "WorkerClassName", "<%= worker_name %>", { "color": "#d62728" }]
+    ],
+    "view": "timeSeries",
+    "stacked": false,
+    "region": "us-east-1",
+    "title": "<%= worker_name %> Failed Jobs",
+    "period": 60,
+    "stat": "Sum"
+  }
+},
+{ <% "Job Duration By Worker" %>
+  "height": 6,
+  "width": 12,
+  "y": <%= current_y %>,
+  "x": 6,
+  "type": "metric",
+  "properties": {
+    "metrics": [
+      [ "${self:custom.funktor.DashboardNamespace}", "Duration", "WorkerClassName", "<%= worker_name %>" ],
+      [ "...", { "stat": "p80" } ],
+      [ "...", { "stat": "p70" } ],
+      [ "...", { "stat": "p60" } ]
+    ],
+    "view": "timeSeries",
+    "stacked": false,
+    "region": "us-east-1",
+    "stat": "p90",
+    "period": 60,
+    "title": "<%= worker_name %> Duration Percentiles"
+  }
+},
+{ <% "Maximum By Worker" %>
+  "height": 3,
+  "width": 6,
+  "y": <%= current_y %>,
+  "x": 18,
+  "type": "metric",
+  "properties": {
+    "metrics": [
+      [ "${self:custom.funktor.DashboardNamespace}", "Duration", "WorkerClassName", "<%= worker_name %>" ]
+    ],
+    "view": "timeSeries",
+    "stacked": false,
+    "region": "us-east-1",
+    "stat": "Maximum",
+    "period": 60,
+    "title": "<%= worker_name %> Max"
+  }
+},
+{ <% "Minimum By Worker" %>
+  "height": 3,
+  "width": 6,
+  "y": <%= current_y %>,
+  "x": 18,
+  "type": "metric",
+  "properties": {
+    "metrics": [
+      [ "${self:custom.funktor.DashboardNamespace}", "Duration", "WorkerClassName", "<%= worker_name %>" ]
+    ],
+    "view": "timeSeries",
+    "stacked": false,
+    "region": "us-east-1",
+    "stat": "Minimum",
+    "period": 60,
+    "title": "<%= worker_name %> Min"
+  }
+},
+<% current_y += 6 %>
+<%- end -%>
+
+
 { <% "Job Duration By Worker" %>
   "height": 6,
   "width": 12,
@@ -65,6 +219,7 @@ Resources:
   }
 },
 <% current_y += 6 %>
+
 { <% "Job Duration By Queue" %>
   "height": 6,
   "width": 12,
@@ -147,6 +302,7 @@ Resources:
     "region": "us-east-1",
     "stat": "Sum",
     "period": 60,
+    "sparkline": true,
     "title": "Messages Per Minute"
   }
 },
@@ -167,6 +323,7 @@ Resources:
     "region": "us-east-1",
    "stat": "p10",
     "period": 60,
+    "sparkline": true,
     "title": "Handler Duration"
   }
 },
@@ -294,6 +451,7 @@ Resources:
     "region": "us-east-1",
     "stat": "Sum",
     "period": 60,
+    "sparkline": true,
     "title": "Messages Per Minute"
   }
 },
@@ -316,6 +474,7 @@ Resources:
     "region": "us-east-1",
     "stat": "p10",
     "period": 60,
+    "sparkline": true,
     "title": "Handler Duration"
   }
 },
@@ -444,6 +603,7 @@ Resources:
     "region": "us-east-1",
     "stat": "Average",
     "period": 60,
+    "sparkline": true,
    "title": "Messages To Be Scheduled"
   }
 },
@@ -464,6 +624,7 @@ Resources:
     "region": "us-east-1",
     "stat": "p10",
     "period": 60,
+    "sparkline": true,
     "title": "Handler Duration"
   }
 },
data/lib/funktor/cli/templates/serverless.yml CHANGED
@@ -25,6 +25,16 @@ provider:
   runtime: <%= runtime %>
   stage: ${opt:stage, 'dev'}
   lambdaHashingVersion: 20201221
+  ecr:
+    scanOnPush: true
+    images:
+      funktorimage:
+        path: ./
+        file: Dockerfile
+        #platform: linux/amd64 # May be needed if you're on an M1.
+        buildArgs:
+          # Here you can specify ENV vars that you need at build time. For instance you may want to install private gems:
+          #BUNDLE_GEM__FURY__IO: ${env:BUNDLE_GEM__FURY__IO}
   environment: ${file(funktor_config/environment.yml)}
   versionFunctions: false # Reduces the amount of storage used since all Lambdas together are limited to 75GB
   logRetentionInDays: 7
@@ -39,9 +49,9 @@ custom:
   # commands. Or fallsback to what we have set in the provider section.
   stage: ${self:provider.stage, 'dev'}
   funktor: ${file(funktor_config/funktor.yml)}
-  rubyLayer: ${file(funktor_config/ruby_layer.yml)}
+  #rubyLayer: ${file(funktor_config/ruby_layer.yml)}
 
-package: ${file(funktor_config/package.yml)}
+#package: ${file(funktor_config/package.yml)}
 
 functions:
 <%- all_function_definitions.each do |function_definition| -%>
@@ -54,4 +64,4 @@ resources:
 <%- end -%>
 
 plugins:
-  - serverless-ruby-layer
+  #- serverless-ruby-layer
data/lib/funktor/incoming_job_handler.rb CHANGED
@@ -18,11 +18,11 @@ module Funktor
     end
 
     def sqs_client
-      Aws::SQS::Client.new
+      Funktor.sqs_client
     end
 
     def dynamodb_client
-      Aws::DynamoDB::Client.new
+      Funktor.dynamodb_client
     end
 
     def dispatch(job)
@@ -30,11 +30,13 @@ module Funktor
       # TODO : This number should be configurable via ENV var
       if job.delay < 60 # for now we're testing with just one minute * 5 # 5 minutes
         Funktor.logger.debug "pushing to work queue for delay = #{job.delay}"
-        # We push to the jobs table first becauase the work queue handler will expect to be able
-        # to update the stats of a record that's already in the table.
-        # TODO : For time sensitive jobs this is probably less than optimal. Can we update the
-        # work queue handler to be ok with a job that's not yet in the table?
-        push_to_jobs_table(job, "queued")
+        if Funktor.enable_work_queue_visibility
+          # We push to the jobs table first becauase the work queue handler will expect to be able
+          # to update the stats of a record that's already in the table.
+          # TODO : For time sensitive jobs this is probably less than optimal. Can we update the
+          # work queue handler to be ok with a job that's not yet in the table?
+          push_to_jobs_table(job, "queued")
+        end
         push_to_work_queue(job)
         if job.is_retry?
           @tracker.track(:retryActivated, job)
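The new Funktor.enable_work_queue_visibility flag gates the extra DynamoDB write that keeps short-delay jobs visible in the jobs table while they sit on the work queue. The flag itself lives in data/lib/funktor.rb (+20 -1 above), and data/funktor-testapp/funktor_config/boot.rb gains three lines, plausibly setting it. A hedged sketch of opting out (the setter form is an assumption):

    # funktor_config/boot.rb (hypothetical sketch)
    require 'funktor'

    # Trade per-job visibility in the web UI for fewer DynamoDB writes
    # on the hot path.
    Funktor.enable_work_queue_visibility = false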
data/lib/funktor/job_activator.rb CHANGED
@@ -9,11 +9,11 @@ module Funktor
     end
 
     def dynamodb_client
-      Aws::DynamoDB::Client.new
+      Funktor.dynamodb_client
     end
 
     def sqs_client
-      Aws::SQS::Client.new
+      Funktor.sqs_client
     end
 
     def delayed_job_table
@@ -61,23 +61,39 @@ module Funktor
     end
 
     def activate_job(job_shard, job_id, current_category, queue_immediately = false)
+      # TODO: WorkQueueVisibilityMiddleware to alter what happens here? Maybe we delete by default and then the middleware puts it back in the table?
       # First we conditionally update the item in Dynamo to be sure that another scheduler hasn't gotten
       # to it, and if that works then send to SQS. This is basically how Sidekiq scheduler works.
-      response = dynamodb_client.update_item({
-        key: {
-          "jobShard" => job_shard,
-          "jobId" => job_id
-        },
-        update_expression: "SET category = :category, queueable = :queueable",
-        condition_expression: "category = :current_category",
-        expression_attribute_values: {
-          ":current_category" => current_category,
-          ":queueable" => "false",
-          ":category" => "queued"
-        },
-        table_name: delayed_job_table,
-        return_values: "ALL_OLD"
-      })
+      response = if Funktor.enable_work_queue_visibility
+        dynamodb_client.update_item({
+          key: {
+            "jobShard" => job_shard,
+            "jobId" => job_id
+          },
+          update_expression: "SET category = :category, queueable = :queueable",
+          condition_expression: "category = :current_category",
+          expression_attribute_values: {
+            ":current_category" => current_category,
+            ":queueable" => "false",
+            ":category" => "queued"
+          },
+          table_name: delayed_job_table,
+          return_values: "ALL_OLD"
+        })
+      else
+        dynamodb_client.delete_item({
+          key: {
+            "jobShard" => job_shard,
+            "jobId" => job_id
+          },
+          condition_expression: "category = :current_category",
+          expression_attribute_values: {
+            ":current_category" => current_category
+          },
+          table_name: delayed_job_table,
+          return_values: "ALL_OLD"
+        })
+      end
       if response.attributes # this means the record was still there in the state we expected
         Funktor.logger.debug "response.attributes ====== "
         Funktor.logger.debug response.attributes
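Both branches use a DynamoDB conditional write so that when several activator runs race for the same job, exactly one wins; a loser's write fails its condition_expression instead of double-enqueuing the job. A self-contained illustration of that guard pattern, with hypothetical table and key values (funktor's own code instead inspects response.attributes):

    # Standalone sketch of a conditional-write race guard (hypothetical values).
    require 'aws-sdk-dynamodb'

    client = Aws::DynamoDB::Client.new

    begin
      client.delete_item({
        table_name: 'funktor-delayed-jobs',         # hypothetical table name
        key: { 'jobShard' => 1, 'jobId' => 'abc' }, # hypothetical key
        condition_expression: 'category = :current_category',
        expression_attribute_values: { ':current_category' => 'scheduled' },
        return_values: 'ALL_OLD'
      })
      # We won: this invocation owns the job and may enqueue it to SQS.
    rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException
      # Another activator claimed the job first; skip it.
    end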
data/lib/funktor/job_pusher.rb CHANGED
data/lib/funktor/version.rb CHANGED