funktor 0.2.1
- checksums.yaml +7 -0
- data/.gitignore +15 -0
- data/.rspec +3 -0
- data/.travis.yml +6 -0
- data/CODE_OF_CONDUCT.md +74 -0
- data/Gemfile +7 -0
- data/Gemfile.lock +84 -0
- data/LICENSE.txt +21 -0
- data/README.md +154 -0
- data/Rakefile +6 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/exe/funktor +13 -0
- data/exe/funktor-deploy +8 -0
- data/funktor.gemspec +38 -0
- data/lib/funktor.rb +63 -0
- data/lib/funktor/active_job_handler.rb +52 -0
- data/lib/funktor/aws/sqs/event.rb +20 -0
- data/lib/funktor/aws/sqs/record.rb +14 -0
- data/lib/funktor/cli/application.rb +23 -0
- data/lib/funktor/cli/bootstrap.rb +35 -0
- data/lib/funktor/cli/generate.rb +0 -0
- data/lib/funktor/cli/generate/base.rb +13 -0
- data/lib/funktor/cli/generate/work_queue.rb +25 -0
- data/lib/funktor/cli/init.rb +78 -0
- data/lib/funktor/cli/templates/Gemfile +9 -0
- data/lib/funktor/cli/templates/config/environment.yml +4 -0
- data/lib/funktor/cli/templates/config/funktor.yml +51 -0
- data/lib/funktor/cli/templates/config/package.yml +9 -0
- data/lib/funktor/cli/templates/config/ruby_layer.yml +11 -0
- data/lib/funktor/cli/templates/function_definitions/active_job_handler.yml +11 -0
- data/lib/funktor/cli/templates/function_definitions/incoming_job_handler.yml +11 -0
- data/lib/funktor/cli/templates/funktor.yml.tt +51 -0
- data/lib/funktor/cli/templates/gitignore +2 -0
- data/lib/funktor/cli/templates/handlers/active_job_handler.rb +17 -0
- data/lib/funktor/cli/templates/handlers/incoming_job_handler.rb +8 -0
- data/lib/funktor/cli/templates/iam_permissions/active_job_queue.yml +8 -0
- data/lib/funktor/cli/templates/iam_permissions/incoming_job_queue.yml +8 -0
- data/lib/funktor/cli/templates/iam_permissions/ssm.yml +5 -0
- data/lib/funktor/cli/templates/package.json +1 -0
- data/lib/funktor/cli/templates/resources/active_job_queue.yml +22 -0
- data/lib/funktor/cli/templates/resources/cloudwatch_dashboard.yml +518 -0
- data/lib/funktor/cli/templates/resources/incoming_job_queue.yml +22 -0
- data/lib/funktor/cli/templates/resources/incoming_job_queue_user.yml +26 -0
- data/lib/funktor/cli/templates/serverless.yml +54 -0
- data/lib/funktor/cli/templates/workers/hello_worker.rb +8 -0
- data/lib/funktor/deploy/cli.rb +42 -0
- data/lib/funktor/deploy/serverless.rb +60 -0
- data/lib/funktor/deploy/serverless_templates/serverless.yml +156 -0
- data/lib/funktor/fake_job_queue.rb +15 -0
- data/lib/funktor/incoming_job_handler.rb +39 -0
- data/lib/funktor/job.rb +76 -0
- data/lib/funktor/middleware/metrics.rb +51 -0
- data/lib/funktor/middleware_chain.rb +62 -0
- data/lib/funktor/testing.rb +69 -0
- data/lib/funktor/version.rb +3 -0
- data/lib/funktor/worker.rb +86 -0
- metadata +173 -0
data/lib/funktor.rb
ADDED
@@ -0,0 +1,63 @@
require "funktor/version"
require 'funktor/aws/sqs/event'
require 'funktor/aws/sqs/record'
require 'funktor/job'
require 'funktor/worker'
require 'funktor/middleware_chain'
require 'funktor/incoming_job_handler'
require 'funktor/active_job_handler'

require 'json'

module Funktor
  class Error < StandardError; end
  # Your code goes here...

  def self.configure_job_pusher
    yield self
  end

  def self.job_pusher_middleware
    @job_pusher_chain ||= MiddlewareChain.new
    yield @job_pusher_chain if block_given?
    @job_pusher_chain
  end

  def self.configure_active_job_handler
    yield self
  end

  def self.active_job_handler_middleware
    @active_job_handler_chain ||= MiddlewareChain.new
    yield @active_job_handler_chain if block_given?
    @active_job_handler_chain
  end

  # TODO - Maybe we don't need this either? Maybe this should be a super dumb thing that also
  # just pushed JSON around? Maybe we want to centralize middlewares in only two spots?
  # 1. Job pushing.
  # 2. Job execution.
  # 🤔
  def self.configure_incoming_job_handler
    yield self
  end

  def self.incoming_job_handler_middleware
    @incoming_job_handler_chain ||= MiddlewareChain.new
    yield @incoming_job_handler_chain if block_given?
    @incoming_job_handler_chain
  end

  def self.parse_json(string)
    JSON.parse(string)
  end

  def self.dump_json(object)
    JSON.generate(object)
  end
end

# TODO - Should we require this by default or let people opt in?
# Is it a code smell that we need to include it at the bottom, after
# the main Funktor module is defined?
require 'funktor/middleware/metrics'
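The module above is the configuration surface: each stage (job pushing, incoming job handling, active job handling) gets its own lazily created MiddlewareChain, which is yielded when a block is given. A minimal sketch of how an application might register a middleware on one of these chains — the `add` method and the middleware call signature are assumptions, since MiddlewareChain itself (data/lib/funktor/middleware_chain.rb) is not expanded in this diff:

# Hypothetical middleware; assumes the chain calls #call(job) and that yield continues the chain.
class TimingMiddleware
  def call(job)
    started = Time.now
    yield
  ensure
    puts "job processed in #{Time.now - started}s"
  end
end

# active_job_handler_middleware yields the chain when a block is given (see above).
Funktor.active_job_handler_middleware do |chain|
  chain.add TimingMiddleware # `add` is assumed; not shown in this diff
end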
data/lib/funktor/active_job_handler.rb
ADDED
@@ -0,0 +1,52 @@
require 'aws-sdk-sqs'

module Funktor
  class ActiveJobHandler

    def call(event:, context:)
      event = Funktor::Aws::Sqs::Event.new(event)
      puts "event.jobs.count = #{event.jobs.count}"
      event.jobs.each do |job|
        dispatch(job)
      end
    end

    def sqs_client
      @sqs_client ||= ::Aws::SQS::Client.new
    end

    def dispatch(job)
      begin
        Funktor.active_job_handler_middleware.invoke(job) do
          job.execute
        end
      # rescue Funktor::Job::InvalidJsonError # TODO Make this work
      rescue Exception => e
        puts "Error during processing: #{$!}"
        puts "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
        attempt_retry_or_bail(job)
      end
    end

    def attempt_retry_or_bail(job)
      if job.can_retry
        trigger_retry(job)
      else
        puts "We retried max times. We're bailing on this one."
        puts job.to_json
      end
    end

    def trigger_retry(job)
      job.increment_retries
      puts "scheduling retry # #{job.retries} with delay of #{job.delay}"
      puts job.to_json
      sqs_client.send_message({
        queue_url: job.retry_queue_url,
        message_body: job.to_json,
        delay_seconds: job.delay
      })
    end

  end
end
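ActiveJobHandler#call accepts the raw Lambda event and context keywords, so a generated handler file only needs to delegate to it. The gem ships a handlers/active_job_handler.rb template (17 lines, not expanded in this diff); the sketch below is an illustrative guess at such an entry point, matching the `handlers/active_job_handler.call` handler string used in the function definition further down:

# Illustrative sketch only; the actual template shipped with the gem is not shown in this diff.
require 'funktor'

def call(event:, context:)
  Funktor::ActiveJobHandler.new.call(event: event, context: context)
end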
data/lib/funktor/aws/sqs/event.rb
ADDED
@@ -0,0 +1,20 @@
module Funktor
  module Aws
    module Sqs
      class Event
        attr_accessor :event_data
        def initialize(event_data)
          @event_data = event_data
        end

        def records
          @records ||= @event_data["Records"].map{|record_data| Funktor::Aws::Sqs::Record.new(record_data) }
        end

        def jobs
          records.map(&:job)
        end
      end
    end
  end
end
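Event is a thin wrapper around the Lambda/SQS invocation payload: it expects a hash with a "Records" array and wraps each entry in a Funktor::Aws::Sqs::Record, whose #job method (defined in record.rb, not expanded in this diff) builds the Funktor::Job. A hedged sketch of the expected shape — the message body keys are illustrative assumptions, since the job serialization format lives in job.rb:

# "Records" matches what Lambda passes for SQS-triggered invocations.
raw_event = {
  "Records" => [
    { "body" => '{"worker":"HelloWorker","worker_params":[]}' } # body format is an assumption
  ]
}

event = Funktor::Aws::Sqs::Event.new(raw_event)
event.records.count # => 1
event.jobs          # one Funktor::Job per record, built by Record#job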
data/lib/funktor/cli/application.rb
ADDED
@@ -0,0 +1,23 @@
require "thor"

require_relative "./bootstrap"
require_relative "./init"
require_relative "./generate/base"

module Funktor
  module CLI
    class Application < Thor
      # This makes thor report the correct exit code in the event of a failure.
      def self.exit_on_failure?
        true
      end

      register(Funktor::CLI::Bootstrap, "bootstrap", "bootstrap [options]", "Bootstrap a new funktor application")
      register(Funktor::CLI::Init, "init", "init [FRAMEWORK] [options]", "Initialize a new funktor directory")
      register(Funktor::CLI::Generate::Base, "generate", "generate GENERATOR [args] [options]", "Generate new resources")

      # Set up an alias so that "funktor g" is the same as "funktor generate"
      map "g" => :generate
    end
  end
end
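Application wires the Thor commands together; an executable only needs to start it. The exe/funktor script (13 lines in the file list, not expanded in this diff) presumably does something along these lines — a sketch based on the usual Thor convention, not the shipped file:

#!/usr/bin/env ruby
# Conventional Thor entry point; the real exe/funktor may differ.
require 'funktor/cli/application'

Funktor::CLI::Application.start(ARGV)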
data/lib/funktor/cli/bootstrap.rb
ADDED
@@ -0,0 +1,35 @@
module Funktor
  module CLI
    class Bootstrap < Thor::Group
      include Thor::Actions

      class_option :file, :aliases => "-f",
        :type => :string, :desc => "The bootstrap file to generate.",
        :default => "funktor.yml"
      class_option :directory, :aliases => "-d",
        :type => :string, :desc => "The directory to initialize",
        :default => "funktor"

      desc <<~DESC
        Description:
          Bootstrap a new funktor application by generating a funktor.yml file."
      DESC

      def self.source_root
        File.dirname(__FILE__)
      end

      def funktor_yml
        puts funktor_file_target
        template "templates/funktor.yml", funktor_file_target
      end

      private
      def funktor_file_target
        File.join options[:directory], options[:file]
      end

    end
  end
end
data/lib/funktor/cli/generate.rb
File without changes
data/lib/funktor/cli/generate/work_queue.rb
ADDED
@@ -0,0 +1,25 @@
module Funktor
  module CLI
    module Generate
      class WorkQueue < Thor::Group

        argument :name, :desc => "The name of the queue to generate"#, :default => "default"

        def resource_yml
          puts "queue-name.yml #{name}"
        end

        def lambda_handler
          puts "handler.rb"
        end

        def function_definition
          puts "function_definition.yml"
        end

      end
    end
  end
end
data/lib/funktor/cli/init.rb
ADDED
@@ -0,0 +1,78 @@
module Funktor
  module CLI
    class Init < Thor::Group
      include Thor::Actions

      class_option :framework, :aliases => "-f",
        :type => :string, :desc => "The deployment/provisioning framework to use.",
        :default => "serverless"
      class_option :directory, :aliases => "-d",
        :type => :string, :desc => "The directory to initialize",
        :default => "funktor"

      desc <<~DESC
        Description:
          Initialize a new funktor deployment directory.
      DESC

      def self.source_root
        File.join File.dirname(__FILE__), 'templates'
      end

      def self.destination_root
        options[:directory]
      end

      def serverless_yml
        template "serverless.yml", File.join(options[:directory], "serverless.yml")
      end

      def funktor_config_yml
        #template "funktor_config.yml", File.join(options[:directory], "funktor_config.yml")
        template File.join("config", "funktor.yml"), File.join(options[:directory], "config", "funktor.yml")
        template File.join("config", "ruby_layer.yml"), File.join(options[:directory], "config", "ruby_layer.yml")
        template File.join("config", "package.yml"), File.join(options[:directory], "config", "package.yml")
        template File.join("config", "environment.yml"), File.join(options[:directory], "config", "environment.yml")
      end

      def package_json
        template "package.json", File.join(options[:directory], "package.json")
      end

      def gemfile
        template "Gemfile", File.join(options[:directory], "Gemfile")
      end

      def gitignore
        template "gitignore", File.join(options[:directory], ".gitignore")
      end

      def resources
        template File.join("resources", "incoming_job_queue.yml"), File.join(options[:directory], "resources", "incoming_job_queue.yml")
        template File.join("resources", "incoming_job_queue_user.yml"), File.join(options[:directory], "resources", "incoming_job_queue_user.yml")
        template File.join("resources", "active_job_queue.yml"), File.join(options[:directory], "resources", "active_job_queue.yml")
        template File.join("resources", "cloudwatch_dashboard.yml"), File.join(options[:directory], "resources", "cloudwatch_dashboard.yml")
      end

      def iam_permissions
        template File.join("iam_permissions", "ssm.yml"), File.join(options[:directory], "iam_permissions", "ssm.yml")
        template File.join("iam_permissions", "incoming_job_queue.yml"), File.join(options[:directory], "iam_permissions", "incoming_job_queue.yml")
        template File.join("iam_permissions", "active_job_queue.yml"), File.join(options[:directory], "iam_permissions", "active_job_queue.yml")
      end

      def function_definitions
        template File.join("function_definitions", "active_job_handler.yml"), File.join(options[:directory], "function_definitions", "active_job_handler.yml")
        template File.join("function_definitions", "incoming_job_handler.yml"), File.join(options[:directory], "function_definitions", "incoming_job_handler.yml")
      end

      def lambda_handlers
        template File.join("handlers", "active_job_handler.rb"), File.join(options[:directory], "handlers", "active_job_handler.rb")
        template File.join("handlers", "incoming_job_handler.rb"), File.join(options[:directory], "handlers", "incoming_job_handler.rb")
      end

      def workers
        template File.join("workers", "hello_worker.rb"), File.join(options[:directory], "workers", "hello_worker.rb")
      end
    end
  end
end
data/lib/funktor/cli/templates/config/funktor.yml
ADDED
@@ -0,0 +1,51 @@
incomingJobHandler:
  # timeout is how long the handler can possibly run. Up to 10 messages may be delivered
  # to a handler at one time, so you'll want this to be at least 10x the maximum time you
  # expect to spend for one message. The incoming job handler usually will be pretty fast,
  # but we default to a high number here to allow for the times when things go weird.
  timeout: 900
  # reservedConcurrency represents the maximum number of concurrent executions.
  # For the incoming job handler you probably don't want to limit it because you
  # want to get things onto work queues as quickly as possible.
  reservedConcurrency: null
  # provisionedConcurrency represents the number of lambda functions that will always
  # be available. For the incoming jobs handler you probably don't need to set this
  # unless your jobs are very bursty AND very time sensitive.
  provisionedConcurrency: null
  # Use memory_size to adjust the reousrces (both memory and CPU) available.
  # For the incoming jobs handler you probably don't need this to be too large,
  # but if you're seeing large delays at this stage it might help to bump it up.
  memorySize: 256

activeJobHandler:
  # timeout is how long the handler can possibly run. Up to 10 messages may be delivered
  # to a handler at one time, so you'll want this to be at least 10x the maximum time you
  # expect to spend for one message. The active job handler may be slow if your jobs are
  # doing a lot of work, so we default to the maximum here.
  timeout: 900
  # reservedConcurrency represents the maximum number of concurrent executions.
  # For the active job handler you may want to limit it if you have resource limitations
  # like database connections that you need to avoid exhausting.
  reservedConcurrency: null
  # provisionedConcurrency represents the number of lambda functions that will always
  # be available. For the active job handler you probably don't need to set this
  # unless your jobs are very bursty AND very time sensitive.
  provisionedConcurrency: null
  # Use memory_size to adjust the reousrces (both memory and CPU) available.
  # For the active jobs handler you'll want this to be at least as large as the memory
  # required to actually do your jobs. You can choose an even higher number to increase
  # the available CPU to make the jobs run faster.
  memorySize: 256


# You shouldn't need to mess with these under most circumstances. But you could if you want to change
# the name of some of your resources in AWS.
incomingJobQueueName: ${self:service}-${self:custom.stage}-incoming-jobs
incomingJobQueueAccessPolicyName: ${self:service}-${self:custom.stage}-incoming-job-queue-access
incomingDeadJobQueueName: ${self:service}-${self:custom.stage}-incoming-dead-jobs
activeJobQueueName: ${self:service}-${self:custom.stage}-active-jobs
activityQueueName: ${self:service}-${self:custom.stage}-activity
activityDeadQueueName: ${self:service}-${self:custom.stage}-activity-dead
deadJobQueueName: ${self:service}-${self:custom.stage}-dead-jobs
dashboardName: ${self:service}-${self:custom.stage}-dashboard
data/lib/funktor/cli/templates/config/package.yml
ADDED
@@ -0,0 +1,9 @@
include:
  - Gemfile
  - Gemfile.lock
  - handlers/**
  - workers/**
# Evertyting is excluded by default with serverless-ruby-layer, but you could use
# the lines below to exlude files that are inside an include path.
#exclude:
# - workers/excluded_worker.rb
data/lib/funktor/cli/templates/config/ruby_layer.yml
ADDED
@@ -0,0 +1,11 @@
use_docker: false
#docker_yums:
#- postgresql-devel
#native_libs:
#- /usr/lib64/libpq.so.5
#- /usr/lib64/libldap_r-2.4.so.2
#- /usr/lib64/liblber-2.4.so.2
#- /usr/lib64/libsasl2.so.3
#- /usr/lib64/libssl3.so
#- /usr/lib64/libsmime3.so
#- /usr/lib64/libnss3.so
data/lib/funktor/cli/templates/function_definitions/active_job_handler.yml
ADDED
@@ -0,0 +1,11 @@
handler: handlers/active_job_handler.call
timeout: ${self:custom.funktor.activeJobHandler.timeout, 900}
reservedConcurrency: ${self:custom.funktor.activeJobHandler.reservedConcurrency, null}
provisionedConcurrency: ${self:custom.funktor.activeJobHandler.provisionedConcurrency, null}
memorySize: ${self:custom.funktor.activeJobHandler.memorySize, 256}
events:
  - sqs:
      arn:
        Fn::GetAtt:
          - ActiveJobQueue
          - Arn