funktor 0.5.0 → 0.6.3

Files changed (55)
  1. checksums.yaml +4 -4
  2. data/Gemfile.lock +26 -11
  3. data/funktor-testapp/Gemfile.lock +2 -2
  4. data/funktor-testapp/app/services/job_flood.rb +1 -1
  5. data/funktor-testapp/app/workers/single_thread_audit_worker.rb +3 -0
  6. data/funktor-testapp/funktor_config/environment.yml +2 -2
  7. data/funktor-testapp/funktor_config/function_definitions/default_queue_handler.yml +3 -1
  8. data/funktor-testapp/funktor_config/function_definitions/incoming_job_handler.yml +3 -1
  9. data/funktor-testapp/funktor_config/function_definitions/job_activator.yml +1 -2
  10. data/funktor-testapp/funktor_config/function_definitions/low_concurrency_queue_handler.yml +13 -0
  11. data/funktor-testapp/funktor_config/funktor.yml +25 -25
  12. data/funktor-testapp/funktor_config/iam_permissions/{single_thread_queue.yml → low_concurrency_queue.yml} +1 -1
  13. data/funktor-testapp/funktor_config/resources/cloudwatch_dashboard.yml +22 -17
  14. data/funktor-testapp/funktor_config/resources/default_queue.yml +2 -2
  15. data/funktor-testapp/funktor_config/resources/incoming_job_queue.yml +2 -2
  16. data/funktor-testapp/funktor_config/resources/jobs_table.yml +16 -4
  17. data/funktor-testapp/funktor_config/resources/low_concurrency_queue.yml +22 -0
  18. data/funktor-testapp/funktor_init.yml +16 -8
  19. data/funktor-testapp/lambda_event_handlers/{single_thread_queue_handler.rb → low_concurrency_queue_handler.rb} +0 -0
  20. data/funktor-testapp/serverless.yml +4 -3
  21. data/funktor.gemspec +3 -1
  22. data/lib/funktor/activity_tracker.rb +6 -2
  23. data/lib/funktor/cli/templates/funktor_config/function_definitions/incoming_job_handler.yml +3 -1
  24. data/lib/funktor/cli/templates/funktor_config/function_definitions/job_activator.yml +1 -2
  25. data/lib/funktor/cli/templates/funktor_config/function_definitions/work_queue_handler.yml +3 -1
  26. data/lib/funktor/cli/templates/funktor_config/funktor.yml +6 -6
  27. data/lib/funktor/cli/templates/funktor_config/resources/cloudwatch_dashboard.yml +3 -2
  28. data/lib/funktor/cli/templates/funktor_config/resources/incoming_job_queue.yml +2 -2
  29. data/lib/funktor/cli/templates/funktor_config/resources/jobs_table.yml +16 -4
  30. data/lib/funktor/cli/templates/funktor_config/resources/work_queue.yml +2 -2
  31. data/lib/funktor/cli/templates/funktor_init.yml.tt +14 -8
  32. data/lib/funktor/cli/templates/serverless.yml +1 -0
  33. data/lib/funktor/incoming_job_handler.rb +11 -15
  34. data/lib/funktor/job.rb +50 -5
  35. data/lib/funktor/job_activator.rb +52 -26
  36. data/lib/funktor/shard_utils.rb +6 -0
  37. data/lib/funktor/testing.rb +1 -0
  38. data/lib/funktor/version.rb +1 -1
  39. data/lib/funktor/web/application.rb +139 -0
  40. data/lib/funktor/web/views/index.erb +3 -0
  41. data/lib/funktor/web/views/layout.erb +58 -0
  42. data/lib/funktor/web/views/processing.erb +29 -0
  43. data/lib/funktor/web/views/queued.erb +29 -0
  44. data/lib/funktor/web/views/retries.erb +35 -0
  45. data/lib/funktor/web/views/scheduled.erb +26 -0
  46. data/lib/funktor/web/views/stats.erb +9 -0
  47. data/lib/funktor/web/views/table_stats_with_buttons.erb +11 -0
  48. data/lib/funktor/web.rb +1 -0
  49. data/lib/funktor/work_queue_handler.rb +41 -0
  50. data/lib/funktor/worker/funktor_options.rb +3 -1
  51. data/lib/funktor/worker.rb +8 -11
  52. data/lib/funktor.rb +16 -16
  53. metadata +46 -6
  54. data/funktor-testapp/funktor_config/function_definitions/single_thread_queue_handler.yml +0 -11
  55. data/funktor-testapp/funktor_config/resources/single_thread_queue.yml +0 -22
@@ -3,11 +3,11 @@ Resources:
     Type: AWS::SQS::Queue
     Properties:
       QueueName: ${self:custom.funktor.DefaultQueueName}
-      VisibilityTimeout: 300
+      VisibilityTimeout: ${self:custom.funktor.DefaultQueueHandler.visibilityTimeout}
       RedrivePolicy:
         deadLetterTargetArn:
           "Fn::GetAtt": [ DefaultDeadLetterQueue, Arn ]
-        maxReceiveCount: 5
+        maxReceiveCount: ${self:custom.funktor.DefaultQueueHandler.maxReceiveCount}
   DefaultDeadLetterQueue:
     Type: AWS::SQS::Queue
     Properties:
@@ -3,11 +3,11 @@ Resources:
     Type: AWS::SQS::Queue
     Properties:
       QueueName: ${self:custom.funktor.IncomingJobQueueName}
-      VisibilityTimeout: 300
+      VisibilityTimeout: ${self:custom.funktor.IncomingJobHandler.visibilityTimeout}
       RedrivePolicy:
         deadLetterTargetArn:
           "Fn::GetAtt": [ IncomingJobDeadLetterQueue, Arn ]
-        maxReceiveCount: 5
+        maxReceiveCount: ${self:custom.funktor.IncomingJobHandler.maxReceiveCount}
   IncomingJobDeadLetterQueue:
     Type: AWS::SQS::Queue
     Properties:
@@ -8,11 +8,11 @@ Resources:
          AttributeType: N
        - AttributeName: jobId
          AttributeType: S
-        #- AttributeName: category
-          #AttributeType: S
+        - AttributeName: category
+          AttributeType: S
        - AttributeName: performAt
          AttributeType: S
-        - AttributeName: dummy
+        - AttributeName: queueable
          AttributeType: S
      KeySchema:
        - AttributeName: jobShard
@@ -22,7 +22,19 @@ Resources:
      GlobalSecondaryIndexes:
        - IndexName: performAtIndex
          KeySchema:
-            - AttributeName: dummy
+            - AttributeName: queueable
+              KeyType: HASH
+            - AttributeName: performAt
+              KeyType: RANGE
+          Projection:
+            NonKeyAttributes:
+              - jobId
+              - payload
+              - category
+            ProjectionType: INCLUDE
+        - IndexName: categoryIndex
+          KeySchema:
+            - AttributeName: category
              KeyType: HASH
            - AttributeName: performAt
              KeyType: RANGE
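The jobs table now keys its performAtIndex on a queueable attribute (replacing the old dummy attribute) and adds a categoryIndex, which is what the reworked job activator uses to find scheduled jobs and retries that are due. The snippet below is a minimal sketch of such a query, not Funktor's actual activator code; the FUNKTOR_JOBS_TABLE env var name and the 'true' value stored in queueable are assumptions.

    # Sketch only: query the performAtIndex for jobs whose performAt has passed.
    require 'time'
    require 'aws-sdk-dynamodb'

    client = Aws::DynamoDB::Client.new

    resp = client.query(
      table_name: ENV['FUNKTOR_JOBS_TABLE'],   # assumed env var name
      index_name: 'performAtIndex',
      key_condition_expression: 'queueable = :queueable AND performAt <= :now',
      expression_attribute_values: {
        ':queueable' => 'true',                # assumed sentinel value
        ':now' => Time.now.utc.iso8601
      }
    )

    resp.items.each do |item|
      # The INCLUDE projection exposes jobId, payload and category on the index.
      puts "#{item['category']}: #{item['jobId']}"
    end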
@@ -0,0 +1,22 @@
+Resources:
+  LowConcurrencyQueue:
+    Type: AWS::SQS::Queue
+    Properties:
+      QueueName: ${self:custom.funktor.LowConcurrencyQueueName}
+      VisibilityTimeout: ${self:custom.funktor.LowConcurrencyQueueHandler.visibilityTimeout}
+      RedrivePolicy:
+        deadLetterTargetArn:
+          "Fn::GetAtt": [ LowConcurrencyDeadLetterQueue, Arn ]
+        maxReceiveCount: ${self:custom.funktor.LowConcurrencyQueueHandler.maxReceiveCount}
+  LowConcurrencyDeadLetterQueue:
+    Type: AWS::SQS::Queue
+    Properties:
+      QueueName: ${self:custom.funktor.LowConcurrencyDeadJobQueueName}
+
+Outputs:
+  LowConcurrencyQueueUrl:
+    Value:
+      Ref: LowConcurrencyQueue
+  LowConcurrencyDeadLetterQueueUrl:
+    Value:
+      Ref: LowConcurrencyDeadLetterQueue
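This new lowConcurrency queue replaces the old singleThread queue, and the test app moves its audit worker onto it. Below is a minimal sketch of a worker pinned to such a queue; the class is hypothetical, and the exact queue: option accepted by funktor_options is an assumption based on the renamed single_thread_audit_worker in this release.

    # Hypothetical worker routed to the low concurrency queue (sketch only).
    class AuditWorker
      include Funktor::Worker

      # Assumption: funktor_options can route a worker to a named work queue.
      funktor_options queue: 'low_concurrency'

      def perform(record_id)
        # Work that should not overwhelm a constrained downstream resource.
      end
    end

    # Enqueued like any other Funktor worker:
    # AuditWorker.perform_async(42)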
@@ -10,7 +10,7 @@ handlerDefaults:
   # to a handler at one time, so you'll want this to be at least 10x the maximum time you
   # expect to spend for one message. We default to a high number here to allow for the
   # times when things go weird.
-  timeout: 300
+  functionTimeout: 300
 
   # reservedConcurrency represents the maximum number of concurrent executions.
   # Usually you'll want to leave this as null so that handlers can scale infinitely
@@ -29,19 +29,21 @@ handlerDefaults:
   memorySize: 256
 
   # You can set the batch size. Max of 10_000 for normal queues, 10 for FIFO.
-  batchSize: 10
+  batchSize: 1
 
   # How many seconds should AWS wait for a batch to fill up before executing lambda?
   # For immediate execution set the batch size to 1.
-  maximumBatchingWindow : 1
+  maximumBatchingWindow : 0
 
   # Visibility timeout should only come into play in the case of Funktor errors.
   # Application level errors should be handled by Funktor retry mechanisms.
   # The visibility timeout should be at least as long as the function timeout, and up to 6 times larger.
-  visibilityTimeout: 900
+  visibilityTimeout: 1800
 
-  # Set log rentention to save money
-  logRetentionInDays: 30
+  # Max recieve count affects how many times a job will retry that has been throttled at the SQS -> Lambda boundary.
+  # Amazon recommend this be at least 5, but I've found that higher numbers are better to avoid legit jobs ending
+  # up in the dead letter queue
+  maxReceiveCount: 20
 
 # Incoming Job Handler
 incomingJobHandler:
@@ -49,6 +51,8 @@ incomingJobHandler:
   # to quickly handle jobs at the beginning of a burst. Uncomment the line below if so.
   # provisionedConcurrency: 4
 
+jobActivator:
+  # You probably don't need to adjust the defaults for this one.
 
 queues:
   - default:
@@ -56,6 +60,10 @@ queues:
     # memorySize: 512
     # TODO - Is it advisable to use FIFO queuues with Funktor? Maybe this isn't really even supported by CloudFormation?
     # fifo: false
-  - singleThread:
-      reservedConcurrency: 1
+  - lowConcurrency:
+      # BEWARE - Setting very low concurrency values (5 or lower) can contribute to "SQS Overpull", so you probably don't
+      # want to have any queues with extremely low concurrency.
+      # Details about "SQS Overpull" can be found in this article:
+      # https://zaccharles.medium.com/reproducing-the-sqs-trigger-and-lambda-concurrency-limit-issue-f4c09d384a18
+      reservedConcurrency: 10
 
@@ -26,12 +26,13 @@ provider:
   lambdaHashingVersion: 20201221
   environment: ${file(funktor_config/environment.yml)}
   versionFunctions: false # Reduces the amount of storage used since all Lambdas together are limited to 75GB
+  logRetentionInDays: 7
   iamRoleStatements:
     - ${file(funktor_config/iam_permissions/activity_table.yml)}
     - ${file(funktor_config/iam_permissions/default_queue.yml)}
     - ${file(funktor_config/iam_permissions/incoming_job_queue.yml)}
     - ${file(funktor_config/iam_permissions/ssm.yml)}
-    - ${file(funktor_config/iam_permissions/single_thread_queue.yml)}
+    - ${file(funktor_config/iam_permissions/low_concurrency_queue.yml)}
     - ${file(funktor_config/iam_permissions/jobs_table.yml)}
     - ${file(funktor_config/iam_permissions/jobs_table_secondary_index.yml)}
 
@@ -49,7 +50,7 @@ functions:
   IncomingJobHandler: ${file(funktor_config/function_definitions/incoming_job_handler.yml)}
   DefaultQueueHandler: ${file(funktor_config/function_definitions/default_queue_handler.yml)}
   JobActivator: ${file(funktor_config/function_definitions/job_activator.yml)}
-  SingleThreadQueueHandler: ${file(funktor_config/function_definitions/single_thread_queue_handler.yml)}
+  LowConcurrencyQueueHandler: ${file(funktor_config/function_definitions/low_concurrency_queue_handler.yml)}
   RandomJobGenerator: ${file(funktor_config/function_definitions/random_job_generator.yml)}
 
 resources:
@@ -58,7 +59,7 @@ resources:
     - ${file(funktor_config/resources/cloudwatch_dashboard.yml)}
     - ${file(funktor_config/resources/default_queue.yml)}
    - ${file(funktor_config/resources/incoming_job_queue.yml)}
-    - ${file(funktor_config/resources/single_thread_queue.yml)}
+    - ${file(funktor_config/resources/low_concurrency_queue.yml)}
    - ${file(funktor_config/resources/jobs_table.yml)}
 
 plugins:
data/funktor.gemspec CHANGED
@@ -32,10 +32,12 @@ Gem::Specification.new do |spec|
   spec.add_dependency 'aws-sdk-sqs', '~> 1.37'
   spec.add_dependency 'aws-sdk-dynamodb', '~> 1.62'
   spec.add_dependency "activesupport" # TODO - Can we build our own verison of cattr_accessor to avoid this?
-  spec.add_dependency "thor" # Thor drives the CLI
+  spec.add_dependency "thor" # Thor drives the CLI TODO - should this just be a dev dependency?
 
   spec.add_development_dependency 'activejob', '>= 5.1.5'
   spec.add_development_dependency 'simplecov'
   spec.add_development_dependency 'webmock'
   spec.add_development_dependency 'pry-byebug'
+  spec.add_development_dependency 'sinatra'
+  spec.add_development_dependency 'timecop'
 end
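The new sinatra and timecop development dependencies support the web UI added in this release (the new files under lib/funktor/web). A minimal rack config sketch for mounting it is shown below; the Funktor::Web::Application constant name is inferred from the file layout, and you would need sinatra in your own bundle since it is only a development dependency of the gem itself.

    # config.ru (sketch) - mount the Funktor web UI behind basic auth.
    require 'funktor/web'

    # Assumption: lib/funktor/web/application.rb defines a Sinatra app
    # named Funktor::Web::Application.
    use Rack::Auth::Basic, 'Funktor' do |user, password|
      user == ENV['FUNKTOR_WEB_USER'] && password == ENV['FUNKTOR_WEB_PASSWORD']
    end

    run Funktor::Web::Application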
@@ -14,7 +14,9 @@ module Funktor
       bailingOut: 'failed',
       retrying: 'retries',
       retryActivated: 'queued',
-      scheduledJobActivated: 'queued'
+      scheduledJobActivated: nil,
+      scheduledJobDeleted: 'scheduledJobDeleted',
+      retryDeleted: 'retryDeleted'
       #scheduledJobPushedToActive: 'active',
       #activeJobPushed: 'active',
       #scheduledJobPushed: 'scheduled'
@@ -30,7 +32,9 @@ module Funktor
       bailingOut: 'processing',
       retrying: 'processing',
       retryActivated: 'retries',
-      scheduledJobActivated: 'scheduled'
+      scheduledJobActivated: 'scheduled',
+      scheduledJobDeleted: 'scheduled',
+      retryDeleted: 'retries'
       #scheduledJobPushedToActive: 'scheduled',
       #activeJobPushed: nil,
       #scheduledJobPushed: nil
@@ -1,10 +1,12 @@
 handler: lambda_event_handlers/incoming_job_handler.call
-timeout: ${self:custom.funktor.IncomingJobHandler.timeout, 30}
+timeout: ${self:custom.funktor.IncomingJobHandler.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.IncomingJobHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.IncomingJobHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.IncomingJobHandler.memorySize, 256}
 events:
   - sqs:
+      batchSize: ${self:custom.funktor.IncomingJobHandler.batchSize, 1}
+      maximumBatchingWindow: ${self:custom.funktor.IncomingJobHandler.maximumBatchingWindow, 0}
       arn:
         Fn::GetAtt:
           - IncomingJobQueue
@@ -1,8 +1,7 @@
 handler: lambda_event_handlers/job_activator.call
-timeout: ${self:custom.funktor.JobActivator.timeout, 30}
+timeout: ${self:custom.funktor.JobActivator.functionTimeout, 30}
 reservedConcurrency: ${self:custom.funktor.JobActivator.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.JobActivator.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.JobActivator.memorySize, 256}
-#reservedConcurrency: 1
 events:
   - schedule: rate(1 minute)
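The activator stays on a one-minute schedule; it is the function that promotes scheduled jobs and retries from the jobs table onto their work queues once performAt comes due. For context, a minimal sketch of how such a job would be enqueued from application code, assuming the Sidekiq-style perform_in / perform_at helpers that Funktor workers expose:

    # Sketch only: the delayed job lands in the jobs table and the JobActivator
    # moves it to the work queue after performAt has passed. The perform_in /
    # perform_at names are an assumption based on Funktor's Sidekiq-style API.
    class ReminderWorker
      include Funktor::Worker

      def perform(user_id)
        # send the reminder
      end
    end

    ReminderWorker.perform_in(15 * 60, 123)         # roughly 15 minutes from now
    ReminderWorker.perform_at(Time.now + 3600, 123) # or at an explicit time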
@@ -1,10 +1,12 @@
 handler: lambda_event_handlers/<%= work_queue_name.underscore %>_queue_handler.call
-timeout: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.timeout, 900}
+timeout: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.functionTimeout, 900}
 reservedConcurrency: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.reservedConcurrency, null}
 provisionedConcurrency: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.provisionedConcurrency, null}
 memorySize: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.memorySize, 256}
 events:
   - sqs:
+      batchSize: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.batchSize, 1}
+      maximumBatchingWindow: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.maximumBatchingWindow, 0}
       arn:
         Fn::GetAtt:
           - <%= work_queue_name.camelize %>Queue
@@ -3,7 +3,7 @@ IncomingJobHandler:
   # to a handler at one time, so you'll want this to be at least 10x the maximum time you
   # expect to spend for one message. The incoming job handler usually will be pretty fast,
   # but we default to a high number here to allow for the times when things go weird.
-  timeout: <%= incoming_config_value 'timeout' %>
+  functionTimeout: <%= incoming_config_value 'functionTimeout' %>
   # reservedConcurrency represents the maximum number of concurrent executions.
   # For the incoming job handler you probably don't want to limit it because you
   # want to get things onto work queues as quickly as possible.
@@ -19,14 +19,14 @@ IncomingJobHandler:
   batchSize: <%= incoming_config_value 'batchSize' %>
   maximumBatchingWindow: <%= incoming_config_value 'maximumBatchingWindow' %>
   visibilityTimeout: <%= incoming_config_value 'visibilityTimeout' %>
-  logRetentionInDays: <%= incoming_config_value 'logRetentionInDays' %>
+  maxReceiveCount: <%= incoming_config_value 'maxReceiveCount' %>
 
 JobActivator:
   # timeout is how long the handler can possibly run. Up to 10 messages may be delivered
   # to a handler at one time, so you'll want this to be at least 10x the maximum time you
   # expect to spend for one message. The job activator usually will be pretty fast,
   # but we default to a high number here to allow for the times when things go weird.
-  timeout: <%= activator_config_value 'timeout' %>
+  functionTimeout: <%= activator_config_value 'functionTimeout' %>
   # reservedConcurrency represents the maximum number of concurrent executions.
   # For the job activator you probably don't want to limit it because you
   # want to get things onto work queues as quickly as possible when they're ready.
@@ -42,7 +42,7 @@ JobActivator:
   batchSize: <%= activator_config_value 'batchSize' %>
   maximumBatchingWindow: <%= activator_config_value 'maximumBatchingWindow' %>
   visibilityTimeout: <%= activator_config_value 'visibilityTimeout' %>
-  logRetentionInDays: <%= activator_config_value 'logRetentionInDays' %>
+  maxReceiveCount: <%= activator_config_value 'maxReceiveCount' %>
 
 
 
@@ -52,7 +52,7 @@ JobActivator:
   # to a handler at one time, so you'll want this to be at least 10x the maximum time you
   # expect to spend for one message. The active job handler may be slow if your jobs are
   # doing a lot of work, so we default to the maximum here.
-  timeout: <%= queue_config_value queue_name, 'timeout' %>
+  functionTimeout: <%= queue_config_value queue_name, 'functionTimeout' %>
   # reservedConcurrency represents the maximum number of concurrent executions.
   # For the active job handler you may want to limit it if you have resource limitations
   # like database connections that you need to avoid exhausting.
@@ -69,7 +69,7 @@
   batchSize: <%= queue_config_value queue_name, 'batchSize' %>
   maximumBatchingWindow: <%= queue_config_value queue_name, 'maximumBatchingWindow' %>
   visibilityTimeout: <%= queue_config_value queue_name, 'visibilityTimeout' %>
-  logRetentionInDays: <%= queue_config_value queue_name, 'logRetentionInDays' %>
+  maxReceiveCount: <%= queue_config_value queue_name, 'maxReceiveCount' %>
 
 <%- end -%>
 
@@ -74,7 +74,7 @@ Resources:
         "properties": {
           "metrics": [
             <%- queue_names.each do |queue_name| -%>
-            [ "<%= app_name %>", "Duration", "Queue", "<%= queue_name %>" ],
+            [ "<%= app_name %>", "Duration", "Queue", "<%= queue_name.underscore %>" ],
             [ "...", { "stat": "p99" } ]<%= queue_name == queue_names.last ? "" : "," %>
             <%- end -%>
           ],
@@ -95,7 +95,7 @@ Resources:
         "properties": {
           "metrics": [
             <%- queue_names.each do |queue_name| -%>
-            [ "<%= app_name %>", "processed", "Queue", "<%= queue_name %>" ],
+            [ "<%= app_name %>", "processed", "Queue", "<%= queue_name.underscore %>" ],
             [ ".", "failed", ".", "." ]<%= queue_name == queue_names.last ? "" : "," %>
             <%- end -%>
           ],
@@ -616,6 +616,7 @@ Resources:
             [ "...", "Query" ],
             [ ".", "ThrottledRequests", ".", ".", ".", "PutItem", { "yAxis": "right", "visible": false } ],
             [ ".", "SuccessfulRequestLatency", ".", ".", ".", "DeleteItem" ],
+            [ ".", "SuccessfulRequestLatency", ".", ".", ".", "UpdateItem" ],
             [ ".", "ThrottledRequests", ".", ".", ".", ".", { "yAxis": "right", "visible": false } ]
           ],
           "view": "timeSeries",
@@ -3,11 +3,11 @@ Resources:
     Type: AWS::SQS::Queue
     Properties:
       QueueName: ${self:custom.funktor.IncomingJobQueueName}
-      VisibilityTimeout: 300
+      VisibilityTimeout: ${self:custom.funktor.IncomingJobHandler.visibilityTimeout}
       RedrivePolicy:
         deadLetterTargetArn:
           "Fn::GetAtt": [ IncomingJobDeadLetterQueue, Arn ]
-        maxReceiveCount: 5
+        maxReceiveCount: ${self:custom.funktor.IncomingJobHandler.maxReceiveCount}
   IncomingJobDeadLetterQueue:
     Type: AWS::SQS::Queue
     Properties:
@@ -8,11 +8,11 @@ Resources:
          AttributeType: N
        - AttributeName: jobId
          AttributeType: S
-        #- AttributeName: category
-          #AttributeType: S
+        - AttributeName: category
+          AttributeType: S
        - AttributeName: performAt
          AttributeType: S
-        - AttributeName: dummy
+        - AttributeName: queueable
          AttributeType: S
      KeySchema:
        - AttributeName: jobShard
@@ -22,7 +22,19 @@ Resources:
      GlobalSecondaryIndexes:
        - IndexName: performAtIndex
          KeySchema:
-            - AttributeName: dummy
+            - AttributeName: queueable
+              KeyType: HASH
+            - AttributeName: performAt
+              KeyType: RANGE
+          Projection:
+            NonKeyAttributes:
+              - jobId
+              - payload
+              - category
+            ProjectionType: INCLUDE
+        - IndexName: categoryIndex
+          KeySchema:
+            - AttributeName: category
              KeyType: HASH
            - AttributeName: performAt
              KeyType: RANGE
@@ -3,11 +3,11 @@ Resources:
     Type: AWS::SQS::Queue
     Properties:
       QueueName: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueName}
-      VisibilityTimeout: 300
+      VisibilityTimeout: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.visibilityTimeout}
       RedrivePolicy:
         deadLetterTargetArn:
           "Fn::GetAtt": [ <%= work_queue_name.camelize %>DeadLetterQueue, Arn ]
-        maxReceiveCount: 5
+        maxReceiveCount: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.maxReceiveCount}
   <%= work_queue_name.camelize %>DeadLetterQueue:
     Type: AWS::SQS::Queue
     Properties:
@@ -10,7 +10,7 @@ handlerDefaults:
   # to a handler at one time, so you'll want this to be at least 10x the maximum time you
   # expect to spend for one message. We default to a high number here to allow for the
   # times when things go weird.
-  timeout: 300
+  functionTimeout: 300
 
   # reservedConcurrency represents the maximum number of concurrent executions.
   # Usually you'll want to leave this as null so that handlers can scale infinitely
@@ -29,19 +29,21 @@ handlerDefaults:
   memorySize: 256
 
   # You can set the batch size. Max of 10_000 for normal queues, 10 for FIFO.
-  batchSize: 10
+  batchSize: 1
 
   # How many seconds should AWS wait for a batch to fill up before executing lambda?
   # For immediate execution set the batch size to 1.
-  maximumBatchingWindow : 1
+  maximumBatchingWindow : 0
 
   # Visibility timeout should only come into play in the case of Funktor errors.
   # Application level errors should be handled by Funktor retry mechanisms.
   # The visibility timeout should be at least as long as the function timeout, and up to 6 times larger.
-  visibilityTimeout: 900
+  visibilityTimeout: 1800
 
-  # Set log rentention to save money
-  logRetentionInDays: 30
+  # Max recieve count affects how many times a job will retry that has been throttled at the SQS -> Lambda boundary.
+  # Amazon recommend this be at least 5, but I've found that higher numbers are better to avoid legit jobs ending
+  # up in the dead letter queue
+  maxReceiveCount: 20
 
 # Incoming Job Handler
 incomingJobHandler:
@@ -58,6 +60,10 @@ queues:
     # memorySize: 512
     # TODO - Is it advisable to use FIFO queuues with Funktor? Maybe this isn't really even supported by CloudFormation?
     # fifo: false
-  - singleThread:
-      reservedConcurrency: 1
+  - lowConcurrency:
+      # BEWARE - Setting very low concurrency values (5 or lower) can contribute to "SQS Overpull", so you probably don't
+      # want to have any queues with extremely low concurrency.
+      # Details about "SQS Overpull" can be found in this article:
+      # https://zaccharles.medium.com/reproducing-the-sqs-trigger-and-lambda-concurrency-limit-issue-f4c09d384a18
+      reservedConcurrency: 10