funktor 0.4.5 → 0.6.0

Files changed (94)
  1. checksums.yaml +4 -4
  2. data/.tool-versions +2 -0
  3. data/Gemfile.lock +24 -5
  4. data/funktor-testapp/.envrc +1 -0
  5. data/funktor-testapp/.gitignore +7 -0
  6. data/funktor-testapp/Gemfile +25 -0
  7. data/funktor-testapp/Gemfile.lock +51 -0
  8. data/funktor-testapp/app/services/job_flood.rb +38 -0
  9. data/funktor-testapp/app/workers/audit_worker.rb +49 -0
  10. data/funktor-testapp/app/workers/greetings_worker.rb +3 -0
  11. data/funktor-testapp/app/workers/hello_worker.rb +18 -0
  12. data/funktor-testapp/app/workers/single_thread_audit_worker.rb +3 -0
  13. data/funktor-testapp/deploy-dev.sh +5 -0
  14. data/funktor-testapp/funktor_config/boot.rb +17 -0
  15. data/funktor-testapp/funktor_config/environment.yml +15 -0
  16. data/funktor-testapp/funktor_config/function_definitions/default_queue_handler.yml +13 -0
  17. data/funktor-testapp/funktor_config/function_definitions/incoming_job_handler.yml +13 -0
  18. data/funktor-testapp/funktor_config/function_definitions/job_activator.yml +7 -0
  19. data/funktor-testapp/funktor_config/function_definitions/low_concurrency_queue_handler.yml +13 -0
  20. data/funktor-testapp/funktor_config/function_definitions/random_job_generator.yml +18 -0
  21. data/funktor-testapp/funktor_config/funktor.yml +114 -0
  22. data/funktor-testapp/funktor_config/iam_permissions/activity_table.yml +5 -0
  23. data/funktor-testapp/funktor_config/iam_permissions/default_queue.yml +8 -0
  24. data/funktor-testapp/funktor_config/iam_permissions/incoming_job_queue.yml +8 -0
  25. data/funktor-testapp/funktor_config/iam_permissions/jobs_table.yml +5 -0
  26. data/funktor-testapp/funktor_config/iam_permissions/jobs_table_secondary_index.yml +8 -0
  27. data/funktor-testapp/funktor_config/iam_permissions/low_concurrency_queue.yml +8 -0
  28. data/funktor-testapp/funktor_config/iam_permissions/ssm.yml +5 -0
  29. data/funktor-testapp/funktor_config/package.yml +11 -0
  30. data/funktor-testapp/funktor_config/resources/activity_table.yml +22 -0
  31. data/funktor-testapp/funktor_config/resources/cloudwatch_dashboard.yml +809 -0
  32. data/funktor-testapp/funktor_config/resources/default_queue.yml +22 -0
  33. data/funktor-testapp/funktor_config/resources/incoming_job_queue.yml +22 -0
  34. data/funktor-testapp/funktor_config/resources/incoming_job_queue_user.yml +26 -0
  35. data/funktor-testapp/funktor_config/resources/jobs_table.yml +56 -0
  36. data/funktor-testapp/funktor_config/resources/low_concurrency_queue.yml +22 -0
  37. data/funktor-testapp/funktor_config/ruby_layer.yml +11 -0
  38. data/funktor-testapp/funktor_init.yml +69 -0
  39. data/funktor-testapp/lambda_event_handlers/default_queue_handler.rb +8 -0
  40. data/funktor-testapp/lambda_event_handlers/incoming_job_handler.rb +8 -0
  41. data/funktor-testapp/lambda_event_handlers/job_activator.rb +8 -0
  42. data/funktor-testapp/lambda_event_handlers/low_concurrency_queue_handler.rb +8 -0
  43. data/funktor-testapp/lambda_event_handlers/random_job_generator.rb +35 -0
  44. data/funktor-testapp/package-lock.json +248 -0
  45. data/funktor-testapp/package.json +8 -0
  46. data/funktor-testapp/serverless.yml +66 -0
  47. data/funktor.gemspec +4 -1
  48. data/lib/active_job/queue_adapters/funktor_adapter.rb +7 -3
  49. data/lib/funktor/activity_tracker.rb +106 -0
  50. data/lib/funktor/cli/bootstrap.rb +0 -1
  51. data/lib/funktor/cli/init.rb +13 -0
  52. data/lib/funktor/cli/templates/app/workers/hello_worker.rb +1 -1
  53. data/lib/funktor/cli/templates/funktor_config/environment.yml +4 -0
  54. data/lib/funktor/cli/templates/funktor_config/function_definitions/incoming_job_handler.yml +3 -1
  55. data/lib/funktor/cli/templates/funktor_config/function_definitions/job_activator.yml +7 -0
  56. data/lib/funktor/cli/templates/funktor_config/function_definitions/work_queue_handler.yml +3 -1
  57. data/lib/funktor/cli/templates/funktor_config/funktor.yml +32 -6
  58. data/lib/funktor/cli/templates/funktor_config/iam_permissions/activity_table.yml +5 -0
  59. data/lib/funktor/cli/templates/funktor_config/iam_permissions/jobs_table.yml +5 -0
  60. data/lib/funktor/cli/templates/funktor_config/iam_permissions/jobs_table_secondary_index.yml +8 -0
  61. data/lib/funktor/cli/templates/funktor_config/resources/activity_table.yml +22 -0
  62. data/lib/funktor/cli/templates/funktor_config/resources/cloudwatch_dashboard.yml +13 -12
  63. data/lib/funktor/cli/templates/funktor_config/resources/incoming_job_queue.yml +2 -2
  64. data/lib/funktor/cli/templates/funktor_config/resources/jobs_table.yml +56 -0
  65. data/lib/funktor/cli/templates/funktor_config/resources/work_queue.yml +2 -2
  66. data/lib/funktor/cli/templates/funktor_init.yml.tt +16 -16
  67. data/lib/funktor/cli/templates/lambda_event_handlers/job_activator.rb +8 -0
  68. data/lib/funktor/cli/templates/lambda_event_handlers/work_queue_handler.rb +1 -1
  69. data/lib/funktor/cli/templates/serverless.yml +3 -2
  70. data/lib/funktor/counter.rb +4 -1
  71. data/lib/funktor/incoming_job_handler.rb +54 -18
  72. data/lib/funktor/job.rb +57 -7
  73. data/lib/funktor/job_activator.rb +124 -0
  74. data/lib/funktor/job_pusher.rb +0 -2
  75. data/lib/funktor/middleware/metrics.rb +8 -3
  76. data/lib/funktor/shard_utils.rb +6 -0
  77. data/lib/funktor/testing.rb +51 -29
  78. data/lib/funktor/version.rb +1 -1
  79. data/lib/funktor/web/application.rb +139 -0
  80. data/lib/funktor/web/views/index.erb +3 -0
  81. data/lib/funktor/web/views/layout.erb +58 -0
  82. data/lib/funktor/web/views/processing.erb +29 -0
  83. data/lib/funktor/web/views/queued.erb +29 -0
  84. data/lib/funktor/web/views/retries.erb +35 -0
  85. data/lib/funktor/web/views/scheduled.erb +26 -0
  86. data/lib/funktor/web/views/stats.erb +9 -0
  87. data/lib/funktor/web/views/table_stats_with_buttons.erb +11 -0
  88. data/lib/funktor/web.rb +1 -0
  89. data/lib/funktor/work_queue_handler.rb +101 -0
  90. data/lib/funktor/worker/funktor_options.rb +3 -1
  91. data/lib/funktor/worker.rb +8 -18
  92. data/lib/funktor.rb +52 -20
  93. metadata +109 -3
  94. data/lib/funktor/active_job_handler.rb +0 -58
data/lib/funktor/cli/templates/funktor_config/resources/cloudwatch_dashboard.yml CHANGED

@@ -74,7 +74,7 @@ Resources:
  "properties": {
  "metrics": [
  <%- queue_names.each do |queue_name| -%>
- [ "<%= app_name %>", "Duration", "Queue", "<%= queue_name %>" ],
+ [ "<%= app_name %>", "Duration", "Queue", "<%= queue_name.underscore %>" ],
  [ "...", { "stat": "p99" } ]<%= queue_name == queue_names.last ? "" : "," %>
  <%- end -%>
  ],
@@ -95,7 +95,7 @@ Resources:
  "properties": {
  "metrics": [
  <%- queue_names.each do |queue_name| -%>
- [ "<%= app_name %>", "processed", "Queue", "<%= queue_name %>" ],
+ [ "<%= app_name %>", "processed", "Queue", "<%= queue_name.underscore %>" ],
  [ ".", "failed", ".", "." ]<%= queue_name == queue_names.last ? "" : "," %>
  <%- end -%>
  ],
@@ -438,7 +438,7 @@ Resources:
  "type": "metric",
  "properties": {
  "metrics": [
- [ "AWS/DynamoDB", "ReturnedItemCount", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", "Operation", "Query" ]
+ [ "AWS/DynamoDB", "ReturnedItemCount", "TableName", "${self:custom.funktor.JobsTableName}", "Operation", "Query" ]
  ],
  "view": "singleValue",
  "region": "us-east-1",
@@ -455,7 +455,7 @@ Resources:
  "type": "metric",
  "properties": {
  "metrics": [
- [ "AWS/Lambda", "Duration", "FunctionName", "${self:service}-${self:provider.stage}-delayed_job_scheduler", "Resource", "${self:service}-${self:provider.stage}-delayed_job_scheduler", { "label": "p10" } ],
+ [ "AWS/Lambda", "Duration", "FunctionName", "${self:custom.funktor.JobActivatorName}", "Resource", "${self:custom.funktor.JobActivatorName}", { "label": "p10" } ],
  [ "...", { "label": "p50", "stat": "p50" } ],
  [ "...", { "label": "p99", "stat": "p99" } ],
  [ "...", { "label": "Average", "stat": "Average" } ]
@@ -480,7 +480,7 @@ Resources:
  "properties": {
  "period": 60,
  "metrics": [
- [ "AWS/Lambda", "Errors", "FunctionName", "${self:service}-${self:provider.stage}-delayed_job_scheduler", { "id": "errors", "stat": "Sum", "color": "#d13212" } ],
+ [ "AWS/Lambda", "Errors", "FunctionName", "${self:custom.funktor.JobActivatorName}", { "id": "errors", "stat": "Sum", "color": "#d13212" } ],
  [ ".", "Invocations", ".", ".", { "id": "invocations", "stat": "Sum", "visible": false } ],
  [ { "expression": "100 - 100 * errors / MAX([errors, invocations])", "label": "Success rate (%)", "id": "availability", "yAxis": "right", "region": "us-east-1" } ]
  ],
@@ -505,7 +505,7 @@ Resources:
  "properties": {
  "period": 60,
  "metrics": [
- [ "AWS/Lambda", "Duration", "FunctionName", "${self:service}-${self:provider.stage}-delayed_job_scheduler", { "stat": "Minimum" } ],
+ [ "AWS/Lambda", "Duration", "FunctionName", "${self:custom.funktor.JobActivatorName}", { "stat": "Minimum" } ],
  [ "...", { "stat": "Average" } ],
  [ "...", { "stat": "Maximum" } ]
  ],
@@ -524,7 +524,7 @@ Resources:
  "type": "metric",
  "properties": {
  "metrics": [
- [ "AWS/DynamoDB", "ReturnedItemCount", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", "Operation", "Query" ]
+ [ "AWS/DynamoDB", "ReturnedItemCount", "TableName", "${self:custom.funktor.JobsTableName}", "Operation", "Query" ]
  ],
  "view": "timeSeries",
  "stacked": false,
@@ -547,7 +547,7 @@ Resources:
  "properties": {
  "metrics": [
  [ { "expression": "m2/PERIOD(m2)", "label": "Consumed Read Capacity Units", "id": "e1", "stat": "Sum", "region": "us-east-1" } ],
- [ "AWS/DynamoDB", "ConsumedReadCapacityUnits", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", { "id": "m2", "visible": false } ],
+ [ "AWS/DynamoDB", "ConsumedReadCapacityUnits", "TableName", "${self:custom.funktor.JobsTableName}", { "id": "m2", "visible": false } ],
  [ ".", "ConsumedWriteCapacityUnits", ".", ".", { "yAxis": "left", "id": "m4", "visible": false } ],
  [ ".", "WriteThrottleEvents", ".", ".", { "yAxis": "right", "id": "m5", "visible": false } ]
  ],
@@ -569,7 +569,7 @@ Resources:
  "properties": {
  "period": 60,
  "metrics": [
- [ "AWS/Lambda", "ConcurrentExecutions", "FunctionName", "${self:service}-${self:provider.stage}-delayed_job_scheduler", { "stat": "Maximum" } ]
+ [ "AWS/Lambda", "ConcurrentExecutions", "FunctionName", "${self:custom.funktor.JobActivatorName}", { "stat": "Maximum" } ]
  ],
  "region": "us-east-1",
  "title": "Delayd Job Schedule Concurrent executions",
@@ -587,7 +587,7 @@ Resources:
  "properties": {
  "metrics": [
  [ { "expression": "m4/PERIOD(m4)", "label": "Consumed Read Capacity Units", "id": "e1", "stat": "Sum", "region": "us-east-1" } ],
- [ "AWS/DynamoDB", "ConsumedReadCapacityUnits", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", { "id": "m2", "visible": false } ],
+ [ "AWS/DynamoDB", "ConsumedReadCapacityUnits", "TableName", "${self:custom.funktor.JobsTableName}", { "id": "m2", "visible": false } ],
  [ ".", "ConsumedWriteCapacityUnits", ".", ".", { "yAxis": "left", "id": "m4", "visible": false } ],
  [ ".", "WriteThrottleEvents", ".", ".", { "yAxis": "right", "id": "m5", "visible": false } ]
  ],
@@ -612,10 +612,11 @@ Resources:
  "type": "metric",
  "properties": {
  "metrics": [
- [ "AWS/DynamoDB", "SuccessfulRequestLatency", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", "Operation", "PutItem", { "yAxis": "left" } ],
+ [ "AWS/DynamoDB", "SuccessfulRequestLatency", "TableName", "${self:custom.funktor.JobsTableName}", "Operation", "PutItem", { "yAxis": "left" } ],
  [ "...", "Query" ],
  [ ".", "ThrottledRequests", ".", ".", ".", "PutItem", { "yAxis": "right", "visible": false } ],
  [ ".", "SuccessfulRequestLatency", ".", ".", ".", "DeleteItem" ],
+ [ ".", "SuccessfulRequestLatency", ".", ".", ".", "UpdateItem" ],
  [ ".", "ThrottledRequests", ".", ".", ".", ".", { "yAxis": "right", "visible": false } ]
  ],
  "view": "timeSeries",
@@ -635,7 +636,7 @@ Resources:
  "type": "metric",
  "properties": {
  "metrics": [
- [ "AWS/DynamoDB", "ThrottledRequests", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", "Operation", "DeleteItem" ],
+ [ "AWS/DynamoDB", "ThrottledRequests", "TableName", "${self:custom.funktor.JobsTableName}", "Operation", "DeleteItem" ],
  [ "...", "PutItem" ]
  ],
  "view": "timeSeries",
data/lib/funktor/cli/templates/funktor_config/resources/incoming_job_queue.yml CHANGED

@@ -3,11 +3,11 @@ Resources:
  Type: AWS::SQS::Queue
  Properties:
  QueueName: ${self:custom.funktor.IncomingJobQueueName}
- VisibilityTimeout: 300
+ VisibilityTimeout: ${self:custom.funktor.IncomingJobHandler.visibilityTimeout}
  RedrivePolicy:
  deadLetterTargetArn:
  "Fn::GetAtt": [ IncomingJobDeadLetterQueue, Arn ]
- maxReceiveCount: 5
+ maxReceiveCount: ${self:custom.funktor.IncomingJobHandler.maxReceiveCount}
  IncomingJobDeadLetterQueue:
  Type: AWS::SQS::Queue
  Properties:
@@ -0,0 +1,56 @@
+ Resources:
+   JobsTable:
+     Type: AWS::DynamoDB::Table
+     Properties:
+       TableName: ${self:custom.funktor.JobsTableName}
+       AttributeDefinitions:
+         - AttributeName: jobShard
+           AttributeType: N
+         - AttributeName: jobId
+           AttributeType: S
+         - AttributeName: category
+           AttributeType: S
+         - AttributeName: performAt
+           AttributeType: S
+         - AttributeName: queueable
+           AttributeType: S
+       KeySchema:
+         - AttributeName: jobShard
+           KeyType: HASH
+         - AttributeName: jobId
+           KeyType: RANGE
+       GlobalSecondaryIndexes:
+         - IndexName: performAtIndex
+           KeySchema:
+             - AttributeName: queueable
+               KeyType: HASH
+             - AttributeName: performAt
+               KeyType: RANGE
+           Projection:
+             NonKeyAttributes:
+               - jobId
+               - payload
+               - category
+             ProjectionType: INCLUDE
+         - IndexName: categoryIndex
+           KeySchema:
+             - AttributeName: category
+               KeyType: HASH
+             - AttributeName: performAt
+               KeyType: RANGE
+           Projection:
+             NonKeyAttributes:
+               - jobId
+               - payload
+               - category
+             ProjectionType: INCLUDE
+       BillingMode: PAY_PER_REQUEST
+       # Set the capacity based on the stage
+       #ProvisionedThroughput:
+         #ReadCapacityUnits: ${self:custom.tableThroughput}
+         #WriteCapacityUnits: ${self:custom.tableThroughput}
+
+ Outputs:
+   JobsTable:
+     Value:
+       Ref: JobsTable
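
The performAtIndex GSI above is what lets the new JobActivator find due jobs without scanning the whole table: queueable is the hash key, performAt the range key, and jobId, payload, and category are projected in. A minimal sketch of querying that index for ready jobs (illustrative only, not the gem's actual JobActivator code; the table name env var matches the one used by IncomingJobHandler below):

    require 'aws-sdk-dynamodb'
    require 'time'

    # Sketch: fetch jobs whose performAt has passed and that are still queueable.
    client = Aws::DynamoDB::Client.new
    resp = client.query(
      table_name: ENV['FUNKTOR_JOBS_TABLE'],
      index_name: 'performAtIndex',
      key_condition_expression: 'queueable = :queueable AND performAt <= :now',
      expression_attribute_values: {
        ':queueable' => 'true',
        ':now' => Time.now.utc.iso8601
      }
    )
    resp.items.each do |item|
      # Only the projected attributes (plus the table keys) come back: jobId, payload, category.
      puts "#{item['jobId']} is due (#{item['category']})"
    end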
data/lib/funktor/cli/templates/funktor_config/resources/work_queue.yml CHANGED

@@ -3,11 +3,11 @@ Resources:
  Type: AWS::SQS::Queue
  Properties:
  QueueName: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueName}
- VisibilityTimeout: 300
+ VisibilityTimeout: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.visibilityTimeout}
  RedrivePolicy:
  deadLetterTargetArn:
  "Fn::GetAtt": [ <%= work_queue_name.camelize %>DeadLetterQueue, Arn ]
- maxReceiveCount: 5
+ maxReceiveCount: ${self:custom.funktor.<%= work_queue_name.camelize %>QueueHandler.maxReceiveCount}
  <%= work_queue_name.camelize %>DeadLetterQueue:
  Type: AWS::SQS::Queue
  Properties:
data/lib/funktor/cli/templates/funktor_init.yml.tt CHANGED

@@ -10,7 +10,7 @@ handlerDefaults:
  # to a handler at one time, so you'll want this to be at least 10x the maximum time you
  # expect to spend for one message. We default to a high number here to allow for the
  # times when things go weird.
- timeout: 300
+ functionTimeout: 300

  # reservedConcurrency represents the maximum number of concurrent executions.
  # Usually you'll want to leave this as null so that handlers can scale infinitely
@@ -29,19 +29,21 @@ handlerDefaults:
  memorySize: 256

  # You can set the batch size. Max of 10_000 for normal queues, 10 for FIFO.
- batchSize: 10
+ batchSize: 1

  # How many seconds should AWS wait for a batch to fill up before executing lambda?
  # For immediate execution set the batch size to 1.
- maximumBatchingWindow : 1
+ maximumBatchingWindow : 0

  # Visibility timeout should only come into play in the case of Funktor errors.
  # Application level errors should be handled by Funktor retry mechanisms.
  # The visibility timeout should be at least as long as the function timeout, and up to 6 times larger.
- visibilityTimeout: 900
+ visibilityTimeout: 1800

- # Set log rentention to save money
- logRetentionInDays: 30
+ # Max recieve count affects how many times a job will retry that has been throttled at the SQS -> Lambda boundary.
+ # Amazon recommend this be at least 5, but I've found that higher numbers are better to avoid legit jobs ending
+ # up in the dead letter queue
+ maxReceiveCount: 20

  # Incoming Job Handler
  incomingJobHandler:
@@ -49,6 +51,8 @@ incomingJobHandler:
  # to quickly handle jobs at the beginning of a burst. Uncomment the line below if so.
  # provisionedConcurrency: 4

+ jobActivator:
+ # You probably don't need to adjust the defaults for this one.

  queues:
  - default:
@@ -56,14 +60,10 @@ queues:
  # memorySize: 512
  # TODO - Is it advisable to use FIFO queuues with Funktor? Maybe this isn't really even supported by CloudFormation?
  # fifo: false
- - singleThread:
- reservedConcurrency: 1
+ - lowConcurrency:
+ # BEWARE - Setting very low concurrency values (5 or lower) can contribute to "SQS Overpull", so you probably don't
+ # want to have any queues with extremely low concurrency.
+ # Details about "SQS Overpull" can be found in this article:
+ # https://zaccharles.medium.com/reproducing-the-sqs-trigger-and-lambda-concurrency-limit-issue-f4c09d384a18
+ reservedConcurrency: 10

- # TODO - Maybe this shouldn't be surfaced this early?
- # TODO - This still needs to be wired up to do anything.
- package:
- patterns:
- - Gemfile
- - Gemfile.lock
- - app/**
- - funktor_config/**
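
The handlerDefaults block is meant to be overridden per handler (and per queue). A rough sketch of how a per-handler lookup with fallback to the defaults could work (hypothetical helper for illustration; the real merge happens in the CLI's template rendering):

    require 'yaml'

    # Sketch: resolve one setting for a named handler, falling back to handlerDefaults.
    config = YAML.load_file('funktor_init.yml')

    def setting_for(config, handler_name, key)
      handler = config[handler_name] || {}
      handler.fetch(key, config['handlerDefaults'][key])
    end

    setting_for(config, 'incomingJobHandler', 'functionTimeout')    # => 300 unless overridden
    setting_for(config, 'incomingJobHandler', 'visibilityTimeout')  # => 1800 from the defaults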
@@ -0,0 +1,8 @@
+ require_relative '../funktor_config/boot'
+
+ $handler = Funktor::JobActivator.new
+
+ def call(event:, context:)
+   $handler.call(event: event, context: context)
+ end
+
data/lib/funktor/cli/templates/lambda_event_handlers/work_queue_handler.rb CHANGED

@@ -1,6 +1,6 @@
  require_relative '../funktor_config/boot'

- $handler = Funktor::ActiveJobHandler.new
+ $handler = Funktor::WorkQueueHandler.new

  def call(event:, context:)
  $handler.call(event: event, context: context)
data/lib/funktor/cli/templates/serverless.yml CHANGED

@@ -3,8 +3,8 @@
  # This file is the main config file for your service.
  # It's already configured to run Funktor, you just have to deploy it.
  #
- # For more info about Funktor:
- # TODO
+ # For more info about Funktor check the wiki:
+ # https://github.com/Octo-Labs/funktor/wiki
  #
  # For more about serverless, check their docs:
  # docs.serverless.com
@@ -26,6 +26,7 @@ provider:
  lambdaHashingVersion: 20201221
  environment: ${file(funktor_config/environment.yml)}
  versionFunctions: false # Reduces the amount of storage used since all Lambdas together are limited to 75GB
+ logRetentionInDays: 7
  iamRoleStatements:
  <%- all_iam_permissions.each do |iam_permission| -%>
  - ${file(<%= iam_permission %>)}
data/lib/funktor/counter.rb CHANGED

@@ -11,7 +11,10 @@ module Funktor
  end

  def put_metric_to_stdout(job)
- puts Funktor.dump_json(metric_hash(job))
+ # NOTE : We use raw puts here instead of Funktor.logger.something to avoid getting extra
+ # timestamps or log level information in the log line. We need this specific format to
+ # be the only thing in the line so that CloudWatch can parse the logs and use the data.
+ Funktor.raw_logger.unknown Funktor.dump_json(metric_hash(job))
  end

  def metric_hash(job)
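
Funktor.raw_logger itself isn't shown in this changeset (it is part of the funktor.rb changes), but the note above implies a logger whose formatter emits only the message, so the JSON document is the entire log line and CloudWatch can parse it. A plausible minimal setup, offered as an assumption rather than the gem's actual code:

    require 'logger'

    # Sketch: a stdout logger that writes just the message, with no timestamp or severity prefix.
    raw_logger = Logger.new($stdout)
    raw_logger.formatter = proc { |_severity, _time, _progname, msg| "#{msg}\n" }

    raw_logger.unknown('{"queue":"default","processed":1}')   # illustrative payload
    # prints exactly: {"queue":"default","processed":1}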
data/lib/funktor/incoming_job_handler.rb CHANGED

@@ -1,12 +1,17 @@
  require 'aws-sdk-sqs'
+ require 'aws-sdk-dynamodb'
  require 'active_support/core_ext/string/inflections'

  module Funktor
  class IncomingJobHandler

+ def initialize
+ @tracker = Funktor::ActivityTracker.new
+ end
+
  def call(event:, context:)
  event = Funktor::Aws::Sqs::Event.new(event)
- puts "event.jobs.count = #{event.jobs.count}"
+ Funktor.logger.debug "event.jobs.count = #{event.jobs.count}"
  event.jobs.each do |job|
  dispatch(job)
  end
@@ -16,32 +21,63 @@ module Funktor
  @sqs_client ||= ::Aws::SQS::Client.new
  end

+ def dynamodb_client
+ @dynamodb_client ||= ::Aws::DynamoDB::Client.new
+ end
+
  def dispatch(job)
  Funktor.incoming_job_handler_middleware.invoke(job) do
- puts "pushing to active_job_queue for delay = #{job.delay}"
- push_to_active_job_queue(job)
+ # TODO : This number should be configurable via ENV var
+ if job.delay < 60 # for now we're testing with just one minute * 5 # 5 minutes
+ Funktor.logger.debug "pushing to work queue for delay = #{job.delay}"
+ # We push to the jobs table first becauase the work queue handler will expect to be able
+ # to update the stats of a record that's already in the table.
+ # TODO : For time sensitive jobs this is probably less than optimal. Can we update the
+ # work queue handler to be ok with a job that's not yet in the table?
+ push_to_jobs_table(job, "queued")
+ push_to_work_queue(job)
+ if job.is_retry?
+ @tracker.track(:retryActivated, job)
+ else
+ @tracker.track(:queued, job)
+ end
+ else
+ Funktor.logger.debug "pushing to jobs table for delay = #{job.delay}"
+ push_to_jobs_table(job, nil)
+ if job.is_retry?
+ # do nothing for tracking
+ else
+ @tracker.track(:scheduled, job)
+ end
+ end
+ @tracker.track(:incoming, job)
  end
  end

- def active_job_queue
- ENV['FUNKTOR_ACTIVE_JOB_QUEUE']
+ def push_to_work_queue(job)
+ Funktor.logger.debug "job = #{job.to_json}"
+ sqs_client.send_message({
+ queue_url: job.work_queue_url,
+ message_body: job.to_json,
+ delay_seconds: job.delay
+ })
  end

- def queue_for_job(job)
- queue_name = job.queue || 'default'
- queue_constant = "FUNKTOR_#{queue_name.underscore.upcase}_QUEUE"
- puts "queue_constant = #{queue_constant}"
- puts "ENV value = #{ENV[queue_constant]}"
- ENV[queue_constant] || ENV['FUNKTOR_DEFAULT_QUEUE']
+ def delayed_job_table
+ ENV['FUNKTOR_JOBS_TABLE']
  end

- def push_to_active_job_queue(job)
- puts "job = #{job.to_json}"
- sqs_client.send_message({
- # TODO : How to get this URL...
- queue_url: queue_for_job(job),
- message_body: job.to_json,
- delay_seconds: job.delay
+ def push_to_jobs_table(job, category = nil)
+ resp = dynamodb_client.put_item({
+ item: {
+ payload: job.to_json,
+ jobId: job.job_id,
+ performAt: job.perform_at.iso8601,
+ jobShard: job.shard,
+ queueable: category.present? ? "false" : "true",
+ category: category || (job.is_retry? ? "retry" : "scheduled")
+ },
+ table_name: delayed_job_table
  })
  end
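
Reading dispatch and push_to_jobs_table together: a job due in under a minute is written with category "queued" and queueable "false" and then sent straight to its SQS work queue, while anything further out is stored with queueable "true" as "scheduled" (or "retry") so the JobActivator can move it later. The resulting DynamoDB items look roughly like this (values illustrative):

    # Near-term job (delay < 60s): already on the work queue, not queueable.
    {
      jobId: "0f7c2d9a-...",             # job.job_id
      jobShard: 12,                      # job.shard
      performAt: "2021-07-01T12:00:05Z", # job.perform_at.iso8601
      queueable: "false",
      category: "queued",
      payload: "{...serialized job...}"
    }

    # Delayed job (delay >= 60s): waits in the table until the activator picks it up.
    {
      jobId: "a1b2c3d4-...",
      jobShard: 3,
      performAt: "2021-07-01T18:30:00Z",
      queueable: "true",
      category: "scheduled",             # "retry" when job.is_retry?
      payload: "{...serialized job...}"
    }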
 
data/lib/funktor/job.rb CHANGED
@@ -1,5 +1,8 @@
+ require_relative 'shard_utils'
+
  module Funktor
  class Job
+ include ShardUtils
  attr_accessor :job_string
  attr_accessor :job_data
  def initialize(job_string)
@@ -11,7 +14,15 @@ module Funktor
  end

  def queue
- job_data["queue"]
+ job_data["queue"] || 'default'
+ end
+
+ def work_queue_url
+ queue_name = self.queue
+ queue_constant = "FUNKTOR_#{queue_name.underscore.upcase}_QUEUE"
+ Funktor.logger.debug "queue_constant = #{queue_constant}"
+ Funktor.logger.debug "ENV value = #{ENV[queue_constant]}"
+ ENV[queue_constant] || ENV['FUNKTOR_DEFAULT_QUEUE']
  end

  def worker_class_name
@@ -22,6 +33,10 @@ module Funktor
  job_data["job_id"]
  end

+ def shard
+ calculate_shard(job_data["job_id"])
+ end
+
  def worker_params
  job_data["worker_params"]
  end
@@ -30,20 +45,51 @@ module Funktor
  job_data["retries"] || 0
  end

+ def is_retry?
+ job_data["retries"].present?
+ end
+
  def retries=(retries)
  job_data["retries"] = retries
  end

+ def perform_at
+ if job_data["perform_at"].present?
+ job_data["perform_at"].is_a?(Time) ? job_data["perform_at"] : Time.parse(job_data["perform_at"])
+ else
+ Time.now.utc
+ end
+ end
+
  def delay
- # TODO - In Funktor Pro we need to override this method (or do something else) so that
- # we can schedule jobs farther in the future than 15 minutes. We do this here in case a
- # retry sequence goes too long.
- jdelay = job_data["delay"] || 0
- return jdelay < 900 ? jdelay : 900
+ delay = (perform_at - Time.now.utc).to_i
+ if delay < 0
+ delay = 0
+ end
+ return delay
  end

  def delay=(delay)
- job_data["delay"] = delay
+ job_data["perform_at"] = Time.now.utc + delay
+ end
+
+ def error_class
+ job_data["error_class"]
+ end
+
+ def error_message
+ job_data["error_message"]
+ end
+
+ def error_backtrace
+ job_data["error_backtrace"].present? ? Funktor.parse_json(job_data["error_backtrace"]) : []
+ end
+
+ def error=(error)
+ # TODO We should maybe compress this?
+ job_data["error_class"] = error.class.name
+ job_data["error_message"] = error.message
+ job_data["error_backtrace"] = Funktor.dump_json(error.backtrace)
  end

  def execute
@@ -79,6 +125,10 @@ module Funktor

  def retry_queue_url
  worker_class&.custom_queue_url || ENV['FUNKTOR_INCOMING_JOB_QUEUE']
+ rescue NameError, TypeError
+ # In the web ui we may not have access to the the worker classes
+ # TODO : We should mayb handle this differently somehow? This just feels a bit icky...
+ ENV['FUNKTOR_INCOMING_JOB_QUEUE']
  end
  end
  end
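
The delay handling changes from storing a capped "delay" value to deriving it from perform_at, which removes the old 900 second ceiling. A quick, illustrative round trip (payload keys taken from the accessors above; exact construction details may differ):

    job = Funktor::Job.new('{"job_id": "abc123", "queue": "default"}')

    job.delay          # => 0, no perform_at present means "run now"
    job.delay = 3600   # stores job_data["perform_at"] = Time.now.utc + 3600
    job.delay          # => ~3600, recomputed from perform_at on every call
    job.perform_at     # => a Time roughly one hour from now
    job.is_retry?      # => false until retries is set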