funktor 0.4.7 → 0.5.0

Files changed (76)
  1. checksums.yaml +4 -4
  2. data/.tool-versions +2 -0
  3. data/Gemfile.lock +7 -3
  4. data/funktor-testapp/.envrc +1 -0
  5. data/funktor-testapp/.gitignore +7 -0
  6. data/funktor-testapp/Gemfile +25 -0
  7. data/funktor-testapp/Gemfile.lock +51 -0
  8. data/funktor-testapp/app/services/job_flood.rb +38 -0
  9. data/funktor-testapp/app/workers/audit_worker.rb +49 -0
  10. data/funktor-testapp/app/workers/greetings_worker.rb +3 -0
  11. data/funktor-testapp/app/workers/hello_worker.rb +18 -0
  12. data/funktor-testapp/deploy-dev.sh +5 -0
  13. data/funktor-testapp/funktor_config/boot.rb +17 -0
  14. data/funktor-testapp/funktor_config/environment.yml +15 -0
  15. data/funktor-testapp/funktor_config/function_definitions/default_queue_handler.yml +11 -0
  16. data/funktor-testapp/funktor_config/function_definitions/incoming_job_handler.yml +11 -0
  17. data/funktor-testapp/funktor_config/function_definitions/job_activator.yml +8 -0
  18. data/funktor-testapp/funktor_config/function_definitions/random_job_generator.yml +18 -0
  19. data/funktor-testapp/funktor_config/function_definitions/single_thread_queue_handler.yml +11 -0
  20. data/funktor-testapp/funktor_config/funktor.yml +114 -0
  21. data/funktor-testapp/funktor_config/iam_permissions/activity_table.yml +5 -0
  22. data/funktor-testapp/funktor_config/iam_permissions/default_queue.yml +8 -0
  23. data/funktor-testapp/funktor_config/iam_permissions/incoming_job_queue.yml +8 -0
  24. data/funktor-testapp/funktor_config/iam_permissions/jobs_table.yml +5 -0
  25. data/funktor-testapp/funktor_config/iam_permissions/jobs_table_secondary_index.yml +8 -0
  26. data/funktor-testapp/funktor_config/iam_permissions/single_thread_queue.yml +8 -0
  27. data/funktor-testapp/funktor_config/iam_permissions/ssm.yml +5 -0
  28. data/funktor-testapp/funktor_config/package.yml +11 -0
  29. data/funktor-testapp/funktor_config/resources/activity_table.yml +22 -0
  30. data/funktor-testapp/funktor_config/resources/cloudwatch_dashboard.yml +804 -0
  31. data/funktor-testapp/funktor_config/resources/default_queue.yml +22 -0
  32. data/funktor-testapp/funktor_config/resources/incoming_job_queue.yml +22 -0
  33. data/funktor-testapp/funktor_config/resources/incoming_job_queue_user.yml +26 -0
  34. data/funktor-testapp/funktor_config/resources/jobs_table.yml +44 -0
  35. data/funktor-testapp/funktor_config/resources/single_thread_queue.yml +22 -0
  36. data/funktor-testapp/funktor_config/ruby_layer.yml +11 -0
  37. data/funktor-testapp/funktor_init.yml +61 -0
  38. data/funktor-testapp/lambda_event_handlers/default_queue_handler.rb +8 -0
  39. data/funktor-testapp/lambda_event_handlers/incoming_job_handler.rb +8 -0
  40. data/funktor-testapp/lambda_event_handlers/job_activator.rb +8 -0
  41. data/funktor-testapp/lambda_event_handlers/random_job_generator.rb +35 -0
  42. data/funktor-testapp/lambda_event_handlers/single_thread_queue_handler.rb +8 -0
  43. data/funktor-testapp/package-lock.json +248 -0
  44. data/funktor-testapp/package.json +8 -0
  45. data/funktor-testapp/serverless.yml +65 -0
  46. data/funktor.gemspec +1 -0
  47. data/lib/active_job/queue_adapters/funktor_adapter.rb +3 -3
  48. data/lib/funktor.rb +39 -7
  49. data/lib/funktor/activity_tracker.rb +102 -0
  50. data/lib/funktor/cli/bootstrap.rb +0 -1
  51. data/lib/funktor/cli/init.rb +13 -0
  52. data/lib/funktor/cli/templates/app/workers/hello_worker.rb +1 -1
  53. data/lib/funktor/cli/templates/funktor_config/environment.yml +4 -0
  54. data/lib/funktor/cli/templates/funktor_config/function_definitions/job_activator.yml +8 -0
  55. data/lib/funktor/cli/templates/funktor_config/funktor.yml +28 -2
  56. data/lib/funktor/cli/templates/funktor_config/iam_permissions/activity_table.yml +5 -0
  57. data/lib/funktor/cli/templates/funktor_config/iam_permissions/jobs_table.yml +5 -0
  58. data/lib/funktor/cli/templates/funktor_config/iam_permissions/jobs_table_secondary_index.yml +8 -0
  59. data/lib/funktor/cli/templates/funktor_config/resources/activity_table.yml +22 -0
  60. data/lib/funktor/cli/templates/funktor_config/resources/cloudwatch_dashboard.yml +10 -10
  61. data/lib/funktor/cli/templates/funktor_config/resources/jobs_table.yml +44 -0
  62. data/lib/funktor/cli/templates/funktor_init.yml.tt +2 -8
  63. data/lib/funktor/cli/templates/lambda_event_handlers/job_activator.rb +8 -0
  64. data/lib/funktor/cli/templates/lambda_event_handlers/work_queue_handler.rb +1 -1
  65. data/lib/funktor/cli/templates/serverless.yml +2 -2
  66. data/lib/funktor/counter.rb +4 -1
  67. data/lib/funktor/incoming_job_handler.rb +52 -12
  68. data/lib/funktor/job.rb +10 -5
  69. data/lib/funktor/job_activator.rb +98 -0
  70. data/lib/funktor/job_pusher.rb +0 -2
  71. data/lib/funktor/middleware/metrics.rb +8 -3
  72. data/lib/funktor/testing.rb +49 -47
  73. data/lib/funktor/version.rb +1 -1
  74. data/lib/funktor/{active_job_handler.rb → work_queue_handler.rb} +17 -15
  75. data/lib/funktor/worker.rb +0 -7
  76. metadata +69 -3
@@ -438,7 +438,7 @@ Resources:
  "type": "metric",
  "properties": {
  "metrics": [
- [ "AWS/DynamoDB", "ReturnedItemCount", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", "Operation", "Query" ]
+ [ "AWS/DynamoDB", "ReturnedItemCount", "TableName", "${self:custom.funktor.JobsTableName}", "Operation", "Query" ]
  ],
  "view": "singleValue",
  "region": "us-east-1",
@@ -455,7 +455,7 @@ Resources:
  "type": "metric",
  "properties": {
  "metrics": [
- [ "AWS/Lambda", "Duration", "FunctionName", "${self:service}-${self:provider.stage}-delayed_job_scheduler", "Resource", "${self:service}-${self:provider.stage}-delayed_job_scheduler", { "label": "p10" } ],
+ [ "AWS/Lambda", "Duration", "FunctionName", "${self:custom.funktor.JobActivatorName}", "Resource", "${self:custom.funktor.JobActivatorName}", { "label": "p10" } ],
  [ "...", { "label": "p50", "stat": "p50" } ],
  [ "...", { "label": "p99", "stat": "p99" } ],
  [ "...", { "label": "Average", "stat": "Average" } ]
@@ -480,7 +480,7 @@ Resources:
  "properties": {
  "period": 60,
  "metrics": [
- [ "AWS/Lambda", "Errors", "FunctionName", "${self:service}-${self:provider.stage}-delayed_job_scheduler", { "id": "errors", "stat": "Sum", "color": "#d13212" } ],
+ [ "AWS/Lambda", "Errors", "FunctionName", "${self:custom.funktor.JobActivatorName}", { "id": "errors", "stat": "Sum", "color": "#d13212" } ],
  [ ".", "Invocations", ".", ".", { "id": "invocations", "stat": "Sum", "visible": false } ],
  [ { "expression": "100 - 100 * errors / MAX([errors, invocations])", "label": "Success rate (%)", "id": "availability", "yAxis": "right", "region": "us-east-1" } ]
  ],
@@ -505,7 +505,7 @@ Resources:
  "properties": {
  "period": 60,
  "metrics": [
- [ "AWS/Lambda", "Duration", "FunctionName", "${self:service}-${self:provider.stage}-delayed_job_scheduler", { "stat": "Minimum" } ],
+ [ "AWS/Lambda", "Duration", "FunctionName", "${self:custom.funktor.JobActivatorName}", { "stat": "Minimum" } ],
  [ "...", { "stat": "Average" } ],
  [ "...", { "stat": "Maximum" } ]
  ],
@@ -524,7 +524,7 @@ Resources:
  "type": "metric",
  "properties": {
  "metrics": [
- [ "AWS/DynamoDB", "ReturnedItemCount", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", "Operation", "Query" ]
+ [ "AWS/DynamoDB", "ReturnedItemCount", "TableName", "${self:custom.funktor.JobsTableName}", "Operation", "Query" ]
  ],
  "view": "timeSeries",
  "stacked": false,
@@ -547,7 +547,7 @@ Resources:
  "properties": {
  "metrics": [
  [ { "expression": "m2/PERIOD(m2)", "label": "Consumed Read Capacity Units", "id": "e1", "stat": "Sum", "region": "us-east-1" } ],
- [ "AWS/DynamoDB", "ConsumedReadCapacityUnits", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", { "id": "m2", "visible": false } ],
+ [ "AWS/DynamoDB", "ConsumedReadCapacityUnits", "TableName", "${self:custom.funktor.JobsTableName}", { "id": "m2", "visible": false } ],
  [ ".", "ConsumedWriteCapacityUnits", ".", ".", { "yAxis": "left", "id": "m4", "visible": false } ],
  [ ".", "WriteThrottleEvents", ".", ".", { "yAxis": "right", "id": "m5", "visible": false } ]
  ],
@@ -569,7 +569,7 @@ Resources:
  "properties": {
  "period": 60,
  "metrics": [
- [ "AWS/Lambda", "ConcurrentExecutions", "FunctionName", "${self:service}-${self:provider.stage}-delayed_job_scheduler", { "stat": "Maximum" } ]
+ [ "AWS/Lambda", "ConcurrentExecutions", "FunctionName", "${self:custom.funktor.JobActivatorName}", { "stat": "Maximum" } ]
  ],
  "region": "us-east-1",
  "title": "Delayd Job Schedule Concurrent executions",
@@ -587,7 +587,7 @@ Resources:
  "properties": {
  "metrics": [
  [ { "expression": "m4/PERIOD(m4)", "label": "Consumed Read Capacity Units", "id": "e1", "stat": "Sum", "region": "us-east-1" } ],
- [ "AWS/DynamoDB", "ConsumedReadCapacityUnits", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", { "id": "m2", "visible": false } ],
+ [ "AWS/DynamoDB", "ConsumedReadCapacityUnits", "TableName", "${self:custom.funktor.JobsTableName}", { "id": "m2", "visible": false } ],
  [ ".", "ConsumedWriteCapacityUnits", ".", ".", { "yAxis": "left", "id": "m4", "visible": false } ],
  [ ".", "WriteThrottleEvents", ".", ".", { "yAxis": "right", "id": "m5", "visible": false } ]
  ],
@@ -612,7 +612,7 @@ Resources:
  "type": "metric",
  "properties": {
  "metrics": [
- [ "AWS/DynamoDB", "SuccessfulRequestLatency", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", "Operation", "PutItem", { "yAxis": "left" } ],
+ [ "AWS/DynamoDB", "SuccessfulRequestLatency", "TableName", "${self:custom.funktor.JobsTableName}", "Operation", "PutItem", { "yAxis": "left" } ],
  [ "...", "Query" ],
  [ ".", "ThrottledRequests", ".", ".", ".", "PutItem", { "yAxis": "right", "visible": false } ],
  [ ".", "SuccessfulRequestLatency", ".", ".", ".", "DeleteItem" ],
@@ -635,7 +635,7 @@ Resources:
  "type": "metric",
  "properties": {
  "metrics": [
- [ "AWS/DynamoDB", "ThrottledRequests", "TableName", "${self:service}-${self:custom.stage}-delayed-jobs", "Operation", "DeleteItem" ],
+ [ "AWS/DynamoDB", "ThrottledRequests", "TableName", "${self:custom.funktor.JobsTableName}", "Operation", "DeleteItem" ],
  [ "...", "PutItem" ]
  ],
  "view": "timeSeries",
@@ -0,0 +1,44 @@
+ Resources:
+   JobsTable:
+     Type: AWS::DynamoDB::Table
+     Properties:
+       TableName: ${self:custom.funktor.JobsTableName}
+       AttributeDefinitions:
+         - AttributeName: jobShard
+           AttributeType: N
+         - AttributeName: jobId
+           AttributeType: S
+         #- AttributeName: category
+         #AttributeType: S
+         - AttributeName: performAt
+           AttributeType: S
+         - AttributeName: dummy
+           AttributeType: S
+       KeySchema:
+         - AttributeName: jobShard
+           KeyType: HASH
+         - AttributeName: jobId
+           KeyType: RANGE
+       GlobalSecondaryIndexes:
+         - IndexName: performAtIndex
+           KeySchema:
+             - AttributeName: dummy
+               KeyType: HASH
+             - AttributeName: performAt
+               KeyType: RANGE
+           Projection:
+             NonKeyAttributes:
+               - jobId
+               - payload
+               - category
+             ProjectionType: INCLUDE
+       BillingMode: PAY_PER_REQUEST
+       # Set the capacity based on the stage
+       #ProvisionedThroughput:
+       #ReadCapacityUnits: ${self:custom.tableThroughput}
+       #WriteCapacityUnits: ${self:custom.tableThroughput}
+
+ Outputs:
+   JobsTable:
+     Value:
+       Ref: JobsTable
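
A note on the table above: every row gets the same constant dummy attribute, so the performAtIndex GSI collects all pending jobs under a single partition key, sorted by performAt. Because performAt is stored as an ISO-8601 UTC string, plain string comparison orders the timestamps chronologically, which is what lets the activator (see the JobActivator hunk further down) ask for "everything due before a target time" with a simple key condition. A minimal Ruby sketch of that property, for illustration only:

    require 'time'

    earlier = (Time.now.utc - 60).iso8601  # e.g. "2021-06-01T12:00:00Z"
    later   = Time.now.utc.iso8601         # e.g. "2021-06-01T12:01:00Z"
    puts(earlier < later)                  # => true; ISO-8601 UTC strings sort chronologically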
@@ -49,6 +49,8 @@ incomingJobHandler:
    # to quickly handle jobs at the beginning of a burst. Uncomment the line below if so.
    # provisionedConcurrency: 4

+ jobActivator:
+   # You probably don't need to adjust the defaults for this one.

  queues:
    - default:
@@ -59,11 +61,3 @@ queues:
    - singleThread:
        reservedConcurrency: 1

- # TODO - Maybe this shouldn't be surfaced this early?
- # TODO - This still needs to be wired up to do anything.
- package:
-   patterns:
-     - Gemfile
-     - Gemfile.lock
-     - app/**
-     - funktor_config/**
@@ -0,0 +1,8 @@
+ require_relative '../funktor_config/boot'
+
+ $handler = Funktor::JobActivator.new
+
+ def call(event:, context:)
+   $handler.call(event: event, context: context)
+ end
+
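
This new handler file follows the same shape as the existing work-queue handlers: $handler is created once at require time, so the Funktor::JobActivator instance is reused across warm Lambda invocations. A rough local smoke test might look like the sketch below; the stub context is hypothetical (JobActivator only calls get_remaining_time_in_millis on it), and actually running it would still need AWS credentials plus the FUNKTOR_* environment variables the activator reads.

    # Hypothetical local exercise of the handler above (not shipped with the gem).
    require_relative 'lambda_event_handlers/job_activator'

    class FakeContext
      # Pretend we have 30 seconds of Lambda time left.
      def get_remaining_time_in_millis
        30_000
      end
    end

    call(event: {}, context: FakeContext.new)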
@@ -1,6 +1,6 @@
  require_relative '../funktor_config/boot'

- $handler = Funktor::ActiveJobHandler.new
+ $handler = Funktor::WorkQueueHandler.new

  def call(event:, context:)
    $handler.call(event: event, context: context)
@@ -3,8 +3,8 @@
  # This file is the main config file for your service.
  # It's already configured to run Funktor, you just have to deploy it.
  #
- # For more info about Funktor:
- # TODO
+ # For more info about Funktor check the wiki:
+ # https://github.com/Octo-Labs/funktor/wiki
  #
  # For more about serverless, check their docs:
  # docs.serverless.com
@@ -11,7 +11,10 @@ module Funktor
      end

      def put_metric_to_stdout(job)
-       puts Funktor.dump_json(metric_hash(job))
+       # NOTE : We use raw puts here instead of Funktor.logger.something to avoid getting extra
+       # timestamps or log level information in the log line. We need this specific format to
+       # be the only thing in the line so that CloudWatch can parse the logs and use the data.
+       Funktor.raw_logger.unknown Funktor.dump_json(metric_hash(job))
      end

      def metric_hash(job)
@@ -1,12 +1,17 @@
  require 'aws-sdk-sqs'
+ require 'aws-sdk-dynamodb'
  require 'active_support/core_ext/string/inflections'

  module Funktor
    class IncomingJobHandler

+     def initialize
+       @tracker = Funktor::ActivityTracker.new
+     end
+
      def call(event:, context:)
        event = Funktor::Aws::Sqs::Event.new(event)
-       puts "event.jobs.count = #{event.jobs.count}"
+       Funktor.logger.debug "event.jobs.count = #{event.jobs.count}"
        event.jobs.each do |job|
          dispatch(job)
        end
@@ -16,34 +21,69 @@ module Funktor
        @sqs_client ||= ::Aws::SQS::Client.new
      end

+     def dynamodb_client
+       @dynamodb_client ||= ::Aws::DynamoDB::Client.new
+     end
+
      def dispatch(job)
        Funktor.incoming_job_handler_middleware.invoke(job) do
-         puts "pushing to active_job_queue for delay = #{job.delay}"
-         push_to_active_job_queue(job)
+         # TODO : This number should be configurable via ENV var
+         if job.delay < 60 # for now we're testing with just one minute * 5 # 5 minutes
+           Funktor.logger.debug "pushing to work queue for delay = #{job.delay}"
+           push_to_work_queue(job)
+           if job.is_retry?
+             @tracker.track(:retryActivated, job)
+           else
+             @tracker.track(:queued, job)
+           end
+         else
+           Funktor.logger.debug "pushing to jobs table for delay = #{job.delay}"
+           push_to_jobs_table(job)
+           if job.is_retry?
+             # do nothing for tracking
+           else
+             @tracker.track(:scheduled, job)
+           end
+         end
+         @tracker.track(:incoming, job)
        end
      end

-     def active_job_queue
-       ENV['FUNKTOR_ACTIVE_JOB_QUEUE']
-     end
-
      def queue_for_job(job)
        queue_name = job.queue || 'default'
        queue_constant = "FUNKTOR_#{queue_name.underscore.upcase}_QUEUE"
-       puts "queue_constant = #{queue_constant}"
-       puts "ENV value = #{ENV[queue_constant]}"
+       Funktor.logger.debug "queue_constant = #{queue_constant}"
+       Funktor.logger.debug "ENV value = #{ENV[queue_constant]}"
        ENV[queue_constant] || ENV['FUNKTOR_DEFAULT_QUEUE']
      end

-     def push_to_active_job_queue(job)
-       puts "job = #{job.to_json}"
+     def push_to_work_queue(job)
+       Funktor.logger.debug "job = #{job.to_json}"
        sqs_client.send_message({
-         # TODO : How to get this URL...
          queue_url: queue_for_job(job),
          message_body: job.to_json,
          delay_seconds: job.delay
        })
      end

+     def delayed_job_table
+       ENV['FUNKTOR_JOBS_TABLE']
+     end
+
+     def push_to_jobs_table(job)
+       perform_at = (Time.now + job.delay).utc
+       resp = dynamodb_client.put_item({
+         item: {
+           payload: job.to_json,
+           jobId: job.job_id,
+           performAt: perform_at.iso8601,
+           jobShard: job.shard,
+           dummy: "dummy",
+           category: job.is_retry? ? "retry" : "scheduled"
+         },
+         table_name: delayed_job_table
+       })
+     end
+
    end
  end
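
The dispatch rule above splits on a 60-second threshold (hard-coded for now, per the TODO): short delays ride on SQS's native delay_seconds, while longer ones are written to the jobs table and picked up later by the job activator. As a rough worked example of what push_to_jobs_table stores for a job enqueued with a one-hour delay, with illustrative values only:

    require 'time'

    delay      = 3600
    perform_at = (Time.now + delay).utc
    item = {
      payload:   '{"worker":"HelloWorker","worker_params":[]}', # illustrative payload
      jobId:     "abc123",                                      # illustrative job id
      performAt: perform_at.iso8601,   # ISO-8601 string, the range key of performAtIndex
      jobShard:  "abc123".hash % 64,   # same formula Job#shard introduces below
      dummy:     "dummy",              # constant hash key shared by every pending job
      category:  "scheduled"           # would be "retry" for a retried job
    }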
data/lib/funktor/job.rb CHANGED
@@ -22,6 +22,11 @@ module Funktor
      job_data["job_id"]
    end

+   def shard
+     # TODO - Should the number of shards be configurable?
+     job_data["job_id"].hash % 64
+   end
+
    def worker_params
      job_data["worker_params"]
    end
@@ -30,16 +35,16 @@ module Funktor
      job_data["retries"] || 0
    end

+   def is_retry?
+     job_data["retries"].present?
+   end
+
    def retries=(retries)
      job_data["retries"] = retries
    end

    def delay
-     # TODO - In Funktor Pro we need to override this method (or do something else) so that
-     # we can schedule jobs farther in the future than 15 minutes. We do this here in case a
-     # retry sequence goes too long.
-     jdelay = job_data["delay"] || 0
-     return jdelay < 900 ? jdelay : 900
+     job_data["delay"] || 0
    end

    def delay=(delay)
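
Two notes on this hunk: shard spreads jobs across 64 DynamoDB partition keys (Ruby's String#hash is seeded per process, which is fine here because the shard is computed once at enqueue time, stored on the item, and read back verbatim by the activator); and the old 900-second clamp on delay is gone because delays beyond the work-queue threshold now go to the jobs table instead of SQS, whose delay_seconds tops out at 15 minutes. A quick worked example of the shard formula:

    # Values differ between Ruby processes, but always land in 0..63.
    job_id = "b7f4a1de-9c3b-4a6e-8f21-0d5c9e7a1234"  # hypothetical job id
    shard  = job_id.hash % 64
    puts shard  # e.g. 37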
@@ -0,0 +1,98 @@
+ require 'aws-sdk-dynamodb'
+ require 'aws-sdk-sqs'
+
+ module Funktor
+   class JobActivator
+
+     def initialize
+       @tracker = Funktor::ActivityTracker.new
+     end
+
+     def dynamodb_client
+       @dynamodb_client ||= ::Aws::DynamoDB::Client.new
+     end
+
+     def sqs_client
+       @sqs_client ||= ::Aws::SQS::Client.new
+     end
+
+     def active_job_queue
+       ENV['FUNKTOR_ACTIVE_JOB_QUEUE']
+     end
+
+     def delayed_job_table
+       ENV['FUNKTOR_JOBS_TABLE']
+     end
+
+     def jobs_to_activate
+       target_time = (Time.now + 90).utc
+       query_params = {
+         expression_attribute_values: {
+           ":dummy" => "dummy",
+           ":targetTime" => target_time.iso8601
+         },
+         key_condition_expression: "dummy = :dummy AND performAt < :targetTime",
+         projection_expression: "payload, performAt, jobId, jobShard",
+         table_name: delayed_job_table,
+         index_name: "performAtIndex"
+       }
+       resp = dynamodb_client.query(query_params)
+       return resp.items
+     end
+
+     def queue_for_job(job)
+       queue_name = job.queue || 'default'
+       queue_constant = "FUNKTOR_#{queue_name.underscore.upcase}_QUEUE"
+       Funktor.logger.debug "queue_constant = #{queue_constant}"
+       Funktor.logger.debug "ENV value = #{ENV[queue_constant]}"
+       ENV[queue_constant] || ENV['FUNKTOR_DEFAULT_QUEUE']
+     end
+
+     def handle_item(item)
+       job = Funktor::Job.new(item["payload"])
+       Funktor.logger.debug "we created a job from payload"
+       Funktor.logger.debug item["payload"]
+       delay = (Time.parse(item["performAt"]) - Time.now.utc).to_i
+       if delay < 0
+         delay = 0
+       end
+       Funktor.logger.debug "jobShard = #{item['jobShard']}"
+       Funktor.logger.debug "jobId = #{item['jobId']}"
+       # First we delete the item from Dynamo to be sure that another scheduler hasn't gotten to it,
+       # and if that works then send to SQS. This is basically how Sidekiq scheduler works.
+       response = dynamodb_client.delete_item({
+         key: {
+           "jobShard" => item["jobShard"],
+           "jobId" => item["jobId"]
+         },
+         table_name: delayed_job_table,
+         return_values: "ALL_OLD"
+       })
+       if response.attributes # this means the record was still there
+         sqs_client.send_message({
+           # TODO : How to get this URL...
+           queue_url: queue_for_job(job),
+           message_body: item["payload"],
+           delay_seconds: delay
+         })
+         if job.is_retry?
+           @tracker.track(:retryActivated, job)
+         else
+           @tracker.track(:scheduledJobActivated, job)
+         end
+       end
+     end
+
+     def call(event:, context:)
+       handled_item_count = 0
+       jobs_to_activate.each do |item|
+         if context.get_remaining_time_in_millis < 5_000 # This lets us exit gracefully and resume on the next round instead of getting forcibly killed.
+           puts "Bailing out due to milliseconds remaining #{context.get_remaining_time_in_millis}"
+           break
+         end
+         handle_item(item)
+         handled_item_count += 1
+       end
+     end
+   end
+ end
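
A couple of numbers make the activator easier to follow: jobs_to_activate looks ahead 90 seconds, so a job due 80 seconds from now is handed to SQS with a delay_seconds of roughly 80, while anything already overdue goes out with a delay of 0; and because delete_item is called with return_values: "ALL_OLD", only the activator whose delete actually removed the row gets response.attributes back, so concurrent runs can't enqueue the same job twice. A small standalone sketch of the delay arithmetic, with illustrative times:

    require 'time'

    # Suppose the item was scheduled to run 80 seconds from now.
    perform_at = (Time.now + 80).utc.iso8601
    delay = (Time.parse(perform_at) - Time.now.utc).to_i  # ~80
    delay = 0 if delay < 0                                # overdue jobs go out immediately
    puts delay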
@@ -4,8 +4,6 @@ module Funktor
    class JobPusher

      def push(payload)
-       puts "payload ============"
-       pp payload
        job_id = SecureRandom.uuid
        payload[:job_id] = job_id

@@ -10,7 +10,12 @@ module Funktor
      end

      def put_metric_to_stdout(time_diff, job)
-       puts Funktor.dump_json(metric_hash(time_diff, job))
+       # NOTE : We use raw_logger here instead of Funktor.logger to avoid getting extra
+       # timestamps or log level information in the log line. We need this specific format to
+       # be the only thing in the line so that CloudWatch can parse the logs and use the data.
+       # 'unknown' is a log level that will always be logged, no matter what is set in the
+       # runtime environment as far as log level.
+       Funktor.raw_logger.unknown Funktor.dump_json(metric_hash(time_diff, job))
      end

      def metric_hash(time_diff_in_seconds, job)
@@ -43,8 +48,8 @@ module Funktor
        end
      end

-     Funktor.configure_active_job_handler do |config|
-       config.active_job_handler_middleware do |chain|
+     Funktor.configure_work_queue_handler do |config|
+       config.work_queue_handler_middleware do |chain|
          chain.add Funktor::Middleware::Metrics
        end
      end