postjob 0.2.2 → 0.3.0

Files changed (37)
  1. checksums.yaml +4 -4
  2. data/lib/postjob/cli/db.rb +3 -0
  3. data/lib/postjob/cli/job.rb +15 -7
  4. data/lib/postjob/cli/ps.rb +31 -4
  5. data/lib/postjob/cli/run.rb +4 -2
  6. data/lib/postjob/migrations/002_statuses.rb +18 -0
  7. data/lib/postjob/migrations/003_postjobs.sql +41 -0
  8. data/lib/postjob/migrations/003a_processing.sql +3 -0
  9. data/lib/postjob/migrations/003b_processing_columns.sql +31 -0
  10. data/lib/postjob/migrations/004_tokens.sql +9 -0
  11. data/lib/postjob/migrations/005_helpers.sql +28 -0
  12. data/lib/postjob/migrations/006_enqueue.sql +44 -0
  13. data/lib/postjob/migrations/006a_processing.sql +48 -0
  14. data/lib/postjob/migrations/007_job_results.sql +157 -0
  15. data/lib/postjob/migrations/008_checkout_runnable.sql +78 -0
  16. data/lib/postjob/migrations/008a_childjobs.sql +82 -0
  17. data/lib/postjob/migrations/009_tokens.sql +25 -0
  18. data/lib/postjob/migrations.rb +30 -77
  19. data/lib/postjob/queue/encoder.rb +1 -1
  20. data/lib/postjob/queue/notifications.rb +6 -21
  21. data/lib/postjob/queue/search.rb +4 -4
  22. data/lib/postjob/queue.rb +47 -209
  23. data/lib/postjob/runner.rb +22 -13
  24. data/lib/postjob.rb +21 -19
  25. data/spec/postjob/enqueue_spec.rb +26 -14
  26. data/spec/postjob/job_control/error_status_spec.rb +2 -2
  27. data/spec/postjob/job_control/manual_spec.rb +4 -6
  28. data/spec/postjob/job_control/max_attempts_spec.rb +3 -1
  29. data/spec/postjob/process_job_spec.rb +3 -2
  30. data/spec/postjob/queue/encoder_spec.rb +4 -0
  31. data/spec/postjob/run_spec.rb +1 -1
  32. data/spec/postjob/step_spec.rb +2 -2
  33. data/spec/postjob/{sub_workflow_spec.rb → workflows/child_workflow_spec.rb} +2 -2
  34. data/spec/spec_helper.rb +1 -1
  35. data/spec/support/configure_database.rb +1 -0
  36. data/spec/support/test_helper.rb +4 -4
  37. metadata +19 -7
data/lib/postjob/migrations/008a_childjobs.sql ADDED
@@ -0,0 +1,82 @@
+ CREATE OR REPLACE FUNCTION {SCHEMA_NAME}.childjobs(v_parent_id BIGINT)
+ RETURNS SETOF {SCHEMA_NAME}.postjobs AS $$
+ BEGIN
+ RETURN QUERY
+ SELECT {TABLE_NAME}.* FROM {TABLE_NAME}
+ WHERE parent_id=v_parent_id
+ ORDER BY id;
+ END;
+ $$ LANGUAGE plpgsql;
+
+ CREATE OR REPLACE FUNCTION {SCHEMA_NAME}.next_unresolved_childjob(v_parent_id BIGINT)
+ RETURNS SETOF {SCHEMA_NAME}.postjobs AS $$
+ BEGIN
+ RETURN QUERY
+ SELECT {TABLE_NAME}.* FROM {TABLE_NAME}
+ WHERE status NOT IN ('ok', 'failed') AND parent_id=v_parent_id
+ ORDER BY next_run_at;
+ END;
+ $$ LANGUAGE plpgsql;
+
+ CREATE OR REPLACE FUNCTION {SCHEMA_NAME}.find_or_create_childjob(
+ v_queue VARCHAR,
+ v_workflow VARCHAR,
+ v_workflow_method VARCHAR,
+ v_workflow_version VARCHAR,
+ v_args JSONB,
+ v_parent_id BIGINT,
+ v_tags JSONB,
+ v_max_attempts INTEGER,
+ v_timeout DOUBLE PRECISION)
+ RETURNS SETOF {SCHEMA_NAME}.postjobs AS $$
+ DECLARE
+ child_id BIGINT;
+ parent {SCHEMA_NAME}.postjobs;
+ BEGIN
+ IF v_parent_id IS NULL THEN
+ RAISE 'Invalid parent job id NULL';
+ END IF;
+
+ IF v_parent_id IS NOT NULL THEN
+ SELECT INTO parent * FROM {TABLE_NAME} WHERE id=v_parent_id;
+ IF parent.id IS NULL THEN
+ RAISE 'No such job: %', v_parent_id;
+ END IF;
+ END IF;
+
+ -- check for existing child record
+
+ -- IF v_parent_id IS NOT NULL THEN
+ SELECT id INTO child_id FROM {TABLE_NAME}
+ WHERE parent_id=v_parent_id
+ AND workflow=v_workflow
+ AND workflow_method=v_workflow_method
+ AND args=v_args
+ ;
+
+ IF child_id IS NOT NULL THEN
+ -- note that RETURN QUERY does not return the function here. It 'only'
+ -- adds the specified query to the result set.
+ RETURN QUERY
+ SELECT * FROM {TABLE_NAME} WHERE id=child_id
+ ;
+ ELSE
+ IF v_tags IS NOT NULL THEN
+ RAISE WARNING 'Ignoring tags %', v_tags;
+ END IF;
+
+ RETURN QUERY
+ SELECT * FROM enqueue(
+ COALESCE(v_queue, parent.queue), -- queue VARCHAR,
+ v_workflow, -- workflow VARCHAR,
+ v_workflow_method, -- workflow_method VARCHAR,
+ NULL, -- workflow_version VARCHAR,
+ v_args, -- args JSONB,
+ v_parent_id, -- parent_id BIGINT,
+ parent.tags, -- tags JSONB,
+ v_max_attempts, -- max_attempts INTEGER,
+ v_timeout);
+ END IF;
+ -- END IF;
+ END;
+ $$ LANGUAGE plpgsql;
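For orientation, a minimal usage sketch of the three functions above. It assumes the {SCHEMA_NAME} and {TABLE_NAME} placeholders expand to "postjob" and "postjob.postjobs" (the names hardcoded elsewhere in this release, e.g. in 009_tokens.sql), and uses a made-up parent job id and workflow name.

-- all children of job 42, ordered by id
SELECT id, workflow, status FROM postjob.childjobs(42);

-- children of job 42 that are not yet 'ok' or 'failed', ordered by next_run_at
SELECT id, status, next_run_at FROM postjob.next_unresolved_childjob(42);

-- reuse an existing child with the same workflow/method/args, or enqueue a new one
SELECT id, parent_id, status
  FROM postjob.find_or_create_childjob(
    NULL,              -- queue: NULL falls back to the parent's queue
    'MyWorkflow',      -- workflow (hypothetical name)
    'run',             -- workflow_method
    NULL,              -- workflow_version
    '["arg"]'::jsonb,  -- args
    42,                -- parent_id
    NULL,              -- tags (taken from the parent)
    5,                 -- max_attempts
    NULL);             -- timeout, in seconds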
data/lib/postjob/migrations/009_tokens.sql ADDED
@@ -0,0 +1,25 @@
+ CREATE EXTENSION IF NOT EXISTS pgcrypto;
+
+ CREATE OR REPLACE FUNCTION {SCHEMA_NAME}.find_or_create_token(job_id BIGINT) RETURNS VARCHAR AS $$
+ DECLARE
+ v_token UUID;
+ BEGIN
+ SELECT token INTO v_token FROM postjob.tokens WHERE postjob_id=job_id;
+
+ if v_token IS NULL THEN
+ SELECT gen_random_uuid() INTO v_token;
+ INSERT INTO postjob.tokens(postjob_id, token) VALUES(job_id, v_token);
+ END IF;
+ RETURN v_token::varchar;
+ END;
+ $$ LANGUAGE plpgsql;
+
+ CREATE OR REPLACE FUNCTION {SCHEMA_NAME}.postjobs_by_token(v_token UUID)
+ RETURNS SETOF {SCHEMA_NAME}.postjobs AS $$
+ BEGIN
+ RETURN QUERY
+ SELECT {TABLE_NAME}.* FROM {TABLE_NAME}
+ INNER JOIN {SCHEMA_NAME}.tokens ON {SCHEMA_NAME}.tokens.postjob_id={TABLE_NAME}.id
+ WHERE {SCHEMA_NAME}.tokens.token=v_token;
+ END;
+ $$ LANGUAGE plpgsql;
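A matching sketch for the token helpers, under the same assumption that the schema resolves to "postjob"; the job id and UUID below are placeholders.

-- fetch, or lazily create, the access token for job 42
SELECT postjob.find_or_create_token(42);

-- resolve a token back to its postjobs row
SELECT id, workflow, status
  FROM postjob.postjobs_by_token('00000000-0000-0000-0000-000000000000'::uuid);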
data/lib/postjob/migrations.rb CHANGED
@@ -1,97 +1,50 @@
+ # rubocop:disable Security/Eval
+
  module Postjob
  module Migrations
  extend self

  SQL = ::Simple::SQL
- SCHEMA_NAME = Postjob::Queue::SCHEMA_NAME
+ SCHEMA_NAME = ::Postjob::Queue::SCHEMA_NAME
+ CHANNEL = ::Postjob::Queue::Notifications::CHANNEL
+
+ # Note that the SCHEMA_NAME should not be the default name, since unmigrate!
+ # below drops that schema, and we don't want to drop the default schema.
+ expect! SCHEMA_NAME != "public"
+
+ TABLE_NAME = "#{SCHEMA_NAME}.postjobs"

  def unmigrate!
- if SCHEMA_NAME != "public"
- SQL.exec <<~SQL
- DROP SCHEMA IF EXISTS #{SCHEMA_NAME} CASCADE;
- SQL
- end
+ SQL.exec "DROP SCHEMA IF EXISTS #{SCHEMA_NAME} CASCADE"
  end

- PG_TYPES = <<~SQL
- SELECT pg_namespace.nspname AS schema, pg_type.typname AS name
- FROM pg_type
- LEFT JOIN pg_namespace on pg_namespace.oid=pg_type.typnamespace
- SQL
-
  def migrate!
- SQL.exec <<~SQL
- CREATE SCHEMA IF NOT EXISTS #{SCHEMA_NAME};
- SQL
+ SQL.exec "CREATE SCHEMA IF NOT EXISTS #{SCHEMA_NAME}"

- unless SQL.ask("SELECT 1 FROM (#{PG_TYPES}) sq WHERE (schema,name) = ($1, $2)", SCHEMA_NAME, "statuses")
- SQL.exec <<~SQL
- CREATE TYPE #{SCHEMA_NAME}.statuses AS ENUM (
- 'ready', -- process can run
- 'sleep', -- process has external dependencies to wait for.
- 'failed', -- process failed, with nonrecoverable error
- 'err', -- process errored (with recoverable error)
- 'timeout', -- process timed out
- 'ok' -- process succeeded
- );
- SQL
+ Dir.glob(__FILE__.gsub(/\.rb$/, "/**/*.{sql,rb}")).sort.each do |file|
+ run_migration file
  end
+ end

- SQL.exec <<~SQL
- CREATE TABLE IF NOT EXISTS #{SCHEMA_NAME}.postjobs (
- -- id values, readonly once created
- id BIGSERIAL PRIMARY KEY, -- process id
- parent_id BIGINT REFERENCES #{SCHEMA_NAME}.postjobs ON DELETE CASCADE, -- parent process id
- full_id VARCHAR, -- full process id
- root_id BIGINT, -- root process id
-
- created_at timestamp NOT NULL DEFAULT (now() at time zone 'utc'), -- creation timestamp
- updated_at timestamp NOT NULL DEFAULT (now() at time zone 'utc'), -- update timestamp
-
- queue VARCHAR, -- queue name. (readonly)
- workflow VARCHAR NOT NULL, -- e.g. "MyJobModule" (readonly)
- workflow_method VARCHAR NOT NULL DEFAULT 'run', -- e.g. "run" (readonly)
- workflow_version VARCHAR NOT NULL DEFAULT '', -- e.g. "1.0"
- args JSONB, -- args
-
- -- process state ----------------------------------------------------
-
- status #{SCHEMA_NAME}.statuses DEFAULT 'ready',
- next_run_at timestamp DEFAULT (now() at time zone 'utc'), -- when possible to run next?
- timing_out_at timestamp, -- job times out after this timestamp
- failed_attempts INTEGER NOT NULL DEFAULT 0, -- failed how often?
- max_attempts INTEGER NOT NULL DEFAULT 1, -- maximum attempts before failing
-
- -- process result ---------------------------------------------------
-
- results JSONB,
- error VARCHAR,
- error_message VARCHAR,
- error_backtrace JSONB,
+ private

- -- custom fields
- workflow_status VARCHAR,
- tags JSONB
- );
+ def run_migration(file)
+ Postjob.logger.info "Postjob: migrating #{file}"

- -- [TODO] check indices
- CREATE INDEX IF NOT EXISTS postjobs_tags_idx
- ON #{SCHEMA_NAME}.postjobs USING GIN (tags jsonb_path_ops);
- CREATE INDEX IF NOT EXISTS postjobs_parent_id_idx
- ON #{SCHEMA_NAME}.postjobs(parent_id);
- SQL
+ case file
+ when /\.rb$/ then run_migration_ruby(file)
+ when /\.sql$/ then run_migration_sql(file)
+ end
+ end

- SQL.exec <<~SQL
- CREATE TABLE IF NOT EXISTS #{SCHEMA_NAME}.tokens (
- id BIGSERIAL PRIMARY KEY,
- postjob_id BIGINT REFERENCES #{SCHEMA_NAME}.postjobs ON DELETE CASCADE,
- token UUID NOT NULL,
- created_at timestamp NOT NULL DEFAULT (now() at time zone 'utc')
- );
+ def run_migration_ruby(file)
+ eval File.read(file)
+ end

- CREATE INDEX IF NOT EXISTS tokens_postjob_id_idx ON #{SCHEMA_NAME}.tokens(postjob_id);
- CREATE INDEX IF NOT EXISTS tokens_token_idx ON #{SCHEMA_NAME}.tokens(token);
- SQL
+ def run_migration_sql(file)
+ sql = File.read(file)
+ sql.gsub!(/\{([^\}]+)\}/) { |_| const_get($1) }
+ SQL.exec sql
  end
  end
  end
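run_migration_sql expands every {CONSTANT} placeholder in a .sql migration with the module constant of the same name before executing it. As a sketch, assuming SCHEMA_NAME resolves to "postjob" (and therefore TABLE_NAME to "postjob.postjobs"), the first function from 008a_childjobs.sql is executed as:

-- what run_migration_sql actually sends to Postgres after the gsub
CREATE OR REPLACE FUNCTION postjob.childjobs(v_parent_id BIGINT)
RETURNS SETOF postjob.postjobs AS $$
BEGIN
RETURN QUERY
SELECT postjob.postjobs.* FROM postjob.postjobs
WHERE parent_id=v_parent_id
ORDER BY id;
END;
$$ LANGUAGE plpgsql;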
data/lib/postjob/queue/encoder.rb CHANGED
@@ -17,7 +17,7 @@ module Postjob::Queue::Encoder
  extend self

  def encode(data)
- return if data.nil?
+ return nil if data.nil?

  verify_encodable!(data)
  JSON.generate(data)
data/lib/postjob/queue/notifications.rb CHANGED
@@ -3,14 +3,9 @@
  module Postjob::Queue::Notifications
  extend self

- SQL = ::Postjob::Queue::SQL
- TABLE_NAME = ::Postjob::Queue::TABLE_NAME
- CHANNEL = "postjob_notifications"
-
- def notify_listeners
- SQL.ask "NOTIFY #{CHANNEL}"
- end
-
+ SQL = ::Postjob::Queue::SQL
+ CHANNEL = "postjob_notifications"
+ SCHEMA_NAME = ::Postjob::Queue::SCHEMA_NAME
  MAX_WAIT_TIME = 120

  def wait_for_new_job
@@ -23,8 +18,8 @@ module Postjob::Queue::Notifications
  wait_time = time_to_next_job
  return if wait_time && wait_time <= 0

- wait_time ||= MAX_WAIT_TIME
- Postjob.logger.debug "postjob: waiting for notification for up to #{wait_time.inspect} seconds"
+ wait_time = MAX_WAIT_TIME if !wait_time || wait_time > MAX_WAIT_TIME
+ Postjob.logger.debug "postjob: waiting for notification for up to #{wait_time} seconds"
  Simple::SQL.wait_for_notify(wait_time)

  # flush notifications. It is possible that a huge number of notifications
@@ -49,16 +44,6 @@ module Postjob::Queue::Notifications
  # returns the maximum number of seconds to wait until the
  # next runnable or timeoutable job comes up.
  def time_to_next_job
- Simple::SQL.ask <<~SQL, Postjob::Registry.workflows_with_versions
- SELECT EXTRACT(EPOCH FROM (MIN(next_event_at) - (now() at time zone 'utc'))) FROM (
- SELECT MIN(timing_out_at) AS next_event_at
- FROM #{TABLE_NAME}
- WHERE status IN ('ready', 'sleep', 'err')
- UNION
- SELECT MIN(next_run_at) AS next_event_at
- FROM #{TABLE_NAME}
- WHERE status = 'ready' AND (workflow || workflow_version = ANY ($1))
- ) sq
- SQL
+ Simple::SQL.ask "SELECT * FROM #{SCHEMA_NAME}.time_to_next_job($1)", ::Postjob::Registry.workflows_with_versions
  end
  end
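The wait-time calculation that used to be an inline query now lives in a time_to_next_job SQL function added by one of the new migrations (its definition is not part of this hunk). A hedged sketch of the call the Ruby code issues, assuming the schema resolves to "postjob" and reusing the "workflow || workflow_version" element format from the old query:

-- returns the number of seconds until the next runnable or timing-out job, or NULL
SELECT * FROM postjob.time_to_next_job(ARRAY['MyWorkflow', 'MyWorkflow1.0']);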
data/lib/postjob/queue/search.rb CHANGED
@@ -6,14 +6,14 @@ end
  module Postjob::Queue::Search
  extend self

- def one(id, filter: {}, into: Hash)
+ def one(id, filter: {}, into: nil)
  query = query(page: 0, per: 1, filter: filter, id: id)
- Simple::SQL.ask(query, into: into)
+ Simple::SQL.record(query, into: into)
  end

- def all(page: 0, per: 100, filter: {}, into: Hash)
+ def all(page: 0, per: 100, filter: {}, into: nil)
  query = query(page: page, per: per, filter: filter)
- Simple::SQL.all(query, into: into)
+ Simple::SQL.records(query, into: into)
  end

  private
data/lib/postjob/queue.rb CHANGED
@@ -1,8 +1,4 @@
- # rubocop:disable Layout/IndentationWidth
- # rubocop:disable Style/UnneededInterpolation
- # rubocop:disable Metrics/ModuleLength
- # rubocop:disable Metrics/LineLength
- # rubocop:disable Lint/EndAlignment
+ # rubocop:disable Layout/AlignParameters
  # rubocop:disable Metrics/MethodLength
  # rubocop:disable Metrics/ParameterLists

@@ -25,12 +21,6 @@ require_relative "queue/search"
  module Postjob::Queue
  Job = ::Postjob::Job

- DEFAULT_OPTIONS = {
- version: "",
- queue: "q",
- max_attempts: 5
- }
-
  # enqueues a new job with the given arguments
  #
  # Parameters:
@@ -55,197 +45,71 @@ module Postjob::Queue

  workflow, workflow_method = parse_workflow(workflow)

- options.update(DEFAULT_OPTIONS) { |_k, v1, v2| v1 || v2 }
-
- SQL.transaction do
- queue, version, parent_id, tags, timeout, max_attempts =
- options.values_at :queue, :version, :parent_id, :tags, :timeout, :max_attempts
-
- args = Encoder.encode(args) if args
- tags = Encoder.encode(tags) if tags
-
- id = SQL.ask <<~SQL, queue, workflow, workflow_method, version, args, parent_id, tags, max_attempts
- INSERT INTO #{TABLE_NAME}(queue, workflow, workflow_method, workflow_version, args, parent_id, tags, max_attempts)
- VALUES($1, $2, $3, $4, $5, $6, $7, $8)
- RETURNING id
- SQL
-
- if timeout
- SQL.ask <<~SQL, id, timeout
- UPDATE #{TABLE_NAME}
- SET timing_out_at = (now() at time zone 'utc') + $2 * interval '1 second'
- WHERE id=$1
- SQL
- end
-
- root_id, parent_full_id = if parent_id
- SQL.ask "SELECT root_id, full_id FROM #{TABLE_NAME} WHERE id=$1", parent_id
- end
-
- root_id ||= id
- full_id = parent_full_id ? "#{parent_full_id}.#{id}" : "#{id}"
-
- SQL.ask "UPDATE #{TABLE_NAME} SET full_id=$2, root_id=$3 WHERE id=$1",
- id, full_id, root_id
-
- Notifications.notify_listeners
-
- SQL.ask "SELECT * FROM #{TABLE_NAME} WHERE id=$1", id, into: Job
- end
+ # The use of a `SELECT * FROM function()` here is due to
+ #
+ # a) a limitation in Simple::SQL which would not be able to unpack a
+ # "SELECT function()" usefully when the return value is a record;
+ # b) and/or my inability to write better SQL functions;
+ SQL.record "SELECT * FROM #{SCHEMA_NAME}.enqueue($1, $2, $3, $4, $5, $6, $7, $8, $9)",
+ options[:queue],
+ workflow,
+ workflow_method,
+ options[:version],
+ Encoder.encode(args),
+ options[:parent_id],
+ Encoder.encode(options[:tags]),
+ options[:max_attempts],
+ options[:timeout],
+ into: Job
  end

  def set_job_result(job, value, version:)
- update_job(job, version: version)
- results = Encoder.encode([value]) unless value.nil?
-
- SQL.ask <<~SQL, job.id, results
- UPDATE #{TABLE_NAME}
- SET results=$2, status='ok', next_run_at=NULL, error=NULL, error_message=NULL, error_backtrace=NULL
- WHERE id=$1
- SQL
-
- wakeup(job.parent_id)
+ value = Encoder.encode([value]) unless value.nil?
+ SQL.ask "SELECT #{SCHEMA_NAME}.set_job_result($1, $2, $3)", job.id, value, version
  end

  def set_job_pending(job, version:)
- update_job(job, version: version)
- SQL.ask <<~SQL, job.id
- UPDATE #{TABLE_NAME}
- SET status='sleep', next_run_at=NULL
- WHERE id=$1
- SQL
+ SQL.ask "SELECT #{SCHEMA_NAME}.set_job_pending($1, $2)", job.id, version
  end

- private
-
- def remaining_attempts(job)
- SQL.ask <<~SQL, job.id
- SELECT max_attempts - failed_attempts
- FROM #{TABLE_NAME}
- WHERE id=$1
- SQL
- end
-
- def next_status_and_next_run_of_failed_job(job, status)
- # If this is a recoverable error and if we have another run possible we'll
- # set next_run_at, and the status to "sleep", otherwise next_run_at will be
- # NULL and the status would be "failed"
- #
- # To check if we have another run we check (max_attempts - failed_attempts).
- #
- # This is only necessary with a status of :err. Note that we need to
- # subtract 1, since this check runs *after* the current run was done,
- # but before it was written to the database.
- if status == :err && remaining_attempts(job) > 1
- [ "err", next_run_at_fragment ]
- elsif status == :timeout
- [ "timeout", "NULL" ]
- else
- [ "failed", "NULL" ]
- end
- end
-
- public
-
  def set_job_error(job, error, error_message, error_backtrace = nil, status:, version:)
- update_job(job, version: version)
-
- new_status, next_run_at = next_status_and_next_run_of_failed_job job, status
-
- unless error_backtrace.nil?
- error_backtrace = error_backtrace.map { |path| make_relative_path(path) }
- error_backtrace = Encoder.encode(error_backtrace)
- end
-
- SQL.ask <<~SQL, job.id, new_status, error, error_message, error_backtrace
- UPDATE #{TABLE_NAME}
- SET
- status=$2, error=$3, error_message=$4, error_backtrace=$5,
- failed_attempts=failed_attempts+1, next_run_at=#{next_run_at}
- WHERE id=$1
- SQL
-
- wakeup(job.parent_id)
- end
-
- private
-
- def make_relative_path(path)
- @here ||= "#{Dir.getwd}/"
- path.start_with?(@here) ? path[@here.length..-1] : path
- end
-
- def update_job(job, version:)
- return unless version
-
- SQL.ask <<~SQL, job.id, version
- UPDATE #{TABLE_NAME}
- SET workflow_version=$2, updated_at=(now() at time zone 'utc')
- WHERE id=$1
- SQL
- end
-
- #
- # The timeout until a job can be run next is calculated by running the
- # +next_run_at_fragment+ sql in the database.
- #
- def next_run_at_fragment
- # The basetime to use with the NEXT_RUN_AT_FRAGMENT below.
- next_run_at_basetime = Postjob.fast_mode ? 0.01 : 10
- "(now() at time zone 'utc') + #{next_run_at_basetime} * pow(1.5, failed_attempts) * interval '1 second'"
- end
-
- def wakeup(id)
- return unless id
+ expect! status => [ :failed, :err, :timeout ]

- SQL.ask <<~SQL, id
- UPDATE #{TABLE_NAME}
- SET status='ready', next_run_at=(now() at time zone 'utc'), updated_at=(now() at time zone 'utc')
- WHERE id=$1 AND status='sleep'
- SQL
-
- Notifications.notify_listeners
+ SQL.ask "SELECT #{SCHEMA_NAME}.set_job_error($1, $2, $3, $4, $5, $6, $7)",
+ job.id, error, error_message, Encoder.encode(error_backtrace), status, version, Postjob.fast_mode
  end

- public
-
  def childjobs(parent)
  expect! parent => Job
-
- SQL.all <<~SQL, parent.id, into: Job
- SELECT * FROM #{TABLE_NAME}
- WHERE parent_id=$1
- ORDER BY id
- SQL
+ SQL.records "SELECT * FROM #{SCHEMA_NAME}.childjobs($1)", parent.id, into: Job
  end

  def next_unresolved_childjob(parent)
  expect! parent => Job
-
- SQL.ask <<~SQL, parent.id, into: Job
- SELECT * FROM #{TABLE_NAME}
- WHERE parent_id=$1 AND status NOT IN ('ok', 'failed')
- ORDER BY next_run_at
- LIMIT 1
- SQL
+ SQL.records "SELECT * FROM #{SCHEMA_NAME}.next_unresolved_childjob($1)", parent.id, into: Job
  end

- def find_or_create_childjob(parent, workflow, args, timeout:, max_attempts:)
+ def find_or_create_childjob(parent, workflow, args, timeout:, max_attempts:, queue: nil)
  expect! parent => Job, workflow => String, args => Array

  workflow, workflow_method = parse_workflow(workflow)

- job = SQL.ask <<~SQL, parent.id, workflow, workflow_method, Encoder.encode(args), into: Job
- SELECT * FROM #{TABLE_NAME}
- WHERE parent_id=$1
- AND workflow=$2
- AND workflow_method=$3
- AND args=$4
- SQL
-
- return job if job
-
- enqueue_job("#{workflow}.#{workflow_method}", *args, queue: parent.queue, parent_id: parent.id, timeout: timeout, max_attempts: max_attempts, tags: parent.tags)
+ # The use of a `SELECT * FROM function()` here is due to
+ #
+ # a) a limitation in Simple::SQL which would not be able to unpack a
+ # "SELECT function()" usefully when the return value is a record;
+ # b) and/or my inability to write better SQL functions;
+ return SQL.record "SELECT * FROM #{SCHEMA_NAME}.find_or_create_childjob($1, $2, $3, $4, $5, $6, $7, $8, $9)",
+ queue,
+ workflow,
+ workflow_method,
+ nil, # version
+ Encoder.encode(args),
+ parent.id,
+ nil, # tags will be read from parent
+ max_attempts,
+ timeout,
+ into: Job
  end

  def set_workflow_status(job, status)
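The Simple::SQL limitation mentioned in the comments above comes down to how Postgres presents set-returning functions. A short illustration, again assuming the schema resolves to "postjob" and using a hypothetical job id:

-- one column named after the function, each row a single composite value;
-- SQL.record cannot unpack this into a Job
SELECT postjob.childjobs(42);

-- the same rows expanded into ordinary columns (id, parent_id, status, ...),
-- which SQL.record/SQL.records with "into: Job" can map directly
SELECT * FROM postjob.childjobs(42);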
@@ -275,42 +139,16 @@ module Postjob::Queue

  public

- def checkout_runnable
- sql = <<~SQL
- SELECT
- *,
- timing_out_at <= (now() at time zone 'utc') AS timed_out
- FROM #{TABLE_NAME}
- WHERE
- (next_run_at <= (now() at time zone 'utc') AND status IN ('ready', 'err') AND workflow || workflow_version = ANY ($1))
- OR
- (timing_out_at <= (now() at time zone 'utc') AND status IN ('ready', 'err', 'sleep'))
- ORDER BY (LEAST(next_run_at, timing_out_at))
- FOR UPDATE SKIP LOCKED
- LIMIT 1
- SQL
-
- SQL.transaction do
- job = SQL.ask sql, Postjob::Registry.workflows_with_versions, into: Job
- yield job if job
- job
- end
+ def checkout(workflows_with_versions)
+ SQL.record "SELECT * FROM #{SCHEMA_NAME}.checkout($1, $2)",
+ workflows_with_versions, Postjob.fast_mode, into: Job
  end

  def find_or_create_token(job)
- token = SQL.ask "SELECT token FROM postjob.tokens WHERE postjob_id=$1", job.id
- return token if token
-
- token = SecureRandom.uuid
- SQL.ask "INSERT INTO postjob.tokens(postjob_id, token) VALUES($1, $2)", job.id, token
- token
+ SQL.ask "SELECT #{SCHEMA_NAME}.find_or_create_token($1)", job.id
  end

  def find_job_by_token(token)
- SQL.ask <<~SQL, token, into: Job
- SELECT postjob.postjobs.* FROM postjob.postjobs
- INNER JOIN postjob.tokens ON postjob.tokens.postjob_id=postjob.postjobs.id
- WHERE postjob.tokens.token=$1
- SQL
+ SQL.record "SELECT * FROM #{SCHEMA_NAME}.postjobs_by_token($1)", token, into: Job
  end
  end
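checkout_runnable's inline FOR UPDATE SKIP LOCKED query now lives in a checkout() SQL function, presumably the one added by 008_checkout_runnable.sql; its definition is not part of this listing. The sketch below only mirrors the Ruby call site: an array of "workflow || workflow_version" strings plus the fast_mode flag, assuming the schema resolves to "postjob".

-- returns the next runnable (or timing-out) job row, locked for this worker
SELECT * FROM postjob.checkout(ARRAY['MyWorkflow', 'MyWorkflow1.0'], false);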