postjob 0.4.5 → 0.5.0

Files changed (46)
  1. checksums.yaml +4 -4
  2. data/lib/postjob.rb +22 -13
  3. data/lib/postjob/cli/events.rb +60 -0
  4. data/lib/postjob/cli/heartbeat.rb +55 -0
  5. data/lib/postjob/cli/hosts.rb +67 -0
  6. data/lib/postjob/cli/ps.rb +1 -13
  7. data/lib/postjob/cli/sessions.rb +83 -0
  8. data/lib/postjob/job.rb +4 -15
  9. data/lib/postjob/migrations/003_postjobs.sql +10 -8
  10. data/lib/postjob/migrations/003b_processing_columns.sql +8 -8
  11. data/lib/postjob/migrations/005_helpers.sql +3 -1
  12. data/lib/postjob/migrations/006_enqueue.sql +3 -0
  13. data/lib/postjob/migrations/006a_processing.sql +6 -26
  14. data/lib/postjob/migrations/007_job_results.sql +32 -13
  15. data/lib/postjob/migrations/008_checkout_runnable.sql +15 -21
  16. data/lib/postjob/migrations/008a_childjobs.sql +13 -0
  17. data/lib/postjob/migrations/010_settings.sql +18 -3
  18. data/lib/postjob/migrations/011_null_uuid.sql +7 -0
  19. data/lib/postjob/migrations/012_hosts.sql +42 -0
  20. data/lib/postjob/migrations/013_worker_sessions.sql +44 -0
  21. data/lib/postjob/migrations/014_postjob_session_id.sql +17 -0
  22. data/lib/postjob/migrations/015_events.sql +76 -0
  23. data/lib/postjob/migrations/016_sessions_functions.sql +16 -0
  24. data/lib/postjob/migrations/017_zombie_check.sql +58 -0
  25. data/lib/postjob/migrations/018_heartbeat.sql +28 -0
  26. data/lib/postjob/migrations/019_heartbeat_indices.sql +5 -0
  27. data/lib/postjob/queue.rb +41 -27
  28. data/lib/postjob/queue/notifications.rb +5 -4
  29. data/lib/postjob/queue/search.rb +2 -0
  30. data/lib/postjob/queue/settings.rb +11 -1
  31. data/lib/postjob/record.rb +17 -0
  32. data/lib/postjob/runner.rb +9 -2
  33. data/lib/postjob/worker_session.rb +76 -0
  34. data/lib/postjob/workflow.rb +0 -4
  35. data/lib/tools/atomic_store.rb +17 -0
  36. data/lib/tools/heartbeat.rb +151 -0
  37. data/lib/tools/history.rb +25 -0
  38. data/spec/postjob/events/heartbeat_event_spec.rb +85 -0
  39. data/spec/postjob/events/job_event_spec.rb +80 -0
  40. data/spec/postjob/job_control/max_attempts_spec.rb +0 -2
  41. data/spec/postjob/queue/search_spec.rb +0 -14
  42. data/spec/postjob/worker_session_spec.rb +41 -0
  43. data/spec/spec_helper.rb +9 -0
  44. data/spec/support/test_helper.rb +11 -1
  45. metadata +43 -3
  46. data/spec/postjob/job_control/workflow_status_spec.rb +0 -52
@@ -0,0 +1,44 @@
+ -- worker_sessions ------------------------------------------------------------
+
+ -- The worker_sessions table records available "worker sessions". The
+ -- following information is generated/recorded:
+ --
+ -- - id: a UUID, which is unique per worker_session.
+ -- - host_id: a UUID, which is unique across all workers on a machine.
+ -- - client_socket: contains "host:port" of a connection
+ -- - workflows: an array of workflows with and without version numbers.
+ --   This describes which workflows can be checked out in this
+ --   session.
+ --
+ -- The host_id value will be used for sticky workflows: A workflow with
+ -- `sticky: true` will only be checked out to workers with the same host_id.
+ -- This is useful for workflows that use temporary files to share state
+ -- between jobs.
+ --
+ -- The +id+ and +host_id+ UUIDs are generated on the queue. The client is
+ -- supposed to get a worker_session via +worker_session_start(id, host_id, workflows)+
+ --
+
+ CREATE TABLE IF NOT EXISTS {SCHEMA_NAME}.worker_sessions (
+   id UUID PRIMARY KEY DEFAULT (gen_random_uuid()),                        -- UUID identifying a worker **process**
+   host_id UUID NOT NULL REFERENCES {SCHEMA_NAME}.hosts ON DELETE CASCADE, -- UUID identifying a worker **host**
+   client_socket VARCHAR,                                                  -- host:port of connection (from pg_stat_activity)
+   workflows VARCHAR[] NOT NULL,                                           -- array of workflow versions available on that worker
+   attributes JSONB NOT NULL DEFAULT '{}'::JSONB,
+   created_at timestamp NOT NULL DEFAULT (now() at time zone 'utc')
+ );
+
+ CREATE INDEX IF NOT EXISTS worker_sessions_attributes_idx
+   ON {SCHEMA_NAME}.worker_sessions USING GIN (attributes jsonb_path_ops);
+
+ -- worker_session_start: starts or reuses a worker_session ----------------------------------
+
+ DO $$
+ DECLARE
+   null_uuid UUID := {SCHEMA_NAME}._null_uuid();
+ BEGIN
+   IF NOT EXISTS (SELECT 1 FROM {SCHEMA_NAME}.worker_sessions WHERE id = null_uuid) THEN
+     INSERT INTO {SCHEMA_NAME}.worker_sessions(id, host_id, workflows) VALUES(null_uuid, null_uuid, '{}');
+   END IF;
+ END;
+ $$ LANGUAGE plpgsql;
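For orientation only (not part of the diff): once this migration has run, a registered session could be inspected with a query like the one below. It assumes the {SCHEMA_NAME} placeholder expands to postjob and that a worker has already registered; the workflow name is made up.

    -- List sessions that advertise a given workflow, newest first.
    SELECT id, host_id, client_socket, workflows
      FROM postjob.worker_sessions
     WHERE workflows @> ARRAY['MyWorkflow']::varchar[]
     ORDER BY created_at DESC;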
@@ -0,0 +1,17 @@
+ DO $$
+ BEGIN
+   ALTER TABLE {SCHEMA_NAME}.postjobs ADD COLUMN last_worker_session_id UUID REFERENCES {SCHEMA_NAME}.worker_sessions ON DELETE CASCADE;
+   UPDATE {SCHEMA_NAME}.postjobs SET last_worker_session_id={SCHEMA_NAME}._null_uuid();
+   ALTER TABLE {SCHEMA_NAME}.postjobs ALTER COLUMN last_worker_session_id SET NOT NULL;
+ EXCEPTION
+   WHEN duplicate_column THEN RAISE DEBUG 'column {SCHEMA_NAME}.postjobs.last_worker_session_id already exists';
+ END;
+ $$;
+
+ DO $$
+ BEGIN
+   ALTER TABLE {SCHEMA_NAME}.postjobs DROP COLUMN workflow_status;
+ EXCEPTION
+   WHEN undefined_column THEN RAISE DEBUG 'column {SCHEMA_NAME}.postjobs.workflow_status already dropped';
+ END;
+ $$;
@@ -0,0 +1,76 @@
+ CREATE TABLE IF NOT EXISTS {SCHEMA_NAME}.events (
+   id BIGSERIAL PRIMARY KEY,
+   postjob_id BIGINT REFERENCES {SCHEMA_NAME}.postjobs ON DELETE SET NULL,
+   host_id UUID NOT NULL REFERENCES {SCHEMA_NAME}.hosts ON DELETE CASCADE,
+   worker_session_id UUID REFERENCES {SCHEMA_NAME}.worker_sessions ON DELETE SET NULL,
+   name VARCHAR NOT NULL,
+   attributes JSONB NOT NULL DEFAULT '{}'::JSONB,
+   created_at timestamp NOT NULL DEFAULT (now() at time zone 'utc')
+ );
+
+ -- We do not create an index on events.attributes just yet. events has a high
+ -- write rate, and until we know for sure that we'll need this index we
+ -- better not have it.
+ --
+ -- CREATE INDEX IF NOT EXISTS events_attributes_idx
+ --   ON {SCHEMA_NAME}.events USING GIN (attributes jsonb_path_ops);
+
+ CREATE INDEX IF NOT EXISTS events_postjob_id_idx
+   ON {SCHEMA_NAME}.events(postjob_id);
+
+ --- define triggers to automatically create events -----------------------------
+
+ --
+ -- An event is created whenever a job's status changes.
+ --
+
+ CREATE OR REPLACE FUNCTION {SCHEMA_NAME}._create_postjob_event() RETURNS TRIGGER AS $$
+ BEGIN
+   IF TG_OP = 'UPDATE' AND (OLD.status = NEW.status) THEN
+     RETURN NEW;
+   END IF;
+
+   INSERT INTO {SCHEMA_NAME}.events (postjob_id, name, host_id, worker_session_id)
+   VALUES (
+     NEW.id,
+     NEW.status,
+     (SELECT host_id FROM {SCHEMA_NAME}.worker_sessions WHERE id=NEW.last_worker_session_id),
+     NEW.last_worker_session_id
+   );
+   RETURN NEW;
+ END;
+ $$ LANGUAGE plpgsql;
+
+ BEGIN;
+   DROP TRIGGER IF EXISTS _create_postjob_event ON {SCHEMA_NAME}.postjobs;
+
+   CREATE TRIGGER _create_postjob_event AFTER INSERT OR UPDATE
+     ON {SCHEMA_NAME}.postjobs
+     FOR EACH ROW
+     EXECUTE PROCEDURE {SCHEMA_NAME}._create_postjob_event();
+ COMMIT;
+
+ --
+ -- An event is also created whenever a worker sends in a heartbeat, see
+ -- 013_worker_sessions_functions.sql for details.
+ --
+
+ --- create an initial set of events -------------------------------------------
+ --
+ -- This is a best-effort procedure; we cannot generate data that isn't here.
+ --
+
+ DO $$
+ DECLARE
+   null_uuid UUID := {SCHEMA_NAME}._null_uuid();
+ BEGIN
+   IF NOT EXISTS (SELECT id FROM {SCHEMA_NAME}.events LIMIT 1) THEN
+     INSERT INTO {SCHEMA_NAME}.events (postjob_id, name, created_at, host_id, worker_session_id)
+     SELECT * FROM (
+       SELECT id, 'ready', created_at AS created_at, null_uuid, null_uuid FROM {SCHEMA_NAME}.postjobs
+       UNION
+       SELECT id, status::varchar, updated_at AS created_at, null_uuid, null_uuid FROM {SCHEMA_NAME}.postjobs WHERE status != 'ready'
+     ) sq ORDER BY created_at;
+   END IF;
+ END
+ $$;
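For illustration (not part of the diff): with the trigger above in place, a job's status history can be read back from the events table. This assumes the postjob schema; the job id 42 is a placeholder.

    -- Status history of a single job, oldest first.
    SELECT name, worker_session_id, created_at
      FROM postjob.events
     WHERE postjob_id = 42
     ORDER BY created_at;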
@@ -0,0 +1,16 @@
+ -- worker_session_start: starts or reuses a worker_session ----------------------------------
+
+ CREATE OR REPLACE FUNCTION {SCHEMA_NAME}.worker_session_start(p_host_id UUID, p_workflows VARCHAR[])
+   RETURNS SETOF {SCHEMA_NAME}.worker_sessions AS $$
+ DECLARE
+   v_worker_session_id UUID;
+   v_client_socket VARCHAR;
+ BEGIN
+   SELECT client_addr || ':' || client_port INTO v_client_socket FROM pg_stat_activity WHERE pid = pg_backend_pid();
+
+   INSERT INTO {SCHEMA_NAME}.worker_sessions (host_id, client_socket, workflows)
+     VALUES (p_host_id, v_client_socket, p_workflows) RETURNING id INTO v_worker_session_id;
+
+   RETURN QUERY SELECT * FROM {SCHEMA_NAME}.worker_sessions WHERE id = v_worker_session_id;
+ END;
+ $$ LANGUAGE plpgsql;
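A sketch of calling the new function by hand (not part of the diff), assuming the postjob schema; the host UUID is a placeholder and would normally come from postjob.host_register, as the Ruby wrapper further down shows.

    -- Open a worker session for two workflows; returns the full session row.
    SELECT *
      FROM postjob.worker_session_start(
             '00000000-0000-0000-0000-000000000001'::uuid,       -- host_id (placeholder)
             ARRAY['MyWorkflow', 'OtherWorkflow 1.0']::varchar[]  -- advertised workflows (examples)
           );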
@@ -0,0 +1,58 @@
+ -- zombie checks --------------------------------------------------------------
+
+ -- This method runs a zombie check. Its result is written into the database as
+ -- a 'zombie' event. A zombie check should only happen once per minute.
+ --
+ -- A zombie is a job which runs on a host that hasn't checked in for a while
+ -- (currently 5 minutes).
+
+ CREATE OR REPLACE FUNCTION {SCHEMA_NAME}._zombie_check(p_fast_mode BOOLEAN) RETURNS int AS $$
+ DECLARE
+   zombie_id bigint;
+   _one int;
+   zombie_count int;
+   zombie_threshold interval := interval '5 minutes';
+ BEGIN
+   zombie_count := 0;
+   FOR zombie_id, _one IN
+     SELECT jobs.id, 1
+     FROM {SCHEMA_NAME}.postjobs jobs
+     LEFT JOIN {SCHEMA_NAME}.worker_sessions sessions ON jobs.last_worker_session_id=sessions.id
+     LEFT JOIN
+     (
+       SELECT host_id, MAX(created_at) AS created_at
+       FROM {SCHEMA_NAME}.events
+       WHERE name = 'heartbeat'
+       GROUP BY host_id
+     ) heartbeat ON sessions.host_id = heartbeat.host_id
+     WHERE
+       jobs.status IN ('processing')
+       AND (
+         heartbeat.created_at IS NULL OR
+         heartbeat.created_at < ((now() at time zone 'utc') - zombie_threshold)
+       )
+   LOOP
+     PERFORM {SCHEMA_NAME}._set_job_zombie(zombie_id, p_fast_mode);
+     zombie_count := zombie_count + 1;
+   END LOOP;
+
+   RETURN zombie_count;
+ END;
+ $$ LANGUAGE plpgsql;
+
+ CREATE OR REPLACE FUNCTION {SCHEMA_NAME}.zombie_check(p_fast_mode BOOLEAN)
+   RETURNS VOID AS $$
+ DECLARE
+   zombie_check_interval interval := '1 minute';
+   p_zombie_count int;
+ BEGIN
+
+   -- Once per minute, run a zombie check. It is recorded in the database as a zombie
+   -- event, which carries the zombie count in its attributes.
+   IF NOT EXISTS (SELECT 1 FROM {SCHEMA_NAME}.events WHERE name='zombie' AND created_at > (now() at time zone 'utc') - zombie_check_interval) THEN
+     p_zombie_count := {SCHEMA_NAME}._zombie_check(p_fast_mode);
+     INSERT INTO {SCHEMA_NAME}.events(name, host_id, attributes)
+       VALUES('zombie', {SCHEMA_NAME}._null_uuid(), jsonb_build_object('zombie_count', p_zombie_count));
+   END IF;
+ END;
+ $$ LANGUAGE plpgsql;
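A sketch of exercising the check by hand (not part of the diff), assuming the postjob schema; false mirrors the fast_mode flag the worker passes through.

    -- Run the rate-limited zombie check, then read the most recent result.
    SELECT postjob.zombie_check(false);

    SELECT attributes->>'zombie_count' AS zombie_count, created_at
      FROM postjob.events
     WHERE name = 'zombie'
     ORDER BY created_at DESC
     LIMIT 1;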
@@ -0,0 +1,28 @@
+ -- host_heartbeat: sends a heartbeat ------------------------------------------
+
+ -- This method is called regularly by workers to pass in a host's heartbeat
+ -- metrics. The database also uses this to regularly run a zombie check.
+
+ CREATE OR REPLACE FUNCTION {SCHEMA_NAME}.host_heartbeat(
+   p_host_id UUID,
+   p_metrics JSONB,
+   p_fast_mode BOOLEAN)
+ RETURNS VOID AS $$
+ DECLARE
+   p_latest_zombie_event_id bigint;
+   p_zombie_count int;
+ BEGIN
+   -- We perform the zombie_check first. This should not make any difference,
+   -- functionality-wise, but helps us test this.
+   PERFORM {SCHEMA_NAME}.zombie_check(p_fast_mode);
+
+   IF NOT EXISTS (
+     SELECT 1 FROM {SCHEMA_NAME}.events
+     WHERE (name,host_id)=('heartbeat', p_host_id) AND created_at > (now() at time zone 'utc') - interval '1 minute'
+   )
+   THEN
+     INSERT INTO {SCHEMA_NAME}.events(name, host_id, attributes)
+       VALUES ('heartbeat', p_host_id, p_metrics);
+   END IF;
+ END;
+ $$ LANGUAGE plpgsql;
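For illustration (not part of the diff), a heartbeat could be sent manually as below. Schema and host UUID are placeholders, and the metrics payload is free-form JSONB, presumably collected by the new tools/heartbeat.rb.

    -- Record a heartbeat; at most one heartbeat event per host and minute is stored.
    SELECT postjob.host_heartbeat(
             '00000000-0000-0000-0000-000000000001'::uuid,      -- host_id (placeholder)
             '{"load_avg": 0.25, "free_mem_mb": 1024}'::jsonb,  -- metrics (example values)
             false                                              -- fast_mode
           );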
@@ -0,0 +1,5 @@
+ -- These indices might be useful for zombie detection.
+
+ CREATE INDEX IF NOT EXISTS events_created_at_idx ON {SCHEMA_NAME}.events(created_at);
+ CREATE INDEX IF NOT EXISTS events_host_id_idx ON {SCHEMA_NAME}.events(host_id);
+ CREATE INDEX IF NOT EXISTS events_worker_session_id_idx ON {SCHEMA_NAME}.events(worker_session_id);
@@ -31,7 +31,7 @@ module Postjob::Queue
  # - parent_id - the id of the parent job, if any
  # - tags - # a Hash[String => String]
  #
- def enqueue_job(workflow, *args, options)
+ def enqueue_job(worker_session_id, workflow, *args, options)
  expect! workflow => String
  expect! options => {
  queue: [String, nil],
@@ -49,7 +49,8 @@
  # a) a limitation in Simple::SQL which would not be able to unpack a
  # "SELECT function()" usefully when the return value is a record;
  # b) and/or my inability to write better SQL functions;
- SQL.ask "SELECT * FROM #{SCHEMA_NAME}.enqueue($1, $2, $3, $4, $5, $6, $7, $8, $9)",
+ SQL.ask "SELECT * FROM #{SCHEMA_NAME}.enqueue($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
+ worker_session_id,
  options[:queue],
  workflow,
  workflow_method,
@@ -62,20 +63,20 @@
  into: Job
  end
 
- def set_job_result(job, value, version:)
+ def set_job_result(worker_session_id, job, value, version:)
  value = Encoder.encode([value]) unless value.nil?
- SQL.ask "SELECT #{SCHEMA_NAME}.set_job_result($1, $2, $3)", job.id, value, version
+ SQL.ask "SELECT #{SCHEMA_NAME}.set_job_result($1, $2, $3, $4)", worker_session_id, job.id, value, version
  end
 
- def set_job_pending(job, version:)
- SQL.ask "SELECT #{SCHEMA_NAME}.set_job_pending($1, $2)", job.id, version
+ def set_job_pending(worker_session_id, job, version:)
+ SQL.ask "SELECT #{SCHEMA_NAME}.set_job_pending($1, $2, $3)", worker_session_id, job.id, version
  end
 
- def set_job_error(job, error, error_message, error_backtrace = nil, status:, version:)
+ def set_job_error(worker_session_id, job, error, error_message, error_backtrace = nil, status:, version:)
  expect! status => [ :failed, :err, :timeout ]
 
- SQL.ask "SELECT #{SCHEMA_NAME}.set_job_error($1, $2, $3, $4, $5, $6, $7)",
- job.id, error, error_message, Encoder.encode(error_backtrace), status, version, Postjob.fast_mode
+ SQL.ask "SELECT #{SCHEMA_NAME}.set_job_error($1, $2, $3, $4, $5, $6, $7, $8)",
+ worker_session_id, job.id, error, error_message, Encoder.encode(error_backtrace), status, version, Postjob.fast_mode
  end
 
  def childjobs(parent)
@@ -88,7 +89,7 @@
  SQL.ask "SELECT COUNT(*) FROM #{SCHEMA_NAME}.unresolved_childjobs($1)", parent.id
  end
 
- def find_or_create_childjob(parent, workflow, args, timeout:, max_attempts:, queue: nil)
+ def find_or_create_childjob(worker_session_id, parent, workflow, args, timeout:, max_attempts:, queue: nil)
  expect! parent => Job
  expect! workflow => String
  expect! args => Array
@@ -105,7 +106,8 @@
  # a) a limitation in Simple::SQL which would not be able to unpack a
  # "SELECT function()" usefully when the return value is a record;
  # b) and/or my inability to write better SQL functions;
- return SQL.ask "SELECT * FROM #{SCHEMA_NAME}.find_or_create_childjob($1, $2, $3, $4, $5, $6, $7, $8, $9)",
+ return SQL.ask "SELECT * FROM #{SCHEMA_NAME}.find_or_create_childjob($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
+ worker_session_id,
  queue,
  workflow,
  workflow_method,
@@ -118,19 +120,6 @@
  into: Job
  end
 
- def set_workflow_status(job, status)
- # [TODO] Try to reduce the number of writes.
- #
- # The current implementation updates a status potentially multiple times
- # within a single run of a job (all within the same transaction and therefore
- # invisible to the outside).
- SQL.ask <<~SQL, job.id, status
- UPDATE #{SCHEMA_NAME}.postjobs
- SET workflow_status=$2
- WHERE id=$1
- SQL
- end
-
  private
 
  def parse_workflow(workflow)
@@ -145,9 +134,11 @@
 
 
  public
- def checkout(workflows_with_versions)
- SQL.ask "SELECT * FROM #{SCHEMA_NAME}.checkout($1, $2)",
- workflows_with_versions, Postjob.fast_mode, into: Job
+ UUID_REGEXP = /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i
+
+ def checkout(worker_session_id)
+ expect! worker_session_id => UUID_REGEXP
+ SQL.ask "SELECT * FROM #{SCHEMA_NAME}.checkout($1, $2)", worker_session_id, Postjob.fast_mode, into: Job
  end
 
  def find_or_create_token(job)
@@ -157,4 +148,27 @@
  def find_job_by_token(token)
  SQL.ask "SELECT * FROM #{SCHEMA_NAME}.postjobs_by_token($1)", token, into: Job
  end
+
+ # -- registers a host -------------------------------------------------------
+
+ # returns the host id
+ def host_register(attributes)
+ expect! attributes => [ nil, Hash ]
+ Simple::SQL.ask "SELECT postjob.host_register($1)", JSON.generate(attributes)
+ end
+
+ # starts a session
+ WorkerSession = ::Postjob::WorkerSession
+
+ def start_worker_session(workflows_with_versions, host_id:)
+ expect! host_id => UUID_REGEXP
+
+ Simple::SQL.ask "SELECT * FROM postjob.worker_session_start($1, $2)", host_id, workflows_with_versions, into: ::Postjob::WorkerSession
+ end
+
+ # sends in a heartbeat
+ def host_heartbeat(host_id, measurement)
+ Simple::SQL.ask "SELECT postjob.host_heartbeat($1::uuid, $2::jsonb, $3)",
+ host_id, JSON.generate(measurement), ::Postjob.fast_mode
+ end
  end
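Pieced together from the wrappers above, the worker startup sequence issues roughly the following SQL. This is a sketch, not part of the diff: it assumes the postjob schema, that host_register returns the host UUID (which is how the Ruby wrapper treats its result), and made-up attribute and workflow names.

    DO $$
    DECLARE
      v_host_id    uuid;
      v_session_id uuid;
    BEGIN
      -- 1. Register this host (attributes passed as JSON; return type assumed to be uuid).
      v_host_id := postjob.host_register('{"hostname": "worker-1"}');

      -- 2. Open a worker session advertising the workflows this worker can run.
      SELECT id INTO v_session_id
        FROM postjob.worker_session_start(v_host_id, ARRAY['MyWorkflow']::varchar[]);

      -- 3. Check out a runnable job against that session (second argument is fast_mode).
      PERFORM * FROM postjob.checkout(v_session_id, false);
    END;
    $$;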
@@ -8,14 +8,14 @@ module Postjob::Queue::Notifications
  SCHEMA_NAME = ::Postjob::Queue::SCHEMA_NAME
  MAX_WAIT_TIME = 120
 
- def wait_for_new_job
+ def wait_for_new_job(worker_session_id)
  started_at = Time.now
 
  start_listening
 
  # Determine when the next job is up. If we don't have a next job within MAX_WAIT_TIME
  # we wake up regardless.
- wait_time = time_to_next_job
+ wait_time = time_to_next_job(worker_session_id)
  return if wait_time && wait_time <= 0
 
  wait_time = MAX_WAIT_TIME if !wait_time || wait_time > MAX_WAIT_TIME
@@ -43,7 +43,8 @@
 
  # returns the maximum number of seconds to wait until the
  # next runnable or timeoutable job comes up.
- def time_to_next_job
- Simple::SQL.ask "SELECT * FROM #{SCHEMA_NAME}.time_to_next_job($1)", ::Postjob::Registry.workflows_with_versions
+ def time_to_next_job(worker_session_id)
+ expect! worker_session_id => /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i
+ Simple::SQL.ask "SELECT * FROM #{SCHEMA_NAME}.time_to_next_job($1)", worker_session_id
  end
  end
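The wrapped SQL can also be tried directly (not part of the diff; assumes the postjob schema and that at least one worker session exists, which the migration seeds):

    -- Seconds until the next runnable or timeoutable job for the newest session.
    SELECT postjob.time_to_next_job(id) AS seconds_to_next_job
      FROM postjob.worker_sessions
     ORDER BY created_at DESC
     LIMIT 1;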
@@ -14,6 +14,8 @@ module Postjob::Queue
  end
 
  # Builds a search scope (see Simple::SQL::Scope) for the passed in filter criteria.
+ #
+ # Note that the search scope is unsorted.
  def search(filter = {})
  expect! filter => Hash
 
@@ -8,6 +8,16 @@ module Postjob::Queue
  def version
  return "0.3.*" unless settings?
 
- SQL.ask("SELECT value FROM postjob.settings WHERE name=$1", "version") || "unknown"
+ sql = <<~SQL
+ SELECT 1 from pg_proc
+ left join pg_namespace on pg_proc.pronamespace=pg_namespace.oid
+ where pg_namespace.nspname='postjob' AND pg_proc.proname='settings_get'
+ SQL
+
+ if Simple::SQL.ask(sql)
+ Simple::SQL.ask "SELECT postjob.settings_get('version')"
+ else
+ Simple::SQL.ask("SELECT value FROM postjob.settings WHERE name=$1", "version") || "unknown"
+ end
  end
  end
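The pg_proc probe above boils down to: use the new settings_get function if it exists, otherwise read the settings table directly. The two branches correspond to these queries (postjob schema, as in the code; settings_get is presumably added by the updated 010_settings.sql):

    -- New-style lookup:
    SELECT postjob.settings_get('version');

    -- Fallback for schemas without the function:
    SELECT value FROM postjob.settings WHERE name = 'version';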
@@ -0,0 +1,17 @@
+ # rubocop:disable Style/EvalWithLocation
+ # rubocop:disable Security/Eval
+
+ #
+ # A job class in-memory representation.
+ #
+ class Postjob::Record < Hash
+   def initialize(hsh)
+     replace hsh.dup
+   end
+
+   def self.attribute(sym)
+     eval <<~RUBY
+       define_method(:#{sym}) { self[:#{sym}] }
+     RUBY
+   end
+ end