queue_classic 3.2.0.RC1 → 4.0.0.pre.alpha1

Files changed (41)
  1. checksums.yaml +5 -5
  2. data/.circleci/config.yml +151 -0
  3. data/.gitignore +2 -0
  4. data/{changelog → CHANGELOG.md} +80 -34
  5. data/CODE_OF_CONDUCT.md +46 -0
  6. data/Gemfile +8 -5
  7. data/README.md +77 -85
  8. data/Rakefile +2 -0
  9. data/lib/generators/queue_classic/install_generator.rb +6 -0
  10. data/lib/generators/queue_classic/templates/add_queue_classic.rb +3 -1
  11. data/lib/generators/queue_classic/templates/update_queue_classic_3_0_0.rb +3 -1
  12. data/lib/generators/queue_classic/templates/update_queue_classic_3_0_2.rb +3 -1
  13. data/lib/generators/queue_classic/templates/update_queue_classic_3_1_0.rb +3 -1
  14. data/lib/generators/queue_classic/templates/update_queue_classic_4_0_0.rb +11 -0
  15. data/lib/queue_classic.rb +4 -11
  16. data/lib/queue_classic/config.rb +2 -1
  17. data/lib/queue_classic/conn_adapter.rb +28 -14
  18. data/lib/queue_classic/queue.rb +65 -11
  19. data/lib/queue_classic/railtie.rb +2 -0
  20. data/lib/queue_classic/setup.rb +24 -7
  21. data/lib/queue_classic/tasks.rb +4 -5
  22. data/lib/queue_classic/version.rb +3 -1
  23. data/lib/queue_classic/worker.rb +10 -5
  24. data/queue_classic.gemspec +1 -1
  25. data/sql/create_table.sql +7 -16
  26. data/sql/ddl.sql +6 -82
  27. data/sql/downgrade_from_4_0_0.sql +88 -0
  28. data/sql/update_to_3_0_0.sql +5 -5
  29. data/sql/update_to_3_1_0.sql +6 -6
  30. data/sql/update_to_4_0_0.sql +6 -0
  31. data/test/benchmark_test.rb +2 -0
  32. data/test/config_test.rb +2 -0
  33. data/test/helper.rb +34 -0
  34. data/test/lib/queue_classic_rails_connection_test.rb +9 -6
  35. data/test/lib/queue_classic_test.rb +2 -0
  36. data/test/queue_test.rb +62 -2
  37. data/test/rails-tests/.gitignore +2 -0
  38. data/test/rails-tests/rails523.sh +23 -0
  39. data/test/worker_test.rb +138 -17
  40. metadata +15 -7
  41. data/.travis.yml +0 -15

data/lib/queue_classic/conn_adapter.rb

@@ -1,27 +1,37 @@
+ # frozen_string_literal: true
+
  require 'uri'
  require 'pg'
 
  module QC
  class ConnAdapter
 
- attr_accessor :connection
- def initialize(c=nil)
- @connection = c.nil? ? establish_new : validate!(c)
+ def initialize(args={})
+ @active_record_connection_share = args[:active_record_connection_share]
+ @_connection = args[:connection]
  @mutex = Mutex.new
  end
 
+ def connection
+ if @active_record_connection_share && Object.const_defined?('ActiveRecord')
+ ActiveRecord::Base.connection.raw_connection
+ else
+ @_connection ||= establish_new
+ end
+ end
+
  def execute(stmt, *params)
  @mutex.synchronize do
  QC.log(:at => "exec_sql", :sql => stmt.inspect)
  begin
  params = nil if params.empty?
- r = @connection.exec(stmt, params)
+ r = connection.exec(stmt, params)
  result = []
  r.each {|t| result << t}
  result.length > 1 ? result : result.pop
- rescue PGError => e
+ rescue PG::Error => e
  QC.log(:error => e.inspect)
- @connection.reset
+ connection.reset
  raise
  end
  end
@@ -30,10 +40,10 @@ module QC
  def wait(time, *channels)
  @mutex.synchronize do
  listen_cmds = channels.map {|c| 'LISTEN "' + c.to_s + '"'}
- @connection.exec(listen_cmds.join(';'))
+ connection.exec(listen_cmds.join(';'))
  wait_for_notify(time)
  unlisten_cmds = channels.map {|c| 'UNLISTEN "' + c.to_s + '"'}
- @connection.exec(unlisten_cmds.join(';'))
+ connection.exec(unlisten_cmds.join(';'))
  drain_notify
  end
  end
@@ -41,7 +51,7 @@ module QC
  def disconnect
  @mutex.synchronize do
  begin
- @connection.close
+ connection.close
  rescue => e
  QC.log(:at => 'disconnect', :error => e.message)
  end
@@ -59,12 +69,12 @@ module QC
 
  def wait_for_notify(t)
  Array.new.tap do |msgs|
- @connection.wait_for_notify(t) {|event, pid, msg| msgs << msg}
+ connection.wait_for_notify(t) {|event, pid, msg| msgs << msg}
  end
  end
 
  def drain_notify
- until @connection.notifies.nil?
+ until connection.notifies.nil?
  QC.log(:at => "drain_notifications")
  end
  end
@@ -77,10 +87,15 @@ module QC
 
  def establish_new
  QC.log(:at => "establish_conn")
- conn = PGconn.connect(*normalize_db_url(db_url))
- if conn.status != PGconn::CONNECTION_OK
+ conn = PG.connect(*normalize_db_url(db_url))
+ if conn.status != PG::CONNECTION_OK
  QC.log(:error => conn.error)
  end
+
+ if conn.server_version < 90600
+ raise "This version of Queue Classic does not support Postgres older than 9.6 (90600). This version is #{conn.server_version}. If you need that support, please use an older version."
+ end
+
  conn.exec("SET application_name = '#{QC.app_name}'")
  conn
  end
@@ -106,6 +121,5 @@ module QC
  raise(ArgumentError, "missing QC_DATABASE_URL or DATABASE_URL")
  @db_url = URI.parse(url)
  end
-
  end
  end
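
The adapter no longer exposes a writable connection attribute: it either manages its own PG::Connection or, when asked, reuses ActiveRecord's raw connection. A minimal usage sketch of both constructor styles (the surrounding setup code is illustrative, not part of this diff):

    require 'queue_classic'
    require 'pg'

    # Hand the adapter a PG::Connection you own.
    own = PG.connect(ENV['QC_DATABASE_URL'] || ENV['DATABASE_URL'])
    adapter = QC::ConnAdapter.new(connection: own)
    adapter.execute("SELECT count(*) FROM queue_classic_jobs")

    # Or, inside a Rails process, piggyback on ActiveRecord's raw connection
    # instead of opening a second one.
    shared = QC::ConnAdapter.new(active_record_connection_share: true)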

data/lib/queue_classic/queue.rb

@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  require_relative 'conn_adapter'
  require 'json'
  require 'time'
@@ -5,8 +7,8 @@ require 'time'
  module QC
  # The queue class maps a queue abstraction onto a database table.
  class Queue
-
  attr_reader :name, :top_bound
+
  def initialize(name, top_bound=nil)
  @name = name
  @top_bound = top_bound || QC.top_bound
@@ -38,7 +40,16 @@ module QC
  def enqueue(method, *args)
  QC.log_yield(:measure => 'queue.enqueue') do
  s = "INSERT INTO #{QC.table_name} (q_name, method, args) VALUES ($1, $2, $3) RETURNING id"
- conn_adapter.execute(s, name, method, JSON.dump(args))
+ begin
+ retries ||= 0
+ conn_adapter.execute(s, name, method, JSON.dump(args))
+ rescue PG::Error
+ if (retries += 1) < 2
+ retry
+ else
+ raise
+ end
+ end
  end
  end
 
@@ -64,14 +75,42 @@ module QC
  s = "INSERT INTO #{QC.table_name} (q_name, method, args, scheduled_at)
  VALUES ($1, $2, $3, now() + interval '#{seconds.to_i} seconds')
  RETURNING id"
- conn_adapter.execute(s, name, method, JSON.dump(args))
+ begin
+ retries ||= 0
+ conn_adapter.execute(s, name, method, JSON.dump(args))
+ rescue PG::Error
+ if (retries += 1) < 2
+ retry
+ else
+ raise
+ end
+ end
  end
  end
 
  def lock
  QC.log_yield(:measure => 'queue.lock') do
- s = "SELECT * FROM lock_head($1, $2)"
- if r = conn_adapter.execute(s, name, top_bound)
+ s = <<~SQL
+ WITH selected_job AS (
+ SELECT id
+ FROM queue_classic_jobs
+ WHERE
+ locked_at IS NULL AND
+ q_name = $1 AND
+ scheduled_at <= now()
+ LIMIT 1
+ FOR NO KEY UPDATE SKIP LOCKED
+ )
+ UPDATE queue_classic_jobs
+ SET
+ locked_at = now(),
+ locked_by = pg_backend_pid()
+ FROM selected_job
+ WHERE queue_classic_jobs.id = selected_job.id
+ RETURNING *
+ SQL
+
+ if r = conn_adapter.execute(s, name)
  {}.tap do |job|
  job[:id] = r["id"]
  job[:q_name] = r["q_name"]
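
The PL/pgSQL lock_head() function and the top_bound offset are gone from the dequeue path; a worker now claims the head job with a single UPDATE using FOR NO KEY UPDATE SKIP LOCKED, so competing workers skip rows another backend has already claimed instead of looping on lock_not_available. Dequeuing by hand looks roughly like this (normally QC::Worker drives this loop; the queue name is illustrative):

    queue = QC::Queue.new("default")

    if job = queue.lock            # nil when nothing is ready to run
      begin
        # ... run the job; the worker dispatches on job[:method] and job[:args] ...
      ensure
        queue.delete(job[:id])     # or queue.unlock(job[:id]) to release it
      end
    end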

@@ -89,14 +128,14 @@
 
  def unlock(id)
  QC.log_yield(:measure => 'queue.unlock') do
- s = "UPDATE #{QC.table_name} set locked_at = null where id = $1"
+ s = "UPDATE #{QC.table_name} SET locked_at = NULL WHERE id = $1"
  conn_adapter.execute(s, id)
  end
  end
 
  def delete(id)
  QC.log_yield(:measure => 'queue.delete') do
- conn_adapter.execute("DELETE FROM #{QC.table_name} where id = $1", id)
+ conn_adapter.execute("DELETE FROM #{QC.table_name} WHERE id = $1", id)
  end
  end
 
@@ -107,13 +146,28 @@
  end
  end
 
+ # Count the number of jobs in a specific queue. This returns all
+ # jobs, including ones that are scheduled in the future.
  def count
- QC.log_yield(:measure => 'queue.count') do
- s = "SELECT COUNT(*) FROM #{QC.table_name} WHERE q_name = $1"
- r = conn_adapter.execute(s, name)
+ _count('queue.count', "SELECT COUNT(*) FROM #{QC.table_name} WHERE q_name = $1")
+ end
+
+ # Count the number of jobs in a specific queue, except ones scheduled in the future
+ def count_ready
+ _count('queue.count_scheduled', "SELECT COUNT(*) FROM #{QC.table_name} WHERE q_name = $1 AND scheduled_at <= now()")
+ end
+
+ # Count the number of jobs in a specific queue scheduled in the future
+ def count_scheduled
+ _count('queue.count_scheduled', "SELECT COUNT(*) FROM #{QC.table_name} WHERE q_name = $1 AND scheduled_at > now()")
+ end
+
+ private
+ def _count(metric_name, sql)
+ QC.log_yield(measure: metric_name) do
+ r = conn_adapter.execute(sql, name)
  r["count"].to_i
  end
  end
-
  end
  end
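
count still reports every job in the queue, including ones scheduled in the future; the two new helpers split that total. A quick sketch of how the three relate, using the existing enqueue_in(seconds, method, *args) helper (method names illustrative):

    queue = QC::Queue.new("mailer")
    queue.enqueue("Mailer.deliver_welcome", 7)            # runnable now
    queue.enqueue_in(300, "Mailer.deliver_reminder", 7)   # runnable in 5 minutes

    queue.count            # => 2  every job, scheduled or not
    queue.count_ready      # => 1  scheduled_at <= now()
    queue.count_scheduled  # => 1  scheduled_at > now()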

data/lib/queue_classic/railtie.rb

@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  require 'rails/railtie'
 
  module QC

data/lib/queue_classic/setup.rb

@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module QC
  module Setup
  Root = File.expand_path("../..", File.dirname(__FILE__))
@@ -8,51 +10,66 @@ module QC
  DowngradeFrom_3_0_0 = File.join(Root, "/sql/downgrade_from_3_0_0.sql")
  UpgradeTo_3_1_0 = File.join(Root, "/sql/update_to_3_1_0.sql")
  DowngradeFrom_3_1_0 = File.join(Root, "/sql/downgrade_from_3_1_0.sql")
+ UpgradeTo_4_0_0 = File.join(Root, "/sql/update_to_4_0_0.sql")
+ DowngradeFrom_4_0_0 = File.join(Root, "/sql/downgrade_from_4_0_0.sql")
 
  def self.create(c = QC::default_conn_adapter.connection)
- conn = QC::ConnAdapter.new(c)
+ conn = QC::ConnAdapter.new(connection: c)
  conn.execute(File.read(CreateTable))
  conn.execute(File.read(SqlFunctions))
  conn.disconnect if c.nil? #Don't close a conn we didn't create.
  end
 
  def self.drop(c = QC::default_conn_adapter.connection)
- conn = QC::ConnAdapter.new(c)
+ conn = QC::ConnAdapter.new(connection: c)
  conn.execute("DROP TABLE IF EXISTS queue_classic_jobs CASCADE")
  conn.execute(File.read(DropSqlFunctions))
  conn.disconnect if c.nil? #Don't close a conn we didn't create.
  end
 
  def self.update(c = QC::default_conn_adapter.connection)
- conn = QC::ConnAdapter.new(c)
+ conn = QC::ConnAdapter.new(connection: c)
  conn.execute(File.read(UpgradeTo_3_0_0))
  conn.execute(File.read(UpgradeTo_3_1_0))
+ conn.execute(File.read(UpgradeTo_4_0_0))
  conn.execute(File.read(DropSqlFunctions))
  conn.execute(File.read(SqlFunctions))
  end
 
  def self.update_to_3_0_0(c = QC::default_conn_adapter.connection)
- conn = QC::ConnAdapter.new(c)
+ conn = QC::ConnAdapter.new(connection: c)
  conn.execute(File.read(UpgradeTo_3_0_0))
  conn.execute(File.read(DropSqlFunctions))
  conn.execute(File.read(SqlFunctions))
  end
 
  def self.downgrade_from_3_0_0(c = QC::default_conn_adapter.connection)
- conn = QC::ConnAdapter.new(c)
+ conn = QC::ConnAdapter.new(connection: c)
  conn.execute(File.read(DowngradeFrom_3_0_0))
  end
 
  def self.update_to_3_1_0(c = QC::default_conn_adapter.connection)
- conn = QC::ConnAdapter.new(c)
+ conn = QC::ConnAdapter.new(connection: c)
  conn.execute(File.read(UpgradeTo_3_1_0))
  conn.execute(File.read(DropSqlFunctions))
  conn.execute(File.read(SqlFunctions))
  end
 
  def self.downgrade_from_3_1_0(c = QC::default_conn_adapter.connection)
- conn = QC::ConnAdapter.new(c)
+ conn = QC::ConnAdapter.new(connection: c)
  conn.execute(File.read(DowngradeFrom_3_1_0))
  end
+
+ def self.update_to_4_0_0(c = QC::default_conn_adapter.connection)
+ conn = QC::ConnAdapter.new(connection: c)
+ conn.execute(File.read(UpgradeTo_4_0_0))
+ conn.execute(File.read(DropSqlFunctions))
+ conn.execute(File.read(SqlFunctions))
+ end
+
+ def self.downgrade_from_4_0_0(c = QC::default_conn_adapter.connection)
+ conn = QC::ConnAdapter.new(connection: c)
+ conn.execute(File.read(DowngradeFrom_4_0_0))
+ end
  end
  end
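
As with the 3.x upgrades, the new helper pair is meant to be driven from a migration; the generator ships an update_queue_classic_4_0_0.rb template (listed in the files above). A minimal hand-written equivalent, assuming the usual queue_classic migration pattern (the actual generated template may differ):

    class UpdateQueueClassic400 < ActiveRecord::Migration[5.2]
      def up
        QC::Setup.update_to_4_0_0
      end

      def down
        QC::Setup.downgrade_from_4_0_0
      end
    end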

data/lib/queue_classic/tasks.rb

@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  task :environment
 
  namespace :jobs do
@@ -11,11 +13,8 @@ namespace :qc do
  @worker = QC.default_worker_class.new
 
  trap('INT') do
- $stderr.puts("Received INT. Shutting down.")
- if !@worker.running
- $stderr.puts("Worker has stopped running. Exit.")
- exit(1)
- end
+ $stderr.puts("Received INT. Shutting down.")
+ abort("Worker has stopped running. Exit.") unless @worker.running
  @worker.stop
  end
 

data/lib/queue_classic/version.rb

@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module QC
- VERSION = "3.2.0.RC1"
+ VERSION = "4.0.0-alpha1"
  end

data/lib/queue_classic/worker.rb

@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  # -*- coding: utf-8 -*-
  require_relative 'queue'
  require_relative 'conn_adapter'
@@ -12,7 +14,7 @@ module QC
  # This method takes a single hash argument. The following keys are read:
  # fork_worker:: Worker forks each job execution.
  # wait_interval:: Time to wait between failed lock attempts
- # connection:: PGConn object.
+ # connection:: PG::Connection object.
  # q_name:: Name of a single queue to process.
  # q_names:: Names of queues to process. Will process left to right.
  # top_bound:: Offset to the head of the queue. 1 == strict FIFO.
@@ -21,7 +23,7 @@ module QC
  @wait_interval = args[:wait_interval] || QC.wait_time
 
  if args[:connection]
- @conn_adapter = ConnAdapter.new(args[:connection])
+ @conn_adapter = ConnAdapter.new(connection: args[:connection])
  else
  @conn_adapter = QC.default_conn_adapter
  end
@@ -112,7 +114,10 @@ module QC
  queue.delete(job[:id])
  finished = true
  end
- rescue => e
+ rescue StandardError, ScriptError, NoMemoryError => e
+ # We really only want to unlock the job for signal and system exit
+ # exceptions. If we encounter a ScriptError or a NoMemoryError any
+ # future run will likely encounter the same error.
  handle_failure(job, e)
  finished = true
  ensure
@@ -134,8 +139,8 @@ module QC
  receiver.send(message, *args)
  end
 
- # This method will be called when an exception
- # is raised during the execution of the job.
+ # This method will be called when a StandardError, ScriptError or
+ # NoMemoryError is raised during the execution of the job.
  def handle_failure(job,e)
  $stderr.puts("count#qc.job-error=1 job=#{job} error=#{e.inspect} at=#{e.backtrace.first}")
  end
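
Because jobs are now rescued for ScriptError and NoMemoryError as well as StandardError, custom error reporting still funnels through handle_failure. A sketch of a subclass that keeps the default stderr line and adds its own hook (MyErrorReporter is hypothetical):

    class MyWorker < QC::Worker
      # Invoked for StandardError, ScriptError and NoMemoryError raised by a job.
      def handle_failure(job, e)
        super
        MyErrorReporter.notify(e, job_id: job[:id])
      end
    end

    # Run the work loop against a single queue, as the qc rake task does.
    MyWorker.new(q_name: "default").start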

data/queue_classic.gemspec

@@ -20,5 +20,5 @@ Gem::Specification.new do |spec|
 
  spec.require_paths = %w[lib]
 
- spec.add_dependency "pg", ">= 0.17", "< 0.19"
+ spec.add_dependency "pg", ">= 0.17", "< 2.0"
  end

data/sql/create_table.sql

@@ -1,26 +1,17 @@
- do $$ begin
+ DO $$ BEGIN
 
  CREATE TABLE queue_classic_jobs (
  id bigserial PRIMARY KEY,
- q_name text not null check (length(q_name) > 0),
- method text not null check (length(method) > 0),
- args text not null,
+ q_name text NOT NULL CHECK (length(q_name) > 0),
+ method text NOT NULL CHECK (length(method) > 0),
+ args jsonb NOT NULL,
  locked_at timestamptz,
  locked_by integer,
- created_at timestamptz default now(),
- scheduled_at timestamptz default now()
+ created_at timestamptz DEFAULT now(),
+ scheduled_at timestamptz DEFAULT now()
  );
 
- -- If jsonb type is available, use it for the args column
- if exists (select 1 from pg_type where typname = 'jsonb') then
- alter table queue_classic_jobs alter column args type jsonb using args::jsonb;
- -- Otherwise, use json type for the args column if available
- elsif exists (select 1 from pg_type where typname = 'json') then
- alter table queue_classic_jobs alter column args type json using args::json;
- end if;
-
- end $$ language plpgsql;
+ END $$ LANGUAGE plpgsql;
 
  CREATE INDEX idx_qc_on_name_only_unlocked ON queue_classic_jobs (q_name, id) WHERE locked_at IS NULL;
  CREATE INDEX idx_qc_on_scheduled_at_only_unlocked ON queue_classic_jobs (scheduled_at, id) WHERE locked_at IS NULL;
-
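
With the runtime jsonb/json probe removed, args is plain jsonb from the start (safe now that ConnAdapter refuses Postgres older than 9.6), so structured arguments round-trip as JSON without any conversion step. For example (method name illustrative):

    QC.enqueue("Mailer.deliver_welcome", "user_id" => 42)   # args stored as jsonb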

data/sql/ddl.sql

@@ -1,84 +1,8 @@
- -- We are declaring the return type to be queue_classic_jobs.
- -- This is ok since I am assuming that all of the users added queues will
- -- have identical columns to queue_classic_jobs.
- -- When QC supports queues with columns other than the default, we will have to change this.
-
- CREATE OR REPLACE FUNCTION lock_head(q_name varchar, top_boundary integer)
- RETURNS SETOF queue_classic_jobs AS $$
- DECLARE
- unlocked bigint;
- relative_top integer;
- job_count integer;
- BEGIN
- -- The purpose is to release contention for the first spot in the table.
- -- The select count(*) is going to slow down dequeue performance but allow
- -- for more workers. Would love to see some optimization here...
-
- EXECUTE 'SELECT count(*) FROM '
- || '(SELECT * FROM queue_classic_jobs '
- || ' WHERE locked_at IS NULL'
- || ' AND q_name = '
- || quote_literal(q_name)
- || ' AND scheduled_at <= '
- || quote_literal(now())
- || ' LIMIT '
- || quote_literal(top_boundary)
- || ') limited'
- INTO job_count;
-
- SELECT TRUNC(random() * (top_boundary - 1))
- INTO relative_top;
-
- IF job_count < top_boundary THEN
- relative_top = 0;
- END IF;
-
- LOOP
- BEGIN
- EXECUTE 'SELECT id FROM queue_classic_jobs '
- || ' WHERE locked_at IS NULL'
- || ' AND q_name = '
- || quote_literal(q_name)
- || ' AND scheduled_at <= '
- || quote_literal(now())
- || ' ORDER BY id ASC'
- || ' LIMIT 1'
- || ' OFFSET ' || quote_literal(relative_top)
- || ' FOR UPDATE NOWAIT'
- INTO unlocked;
- EXIT;
- EXCEPTION
- WHEN lock_not_available THEN
- -- do nothing. loop again and hope we get a lock
- END;
- END LOOP;
-
- RETURN QUERY EXECUTE 'UPDATE queue_classic_jobs '
- || ' SET locked_at = (CURRENT_TIMESTAMP),'
- || ' locked_by = (select pg_backend_pid())'
- || ' WHERE id = $1'
- || ' AND locked_at is NULL'
- || ' RETURNING *'
- USING unlocked;
-
- RETURN;
- END;
- $$ LANGUAGE plpgsql;
-
- CREATE OR REPLACE FUNCTION lock_head(tname varchar)
- RETURNS SETOF queue_classic_jobs AS $$
- BEGIN
- RETURN QUERY EXECUTE 'SELECT * FROM lock_head($1,10)' USING tname;
- END;
- $$ LANGUAGE plpgsql;
-
  -- queue_classic_notify function and trigger
- create function queue_classic_notify() returns trigger as $$ begin
- perform pg_notify(new.q_name, '');
- return null;
- end $$ language plpgsql;
+ CREATE FUNCTION queue_classic_notify() RETURNS TRIGGER AS $$ BEGIN
+ perform pg_notify(new.q_name, ''); RETURN NULL;
+ END $$ LANGUAGE plpgsql;
 
- create trigger queue_classic_notify
- after insert on queue_classic_jobs
- for each row
- execute procedure queue_classic_notify();
+ CREATE TRIGGER queue_classic_notify
+ AFTER INSERT ON queue_classic_jobs FOR EACH ROW
+ EXECUTE PROCEDURE queue_classic_notify();