webhookdb 1.3.1 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164)
  1. checksums.yaml +4 -4
  2. data/admin-dist/assets/{index-6aebf805.js → index-9306dd28.js} +39 -39
  3. data/admin-dist/index.html +1 -1
  4. data/data/messages/templates/errors/generic_backfill.email.liquid +30 -0
  5. data/data/messages/templates/errors/icalendar_fetch.email.liquid +8 -2
  6. data/data/messages/templates/specs/with_fields.email.liquid +6 -0
  7. data/db/migrations/026_undo_integration_backfill_cursor.rb +2 -0
  8. data/db/migrations/032_remove_db_defaults.rb +2 -0
  9. data/db/migrations/043_text_search.rb +2 -0
  10. data/db/migrations/045_system_log.rb +15 -0
  11. data/db/migrations/046_indices.rb +14 -0
  12. data/db/migrations/047_sync_parallelism.rb +9 -0
  13. data/db/migrations/048_sync_stats.rb +9 -0
  14. data/db/migrations/049_error_handlers.rb +18 -0
  15. data/db/migrations/050_logged_webhook_indices.rb +25 -0
  16. data/db/migrations/051_partitioning.rb +9 -0
  17. data/integration/async_spec.rb +0 -2
  18. data/integration/service_integrations_spec.rb +0 -2
  19. data/lib/amigo/durable_job.rb +2 -2
  20. data/lib/amigo/job_in_context.rb +12 -0
  21. data/lib/webhookdb/admin.rb +6 -0
  22. data/lib/webhookdb/admin_api/data_provider.rb +1 -0
  23. data/lib/webhookdb/admin_api/entities.rb +8 -0
  24. data/lib/webhookdb/aggregate_result.rb +1 -1
  25. data/lib/webhookdb/api/entities.rb +6 -2
  26. data/lib/webhookdb/api/error_handlers.rb +104 -0
  27. data/lib/webhookdb/api/helpers.rb +25 -1
  28. data/lib/webhookdb/api/icalproxy.rb +22 -0
  29. data/lib/webhookdb/api/install.rb +2 -1
  30. data/lib/webhookdb/api/organizations.rb +6 -0
  31. data/lib/webhookdb/api/saved_queries.rb +1 -0
  32. data/lib/webhookdb/api/saved_views.rb +1 -0
  33. data/lib/webhookdb/api/service_integrations.rb +2 -1
  34. data/lib/webhookdb/api/sync_targets.rb +1 -1
  35. data/lib/webhookdb/api/system.rb +5 -0
  36. data/lib/webhookdb/api/webhook_subscriptions.rb +1 -0
  37. data/lib/webhookdb/api.rb +4 -1
  38. data/lib/webhookdb/apps.rb +4 -0
  39. data/lib/webhookdb/async/autoscaler.rb +10 -0
  40. data/lib/webhookdb/async/job.rb +4 -0
  41. data/lib/webhookdb/async/scheduled_job.rb +4 -0
  42. data/lib/webhookdb/async.rb +2 -0
  43. data/lib/webhookdb/backfiller.rb +17 -4
  44. data/lib/webhookdb/concurrent.rb +96 -0
  45. data/lib/webhookdb/connection_cache.rb +57 -10
  46. data/lib/webhookdb/console.rb +1 -1
  47. data/lib/webhookdb/customer/reset_code.rb +1 -1
  48. data/lib/webhookdb/customer.rb +5 -4
  49. data/lib/webhookdb/database_document.rb +1 -1
  50. data/lib/webhookdb/db_adapter/default_sql.rb +1 -14
  51. data/lib/webhookdb/db_adapter/partition.rb +14 -0
  52. data/lib/webhookdb/db_adapter/partitioning.rb +8 -0
  53. data/lib/webhookdb/db_adapter/pg.rb +77 -5
  54. data/lib/webhookdb/db_adapter/snowflake.rb +15 -6
  55. data/lib/webhookdb/db_adapter.rb +25 -3
  56. data/lib/webhookdb/dbutil.rb +2 -0
  57. data/lib/webhookdb/errors.rb +34 -0
  58. data/lib/webhookdb/fixtures/logged_webhooks.rb +4 -0
  59. data/lib/webhookdb/fixtures/organization_error_handlers.rb +20 -0
  60. data/lib/webhookdb/http.rb +30 -16
  61. data/lib/webhookdb/icalendar.rb +30 -9
  62. data/lib/webhookdb/jobs/amigo_test_jobs.rb +1 -1
  63. data/lib/webhookdb/jobs/backfill.rb +21 -25
  64. data/lib/webhookdb/jobs/create_mirror_table.rb +3 -4
  65. data/lib/webhookdb/jobs/deprecated_jobs.rb +3 -0
  66. data/lib/webhookdb/jobs/emailer.rb +2 -1
  67. data/lib/webhookdb/jobs/front_signalwire_message_channel_sync_inbound.rb +15 -0
  68. data/lib/webhookdb/jobs/icalendar_delete_stale_cancelled_events.rb +7 -2
  69. data/lib/webhookdb/jobs/icalendar_enqueue_syncs.rb +74 -11
  70. data/lib/webhookdb/jobs/icalendar_enqueue_syncs_for_urls.rb +22 -0
  71. data/lib/webhookdb/jobs/icalendar_sync.rb +21 -9
  72. data/lib/webhookdb/jobs/increase_event_handler.rb +3 -2
  73. data/lib/webhookdb/jobs/{logged_webhook_replay.rb → logged_webhooks_replay.rb} +5 -3
  74. data/lib/webhookdb/jobs/message_dispatched.rb +1 -0
  75. data/lib/webhookdb/jobs/model_event_system_log_tracker.rb +112 -0
  76. data/lib/webhookdb/jobs/monitor_metrics.rb +29 -0
  77. data/lib/webhookdb/jobs/organization_database_migration_notify.rb +32 -0
  78. data/lib/webhookdb/jobs/organization_database_migration_run.rb +4 -6
  79. data/lib/webhookdb/jobs/organization_error_handler_dispatch.rb +26 -0
  80. data/lib/webhookdb/jobs/prepare_database_connections.rb +1 -0
  81. data/lib/webhookdb/jobs/process_webhook.rb +11 -12
  82. data/lib/webhookdb/jobs/renew_watch_channel.rb +10 -10
  83. data/lib/webhookdb/jobs/replication_migration.rb +5 -2
  84. data/lib/webhookdb/jobs/reset_code_create_dispatch.rb +1 -2
  85. data/lib/webhookdb/jobs/scheduled_backfills.rb +2 -2
  86. data/lib/webhookdb/jobs/send_invite.rb +3 -2
  87. data/lib/webhookdb/jobs/send_test_webhook.rb +1 -3
  88. data/lib/webhookdb/jobs/send_webhook.rb +4 -5
  89. data/lib/webhookdb/jobs/stale_row_deleter.rb +31 -0
  90. data/lib/webhookdb/jobs/sync_target_enqueue_scheduled.rb +3 -0
  91. data/lib/webhookdb/jobs/sync_target_run_sync.rb +9 -15
  92. data/lib/webhookdb/jobs/{webhook_subscription_delivery_attempt.rb → webhook_subscription_delivery_event.rb} +5 -8
  93. data/lib/webhookdb/liquid/expose.rb +1 -1
  94. data/lib/webhookdb/liquid/filters.rb +1 -1
  95. data/lib/webhookdb/liquid/partial.rb +2 -2
  96. data/lib/webhookdb/logged_webhook/resilient.rb +3 -3
  97. data/lib/webhookdb/logged_webhook.rb +16 -2
  98. data/lib/webhookdb/message/email_transport.rb +1 -1
  99. data/lib/webhookdb/message/transport.rb +1 -1
  100. data/lib/webhookdb/message.rb +55 -4
  101. data/lib/webhookdb/messages/error_generic_backfill.rb +47 -0
  102. data/lib/webhookdb/messages/error_icalendar_fetch.rb +5 -0
  103. data/lib/webhookdb/messages/error_signalwire_send_sms.rb +2 -0
  104. data/lib/webhookdb/messages/specs.rb +16 -0
  105. data/lib/webhookdb/organization/alerting.rb +56 -6
  106. data/lib/webhookdb/organization/database_migration.rb +2 -2
  107. data/lib/webhookdb/organization/db_builder.rb +5 -4
  108. data/lib/webhookdb/organization/error_handler.rb +141 -0
  109. data/lib/webhookdb/organization.rb +76 -10
  110. data/lib/webhookdb/postgres/model.rb +1 -0
  111. data/lib/webhookdb/postgres/model_utilities.rb +2 -0
  112. data/lib/webhookdb/postgres.rb +3 -4
  113. data/lib/webhookdb/replicator/base.rb +202 -68
  114. data/lib/webhookdb/replicator/base_stale_row_deleter.rb +165 -0
  115. data/lib/webhookdb/replicator/column.rb +2 -0
  116. data/lib/webhookdb/replicator/email_octopus_contact_v1.rb +0 -1
  117. data/lib/webhookdb/replicator/fake.rb +106 -88
  118. data/lib/webhookdb/replicator/front_signalwire_message_channel_app_v1.rb +131 -61
  119. data/lib/webhookdb/replicator/github_repo_v1_mixin.rb +17 -0
  120. data/lib/webhookdb/replicator/icalendar_calendar_v1.rb +197 -32
  121. data/lib/webhookdb/replicator/icalendar_event_v1.rb +20 -44
  122. data/lib/webhookdb/replicator/icalendar_event_v1_partitioned.rb +33 -0
  123. data/lib/webhookdb/replicator/intercom_contact_v1.rb +1 -0
  124. data/lib/webhookdb/replicator/intercom_conversation_v1.rb +1 -0
  125. data/lib/webhookdb/replicator/intercom_v1_mixin.rb +49 -6
  126. data/lib/webhookdb/replicator/partitionable_mixin.rb +116 -0
  127. data/lib/webhookdb/replicator/shopify_v1_mixin.rb +1 -1
  128. data/lib/webhookdb/replicator/signalwire_message_v1.rb +31 -1
  129. data/lib/webhookdb/replicator/sponsy_v1_mixin.rb +1 -1
  130. data/lib/webhookdb/replicator/transistor_episode_stats_v1.rb +0 -1
  131. data/lib/webhookdb/replicator/transistor_episode_v1.rb +11 -5
  132. data/lib/webhookdb/replicator/webhook_request.rb +8 -0
  133. data/lib/webhookdb/replicator.rb +6 -3
  134. data/lib/webhookdb/service/helpers.rb +4 -0
  135. data/lib/webhookdb/service/middleware.rb +6 -2
  136. data/lib/webhookdb/service/view_api.rb +1 -1
  137. data/lib/webhookdb/service.rb +10 -10
  138. data/lib/webhookdb/service_integration.rb +19 -1
  139. data/lib/webhookdb/signalwire.rb +1 -1
  140. data/lib/webhookdb/spec_helpers/async.rb +0 -4
  141. data/lib/webhookdb/spec_helpers/sentry.rb +32 -0
  142. data/lib/webhookdb/spec_helpers/shared_examples_for_replicators.rb +239 -64
  143. data/lib/webhookdb/spec_helpers.rb +1 -0
  144. data/lib/webhookdb/sync_target.rb +202 -34
  145. data/lib/webhookdb/system_log_event.rb +9 -0
  146. data/lib/webhookdb/tasks/admin.rb +1 -1
  147. data/lib/webhookdb/tasks/annotate.rb +1 -1
  148. data/lib/webhookdb/tasks/db.rb +13 -1
  149. data/lib/webhookdb/tasks/docs.rb +1 -1
  150. data/lib/webhookdb/tasks/fixture.rb +1 -1
  151. data/lib/webhookdb/tasks/message.rb +1 -1
  152. data/lib/webhookdb/tasks/regress.rb +1 -1
  153. data/lib/webhookdb/tasks/release.rb +1 -1
  154. data/lib/webhookdb/tasks/sidekiq.rb +1 -1
  155. data/lib/webhookdb/tasks/specs.rb +1 -1
  156. data/lib/webhookdb/version.rb +1 -1
  157. data/lib/webhookdb/webhook_subscription.rb +3 -4
  158. data/lib/webhookdb.rb +34 -8
  159. metadata +114 -64
  160. data/lib/webhookdb/jobs/customer_created_notify_internal.rb +0 -22
  161. data/lib/webhookdb/jobs/organization_database_migration_notify_finished.rb +0 -21
  162. data/lib/webhookdb/jobs/organization_database_migration_notify_started.rb +0 -21
  163. /data/lib/webhookdb/jobs/{logged_webhook_resilient_replay.rb → logged_webhooks_resilient_replay.rb} +0 -0
  164. /data/lib/webhookdb/jobs/{webhook_resource_notify_integrations.rb → webhookdb_resource_notify_integrations.rb} +0 -0
data/lib/webhookdb/apps.rb
@@ -15,6 +15,8 @@ require "webhookdb/service"
  require "webhookdb/api/auth"
  require "webhookdb/api/db"
  require "webhookdb/api/demo"
+ require "webhookdb/api/error_handlers"
+ require "webhookdb/api/icalproxy"
  require "webhookdb/api/install"
  require "webhookdb/api/me"
  require "webhookdb/api/organizations"
@@ -77,6 +79,8 @@ module Webhookdb::Apps
  mount Webhookdb::API::Auth
  mount Webhookdb::API::Db
  mount Webhookdb::API::Demo
+ mount Webhookdb::API::ErrorHandlers
+ mount Webhookdb::API::Icalproxy
  mount Webhookdb::API::Install
  mount Webhookdb::API::Me
  mount Webhookdb::API::Organizations
data/lib/webhookdb/async/autoscaler.rb
@@ -28,6 +28,7 @@ module Webhookdb::Async::Autoscaler
  setting :hostname_regex, /^web\.1$/, convert: ->(s) { Regexp.new(s) }
  setting :heroku_app_id_or_app_name, "", key: "HEROKU_APP_NAME"
  setting :heroku_formation_id_or_formation_type, "worker"
+ setting :sentry_alert_interval, 180

  after_configured do
  self._check_provider!
@@ -65,6 +66,7 @@ module Webhookdb::Async::Autoscaler
  latency_restored_threshold: self.latency_restored_threshold,
  latency_restored_handlers: [self.method(:scale_down)],
  log: ->(level, msg, kw={}) { self.logger.send(level, msg, kw) },
+ on_unhandled_exception: ->(e) { Sentry.capture_exception(e) },
  )
  return @instance.start
  end
@@ -78,10 +80,18 @@ module Webhookdb::Async::Autoscaler
  scale_action = @impl.scale_up(names_and_latencies, depth:, duration:, **)
  kw = {queues: names_and_latencies, depth:, duration:, scale_action:}
  self.logger.warn("high_latency_queues_event", **kw)
+ self._alert_sentry_latency(kw)
+ end
+
+ def _alert_sentry_latency(kw)
+   call_sentry = @last_called_sentry.nil? ||
+     @last_called_sentry < (Time.now - self.sentry_alert_interval)
+   return unless call_sentry
  Sentry.with_scope do |scope|
  scope&.set_extras(**kw)
  Sentry.capture_message("Some queues have a high latency")
  end
+ @last_called_sentry = Time.now
  end

  def scale_down(depth:, duration:, **)
data/lib/webhookdb/async/job.rb
@@ -14,5 +14,9 @@ module Webhookdb::Async::Job
  def with_log_tags(tags, &)
    Webhookdb::Async::JobLogger.with_log_tags(tags, &)
  end
+
+ def set_job_tags(tags)
+   Webhookdb::Async::JobLogger.set_job_tags(**tags)
+ end
  end
  end
data/lib/webhookdb/async/scheduled_job.rb
@@ -14,5 +14,9 @@ module Webhookdb::Async::ScheduledJob
  def with_log_tags(tags, &)
    Webhookdb::Async::JobLogger.with_log_tags(tags, &)
  end
+
+ def set_job_tags(**tags)
+   Webhookdb::Async::JobLogger.set_job_tags(**tags)
+ end
  end
  end
data/lib/webhookdb/async.rb
@@ -2,6 +2,7 @@

  require "amigo/retry"
  require "amigo/durable_job"
+ require "amigo/job_in_context"
  require "amigo/rate_limited_error_handler"
  require "appydays/configurable"
  require "appydays/loggable"
@@ -62,6 +63,7 @@ module Webhookdb::Async
  ttl: self.error_reporting_ttl,
  )
  config.death_handlers << Webhookdb::Async::JobLogger.method(:death_handler)
+ config.server_middleware.add(Amigo::JobInContext::ServerMiddleware)
  config.server_middleware.add(Amigo::DurableJob::ServerMiddleware)
  # We use the dead set to move jobs that we need to retry manually
  config.options[:dead_max_jobs] = 999_999_999
data/lib/webhookdb/backfiller.rb
@@ -88,16 +88,29 @@ class Webhookdb::Backfiller
  return k, inserting
  end

+ # Return the conditional update expression.
+ # Usually this is:
+ # - +nil+ if +conditional_upsert?+ is false.
+ # - the +_update_where_expr+ if +conditional_upsert?+ is true.
+ # - Can be overridden by a subclass if they need to use a specific conditional update expression
+ #   in certain cases (should be rare).
+ def update_where_expr = self.conditional_upsert? ? self.upserting_replicator._update_where_expr : nil
+
+ # The upsert 'UPDATE' expression, calculated using the first row of a multi-row upsert.
+ # Defaults to +_upsert_update_expr+, but may need to be overridden in rare cases.
+ def upsert_update_expr(first_inserting_row) = self.upserting_replicator._upsert_update_expr(first_inserting_row)
+
  def flush_pending_inserts
  return if self.dry_run?
  return if self.pending_inserts.empty?
  rows_to_insert = self.pending_inserts.values
- update_where = self.conditional_upsert? ? self.upserting_replicator._update_where_expr : nil
+ update_where_expr = self.update_where_expr
+ update_expr = self.upserting_replicator._upsert_update_expr(rows_to_insert.first)
  self.upserting_replicator.admin_dataset(timeout: :fast) do |ds|
  insert_ds = ds.insert_conflict(
- target: self.upserting_replicator._remote_key_column.name,
- update: self.upserting_replicator._upsert_update_expr(rows_to_insert.first),
- update_where:,
+ target: self.upserting_replicator._upsert_conflict_target,
+ update: update_expr,
+ update_where: update_where_expr,
  )
  insert_ds.multi_insert(rows_to_insert)
  end
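The new Backfiller hooks let a subclass adjust the conditional-upsert expression without copying flush_pending_inserts. A minimal sketch of how an override might look (the class, table, and Sequel expression are illustrative, not part of the package):

  # Hypothetical backfiller that only overwrites a stored row when the
  # incoming payload is newer than what is already in the table.
  class ExampleNewerOnlyBackfiller < Webhookdb::Backfiller
    def conditional_upsert? = true

    # Override the new hook rather than reimplementing flush_pending_inserts.
    def update_where_expr
      Sequel[:excluded][:row_updated_at] > Sequel[:example_table][:row_updated_at]
    end
  end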
data/lib/webhookdb/concurrent.rb
@@ -0,0 +1,96 @@
+ # frozen_string_literal: true
+
+ module Webhookdb::Concurrent
+   class Timeout < Timeout::Error; end
+
+   # Baseclass for pools for doing work across threads.
+   # Note that these concurrent pools are not for repeated use,
+   # like a normal threadpool. They are for 'fanning out' a single operation
+   # across multiple threads.
+   #
+   # Tasks should not error; if they error, the pool will becomes inoperable:
+   # +post+ and +join+ will re-raise the first task error.
+   class Pool
+     # Add work to the pool.
+     # Will block if no workers are free.
+     # Re-raises the pool's error if the pool has an error.
+     # This is important as we don't want the caller to keep adding work,
+     # if the pool is inoperable.
+     def post(&) = raise NotImplementedError
+
+     # Wait for all work to finish.
+     # Re-raise the first exception for any pool error.
+     def join = raise NotImplementedError
+   end
+
+   # Fake +Pool+ that does work in the calling thread,
+   # but behaves like a threaded pool (ie, tasks do not raise).
+   class SerialPool < Pool
+     def post
+       raise @exception if @exception
+       begin
+         yield
+       rescue StandardError => e
+         @exception = e
+       end
+     end
+
+     def join
+       raise @exception if @exception
+     end
+   end
+
+   # Pool that does work across a given number of threads.
+   # +queue_size+ is how many items can be in the queue before +post+ blocks.
+   # +threads+ defaults to +queue_size+, allowing at most +queue_size+ concurrent work,
+   # which fits the idea of a parallelized pool well.
+   #
+   # If you want the calling thread to queue up a bunch of work ahead of time,
+   # you can use a +Concurrent::ThreadPoolExecutor+. This pool will not allow the enqueing of more work
+   # while the queue is full.
+   class ParallelizedPool < Pool
+     def initialize(queue_size, timeout: nil, threads: nil)
+       super()
+       threads ||= queue_size
+       @timeout = timeout
+       @threads = (1..threads).map do
+         Thread.new do
+           loop { break unless self.do_work }
+         end
+       end
+       @queue = Thread::SizedQueue.new(queue_size)
+       @exception = nil
+     end
+
+     protected def do_work
+       task = @queue.pop
+       return false if task.nil?
+       if task == STOP
+         @queue.close
+         return false
+       end
+       begin
+         task.call
+       rescue StandardError => e
+         @exception ||= e
+         return false
+       end
+       return true
+     end
+
+     def post(&task)
+       raise @exception if @exception
+       added = @queue.push(task, timeout: @timeout)
+       raise Timeout, "waited #{@timeout} to add to the queue" if added.nil?
+       return true
+     end
+
+     def join
+       @queue.push(STOP)
+       @threads.each(&:join)
+       raise @exception if @exception
+     end
+
+     STOP = :stop
+   end
+ end
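In use, a pool fans one batch of work out across workers and then joins; a rough sketch under assumed names (urls, sync_calendar, and the sizes are placeholders, not from the package):

  pool = Webhookdb::Concurrent::ParallelizedPool.new(4, timeout: 30)
  urls.each do |url|
    # post blocks while the queue is full, and re-raises the first task error
    # so the caller stops feeding an inoperable pool.
    pool.post { sync_calendar(url) }
  end
  pool.join # waits for the workers, then re-raises any stored task error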
data/lib/webhookdb/connection_cache.rb
@@ -45,13 +45,16 @@ class Webhookdb::ConnectionCache
  extend Webhookdb::MethodUtilities
  include Webhookdb::Dbutil

- class ReentranceError < StandardError; end
+ class ReentranceError < Webhookdb::ProgrammingError; end

  configurable(:connection_cache) do
  # If this many seconds has elapsed since the last connecton was borrowed,
  # prune connections with no pending borrows.
  setting :prune_interval, 120

+ # If a connection hasn't been used in this long, validate it before reusing it.
+ setting :idle_timeout, 20.minutes
+
  # Seconds for the :fast timeout option.
  setting :timeout_fast, 30
  # Seconds for the :slow timeout option.
@@ -83,6 +86,25 @@ class Webhookdb::ConnectionCache
  @last_pruned_at = Time.now
  end

+ Available = Struct.new(:connection, :at) do
+   delegate :disconnect, to: :connection
+
+   # Return +connection+ if it has not been idle long enough,
+   # or if it has been idle, then validate it (SELECT 1), and return +connection+
+   # if it's valid, or +nil+ if the database disconnected it.
+   def validated_connection
+     needs_validation_at = self.at + Webhookdb::ConnectionCache.idle_timeout
+     return self.connection if needs_validation_at > Time.now
+     begin
+       self.connection << "SELECT 1"
+       return self.connection
+     rescue Sequel::DatabaseDisconnectError
+       self.connection.disconnect
+       return nil
+     end
+   end
+ end
+
  # Connect to the database at the given URL.
  # borrow is not re-entrant, so if the current thread already owns a connection
  # to the given url, raise a ReentrantError.
@@ -111,23 +133,48 @@ class Webhookdb::ConnectionCache
  raise ReentranceError,
  "ConnectionCache#borrow is not re-entrant for the same database since the connection has stateful config"
  end
- conn = db_loans[:available].pop || take_conn(url, single_threaded: true, extensions: [:pg_json, :pg_streaming])
+ if (available = db_loans[:available].pop)
+   # If the connection doesn't validate, it won't be in :available at this point, so don't worry about it.
+   conn = available.validated_connection
+ end
+ conn ||= take_conn(url, single_threaded: true, extensions: [:pg_json, :pg_streaming])
  db_loans[:loaned][t] = conn
  end
- conn << "SET statement_timeout TO #{timeout * 1000}" if timeout.present?
- conn << "BEGIN;" if transaction
+ trash_conn = false
  begin
- result = yield conn
- conn << "COMMIT;" if transaction
- rescue Sequel::DatabaseError
- conn << "ROLLBACK;" if transaction
+ # All database operations need global handling to ensure property pool management.
+ conn << "SET statement_timeout TO #{timeout * 1000}" if timeout.present?
+ conn << "BEGIN;" if transaction
+ begin
+   result = yield conn
+   conn << "COMMIT;" if transaction
+ rescue Sequel::DatabaseError => e
+   # Roll back on any database error; but if we're disconnected, don't bother
+   # since we know the rollback won't reach the database.
+   conn << "ROLLBACK;" if transaction && !e.is_a?(Sequel::DatabaseDisconnectError)
+   raise
+ end
+ rescue Sequel::DatabaseDisconnectError
+ # If we're disconnected, trash this connection rather than re-adding it back to the pool.
+ trash_conn = true
  raise
  ensure
- conn << "SET statement_timeout TO 0" if timeout.present?
+ reraise = nil
+ if timeout.present?
+   begin
+     # If the timeout fails for whatever reason, assume the connection is toast
+     # and don't return it to the pool.
+     conn << "SET statement_timeout TO 0"
+   rescue Sequel::DatabaseError => e
+     reraise = e
+     trash_conn = true
+   end
+ end
  @mutex.synchronize do
  @dbs_for_urls[url][:loaned].delete(t)
- @dbs_for_urls[url][:available] << conn
+ @dbs_for_urls[url][:available] << Available.new(conn, Time.now) unless trash_conn
  end
+ raise reraise if reraise
  end
  self.prune(url) if now > self.next_prune_at
  return result
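To make the idle-validation behavior concrete: a pooled connection that has sat unused past idle_timeout is probed with SELECT 1 before being handed out again. A rough sketch (conn stands in for an open Sequel connection; the timestamp is arbitrary):

  # An entry idle for ~25 minutes, past the 20-minute default idle_timeout.
  entry = Webhookdb::ConnectionCache::Available.new(conn, Time.now - (25 * 60))
  reusable = entry.validated_connection # runs "SELECT 1"; nil if the database dropped it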
data/lib/webhookdb/console.rb
@@ -5,7 +5,7 @@ require "webhookdb"
  module Webhookdb::Console
  extend Webhookdb::MethodUtilities

- class Error < StandardError; end
+ class Error < Webhookdb::WebhookdbError; end

  class UnsafeOperation < Error; end

data/lib/webhookdb/customer/reset_code.rb
@@ -6,7 +6,7 @@ require "webhookdb/postgres"
  require "webhookdb/customer"

  class Webhookdb::Customer::ResetCode < Webhookdb::Postgres::Model(:customer_reset_codes)
- class Unusable < StandardError; end
+ class Unusable < Webhookdb::WebhookdbError; end

  TOKEN_LENGTH = 6

data/lib/webhookdb/customer.rb
@@ -10,14 +10,15 @@ require "webhookdb/demo_mode"
  class Webhookdb::Customer < Webhookdb::Postgres::Model(:customers)
  extend Webhookdb::MethodUtilities
  include Appydays::Configurable
+ include Webhookdb::Admin::Linked

- class InvalidPassword < StandardError; end
- class SignupDisabled < StandardError; end
+ class InvalidPassword < Webhookdb::WebhookdbError; end
+ class SignupDisabled < Webhookdb::WebhookdbError; end

  configurable(:customer) do
- setting :signup_email_allowlist, ["*"], convert: ->(s) { s.split }
+ setting :signup_email_allowlist, ["*"], convert: lambda(&:split)
  setting :skip_authentication, false
- setting :skip_authentication_allowlist, [], convert: ->(s) { s.split }
+ setting :skip_authentication_allowlist, [], convert: lambda(&:split)
  end

  # The bcrypt hash cost. Changing this would invalidate all passwords!
data/lib/webhookdb/database_document.rb
@@ -10,7 +10,7 @@ class Webhookdb::DatabaseDocument < Webhookdb::Postgres::Model(:database_documen
  include Appydays::Configurable
  configurable(:database_document) do
  setting :skip_authentication, false
- setting :skip_authentication_allowlist, [], convert: ->(s) { s.split }
+ setting :skip_authentication_allowlist, [], convert: lambda(&:split)
  end

  plugin :column_encryption do |enc|
data/lib/webhookdb/db_adapter/default_sql.rb
@@ -8,20 +8,7 @@ module Webhookdb::DBAdapter::DefaultSql
  return s
  end

- def create_table_sql(table, columns, if_not_exists: false)
-   createtable = +"CREATE TABLE "
-   createtable << "IF NOT EXISTS " if if_not_exists
-   createtable << self.qualify_table(table)
-   lines = ["#{createtable} ("]
-   columns[0...-1]&.each { |c| lines << " #{self.column_create_sql(c)}," }
-   lines << " #{self.column_create_sql(columns.last)}"
-   lines << ")"
-   return lines.join("\n")
- end
-
- def identifier_quote_char
-   raise NotImplementedError
- end
+ def identifier_quote_char = raise NotImplementedError

  # We write our own escaper because we want to only escape what's needed;
  # otherwise we want to avoid quoting identifiers.
data/lib/webhookdb/db_adapter/partition.rb
@@ -0,0 +1,14 @@
+ # frozen_string_literal: true
+
+ class Webhookdb::DBAdapter::Partition < Webhookdb::TypedStruct
+   attr_reader :parent_table, :partition_name, :suffix
+
+   def initialize(**kwargs)
+     super
+     self.typecheck!(:parent_table, Webhookdb::DBAdapter::Table)
+     self.typecheck!(:partition_name, Symbol)
+     self.typecheck!(:suffix, Symbol)
+   end
+
+   def partition_table = Webhookdb::DBAdapter::Table.new(name: self.partition_name, schema: self.parent_table.schema)
+ end
data/lib/webhookdb/db_adapter/partitioning.rb
@@ -0,0 +1,8 @@
+ # frozen_string_literal: true
+
+ class Webhookdb::DBAdapter::Partitioning < Webhookdb::TypedStruct
+   HASH = :hash
+   RANGE = :range
+
+   attr_reader :by, :column
+ end
data/lib/webhookdb/db_adapter/pg.rb
@@ -11,9 +11,7 @@ class Webhookdb::DBAdapter::PG < Webhookdb::DBAdapter
  VERIFY_TIMEOUT = 2
  VERIFY_STATEMENT = "SELECT 1"

- def identifier_quote_char
-   return '"'
- end
+ def identifier_quote_char = '"'

  def create_index_sql(index, concurrently:)
  tgts = index.targets.map { |c| self.escape_identifier(c.name) }.join(", ")
@@ -26,7 +24,72 @@ class Webhookdb::DBAdapter::PG < Webhookdb::DBAdapter
  return "CREATE#{uniq} INDEX#{concurrent} IF NOT EXISTS #{idxname} ON #{tblname} (#{tgts})#{where}"
  end

- def column_create_sql(column)
+ def create_index_sqls(index, concurrently:, partitions: [])
+   return super if partitions.empty?
+   result = []
+   result << self.create_index_sql(index, concurrently: false).gsub(" ON ", " ON ONLY ")
+   partitions.each do |partition|
+     partition_idx = index.change(table: partition.partition_table, name: "#{index.name}#{partition.suffix}")
+     result << self.create_index_sql(partition_idx, concurrently:)
+     result << "ALTER INDEX #{index.name} ATTACH PARTITION #{partition_idx.name}"
+   end
+   return result
+ end
+
+ def create_table_sql(table, columns, if_not_exists: false, partition: nil)
+   columns = columns.to_a
+   createtable = +"CREATE TABLE "
+   createtable << "IF NOT EXISTS " if if_not_exists
+   createtable << self.qualify_table(table)
+
+   partitioned_pks = []
+   partitioned_uniques = []
+   if partition
+     # We cannot use PRIMARY KEY or UNIQUE when partitioning,
+     # so set those columns as if they're not
+     columns.each_with_index do |c, i|
+       if c.pk?
+         # Set the type to the serial type as if it's a normal PK
+         type = case c.type
+           when BIGINT
+             :bigserial
+           when INTEGER
+             :serial
+           else
+             c.type
+         end
+         columns[i] = c.change(pk: false, type:)
+         partitioned_pks << c
+       elsif c.unique?
+         columns[i] = c.change(unique: false)
+         partitioned_uniques << c
+       end
+     end
+   end
+   tbl_lines = columns.map { |c| self.create_column_sql(c) }
+   tbl_lines.concat(partitioned_pks.map do |c|
+     pkcols = [partition.column, c.name].uniq.join(", ")
+     "PRIMARY KEY (#{pkcols})"
+   end)
+   tbl_lines.concat(partitioned_uniques.map { |c| "UNIQUE (#{partition.column}, #{c.name})" })
+   lines = ["#{createtable} ("]
+   lines << (" " + tbl_lines.join(",\n "))
+   lines << ")"
+   if partition
+     m = case partition.by
+       when Webhookdb::DBAdapter::Partitioning::HASH
+         "HASH"
+       when Webhookdb::DBAdapter::Partitioning::RANGE
+         "RANGE"
+       else
+         raise ArgumentError, "unknown partition method: #{partition.by}"
+     end
+     lines << "PARTITION BY #{m} (#{partition.column})"
+   end
+   return lines.join("\n")
+ end
+
+ def create_column_sql(column)
  modifiers = +""
  coltype = COLTYPE_MAP.fetch(column.type)
  if column.pk?
@@ -42,8 +105,15 @@ class Webhookdb::DBAdapter::PG < Webhookdb::DBAdapter
  return "#{colname} #{coltype}#{modifiers}"
  end

+ def create_hash_partition_sql(table, partition_count, remainder)
+   tbl = self.qualify_table(table)
+   s = "CREATE TABLE #{tbl}_#{remainder} PARTITION OF #{tbl} " \
+     "FOR VALUES WITH (MODULUS #{partition_count}, REMAINDER #{remainder})"
+   return s
+ end
+
  def add_column_sql(table, column, if_not_exists: false)
- c = self.column_create_sql(column)
+ c = self.create_column_sql(column)
  ifne = if_not_exists ? " IF NOT EXISTS" : ""
  return "ALTER TABLE #{self.qualify_table(table)} ADD COLUMN#{ifne} #{c}"
  end
@@ -92,5 +162,7 @@ class Webhookdb::DBAdapter::PG < Webhookdb::DBAdapter
  TEXT_ARRAY => "text[]",
  TIMESTAMP => "timestamptz",
  UUID => "uuid",
+ :serial => "serial",
+ :bigserial => "bigserial",
  }.freeze
  end
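Taken together, the partition-aware helpers generate the DDL for a hash-partitioned replication table. A rough illustration of the output (adapter construction, table, and column names are assumptions for the example, not from the package):

  adapter = Webhookdb::DBAdapter::PG.new
  table = Webhookdb::DBAdapter::Table.new(name: :example_event_v1)
  partitioning = Webhookdb::DBAdapter::Partitioning.new(
    by: Webhookdb::DBAdapter::Partitioning::HASH, column: :example_hash_col,
  )
  # create_table_sql appends "PARTITION BY HASH (example_hash_col)" to the CREATE TABLE,
  # and create_hash_partition_sql then emits, for partition 0 of 4, roughly:
  #   CREATE TABLE example_event_v1_0 PARTITION OF example_event_v1
  #   FOR VALUES WITH (MODULUS 4, REMAINDER 0)
  adapter.create_hash_partition_sql(table, 4, 0)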
data/lib/webhookdb/db_adapter/snowflake.rb
@@ -28,10 +28,21 @@ class Webhookdb::DBAdapter::Snowflake < Webhookdb::DBAdapter
  end

  def create_index_sql(*)
- raise NotImplementedError, "Snowflake does not support indices"
+ raise Webhookdb::InvalidPrecondition, "Snowflake does not support indices"
  end

- def column_create_sql(column)
+ def create_table_sql(table, columns, if_not_exists: false, **)
+   createtable = +"CREATE TABLE "
+   createtable << "IF NOT EXISTS " if if_not_exists
+   createtable << self.qualify_table(table)
+   lines = ["#{createtable} ("]
+   columns[0...-1]&.each { |c| lines << " #{self.create_column_sql(c)}," }
+   lines << " #{self.create_column_sql(columns.last)}"
+   lines << ")"
+   return lines.join("\n")
+ end
+
+ def create_column_sql(column)
  modifiers = +""
  if column.unique?
  modifiers << " UNIQUE NOT NULL"
@@ -44,7 +55,7 @@ class Webhookdb::DBAdapter::Snowflake < Webhookdb::DBAdapter
  end

  def add_column_sql(table, column, if_not_exists: false)
- c = self.column_create_sql(column)
+ c = self.create_column_sql(column)
  # Snowflake has no 'ADD COLUMN IF NOT EXISTS' so we need to query the long way around
  add_sql = "ALTER TABLE #{self.qualify_table(table)} ADD COLUMN #{c}"
  return add_sql unless if_not_exists
@@ -118,9 +129,7 @@ class Webhookdb::DBAdapter::Snowflake < Webhookdb::DBAdapter
  conn.execute(statement)
  end

- def identifier_quote_char
-   return ""
- end
+ def identifier_quote_char = ""

  COLTYPE_MAP = {
  BIGINT => "bigint",
data/lib/webhookdb/db_adapter.rb
@@ -2,8 +2,10 @@

  class Webhookdb::DBAdapter
  require "webhookdb/db_adapter/column_types"
+ require "webhookdb/db_adapter/partition"
+ require "webhookdb/db_adapter/partitioning"

- class UnsupportedAdapter < StandardError; end
+ class UnsupportedAdapter < Webhookdb::ProgrammingError; end

  VALID_IDENTIFIER = /^[a-zA-Z][a-zA-Z\d_ ]*$/
  INVALID_IDENTIFIER_PROMPT =
@@ -149,21 +151,41 @@ class Webhookdb::DBAdapter
  raise NotImplementedError
  end

+ # Return the CREATE TABLE sql to create table with columns.
  # @param [Table] table
  # @param [Array<Column>] columns
  # @param [Schema] schema
- # @param [Boolean] if_not_exists
+ # @param [TrueClass,FalseClass] if_not_exists If true, use CREATE TABLE IF NOT EXISTS.
+ # @param partition [Webhookdb::DBAdapter::Partitioning,nil] If provided,
+ #   adds a "PARTITION BY HASH (partition_column_name)" to the returned SQL.
  # @return [String]
- def create_table_sql(table, columns, schema: nil, if_not_exists: false)
+ def create_table_sql(table, columns, schema: nil, if_not_exists: false, partition: nil)
  raise NotImplementedError
  end

+ # We write our own escaper because we want to only escape what's needed;
+ # otherwise we want to avoid quoting identifiers.
+ def escape_identifier(s) = raise NotImplementedError
+
  # @param [Index] index
  # @return [String]
  def create_index_sql(index, concurrently:)
  raise NotImplementedError
  end

+ # Create indices, including for partitions.
+ # By default, just call create_index_sql and return it in a single-item array.
+ # Override if creating indices while using partitions requires extra logic.
+ # @param partitions [Array<Webhookdb::DBAdapter::Partition>]
+ # @return [Array<String>]
+ def create_index_sqls(index, concurrently:, partitions: [])
+   _ = partitions
+   return [self.create_index_sql(index, concurrently:)]
+ end
+
+ # @param column [Column] The column to create SQL for.
+ def create_column_sql(column) = raise NotImplementedError
+
  # @param [Table] table
  # @param [Column] column
  # @param [Boolean] if_not_exists
data/lib/webhookdb/dbutil.rb
@@ -35,6 +35,7 @@ module Webhookdb::Dbutil
  4
  end)
  setting :pool_timeout, 10
+ setting :pool_class, :timed_queue
  # Set to 'disable' to work around segfault.
  # See https://github.com/ged/ruby-pg/issues/538
  setting :gssencmode, ""
@@ -70,6 +71,7 @@ module Webhookdb::Dbutil
  res[:log_warn_duration] ||= Webhookdb::Dbutil.slow_query_seconds
  res[:max_connections] ||= Webhookdb::Dbutil.max_connections
  res[:pool_timeout] ||= Webhookdb::Dbutil.pool_timeout
+ res[:pool_class] ||= Webhookdb::Dbutil.pool_class
  res[:driver_options] = {}
  (res[:driver_options][:gssencmode] = Webhookdb::Dbutil.gssencmode) if Webhookdb::Dbutil.gssencmode.present?
  return res