webhookdb 1.3.1 → 1.4.0

Files changed (63)
  1. checksums.yaml +4 -4
  2. data/admin-dist/assets/{index-6aebf805.js → index-9306dd28.js} +39 -39
  3. data/admin-dist/index.html +1 -1
  4. data/data/messages/templates/errors/generic_backfill.email.liquid +30 -0
  5. data/data/messages/templates/errors/icalendar_fetch.email.liquid +8 -2
  6. data/data/messages/templates/specs/with_fields.email.liquid +6 -0
  7. data/db/migrations/045_system_log.rb +15 -0
  8. data/db/migrations/046_indices.rb +14 -0
  9. data/lib/webhookdb/admin.rb +6 -0
  10. data/lib/webhookdb/admin_api/data_provider.rb +1 -0
  11. data/lib/webhookdb/admin_api/entities.rb +8 -0
  12. data/lib/webhookdb/aggregate_result.rb +1 -1
  13. data/lib/webhookdb/api/helpers.rb +17 -0
  14. data/lib/webhookdb/api/organizations.rb +6 -0
  15. data/lib/webhookdb/api/service_integrations.rb +1 -0
  16. data/lib/webhookdb/connection_cache.rb +29 -3
  17. data/lib/webhookdb/console.rb +1 -1
  18. data/lib/webhookdb/customer/reset_code.rb +1 -1
  19. data/lib/webhookdb/customer.rb +3 -2
  20. data/lib/webhookdb/db_adapter.rb +1 -1
  21. data/lib/webhookdb/dbutil.rb +2 -0
  22. data/lib/webhookdb/errors.rb +34 -0
  23. data/lib/webhookdb/http.rb +1 -1
  24. data/lib/webhookdb/jobs/deprecated_jobs.rb +1 -0
  25. data/lib/webhookdb/jobs/model_event_system_log_tracker.rb +105 -0
  26. data/lib/webhookdb/jobs/monitor_metrics.rb +29 -0
  27. data/lib/webhookdb/jobs/renew_watch_channel.rb +3 -0
  28. data/lib/webhookdb/message/transport.rb +1 -1
  29. data/lib/webhookdb/message.rb +53 -2
  30. data/lib/webhookdb/messages/error_generic_backfill.rb +45 -0
  31. data/lib/webhookdb/messages/error_icalendar_fetch.rb +3 -0
  32. data/lib/webhookdb/messages/specs.rb +16 -0
  33. data/lib/webhookdb/organization/alerting.rb +7 -3
  34. data/lib/webhookdb/organization/database_migration.rb +1 -1
  35. data/lib/webhookdb/organization/db_builder.rb +1 -1
  36. data/lib/webhookdb/organization.rb +14 -1
  37. data/lib/webhookdb/postgres/model.rb +1 -0
  38. data/lib/webhookdb/postgres.rb +2 -1
  39. data/lib/webhookdb/replicator/base.rb +66 -39
  40. data/lib/webhookdb/replicator/column.rb +2 -0
  41. data/lib/webhookdb/replicator/fake.rb +6 -0
  42. data/lib/webhookdb/replicator/front_signalwire_message_channel_app_v1.rb +28 -19
  43. data/lib/webhookdb/replicator/icalendar_calendar_v1.rb +55 -11
  44. data/lib/webhookdb/replicator/intercom_v1_mixin.rb +25 -4
  45. data/lib/webhookdb/replicator/signalwire_message_v1.rb +31 -0
  46. data/lib/webhookdb/replicator/transistor_episode_v1.rb +11 -5
  47. data/lib/webhookdb/replicator/webhook_request.rb +8 -0
  48. data/lib/webhookdb/replicator.rb +2 -2
  49. data/lib/webhookdb/service/view_api.rb +1 -1
  50. data/lib/webhookdb/service.rb +10 -10
  51. data/lib/webhookdb/service_integration.rb +14 -1
  52. data/lib/webhookdb/spec_helpers/shared_examples_for_replicators.rb +153 -64
  53. data/lib/webhookdb/sync_target.rb +7 -5
  54. data/lib/webhookdb/system_log_event.rb +9 -0
  55. data/lib/webhookdb/version.rb +1 -1
  56. data/lib/webhookdb/webhook_subscription.rb +1 -1
  57. data/lib/webhookdb.rb +31 -7
  58. metadata +32 -16
  59. data/lib/webhookdb/jobs/customer_created_notify_internal.rb +0 -22
  60. /data/lib/webhookdb/jobs/{logged_webhook_replay.rb → logged_webhooks_replay.rb} +0 -0
  61. /data/lib/webhookdb/jobs/{logged_webhook_resilient_replay.rb → logged_webhooks_resilient_replay.rb} +0 -0
  62. /data/lib/webhookdb/jobs/{webhook_subscription_delivery_attempt.rb → webhook_subscription_delivery_event.rb} +0 -0
  63. /data/lib/webhookdb/jobs/{webhook_resource_notify_integrations.rb → webhookdb_resource_notify_integrations.rb} +0 -0
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+require "webhookdb/message/template"
+
+class Webhookdb::Messages::ErrorGenericBackfill < Webhookdb::Message::Template
+  def self.fixtured(_recipient)
+    sint = Webhookdb::Fixtures.service_integration.create
+    return self.new(
+      sint,
+      response_status: 422,
+      request_url: "https://whdbtest.signalwire.com/2010-04-01/Accounts/projid/Messages.json",
+      request_method: "POST",
+      response_body: "Unauthorized",
+    )
+  end
+
+  def initialize(service_integration, request_url:, request_method:, response_status:, response_body:)
+    @service_integration = service_integration
+    @request_url = request_url
+    @request_method = request_method
+    @response_status = response_status
+    @response_body = response_body
+    super()
+  end
+
+  def signature
+    # Only alert on the backfill once a day
+    return "msg-#{self.full_template_name}-sint:#{@service_integration.id}"
+  end
+
+  def template_folder = "errors"
+  def template_name = "generic_backfill"
+
+  def liquid_drops
+    return super.merge(
+      friendly_name: @service_integration.replicator.descriptor.resource_name_singular,
+      service_name: @service_integration.service_name,
+      opaque_id: @service_integration.opaque_id,
+      request_method: @request_method,
+      request_url: @request_url,
+      response_status: @response_status,
+      response_body: @response_body,
+    )
+  end
+end
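
ErrorGenericBackfill is the template that replicator backfill code hands to the organization's alerting, as the SignalwireMessageV1 hunk later in this diff does. A minimal sketch of that pattern (the `sint` integration and the request values here are illustrative, not from the release):

    # Sketch: 'sint' stands in for an existing Webhookdb::ServiceIntegration.
    message = Webhookdb::Messages::ErrorGenericBackfill.new(
      sint,
      request_url: "https://api.example.com/v1/things",
      request_method: "GET",
      response_status: 403,
      response_body: "Forbidden",
    )
    # #signature keys the idempotency, so repeated failures collapse into
    # one alert per integration per alerting interval.
    sint.organization.alerting.dispatch_alert(message)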
@@ -36,6 +36,9 @@ class Webhookdb::Messages::ErrorIcalendarFetch < Webhookdb::Message::Template
       response_status: @response_status,
       response_body: @response_body,
       external_calendar_id: @external_calendar_id,
+      webhook_endpoint: @service_integration.replicator.webhook_endpoint,
+      org_name: @service_integration.organization.name,
+      org_key: @service_integration.organization.key,
     )
   end
 end
@@ -27,6 +27,22 @@ module Webhookdb::Messages::Testers
     end
   end
 
+  class WithFields < Base
+    # noinspection RubyInstanceVariableNamingConvention
+    def initialize(a: nil, b: nil, c: nil, d: nil, e: nil)
+      @a = a
+      @b = b
+      @c = c
+      @d = d
+      @e = e
+      super()
+    end
+
+    def liquid_drops
+      return super.merge(a: @a, b: @b, c: @c, d: @d, e: @e)
+    end
+  end
+
   class Nonextant < Base
   end
 
@@ -23,7 +23,10 @@ class Webhookdb::Organization::Alerting
 
   # Dispatch the message template to administrators of the org.
   # @param message_template [Webhookdb::Message::Template]
-  def dispatch_alert(message_template)
+  # @param separate_connection [true,false] If true, send the alert on a separate connection.
+  #   See +Webhookdb::Idempotency+. Defaults to true since this is an alert method and we
+  #   don't want it to error accidentally, if the code is called from an unexpected situation.
+  def dispatch_alert(message_template, separate_connection: true)
     unless message_template.respond_to?(:signature)
       raise Webhookdb::InvalidPrecondition,
         "message template #{message_template.template_name} must define a #signature method, " \
@@ -33,8 +36,9 @@ class Webhookdb::Organization::Alerting
     max_alerts_per_customer_per_day = Webhookdb::Organization::Alerting.max_alerts_per_customer_per_day
     yesterday = Time.now - 24.hours
     self.org.admin_customers.each do |c|
-      idemkey = "orgalert-#{signature}-#{c.id}"
-      Webhookdb::Idempotency.every(Webhookdb::Organization::Alerting.interval).under_key(idemkey) do
+      idem = Webhookdb::Idempotency.every(Webhookdb::Organization::Alerting.interval)
+      idem = idem.using_seperate_connection if separate_connection
+      idem.under_key("orgalert-#{signature}-#{c.id}") do
         sent_last_day = Webhookdb::Message::Delivery.
           where(template: message_template.full_template_name, recipient: c).
           where { created_at > yesterday }.
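
The separate_connection option controls whether the Webhookdb::Idempotency is evaluated on its own database connection (via the gem's own using_seperate_connection), presumably so alerting can run safely even when the caller is inside an open transaction. A rough standalone sketch of the pattern, with an illustrative key and interval:

    idem = Webhookdb::Idempotency.every(1.hour)
    # Evaluate the idempotency on its own connection so it commits
    # independently of whatever transaction the caller may have open.
    idem = idem.using_seperate_connection
    idem.under_key("orgalert-example-template-customer-1") do
      # deliver the alert email here
    end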
@@ -4,7 +4,7 @@ class Webhookdb::Organization::DatabaseMigration < Webhookdb::Postgres::Model(:o
   include Webhookdb::Dbutil
 
   class MigrationInProgress < Webhookdb::DatabaseLocked; end
-  class MigrationAlreadyFinished < StandardError; end
+  class MigrationAlreadyFinished < Webhookdb::WebhookdbError; end
 
   plugin :timestamps
   plugin :text_searchable, terms: [:organization, :started_by]
@@ -22,7 +22,7 @@ class Webhookdb::Organization::DbBuilder
   include Webhookdb::Dbutil
   extend Webhookdb::MethodUtilities
 
-  class IsolatedOperationError < StandardError; end
+  class IsolatedOperationError < Webhookdb::ProgrammingError; end
 
   DATABASE = "database"
   SCHEMA = "schema"
@@ -7,7 +7,9 @@ require "webhookdb/stripe"
 require "webhookdb/jobs/replication_migration"
 
 class Webhookdb::Organization < Webhookdb::Postgres::Model(:organizations)
-  class SchemaMigrationError < StandardError; end
+  include Webhookdb::Admin::Linked
+
+  class SchemaMigrationError < Webhookdb::ProgrammingError; end
 
   plugin :timestamps
   plugin :soft_deletes
@@ -454,6 +456,17 @@ class Webhookdb::Organization < Webhookdb::Postgres::Model(:organizations)
     return self.add_all_membership(opts)
   end
 
+  def close(confirm:)
+    raise Webhookdb::InvalidPrecondition, "confirm must be true to close the org" unless confirm
+    unless self.service_integrations_dataset.empty?
+      msg = "Organization[#{self.key} cannot close with active service integrations"
+      raise Webhookdb::InvalidPrecondition, msg
+    end
+    memberships = self.all_memberships_dataset.all.each(&:destroy)
+    self.destroy
+    return [self, memberships]
+  end
+
   # SUBSCRIPTION PERMISSIONS
 
   def active_subscription?
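
Organization#close is deliberately hard to call by accident: it requires confirm: true and refuses to run while service integrations exist. A plausible console invocation (the lookup key is hypothetical):

    org = Webhookdb::Organization[key: "acme_corp"]  # hypothetical org lookup
    # Raises Webhookdb::InvalidPrecondition unless confirm is true,
    # or if the org still has service integrations.
    closed_org, removed_memberships = org.close(confirm: true)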
@@ -7,6 +7,7 @@ require "sequel"
 require "tsort"
 
 require "webhookdb"
+require "webhookdb/admin"
 require "webhookdb/postgres"
 require "webhookdb/postgres/validations"
 require "webhookdb/postgres/model_utilities"
@@ -13,7 +13,7 @@ module Webhookdb::Postgres
   extend Webhookdb::MethodUtilities
   include Appydays::Loggable
 
-  class InTransaction < StandardError; end
+  class InTransaction < Webhookdb::ProgrammingError; end
 
   singleton_attr_accessor :unsafe_skip_transaction_check
   @unsafe_skip_transaction_check = false
@@ -66,6 +66,7 @@ module Webhookdb::Postgres
     "webhookdb/service_integration",
     "webhookdb/subscription",
     "webhookdb/sync_target",
+    "webhookdb/system_log_event",
     "webhookdb/webhook_subscription",
     "webhookdb/webhook_subscription/delivery",
   ].freeze
@@ -651,6 +651,9 @@ for information on how to refresh data.)
   # @param [Webhookdb::Replicator::WebhookRequest] request
   def upsert_webhook(request, **kw)
     return self._upsert_webhook(request, **kw)
+  rescue Amigo::Retry::Error
+    # Do not log this since it's expected/handled by Amigo
+    raise
   rescue StandardError => e
     self.logger.error("upsert_webhook_error", request: request.as_json, error: e)
     raise
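
Letting Amigo::Retry::Error pass through unlogged means a replicator's _upsert_webhook can ask the job system to re-run the webhook later without producing error noise. A hedged sketch, assuming Amigo::Retry::Retry (a subclass of Amigo::Retry::Error in the amigo gem) accepts a retry interval; the class and helper below are hypothetical:

    class Webhookdb::Replicator::ExampleDependentV1 < Webhookdb::Replicator::Base
      def _upsert_webhook(request, **kw)
        parent = self.lookup_parent_row(request)  # hypothetical helper
        # Re-run this webhook in 30 seconds rather than logging an error,
        # e.g. when it arrives before the row it depends on.
        raise Amigo::Retry::Retry, 30 if parent.nil?
        super
      end
    end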
@@ -999,42 +1002,18 @@ for information on how to refresh data.)
     job.update(started_at: Time.now)
 
     backfillers = self._backfillers(**job.criteria.symbolize_keys)
-    if self._parallel_backfill && self._parallel_backfill > 1
-      # Create a dedicated threadpool for these backfillers,
-      # with max parallelism determined by the replicator.
-      pool = Concurrent::FixedThreadPool.new(self._parallel_backfill)
-      # Record any errors that occur, since they won't raise otherwise.
-      # Initialize a sized array to avoid any potential race conditions (though GIL should make it not an issue?).
-      errors = Array.new(backfillers.size)
-      backfillers.each_with_index do |bf, idx|
-        pool.post do
-          bf.backfill(last_backfilled)
-        rescue StandardError => e
-          errors[idx] = e
-        end
-      end
-      # We've enqueued all backfillers; do not accept anymore work.
-      pool.shutdown
-      loop do
-        # We want to stop early if we find an error, so check for errors every 10 seconds.
-        completed = pool.wait_for_termination(10)
-        first_error = errors.find { |e| !e.nil? }
-        if first_error.nil?
-          # No error, and wait_for_termination returned true, so all work is done.
-          break if completed
-          # No error, but work is still going on, so loop again.
-          next
-        end
-        # We have an error; don't run any more backfillers.
-        pool.kill
-        # Wait for all ongoing backfills before raising.
-        pool.wait_for_termination
-        raise first_error
+    begin
+      if self._parallel_backfill && self._parallel_backfill > 1
+        _do_parallel_backfill(backfillers, last_backfilled)
+      else
+        _do_serial_backfill(backfillers, last_backfilled)
       end
-    else
-      backfillers.each do |backfiller|
-        backfiller.backfill(last_backfilled)
+    rescue StandardError => e
+      if self.on_backfill_error(e) == true
+        job.update(finished_at: Time.now)
+        return
       end
+      raise e
     end
 
     sint.update(last_backfilled_at: new_last_backfilled) if job.incremental?
@@ -1042,6 +1021,54 @@ for information on how to refresh data.)
     job.enqueue_children
   end
 
+  protected def _do_parallel_backfill(backfillers, last_backfilled)
+    # Create a dedicated threadpool for these backfillers,
+    # with max parallelism determined by the replicator.
+    pool = Concurrent::FixedThreadPool.new(self._parallel_backfill)
+    # Record any errors that occur, since they won't raise otherwise.
+    # Initialize a sized array to avoid any potential race conditions (though GIL should make it not an issue?).
+    errors = Array.new(backfillers.size)
+    backfillers.each_with_index do |bf, idx|
+      pool.post do
+        bf.backfill(last_backfilled)
+      rescue StandardError => e
+        errors[idx] = e
+      end
+    end
+    # We've enqueued all backfillers; do not accept anymore work.
+    pool.shutdown
+    loop do
+      # We want to stop early if we find an error, so check for errors every 10 seconds.
+      completed = pool.wait_for_termination(10)
+      first_error = errors.find { |e| !e.nil? }
+      if first_error.nil?
+        # No error, and wait_for_termination returned true, so all work is done.
+        break if completed
+        # No error, but work is still going on, so loop again.
+        next
+      end
+      # We have an error; don't run any more backfillers.
+      pool.kill
+      # Wait for all ongoing backfills before raising.
+      pool.wait_for_termination
+      raise first_error
+    end
+  end
+
+  protected def _do_serial_backfill(backfillers, last_backfilled)
+    backfillers.each do |backfiller|
+      backfiller.backfill(last_backfilled)
+    end
+  end
+
+  # Called when the #backfill method errors.
+  # This can do something like dispatch a developer alert.
+  # The handler must raise in order to stop the job from processing-
+  # if nothing is raised, the original exception will be raised instead.
+  # By default, this method noops, so the original exception is raised.
+  # @param e [Exception]
+  def on_backfill_error(e) = nil
+
   # If this replicator supports backfilling in parallel (running multiple backfillers at a time),
   # return the degree of paralellism (or nil if not running in parallel).
   # We leave parallelism up to the replicator, not CPU count, since most work
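
After the refactor, a replicator opts into concurrency by returning a number from _parallel_backfill, and can swallow a known-fatal backfill error by returning true from on_backfill_error (anything falsy re-raises as before). A rough sketch of a subclass using both hooks; the class name and 401 handling are illustrative, not part of the release:

    class Webhookdb::Replicator::ExampleV1 < Webhookdb::Replicator::Base
      # Run up to four backfillers at once through _do_parallel_backfill's thread pool.
      def _parallel_backfill = 4

      def on_backfill_error(e)
        # Alert the org and mark the job finished for expired credentials;
        # any other error propagates out of #backfill as usual.
        return false unless e.is_a?(Webhookdb::Http::Error) && e.status == 401
        message = Webhookdb::Messages::ErrorGenericBackfill.new(
          self.service_integration,
          request_url: e.uri.to_s,
          request_method: e.http_method,
          response_status: e.status,
          response_body: e.body,
        )
        self.service_integration.organization.alerting.dispatch_alert(message)
        true
      end
    end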
@@ -1096,15 +1123,15 @@ for information on how to refresh data.)
 
     def fetch_backfill_page(pagination_token, last_backfilled:)
       return @svc._fetch_backfill_page(pagination_token, last_backfilled:)
-    rescue ::Timeout::Error, ::SocketError
-      self.__retryordie
+    rescue ::Timeout::Error, ::SocketError => e
+      self.__retryordie(e)
     rescue Webhookdb::Http::Error => e
-      self.__retryordie if e.status >= 500
+      self.__retryordie(e) if e.status >= 500
       raise
     end
 
-    def __retryordie
-      raise Amigo::Retry::OrDie.new(self.server_error_retries, self.server_error_backoff)
+    def __retryordie(e)
+      raise Amigo::Retry::OrDie.new(self.server_error_retries, self.server_error_backoff, e)
     end
   end
 
@@ -349,6 +349,8 @@ class Webhookdb::Replicator::Column
 
   # If provided, use this expression as the UPDATE value when adding the column
   # to an existing table.
+  # To explicitly backfill using NULL, use the value +Sequel[nil]+
+  # rather than +nil+.
   # @return [String,Sequel,Sequel::SQL::Expression]
   attr_reader :backfill_expr
 
@@ -409,6 +409,12 @@ class Webhookdb::Replicator::FakeExhaustiveConverter < Webhookdb::Replicator::Fa
       data_key: "my_id",
       backfill_expr: "hi there",
     ),
+    Webhookdb::Replicator::Column.new(
+      :using_null_backfill_expr,
+      TEXT,
+      data_key: "my_id",
+      backfill_expr: Sequel[nil],
+    ),
     Webhookdb::Replicator::Column.new(
       :using_backfill_statement,
       TEXT,
@@ -239,35 +239,44 @@ All of this information can be found in the WebhookDB docs, at https://docs.webh
       @signalwire_sint = replicator.service_integration.depends_on
     end
 
-    def handle_item(item)
-      front_id = item.fetch(:front_message_id)
-      sw_id = item.fetch(:signalwire_sid)
+    def handle_item(db_row)
+      front_id = db_row.fetch(:front_message_id)
+      sw_id = db_row.fetch(:signalwire_sid)
+      # This is sort of gross- we get the db row here, and need to re-update it with certain fields
+      # as a result of the signalwire or front sync. To do that, we need to run the upsert on 'data',
+      # but what's in 'data' is incomplete. So we use the db row to form a more fully complete 'data'.
+      upserting_data = db_row.dup
+      # Remove the columns that don't belong in 'data'
+      upserting_data.delete(:pk)
+      upserting_data.delete(:row_updated_at)
+      # Splat the 'data' column into the row so it all gets put back into 'data'
+      upserting_data.merge!(**upserting_data.delete(:data))
       if (front_id && sw_id) || (!front_id && !sw_id)
-        msg = "row should have a front id OR signalwire id, should not have been inserted, or selected: #{item}"
+        msg = "row should have a front id OR signalwire id, should not have been inserted, or selected: #{db_row}"
         raise Webhookdb::InvariantViolation, msg
       end
-      sender = @replicator.format_phone(item.fetch(:sender))
-      recipient = @replicator.format_phone(item.fetch(:recipient))
-      body = item.fetch(:body)
-      idempotency_key = "fsmca-fims-#{item.fetch(:external_id)}"
+      sender = @replicator.format_phone(db_row.fetch(:sender))
+      recipient = @replicator.format_phone(db_row.fetch(:recipient))
+      body = db_row.fetch(:body)
+      idempotency_key = "fsmca-fims-#{db_row.fetch(:external_id)}"
       idempotency = Webhookdb::Idempotency.once_ever.stored.using_seperate_connection.under_key(idempotency_key)
       if front_id.nil?
-        texted_at = Time.parse(item.fetch(:data).fetch("date_created"))
+        texted_at = Time.parse(db_row.fetch(:data).fetch("date_created"))
         if texted_at < Webhookdb::Front.channel_sync_refreshness_cutoff.seconds.ago
           # Do not sync old rows, just mark them synced
-          item[:front_message_id] = "skipped_due_to_age"
+          upserting_data[:front_message_id] = "skipped_due_to_age"
         else
           # sync the message into Front
           front_response_body = idempotency.execute do
-            self._sync_front_inbound(sender:, texted_at:, item:, body:)
+            self._sync_front_inbound(sender:, texted_at:, db_row:, body:)
           end
-          item[:front_message_id] = front_response_body.fetch("message_uid")
+          upserting_data[:front_message_id] = front_response_body.fetch("message_uid")
         end
       else
-        messaged_at = Time.at(item.fetch(:data).fetch("payload").fetch("created_at"))
+        messaged_at = Time.at(db_row.fetch(:data).fetch("payload").fetch("created_at"))
         if messaged_at < Webhookdb::Front.channel_sync_refreshness_cutoff.seconds.ago
           # Do not sync old rows, just mark them synced
-          item[:signalwire_sid] = "skipped_due_to_age"
+          upserting_data[:signalwire_sid] = "skipped_due_to_age"
         else
           # send the SMS via signalwire
           signalwire_resp = _send_sms(
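
The reshaping at the top of handle_item exists because the upsert consumes a 'data' payload while the backfiller now receives a whole database row. A toy illustration of the same steps on a hypothetical row:

    db_row = {
      pk: 10,
      row_updated_at: Time.now,
      external_id: "sw123",
      front_message_id: nil,
      data: {"date_created" => "2024-01-05T10:00:00Z"},
    }
    upserting_data = db_row.dup
    upserting_data.delete(:pk)              # not part of 'data'
    upserting_data.delete(:row_updated_at)  # recomputed on upsert
    upserting_data.merge!(**upserting_data.delete(:data))
    # upserting_data now holds external_id, front_message_id, and "date_created",
    # i.e. a 'data' payload complete enough to upsert back through the replicator.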
@@ -276,10 +285,10 @@ All of this information can be found in the WebhookDB docs, at https://docs.webh
             to: recipient,
             body:,
           )
-          item[:signalwire_sid] = signalwire_resp.fetch("sid") if signalwire_resp
+          upserting_data[:signalwire_sid] = signalwire_resp.fetch("sid") if signalwire_resp
         end
       end
-      @replicator.upsert_webhook_body(item.deep_stringify_keys)
+      @replicator.upsert_webhook_body(upserting_data.deep_stringify_keys)
     end
 
     def _send_sms(idempotency, from:, to:, body:)
@@ -321,14 +330,14 @@ All of this information can be found in the WebhookDB docs, at https://docs.webh
       return nil
     end
 
-    def _sync_front_inbound(sender:, texted_at:, item:, body:)
+    def _sync_front_inbound(sender:, texted_at:, db_row:, body:)
       body = {
         sender: {handle: sender},
         body: body || "<no body>",
         delivered_at: texted_at.to_i,
         metadata: {
-          external_id: item.fetch(:external_id),
-          external_conversation_id: item.fetch(:external_conversation_id),
+          external_id: db_row.fetch(:external_id),
+          external_conversation_id: db_row.fetch(:external_conversation_id),
         },
       }
       token = JWT.encode(
@@ -74,6 +74,9 @@ The secret to use for signing is:
       col.new(:row_updated_at, TIMESTAMP, index: true, optional: true, defaulter: :now),
       col.new(:last_synced_at, TIMESTAMP, index: true, optional: true),
       col.new(:ics_url, TEXT, converter: col.converter_gsub("^webcal", "https")),
+      col.new(:event_count, INTEGER, optional: true),
+      col.new(:feed_bytes, INTEGER, optional: true),
+      col.new(:last_sync_duration_ms, INTEGER, optional: true),
     ]
   end
 
@@ -166,11 +169,20 @@ The secret to use for signing is:
   def sync_row(row)
     Appydays::Loggable.with_log_tags(icalendar_url: row.fetch(:ics_url)) do
       self.with_advisory_lock(row.fetch(:pk)) do
+        start = Time.now
         now = Time.now
         if (dep = self.find_dependent("icalendar_event_v1"))
-          self._sync_row(row, dep, now:)
+          processor = self._sync_row(row, dep, now:)
+        end
+        self.admin_dataset do |ds|
+          ds.where(pk: row.fetch(:pk)).
+            update(
+              last_synced_at: now,
+              event_count: processor&.upserted_identities&.count,
+              feed_bytes: processor&.read_bytes,
+              last_sync_duration_ms: (Time.now - start).in_milliseconds,
+            )
         end
-        self.admin_dataset { |ds| ds.where(pk: row.fetch(:pk)).update(last_synced_at: now) }
       end
     end
   end
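
The three new columns make feed health queryable after each sync. A hedged Sequel sketch for spotting slow or oversized feeds from a console; the integration lookup and thresholds are illustrative:

    repl = Webhookdb::ServiceIntegration[opaque_id: "svi_example"].replicator  # hypothetical lookup
    repl.admin_dataset do |ds|
      ds.where { (feed_bytes > 10_000_000) | (last_sync_duration_ms > 60_000) }.
        select(:ics_url, :event_count, :feed_bytes, :last_sync_duration_ms).
        order(Sequel.desc(:last_sync_duration_ms)).
        limit(20).
        all
    end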
@@ -204,6 +216,7 @@ The secret to use for signing is:
         row_updated_at: now,
       )
     end
+    return processor
   end
 
   # We get all sorts of strange urls, fix up what we can.
@@ -224,7 +237,20 @@ The secret to use for signing is:
       self.logger.info("icalendar_fetch_not_modified", response_status: 304, request_url:, calendar_external_id:)
       return
     when Down::SSLError
-      self._handle_retryable_down_error!(e, request_url:, calendar_external_id:)
+      # Most SSL errors are transient and can be retried, but some are due to a long-term misconfiguration.
+      # Handle these with an alert, like if we had a 404, which indicates a longer-term issue.
+      is_fatal =
+        # There doesn't appear to be a way to allow unsafe legacy content negotiation on a per-request basis,
+        # it is compiled into OpenSSL (may be wrong about this).
+        e.to_s.include?("unsafe legacy renegotiation disabled") ||
+        # Certificate failures are not transient
+        e.to_s.include?("certificate verify failed")
+      if is_fatal
+        response_status = 0
+        response_body = e.to_s
+      else
+        self._handle_retryable_down_error!(e, request_url:, calendar_external_id:)
+      end
     when Down::TimeoutError, Down::ConnectionError, Down::InvalidUrl, URI::InvalidURIError
       response_status = 0
       response_body = e.to_s
@@ -259,8 +285,9 @@ The secret to use for signing is:
       response_status = nil
     end
     raise e if response_status.nil?
+    loggable_body = response_body && response_body[..256]
     self.logger.warn("icalendar_fetch_error",
-      response_body:, response_status:, request_url:, calendar_external_id:,)
+      response_body: loggable_body, response_status:, request_url:, calendar_external_id:,)
     message = Webhookdb::Messages::ErrorIcalendarFetch.new(
       self.service_integration,
       calendar_external_id,
@@ -269,7 +296,7 @@ The secret to use for signing is:
       request_url:,
       request_method: "GET",
     )
-    self.service_integration.organization.alerting.dispatch_alert(message)
+    self.service_integration.organization.alerting.dispatch_alert(message, separate_connection: false)
   end
 
   def _retryable_client_error?(e, request_url:)
@@ -299,7 +326,7 @@ The secret to use for signing is:
   end
 
   class EventProcessor
-    attr_reader :upserted_identities
+    attr_reader :upserted_identities, :read_bytes
 
     def initialize(io, upserter)
       @io = io
@@ -316,6 +343,9 @@ The secret to use for signing is:
       # We need to keep track of how many events each UID spawns,
       # so we can delete any with a higher count.
       @max_sequence_num_by_uid = {}
+      # Keep track of the bytes we've read from the file.
+      # Never trust Content-Length headers for ical feeds.
+      @read_bytes = 0
     end
 
     def delete_condition
@@ -474,7 +504,11 @@ The secret to use for signing is:
     def _ical_entry_from_ruby(r, entry, is_date)
       return {"v" => r.strftime("%Y%m%d")} if is_date
       return {"v" => r.strftime("%Y%m%dT%H%M%SZ")} if r.zone == "UTC"
-      return {"v" => r.strftime("%Y%m%dT%H%M%S"), "TZID" => entry.fetch("TZID")}
+      tzid = entry["TZID"]
+      return {"v" => r.strftime("%Y%m%dT%H%M%S"), "TZID" => tzid} if tzid
+      value = entry.fetch("v")
+      return {"v" => value} if value.end_with?("Z")
+      raise "Cannot create ical entry from: #{r}, #{entry}, is_date: #{is_date}"
     end
 
     def _icecube_rule_from_ical(ical)
@@ -483,11 +517,20 @@ The secret to use for signing is:
       # IceCube errors, because `day_of_month` isn't valid on a WeeklyRule.
       # In this case, we need to sanitize the string to remove the offending rule piece.
       # There are probably many other offending formats, but we'll add them here as needed.
+      unambiguous_ical = nil
       if ical.include?("FREQ=WEEKLY") && ical.include?("BYMONTHDAY=")
-        ical = ical.gsub(/BYMONTHDAY=[\d,]+/, "")
-        ical.delete_prefix! ";"
-        ical.delete_suffix! ";"
-        ical.squeeze!(";")
+        unambiguous_ical = ical.gsub(/BYMONTHDAY=[\d,]+/, "")
+      elsif ical.include?("FREQ=MONTHLY") && ical.include?("BYYEARDAY=") && ical.include?("BYMONTHDAY=")
+        # Another rule: FREQ=MONTHLY;INTERVAL=3;BYYEARDAY=14;BYMONTHDAY=14
+        # Apple interprets this as monthly on the 14th; rrule.js interprets this as never happening.
+        # 'day_of_year' isn't valid on a MonthlyRule, so delete the BYYEARDAY component.
+        unambiguous_ical = ical.gsub(/BYYEARDAY=[\d,]+/, "")
+      end
+      if unambiguous_ical
+        unambiguous_ical.delete_prefix! ";"
+        unambiguous_ical.delete_suffix! ";"
+        unambiguous_ical.squeeze!(";")
+        ical = unambiguous_ical
       end
       return IceCube::IcalParser.rule_from_ical(ical)
     end
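
Applied to the rule called out in the comment, the sanitization steps behave like this (same gsub/squeeze calls, shown standalone):

    ical = "FREQ=MONTHLY;INTERVAL=3;BYYEARDAY=14;BYMONTHDAY=14"
    unambiguous_ical = ical.gsub(/BYYEARDAY=[\d,]+/, "")
    # => "FREQ=MONTHLY;INTERVAL=3;;BYMONTHDAY=14"
    unambiguous_ical.delete_prefix! ";"
    unambiguous_ical.delete_suffix! ";"
    unambiguous_ical.squeeze!(";")
    unambiguous_ical
    # => "FREQ=MONTHLY;INTERVAL=3;BYMONTHDAY=14", which IceCube parses as a MonthlyRule.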
@@ -507,6 +550,7 @@ The secret to use for signing is:
       vevent_lines = []
       in_vevent = false
       while (line = @io.gets)
+        @read_bytes += line.size
         begin
           line.rstrip!
         rescue Encoding::CompatibilityError
@@ -22,10 +22,9 @@ module Webhookdb::Replicator::IntercomV1Mixin
   # webhook verification, which means that webhooks actually don't require any setup on the integration level. Thus,
   # `supports_webhooks` is false.
   def find_auth_integration
-    # rubocop:disable Naming/MemoizedInstanceVariableName
-    return @auth ||= Webhookdb::Replicator.find_at_root!(self.service_integration,
-                                                         service_name: "intercom_marketplace_root_v1",)
-    # rubocop:enable Naming/MemoizedInstanceVariableName
+    return @find_auth_integration ||= Webhookdb::Replicator.find_at_root!(
+      self.service_integration, service_name: "intercom_marketplace_root_v1",
+    )
   end
 
   def intercom_auth_headers
@@ -93,6 +92,28 @@ module Webhookdb::Replicator::IntercomV1Mixin
         timeout: Webhookdb::Intercom.http_timeout,
       )
     rescue Webhookdb::Http::Error => e
+      is_token_suspended = e.status == 401 &&
+        e.response["errors"].present? &&
+        e.response["errors"].any? { |er| er["code"] == "token_suspended" }
+      if is_token_suspended
+        root_sint = self.find_auth_integration
+        message = "Organization has closed their Intercom workspace and this integration should be deleted. " \
+                  "From a console, run: " \
+                  "Webhookdb::ServiceIntegration[#{root_sint.id}].destroy_self_and_all_dependents"
+        Webhookdb::DeveloperAlert.new(
+          subsystem: "Intercom Workspace Closed Error",
+          emoji: ":hook:",
+          fallback: message,
+          fields: [
+            {title: "Organization", value: root_sint.organization.name, short: true},
+            {title: "Integration ID", value: root_sint.id.to_s, short: true},
+            {title: "Instructions", value: message},
+          ],
+        ).emit
+        # Noop here since there's nothing to do, the developer alert takes care of notifying
+        # so no need to error or log.
+        return [], nil
+      end
       # We are looking to catch the "api plan restricted" error. This is always a 403 and every
       # 403 will be an "api plan restricted" error according to the API documentation. Because we
       # specify the API version in our headers we can expect that this won't change.
@@ -1,6 +1,8 @@
 # frozen_string_literal: true
 
+require "webhookdb/errors"
 require "webhookdb/signalwire"
+require "webhookdb/messages/error_generic_backfill"
 
 class Webhookdb::Replicator::SignalwireMessageV1 < Webhookdb::Replicator::Base
   include Appydays::Loggable
@@ -180,4 +182,33 @@ Press 'Show' next to the newly-created API token, and copy it.)
 
     return messages, data["next_page_uri"]
   end
+
+  def on_backfill_error(be)
+    e = Webhookdb::Errors.find_cause(be) do |ex|
+      next true if ex.is_a?(Webhookdb::Http::Error) && ex.status == 401
+      next true if ex.is_a?(::SocketError)
+    end
+    return unless e
+    if e.is_a?(::SocketError)
+      response_status = 0
+      response_body = e.message
+      request_url = "<unknown>"
+      request_method = "<unknown>"
+    else
+      response_status = e.status
+      response_body = e.body
+      request_url = e.uri.to_s
+      request_method = e.http_method
+    end
+    self.logger.warn("signalwire_backfill_error", response_body:, response_status:, request_url:)
+    message = Webhookdb::Messages::ErrorGenericBackfill.new(
+      self.service_integration,
+      response_status:,
+      response_body:,
+      request_url:,
+      request_method:,
+    )
+    self.service_integration.organization.alerting.dispatch_alert(message)
+    return true
+  end
 end
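
on_backfill_error here leans on Webhookdb::Errors.find_cause, added in webhookdb/errors.rb in this release. Based on the usage above, it appears to walk an exception's #cause chain and return the first exception for which the block is truthy; a hedged sketch of that assumed behavior:

    begin
      begin
        raise SocketError, "getaddrinfo failed"
      rescue SocketError
        # Wrap it, as a backfiller or job layer might.
        raise "backfill failed"
      end
    rescue StandardError => wrapped
      found = Webhookdb::Errors.find_cause(wrapped) do |ex|
        next true if ex.is_a?(::SocketError)
      end
      # Under the assumed cause-walking behavior, found is the original SocketError.
    end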