webhookdb 0.1.0
This diff shows the contents of a publicly released package version as published to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in that registry. In this 0.1.0 diff, every file is newly added.
- checksums.yaml +7 -0
- data/data/messages/layouts/blank.email.liquid +10 -0
- data/data/messages/layouts/minimal.email.liquid +28 -0
- data/data/messages/layouts/standard.email.liquid +28 -0
- data/data/messages/partials/button.liquid +15 -0
- data/data/messages/partials/environment_banner.liquid +9 -0
- data/data/messages/partials/footer.liquid +22 -0
- data/data/messages/partials/greeting.liquid +3 -0
- data/data/messages/partials/logo_header.liquid +18 -0
- data/data/messages/partials/signoff.liquid +1 -0
- data/data/messages/styles/v1.liquid +346 -0
- data/data/messages/templates/errors/icalendar_fetch.email.liquid +29 -0
- data/data/messages/templates/invite.email.liquid +15 -0
- data/data/messages/templates/new_customer.email.liquid +24 -0
- data/data/messages/templates/org_database_migration_finished.email.liquid +7 -0
- data/data/messages/templates/org_database_migration_started.email.liquid +9 -0
- data/data/messages/templates/specs/_field_partial.liquid +1 -0
- data/data/messages/templates/specs/basic.email.liquid +2 -0
- data/data/messages/templates/specs/basic.fake.liquid +1 -0
- data/data/messages/templates/specs/with_field.email.liquid +2 -0
- data/data/messages/templates/specs/with_field.fake.liquid +1 -0
- data/data/messages/templates/specs/with_include.email.liquid +2 -0
- data/data/messages/templates/specs/with_partial.email.liquid +1 -0
- data/data/messages/templates/verification.email.liquid +14 -0
- data/data/messages/templates/verification.sms.liquid +1 -0
- data/data/messages/web/install-customer-login.liquid +48 -0
- data/data/messages/web/install-error.liquid +17 -0
- data/data/messages/web/install-success.liquid +35 -0
- data/data/messages/web/install.liquid +20 -0
- data/data/messages/web/partials/footer.liquid +4 -0
- data/data/messages/web/partials/form_error.liquid +1 -0
- data/data/messages/web/partials/header.liquid +3 -0
- data/data/messages/web/styles.liquid +134 -0
- data/data/windows_tz.txt +461 -0
- data/db/migrations/001_testing_pixies.rb +13 -0
- data/db/migrations/002_initial.rb +132 -0
- data/db/migrations/003_ux_overhaul.rb +20 -0
- data/db/migrations/004_incremental_backfill.rb +9 -0
- data/db/migrations/005_log_webhooks.rb +24 -0
- data/db/migrations/006_generalize_roles.rb +29 -0
- data/db/migrations/007_org_dns.rb +12 -0
- data/db/migrations/008_webhook_subscriptions.rb +19 -0
- data/db/migrations/009_nonunique_stripe_subscription_customer.rb +16 -0
- data/db/migrations/010_drop_integration_soft_delete.rb +14 -0
- data/db/migrations/011_webhook_subscriptions_created_at.rb +10 -0
- data/db/migrations/012_webhook_subscriptions_created_by.rb +9 -0
- data/db/migrations/013_default_org_membership.rb +30 -0
- data/db/migrations/014_webhook_subscription_deliveries.rb +26 -0
- data/db/migrations/015_dependent_integrations.rb +9 -0
- data/db/migrations/016_encrypted_columns.rb +9 -0
- data/db/migrations/017_skip_verification.rb +9 -0
- data/db/migrations/018_sync_targets.rb +25 -0
- data/db/migrations/019_org_schema.rb +9 -0
- data/db/migrations/020_org_database_migrations.rb +25 -0
- data/db/migrations/021_no_default_org_schema.rb +14 -0
- data/db/migrations/022_database_document.rb +15 -0
- data/db/migrations/023_sync_target_schema.rb +9 -0
- data/db/migrations/024_org_semaphore_jobs.rb +9 -0
- data/db/migrations/025_integration_backfill_cursor.rb +9 -0
- data/db/migrations/026_undo_integration_backfill_cursor.rb +9 -0
- data/db/migrations/027_sync_target_http_sync.rb +12 -0
- data/db/migrations/028_logged_webhook_path.rb +24 -0
- data/db/migrations/029_encrypt_columns.rb +97 -0
- data/db/migrations/030_org_sync_target_timeout.rb +9 -0
- data/db/migrations/031_org_max_query_rows.rb +9 -0
- data/db/migrations/032_remove_db_defaults.rb +12 -0
- data/db/migrations/033_backfill_jobs.rb +26 -0
- data/db/migrations/034_backfill_job_criteria.rb +9 -0
- data/db/migrations/035_synchronous_backfill.rb +9 -0
- data/db/migrations/036_oauth.rb +26 -0
- data/db/migrations/037_oauth_used.rb +9 -0
- data/lib/amigo/durable_job.rb +416 -0
- data/lib/pry/clipboard.rb +111 -0
- data/lib/sequel/advisory_lock.rb +65 -0
- data/lib/webhookdb/admin.rb +4 -0
- data/lib/webhookdb/admin_api/auth.rb +36 -0
- data/lib/webhookdb/admin_api/customers.rb +63 -0
- data/lib/webhookdb/admin_api/database_documents.rb +20 -0
- data/lib/webhookdb/admin_api/entities.rb +66 -0
- data/lib/webhookdb/admin_api/message_deliveries.rb +61 -0
- data/lib/webhookdb/admin_api/roles.rb +15 -0
- data/lib/webhookdb/admin_api.rb +34 -0
- data/lib/webhookdb/aggregate_result.rb +63 -0
- data/lib/webhookdb/api/auth.rb +122 -0
- data/lib/webhookdb/api/connstr_auth.rb +36 -0
- data/lib/webhookdb/api/db.rb +188 -0
- data/lib/webhookdb/api/demo.rb +14 -0
- data/lib/webhookdb/api/entities.rb +198 -0
- data/lib/webhookdb/api/helpers.rb +253 -0
- data/lib/webhookdb/api/install.rb +296 -0
- data/lib/webhookdb/api/me.rb +53 -0
- data/lib/webhookdb/api/organizations.rb +254 -0
- data/lib/webhookdb/api/replay.rb +64 -0
- data/lib/webhookdb/api/service_integrations.rb +402 -0
- data/lib/webhookdb/api/services.rb +27 -0
- data/lib/webhookdb/api/stripe.rb +22 -0
- data/lib/webhookdb/api/subscriptions.rb +67 -0
- data/lib/webhookdb/api/sync_targets.rb +232 -0
- data/lib/webhookdb/api/system.rb +37 -0
- data/lib/webhookdb/api/webhook_subscriptions.rb +96 -0
- data/lib/webhookdb/api.rb +92 -0
- data/lib/webhookdb/apps.rb +93 -0
- data/lib/webhookdb/async/audit_logger.rb +38 -0
- data/lib/webhookdb/async/autoscaler.rb +84 -0
- data/lib/webhookdb/async/job.rb +18 -0
- data/lib/webhookdb/async/job_logger.rb +45 -0
- data/lib/webhookdb/async/scheduled_job.rb +18 -0
- data/lib/webhookdb/async.rb +142 -0
- data/lib/webhookdb/aws.rb +98 -0
- data/lib/webhookdb/backfill_job.rb +107 -0
- data/lib/webhookdb/backfiller.rb +107 -0
- data/lib/webhookdb/cloudflare.rb +39 -0
- data/lib/webhookdb/connection_cache.rb +177 -0
- data/lib/webhookdb/console.rb +71 -0
- data/lib/webhookdb/convertkit.rb +14 -0
- data/lib/webhookdb/crypto.rb +66 -0
- data/lib/webhookdb/customer/reset_code.rb +94 -0
- data/lib/webhookdb/customer.rb +347 -0
- data/lib/webhookdb/database_document.rb +72 -0
- data/lib/webhookdb/db_adapter/column_types.rb +37 -0
- data/lib/webhookdb/db_adapter/default_sql.rb +187 -0
- data/lib/webhookdb/db_adapter/pg.rb +96 -0
- data/lib/webhookdb/db_adapter/snowflake.rb +137 -0
- data/lib/webhookdb/db_adapter.rb +208 -0
- data/lib/webhookdb/dbutil.rb +92 -0
- data/lib/webhookdb/demo_mode.rb +100 -0
- data/lib/webhookdb/developer_alert.rb +51 -0
- data/lib/webhookdb/email_octopus.rb +21 -0
- data/lib/webhookdb/enumerable.rb +18 -0
- data/lib/webhookdb/fixtures/backfill_jobs.rb +72 -0
- data/lib/webhookdb/fixtures/customers.rb +65 -0
- data/lib/webhookdb/fixtures/database_documents.rb +27 -0
- data/lib/webhookdb/fixtures/faker.rb +41 -0
- data/lib/webhookdb/fixtures/logged_webhooks.rb +56 -0
- data/lib/webhookdb/fixtures/message_deliveries.rb +59 -0
- data/lib/webhookdb/fixtures/oauth_sessions.rb +24 -0
- data/lib/webhookdb/fixtures/organization_database_migrations.rb +37 -0
- data/lib/webhookdb/fixtures/organization_memberships.rb +54 -0
- data/lib/webhookdb/fixtures/organizations.rb +32 -0
- data/lib/webhookdb/fixtures/reset_codes.rb +23 -0
- data/lib/webhookdb/fixtures/service_integrations.rb +42 -0
- data/lib/webhookdb/fixtures/subscriptions.rb +33 -0
- data/lib/webhookdb/fixtures/sync_targets.rb +32 -0
- data/lib/webhookdb/fixtures/webhook_subscriptions.rb +35 -0
- data/lib/webhookdb/fixtures.rb +15 -0
- data/lib/webhookdb/formatting.rb +56 -0
- data/lib/webhookdb/front.rb +49 -0
- data/lib/webhookdb/github.rb +22 -0
- data/lib/webhookdb/google_calendar.rb +29 -0
- data/lib/webhookdb/heroku.rb +21 -0
- data/lib/webhookdb/http.rb +114 -0
- data/lib/webhookdb/icalendar.rb +17 -0
- data/lib/webhookdb/id.rb +17 -0
- data/lib/webhookdb/idempotency.rb +90 -0
- data/lib/webhookdb/increase.rb +42 -0
- data/lib/webhookdb/intercom.rb +23 -0
- data/lib/webhookdb/jobs/amigo_test_jobs.rb +118 -0
- data/lib/webhookdb/jobs/backfill.rb +32 -0
- data/lib/webhookdb/jobs/create_mirror_table.rb +18 -0
- data/lib/webhookdb/jobs/create_stripe_customer.rb +17 -0
- data/lib/webhookdb/jobs/customer_created_notify_internal.rb +22 -0
- data/lib/webhookdb/jobs/demo_mode_sync_data.rb +19 -0
- data/lib/webhookdb/jobs/deprecated_jobs.rb +19 -0
- data/lib/webhookdb/jobs/developer_alert_handle.rb +14 -0
- data/lib/webhookdb/jobs/durable_job_recheck_poller.rb +17 -0
- data/lib/webhookdb/jobs/emailer.rb +15 -0
- data/lib/webhookdb/jobs/icalendar_enqueue_syncs.rb +25 -0
- data/lib/webhookdb/jobs/icalendar_sync.rb +23 -0
- data/lib/webhookdb/jobs/logged_webhook_replay.rb +17 -0
- data/lib/webhookdb/jobs/logged_webhook_resilient_replay.rb +15 -0
- data/lib/webhookdb/jobs/message_dispatched.rb +16 -0
- data/lib/webhookdb/jobs/organization_database_migration_notify_finished.rb +21 -0
- data/lib/webhookdb/jobs/organization_database_migration_notify_started.rb +21 -0
- data/lib/webhookdb/jobs/organization_database_migration_run.rb +24 -0
- data/lib/webhookdb/jobs/prepare_database_connections.rb +22 -0
- data/lib/webhookdb/jobs/process_webhook.rb +47 -0
- data/lib/webhookdb/jobs/renew_watch_channel.rb +24 -0
- data/lib/webhookdb/jobs/replication_migration.rb +24 -0
- data/lib/webhookdb/jobs/reset_code_create_dispatch.rb +23 -0
- data/lib/webhookdb/jobs/scheduled_backfills.rb +77 -0
- data/lib/webhookdb/jobs/send_invite.rb +15 -0
- data/lib/webhookdb/jobs/send_test_webhook.rb +25 -0
- data/lib/webhookdb/jobs/send_webhook.rb +20 -0
- data/lib/webhookdb/jobs/sync_target_enqueue_scheduled.rb +16 -0
- data/lib/webhookdb/jobs/sync_target_run_sync.rb +38 -0
- data/lib/webhookdb/jobs/trim_logged_webhooks.rb +15 -0
- data/lib/webhookdb/jobs/webhook_resource_notify_integrations.rb +30 -0
- data/lib/webhookdb/jobs/webhook_subscription_delivery_attempt.rb +29 -0
- data/lib/webhookdb/jobs.rb +4 -0
- data/lib/webhookdb/json.rb +113 -0
- data/lib/webhookdb/liquid/expose.rb +27 -0
- data/lib/webhookdb/liquid/filters.rb +16 -0
- data/lib/webhookdb/liquid/liquification.rb +26 -0
- data/lib/webhookdb/liquid/partial.rb +12 -0
- data/lib/webhookdb/logged_webhook/resilient.rb +95 -0
- data/lib/webhookdb/logged_webhook.rb +194 -0
- data/lib/webhookdb/message/body.rb +25 -0
- data/lib/webhookdb/message/delivery.rb +127 -0
- data/lib/webhookdb/message/email_transport.rb +133 -0
- data/lib/webhookdb/message/fake_transport.rb +54 -0
- data/lib/webhookdb/message/liquid_drops.rb +29 -0
- data/lib/webhookdb/message/template.rb +89 -0
- data/lib/webhookdb/message/transport.rb +43 -0
- data/lib/webhookdb/message.rb +150 -0
- data/lib/webhookdb/messages/error_icalendar_fetch.rb +42 -0
- data/lib/webhookdb/messages/invite.rb +23 -0
- data/lib/webhookdb/messages/new_customer.rb +14 -0
- data/lib/webhookdb/messages/org_database_migration_finished.rb +23 -0
- data/lib/webhookdb/messages/org_database_migration_started.rb +24 -0
- data/lib/webhookdb/messages/specs.rb +57 -0
- data/lib/webhookdb/messages/verification.rb +23 -0
- data/lib/webhookdb/method_utilities.rb +82 -0
- data/lib/webhookdb/microsoft_calendar.rb +36 -0
- data/lib/webhookdb/nextpax.rb +14 -0
- data/lib/webhookdb/oauth/front.rb +58 -0
- data/lib/webhookdb/oauth/intercom.rb +58 -0
- data/lib/webhookdb/oauth/session.rb +24 -0
- data/lib/webhookdb/oauth.rb +80 -0
- data/lib/webhookdb/organization/alerting.rb +35 -0
- data/lib/webhookdb/organization/database_migration.rb +151 -0
- data/lib/webhookdb/organization/db_builder.rb +429 -0
- data/lib/webhookdb/organization.rb +506 -0
- data/lib/webhookdb/organization_membership.rb +58 -0
- data/lib/webhookdb/phone_number.rb +38 -0
- data/lib/webhookdb/plaid.rb +23 -0
- data/lib/webhookdb/platform.rb +27 -0
- data/lib/webhookdb/plivo.rb +52 -0
- data/lib/webhookdb/postgres/maintenance.rb +166 -0
- data/lib/webhookdb/postgres/model.rb +82 -0
- data/lib/webhookdb/postgres/model_utilities.rb +382 -0
- data/lib/webhookdb/postgres/testing_pixie.rb +16 -0
- data/lib/webhookdb/postgres/validations.rb +46 -0
- data/lib/webhookdb/postgres.rb +176 -0
- data/lib/webhookdb/postmark.rb +20 -0
- data/lib/webhookdb/redis.rb +35 -0
- data/lib/webhookdb/replicator/atom_single_feed_v1.rb +116 -0
- data/lib/webhookdb/replicator/aws_pricing_v1.rb +488 -0
- data/lib/webhookdb/replicator/base.rb +1185 -0
- data/lib/webhookdb/replicator/column.rb +482 -0
- data/lib/webhookdb/replicator/convertkit_broadcast_v1.rb +69 -0
- data/lib/webhookdb/replicator/convertkit_subscriber_v1.rb +200 -0
- data/lib/webhookdb/replicator/convertkit_tag_v1.rb +66 -0
- data/lib/webhookdb/replicator/convertkit_v1_mixin.rb +65 -0
- data/lib/webhookdb/replicator/docgen.rb +167 -0
- data/lib/webhookdb/replicator/email_octopus_campaign_v1.rb +84 -0
- data/lib/webhookdb/replicator/email_octopus_contact_v1.rb +159 -0
- data/lib/webhookdb/replicator/email_octopus_event_v1.rb +244 -0
- data/lib/webhookdb/replicator/email_octopus_list_v1.rb +101 -0
- data/lib/webhookdb/replicator/fake.rb +453 -0
- data/lib/webhookdb/replicator/front_conversation_v1.rb +45 -0
- data/lib/webhookdb/replicator/front_marketplace_root_v1.rb +55 -0
- data/lib/webhookdb/replicator/front_message_v1.rb +45 -0
- data/lib/webhookdb/replicator/front_v1_mixin.rb +22 -0
- data/lib/webhookdb/replicator/github_issue_comment_v1.rb +58 -0
- data/lib/webhookdb/replicator/github_issue_v1.rb +83 -0
- data/lib/webhookdb/replicator/github_pull_v1.rb +84 -0
- data/lib/webhookdb/replicator/github_release_v1.rb +47 -0
- data/lib/webhookdb/replicator/github_repo_v1_mixin.rb +250 -0
- data/lib/webhookdb/replicator/github_repository_event_v1.rb +45 -0
- data/lib/webhookdb/replicator/icalendar_calendar_v1.rb +465 -0
- data/lib/webhookdb/replicator/icalendar_event_v1.rb +334 -0
- data/lib/webhookdb/replicator/increase_account_number_v1.rb +77 -0
- data/lib/webhookdb/replicator/increase_account_transfer_v1.rb +61 -0
- data/lib/webhookdb/replicator/increase_account_v1.rb +63 -0
- data/lib/webhookdb/replicator/increase_ach_transfer_v1.rb +78 -0
- data/lib/webhookdb/replicator/increase_check_transfer_v1.rb +64 -0
- data/lib/webhookdb/replicator/increase_limit_v1.rb +78 -0
- data/lib/webhookdb/replicator/increase_transaction_v1.rb +74 -0
- data/lib/webhookdb/replicator/increase_v1_mixin.rb +121 -0
- data/lib/webhookdb/replicator/increase_wire_transfer_v1.rb +61 -0
- data/lib/webhookdb/replicator/intercom_contact_v1.rb +36 -0
- data/lib/webhookdb/replicator/intercom_conversation_v1.rb +38 -0
- data/lib/webhookdb/replicator/intercom_marketplace_root_v1.rb +69 -0
- data/lib/webhookdb/replicator/intercom_v1_mixin.rb +105 -0
- data/lib/webhookdb/replicator/oauth_refresh_access_token_mixin.rb +65 -0
- data/lib/webhookdb/replicator/plivo_sms_inbound_v1.rb +102 -0
- data/lib/webhookdb/replicator/postmark_inbound_message_v1.rb +94 -0
- data/lib/webhookdb/replicator/postmark_outbound_message_event_v1.rb +107 -0
- data/lib/webhookdb/replicator/schema_modification.rb +42 -0
- data/lib/webhookdb/replicator/shopify_customer_v1.rb +58 -0
- data/lib/webhookdb/replicator/shopify_order_v1.rb +64 -0
- data/lib/webhookdb/replicator/shopify_v1_mixin.rb +161 -0
- data/lib/webhookdb/replicator/signalwire_message_v1.rb +169 -0
- data/lib/webhookdb/replicator/sponsy_customer_v1.rb +54 -0
- data/lib/webhookdb/replicator/sponsy_placement_v1.rb +34 -0
- data/lib/webhookdb/replicator/sponsy_publication_v1.rb +125 -0
- data/lib/webhookdb/replicator/sponsy_slot_v1.rb +41 -0
- data/lib/webhookdb/replicator/sponsy_status_v1.rb +35 -0
- data/lib/webhookdb/replicator/sponsy_v1_mixin.rb +165 -0
- data/lib/webhookdb/replicator/state_machine_step.rb +69 -0
- data/lib/webhookdb/replicator/stripe_charge_v1.rb +77 -0
- data/lib/webhookdb/replicator/stripe_coupon_v1.rb +62 -0
- data/lib/webhookdb/replicator/stripe_customer_v1.rb +60 -0
- data/lib/webhookdb/replicator/stripe_dispute_v1.rb +77 -0
- data/lib/webhookdb/replicator/stripe_invoice_item_v1.rb +82 -0
- data/lib/webhookdb/replicator/stripe_invoice_v1.rb +116 -0
- data/lib/webhookdb/replicator/stripe_payout_v1.rb +67 -0
- data/lib/webhookdb/replicator/stripe_price_v1.rb +60 -0
- data/lib/webhookdb/replicator/stripe_product_v1.rb +60 -0
- data/lib/webhookdb/replicator/stripe_refund_v1.rb +101 -0
- data/lib/webhookdb/replicator/stripe_subscription_item_v1.rb +56 -0
- data/lib/webhookdb/replicator/stripe_subscription_v1.rb +75 -0
- data/lib/webhookdb/replicator/stripe_v1_mixin.rb +116 -0
- data/lib/webhookdb/replicator/transistor_episode_stats_v1.rb +141 -0
- data/lib/webhookdb/replicator/transistor_episode_v1.rb +169 -0
- data/lib/webhookdb/replicator/transistor_show_v1.rb +68 -0
- data/lib/webhookdb/replicator/transistor_v1_mixin.rb +65 -0
- data/lib/webhookdb/replicator/twilio_sms_v1.rb +156 -0
- data/lib/webhookdb/replicator/webhook_request.rb +5 -0
- data/lib/webhookdb/replicator/webhookdb_customer_v1.rb +74 -0
- data/lib/webhookdb/replicator.rb +224 -0
- data/lib/webhookdb/role.rb +42 -0
- data/lib/webhookdb/sentry.rb +35 -0
- data/lib/webhookdb/service/auth.rb +138 -0
- data/lib/webhookdb/service/collection.rb +91 -0
- data/lib/webhookdb/service/entities.rb +97 -0
- data/lib/webhookdb/service/helpers.rb +270 -0
- data/lib/webhookdb/service/middleware.rb +124 -0
- data/lib/webhookdb/service/types.rb +30 -0
- data/lib/webhookdb/service/validators.rb +32 -0
- data/lib/webhookdb/service/view_api.rb +63 -0
- data/lib/webhookdb/service.rb +219 -0
- data/lib/webhookdb/service_integration.rb +332 -0
- data/lib/webhookdb/shopify.rb +35 -0
- data/lib/webhookdb/signalwire.rb +13 -0
- data/lib/webhookdb/slack.rb +68 -0
- data/lib/webhookdb/snowflake.rb +90 -0
- data/lib/webhookdb/spec_helpers/async.rb +122 -0
- data/lib/webhookdb/spec_helpers/citest.rb +88 -0
- data/lib/webhookdb/spec_helpers/integration.rb +121 -0
- data/lib/webhookdb/spec_helpers/message.rb +41 -0
- data/lib/webhookdb/spec_helpers/postgres.rb +220 -0
- data/lib/webhookdb/spec_helpers/service.rb +432 -0
- data/lib/webhookdb/spec_helpers/shared_examples_for_columns.rb +56 -0
- data/lib/webhookdb/spec_helpers/shared_examples_for_replicators.rb +915 -0
- data/lib/webhookdb/spec_helpers/whdb.rb +139 -0
- data/lib/webhookdb/spec_helpers.rb +63 -0
- data/lib/webhookdb/sponsy.rb +14 -0
- data/lib/webhookdb/stripe.rb +37 -0
- data/lib/webhookdb/subscription.rb +203 -0
- data/lib/webhookdb/sync_target.rb +491 -0
- data/lib/webhookdb/tasks/admin.rb +49 -0
- data/lib/webhookdb/tasks/annotate.rb +36 -0
- data/lib/webhookdb/tasks/db.rb +82 -0
- data/lib/webhookdb/tasks/docs.rb +42 -0
- data/lib/webhookdb/tasks/fixture.rb +35 -0
- data/lib/webhookdb/tasks/message.rb +50 -0
- data/lib/webhookdb/tasks/regress.rb +87 -0
- data/lib/webhookdb/tasks/release.rb +27 -0
- data/lib/webhookdb/tasks/sidekiq.rb +23 -0
- data/lib/webhookdb/tasks/specs.rb +64 -0
- data/lib/webhookdb/theranest.rb +15 -0
- data/lib/webhookdb/transistor.rb +13 -0
- data/lib/webhookdb/twilio.rb +13 -0
- data/lib/webhookdb/typed_struct.rb +44 -0
- data/lib/webhookdb/version.rb +5 -0
- data/lib/webhookdb/webhook_response.rb +50 -0
- data/lib/webhookdb/webhook_subscription/delivery.rb +82 -0
- data/lib/webhookdb/webhook_subscription.rb +226 -0
- data/lib/webhookdb/windows_tz.rb +32 -0
- data/lib/webhookdb/xml.rb +92 -0
- data/lib/webhookdb.rb +224 -0
- data/lib/webterm/apps.rb +45 -0
- metadata +1129 -0
data/lib/amigo/durable_job.rb
@@ -0,0 +1,416 @@
+# frozen_string_literal: true
+
+require "appydays/configurable"
+require "json"
+require "sequel"
+require "sidekiq"
+require "sidekiq/api"
+require "sidekiq/component"
+
+module Amigo
+  # This is a placeholder until it's migrated to Amigo proper
+end
+
+# Durable jobs keep track of the job in a database, similar to DelayedJob,
+# so that if Sidekiq loses the job (because Redis crashes, or the worker crashes),
+# it will be sent to the Dead Set from the database.
+#
+# We send 'missing' jobs to the Dead Set, rather than re-enqueue them,
+# because jobs may be deleted out of Redis manually,
+# so any re-enqueues of a missing job must also be done manually.
+#
+# An alternative to durable jobs is super_fetch using something like Redis' LMOVE;
+# however the only off-the-shelf package we could find (from Gitlab) did not work well.
+# We could implement our own LMOVE based fetch strategy,
+# but using PG was a lot simpler to get going (selection is easier, for example, than managing Redis sorted sets).
+# Additionally, using PG gives us redundancy against Redis outages-
+# it allows us to enqueue jobs even if Redis is down, for example.
+#
+# The way Durable Jobs works at a high level is:
+#
+# - Connections to a series of database servers are held.
+#   These servers act as the 'durable stores' for Redis.
+# - In client middleware,
+#   a row is written into the first available durable store database.
+#   Every row records when it should be considered "dead";
+#   that is, after this time,
+#   DurableJob moves this job to the Dead Set, as explained below.
+#   This is known as the "assume dead at" time; the difference between when a job is enqueued/runs,
+#   and when it can be assumed dead, is known as the "heartbeat extension".
+# - Whenever the job runs, server middleware takes a lock on the durable store row,
+#   and updates assume_dead_at to be "now plus heartbeat_extension".
+#   This is true when the job runs the first time, but also during any retry.
+# - Any long-running jobs should be sure to call DurableJob.heartbeat
+#   to extend the assume_dead_at, so we don't attempt to enqueue another instance
+#   of the job (actually we probably won't end up with duplicate jobs,
+#   but it's a good optimization).
+# - If the job succeeds, the row is deleted from the durable store.
+# - If the job errors, assume_dead_at is updated, and the row remains in the durable store.
+#
+# That is the behavior of the durable jobs themselves.
+# The other key piece here is a poller. The poller must use a separate periodic mechanism,
+# like sidekiq-cron or whatever. Some number of minutes, `Amigo::DurableJob.poll_jobs` must be called.
+# `poll_jobs` does the following at a high level (see the source for more details):
+#
+# - Look through each durable store database.
+# - For each job with an assume_dead_at in the past, we need to check whether we should kill it.
+# - If the job is currently processing in a queue, we no-op. We can't do anything about backed-up queues.
+# - If the job is currently in the retry set, we update the assume_dead_at of the row
+#   so it's after the time the job will be retried. That way we won't try and process
+#   the job again until after it's been retried.
+# - If the job is in the DeadSet, we delete the row since it's already dead.
+# - If the job cannot be found in any of these places, we mark it 'missing'.
+#   It may be missing because it's processing; we'll find out on the next run.
+# - If the job still cannot be found, it's added to the DeadSet.
+#
+# Note that DurableJob is subject to race conditions,
+# and a job can be enqueued and then run multiple times.
+# This is an expected part of Sidekiq- your jobs should already
+# be idempotent so this race should not be an issue.
+# There are (hopefully) no situations where the race condition
+# will result in jobs being lost, just processed multiple times.
+#
+module Amigo::DurableJob
+  include Appydays::Configurable
+  extend Sidekiq::Component
+
+  def self.included(cls)
+    cls.extend ClassMethods
+  end
+
+  class << self
+    attr_accessor :storage_database_urls, :storage_databases, :table_fqn, :failure_notifier
+
+    # Set a field on the underlying storage databases,
+    # such as :logger or :sql_log_level.
+    # This value is set immediately on all storage databases,
+    # and persists across resets.
+    # NOTE: Some fields, like max_connections, can only be set on connect.
+    # Use replace_database_settings for this instead.
+    def set_database_setting(key, value)
+      @database_settings ||= {}
+      @database_settings[key] = value
+      self.storage_databases.each { |db| db.send(:"#{key}=", value) }
+    end
+
+    # Reconnect to all databases using the given settings.
+    # Settings persist across resets.
+    def replace_database_settings(new_settings)
+      @database_settings = new_settings
+      self.reconnect
+    end
+
+    def reconnect
+      self.storage_databases&.each(&:disconnect)
+      settings = @database_settings || {}
+      self.storage_databases = self.storage_database_urls.map do |url|
+        Sequel.connect(
+          url,
+          keep_reference: false,
+          test: false,
+          **settings,
+        )
+      end
+    end
+
+    def ensure_jobs_tables(drop: false)
+      self.storage_databases.map do |db|
+        db.drop_table?(self.table_fqn) if drop
+        db.create_table(self.table_fqn, if_not_exists: true) do
+          # Acts as primary key
+          text :job_id, null: false, unique: true
+          # Class name, pulled out of the item json for convenience
+          text :job_class, null: false
+          # Smaller footprint than jsonb, and we don't need to use json operators
+          text :job_item_json, null: false
+          # We must store this so we know where to look for the job
+          # NOTE: If a job were to change queues, this *may* cause an issue.
+          # But it is hard to test, and we're unlikely to see it, AND in the worst case
+          # it'd be a duplicate job, none of which seem critical to solve for now.
+          text :queue, null: false
+          timestamptz :inserted_at, null: false, default: Sequel.function(:now)
+          # Set this so we know when we should check for a dead worker
+          # This must always be set, since if the worker to get the job segfaults
+          # after taking the job, but before locking it, it will sit empty.
+          timestamptz :assume_dead_at, null: false
+          # We may need to index this, but since it's a write-heavy table,
+          # that should not get so big, let's leave it out for now.
+          # index :assume_dead_at
+
+          # Worker performing the job
+          text :locked_by
+          # Set when a worker takes a job
+          timestamptz :locked_at
+          # The first time we cannot find the job, we report it missing rather than treating it as gone.
+          timestamptz :missing_at
+        end
+      end
+    end
+
+    def storage_datasets
+      return self.storage_databases.map { |db| db[self.table_fqn] }
+    end
+
+    def insert_job(job_class, job_id, item, queue: "default", more: {})
+      raise Webhookdb::InvalidPrecondition, "not enabled" unless self.enabled?
+      item = item.dup
+      item["class"] = job_class.to_s
+      job_run_at = item.key?("at") ? Time.at(item["at"]) : Time.now
+      assume_dead_at = job_run_at + job_class.heartbeat_extension
+      inserted = self.storage_datasets.any? do |ds|
+        job_item_json = item.to_json
+        begin
+          ds.
+            insert_conflict(
+              target: :job_id,
+              # Update the job item JSON with the latest details.
+              # This is helpful if the job goes away.
+              update: {assume_dead_at:, job_item_json:},
+            ).insert(
+              job_id:,
+              job_class: job_class.to_s,
+              job_item_json:,
+              assume_dead_at:,
+              # We cannot use get_sidekiq_options, since that is static. We need to pass in the queue,
+              # which can be set dynamically.
+              queue:,
+              **more,
+            )
+        rescue Sequel::DatabaseConnectionError => e
+          # Once this is in Amigo, use its logging system
+          Sidekiq.logger.warn "DurableJob: #{job_class}: insert failed: #{e}"
+          next
+        end
+        true
+      end
+      return if inserted
+      Sidekiq.logger.error "DurableJob: #{job_class}: no database available to insert"
+    end
+
+    def lock_job(job_id, heartbeat_extension)
+      raise Webhookdb::InvalidPrecondition, "not enabled" unless self.enabled?
+      self.storage_datasets.each do |ds|
+        begin
+          row = ds[job_id:]
+        rescue Sequel::DatabaseConnectionError
+          next nil
+        end
+        next nil if row.nil?
+        now = Time.now
+        new_fields = {
+          locked_by: self.identity,
+          locked_at: now,
+          assume_dead_at: now + heartbeat_extension,
+        }
+        row.merge!(new_fields)
+        ds.where(job_id:).update(**new_fields)
+        return [ds, row]
+      end
+      return nil
+    end
+
+    def unlock_job(dataset, job_id, heartbeat_extension, **fields)
+      dataset.where(job_id:).update(
+        locked_by: nil,
+        locked_at: nil,
+        assume_dead_at: Time.now + heartbeat_extension,
+        **fields,
+      )
+    end
+
+    def heartbeat(now: nil)
+      return unless self.enabled?
+      now ||= Time.now
+      active_worker, ds = Thread.current[:durable_job_active_job]
+      return nil if active_worker.nil?
+      assume_dead_at = now + active_worker.class.heartbeat_extension
+      ds.where(job_id: active_worker.jid).update(assume_dead_at:)
+      return assume_dead_at
+    end
+
+    def heartbeat!(now: nil)
+      return unless self.enabled?
+      assume_dead_at = self.heartbeat(now:)
+      return assume_dead_at if assume_dead_at
+      raise "DurableJob.heartbeat called but no durable job is in TLS"
+    end
+
+    def poll_jobs(joblike, now: Time.now, skip_queue_size: 500, max_page_size: 2000)
+      return unless self.enabled?
+      # There is a global retry set we can use across all queues.
+      # If it's too big, don't bother polling jobs.
+      # Note, this requires we don't let our retry set grow too large...
+      retryset = Sidekiq::RetrySet.new
+      if (rssize = retryset.size) >= skip_queue_size
+        Amigo.log(joblike, :warn, "poll_jobs_retry_set_too_large", {size: rssize})
+        return
+      end
+      deadset = Sidekiq::DeadSet.new
+      if (dssize = deadset.size) >= skip_queue_size
+        Amigo.log(joblike, :warn, "poll_jobs_dead_set_too_large", {size: dssize})
+        return
+      end
+      retries_by_jid = retryset.to_h { |r| [r.jid, r] }
+      deadset_jids = Set.new(deadset.map(&:jid))
+      class_cache = {}
+      self.storage_datasets.each do |ds|
+        # To avoid big memory usage, process a limited number of items.
+        all_rows_to_check = ds.where { assume_dead_at <= now }.
+          select(:job_id, :job_class, :queue, :job_item_json, :missing_at).
+          order(:assume_dead_at).
+          limit(max_page_size).
+          all
+        if all_rows_to_check.size == max_page_size
+          # If we're super backed up, don't bother polling.
+          Amigo.log(joblike, :warn, "poll_jobs_max_page_size_reached", {})
+        end
+        # All our expired rows belong to one of any number of queues.
+        # We should process grouped by queue so we only need to look through each queue once.
+        by_queues = all_rows_to_check.group_by { |r| r[:queue] }
+        by_queues.each do |queue, rows_to_check|
+          q = Sidekiq::Queue.new(queue)
+          if (qsize = q.size) >= skip_queue_size
+            Amigo.log(joblike, :warn, "poll_jobs_queue_size_too_large", {size: qsize})
+            next
+          end
+          all_jids_in_queue = Set.new(q.map(&:jid))
+          rows_to_check.each do |row|
+            job_class = row[:job_class]
+            job_id = row[:job_id]
+            cls = class_cache[job_class] ||= const_get(job_class)
+            # We may want to switch this to bulk operations,
+            # but it can get pretty challenging to reason about.
+            dswhere = ds.where(job_id:)
+            if all_jids_in_queue.include?(job_id)
+              # If a job is in the queue, it means it's waiting to be processed.
+              # Bump the deadline and keep going.
+              Amigo.log(joblike, :debug, "poll_jobs_extending_heartbeat", {job_id:, job_class:})
+              dswhere.update(missing_at: nil, assume_dead_at: now + cls.heartbeat_extension)
+            elsif (retry_record = retries_by_jid[job_id])
+              # If a job is in the retry set, we don't need to bother checking
+              # until the retry is ready. If we retry ahead of time, that's fine-
+              # if the job succeeds, it'll delete the durable job row, if it fails,
+              # it'll overwrite assume_dead_at and we'll get back here.
+              Amigo.log(joblike, :debug, "poll_jobs_found_in_retry_set", {job_id:, job_class:})
+              dswhere.update(missing_at: nil, assume_dead_at: retry_record.at + cls.heartbeat_extension)
+            elsif deadset_jids.include?(job_id)
+              # If a job moved to the dead set, we can delete the PG row.
+              # When we do the retry from the dead set, it'll push a new job to PG.
+              Amigo.log(joblike, :info, "poll_jobs_found_in_dead_set", {job_id:, job_class:})
+              dswhere.delete
+            else
+              # The job was not found for one of the following reasons:
+              # - The job is actively processing (is not in Redis while this happens).
+              #   There's an inherent race condition if we try to check workers;
+              #   so instead, if this is the first time the job is missing,
+              #   we assume it's because it's processing,
+              #   and only treat the job as lost the next time we cannot find it.
+              # - The job was manually deleted (web UI or console).
+              #   We can't know this happened, so have to treat it like a lost job,
+              #   and send it to the dead set. We can get around this by only deleting jobs from the dead set,
+              #   rather than the retry set.
+              # - The job was never sent to Sidekiq.
+              #   We need to handle it.
+              # - The job was lost while processing, like due to a segfault.
+              #   We need to handle it.
+              #
+              item = JSON.parse(row[:job_item_json])
+              item["jid"] ||= job_id
+              if row[:missing_at]
+                item["durable_killed_at"] = now
+                Amigo.log(joblike, :warn, "poll_jobs_handling_failed_job", {job_id:, job_class:})
+                Amigo::DurableJob.failure_notifier&.call(item)
+                deadset.kill(item.to_json, notify_failure: Amigo::DurableJob.failure_notifier.nil?)
+                dswhere.delete
+              else
+                Amigo.log(joblike, :debug, "poll_jobs_setting_job_missing", {job_id:, job_class:})
+                # We want to look again at the next scheduled heartbeat, since this may just be a slow job
+                # that didn't check in frequently enough. In the future, we could warn about it if
+                # we end up finding a row with missing_at set, but for now it's unlikely so not worth it.
+                dswhere.update(missing_at: now, assume_dead_at: now + cls.heartbeat_extension)
+              end
+            end
+          end
+        end
+      end
+    end
+
+    def enabled?
+      return self.enabled
+    end
+  end
+
+  configurable(:durable_job) do
+    setting :enabled, false
+
+    # Space-separated URLs to write durable jobs into.
+    setting :server_urls, [], convert: ->(s) { s.split.map(&:strip) }
+    # Server env vars are the names of environment variables whose value are
+    # each value for server_urls.
+    # Allows you to use dynamically configured servers.
+    # Space-separate multiple env vars.
+    setting :server_env_vars, ["DATABASE_URL"], convert: ->(s) { s.split.map(&:strip) }
+
+    setting :schema_name, :public, convert: ->(s) { s.to_sym }
+    setting :table_name, :durable_jobs, convert: ->(s) { s.to_sym }
+
+    after_configured do
+      self.storage_database_urls = self.server_urls.dup
+      self.storage_database_urls.concat(self.server_env_vars.filter_map { |e| ENV.fetch(e, nil) })
+      self.table_fqn = Sequel[self.schema_name][self.table_name]
+      if self.enabled?
+        self.reconnect
+        self.ensure_jobs_tables
+      end
+    end
+  end
+
+  module ClassMethods
+    # Seconds or duration where, if the job is not completed, it should be re-processed.
+    # Set this to short for short jobs,
+    # and long for long jobs, since they will be re-enqueued
+    # if they take longer than this heartbeat_extension.
+    # You can also use Amigo::DurableJob.heartbeat (or heartbeat!)
+    # to push the heartbeat_extension time further out.
+    # @return [Integer,ActiveSupport::Duration]
+    def heartbeat_extension
+      return 5.minutes
+    end
+  end
+
+  class ClientMiddleware
+    def call(worker_class, job, queue, _redis_pool)
+      return job unless Amigo::DurableJob.enabled?
+      (worker_class = worker_class.constantize) if worker_class.is_a?(String)
+      return job unless worker_class.respond_to?(:heartbeat_extension)
+      Amigo::DurableJob.insert_job(worker_class, job.fetch("jid"), job, queue:) unless job["durable_reenqueued_at"]
+      return job
+    end
+  end
+
+  class ServerMiddleware
+    def call(worker, job, _queue)
+      return yield unless Amigo::DurableJob.enabled? && worker.class.respond_to?(:heartbeat_extension)
+      ds, row = Amigo::DurableJob.lock_job(worker.jid, worker.class.heartbeat_extension)
+      if row.nil?
+        Sidekiq.logger.error "DurableJob: #{worker.class}[#{worker.jid}]: no row found in database"
+        return yield
+      end
+      Thread.current[:durable_job_active_job] = worker, ds
+      # rubocop:disable Lint/RescueException
+      begin
+        yield
+      rescue Exception => e
+        j2 = job.dup
+        j2["error_class"] = e.class.to_s
+        j2["error_message"] = e.to_s
+        Amigo::DurableJob.unlock_job(ds, worker.jid, worker.class.heartbeat_extension, job_item_json: j2.to_json)
+        raise
+      ensure
+        Thread.current[:durable_job_active_job] = nil
+      end
+      # rubocop:enable Lint/RescueException
+      ds.where(job_id: row[:job_id]).delete
+    end
+  end
+end
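
The file above documents itself well, but the wiring is easy to miss: a worker opts in by including the module, both middlewares must be registered with Sidekiq, and something must invoke the poller. Below is a minimal sketch of that wiring; the worker name, its batch size, and the `process` helper are hypothetical, and only the `Amigo::DurableJob` API itself comes from the file above.

    require "sidekiq"
    require "amigo/durable_job"

    class LongImportJob
      include Sidekiq::Job
      include Amigo::DurableJob # pulls in ClassMethods via self.included

      # Override the 5-minute default so the poller gives this job more slack.
      def self.heartbeat_extension
        20 * 60 # seconds
      end

      def perform(row_ids)
        row_ids.each_slice(100) do |batch|
          process(batch) # hypothetical work
          # Push assume_dead_at out again so a slow run is not treated as lost.
          Amigo::DurableJob.heartbeat
        end
      end
    end

    # Both middlewares must be in the chains for rows to be written and locked:
    Sidekiq.configure_client do |config|
      config.client_middleware { |chain| chain.add Amigo::DurableJob::ClientMiddleware }
    end
    Sidekiq.configure_server do |config|
      config.client_middleware { |chain| chain.add Amigo::DurableJob::ClientMiddleware }
      config.server_middleware { |chain| chain.add Amigo::DurableJob::ServerMiddleware }
    end

Separately, some scheduled job (sidekiq-cron or similar, per the header comment) must periodically call `Amigo::DurableJob.poll_jobs(self)`, passing itself as the `joblike` used in the `Amigo.log` calls.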
data/lib/pry/clipboard.rb
@@ -0,0 +1,111 @@
+# frozen_string_literal: true
+
+require "pry"
+require "clipboard"
+
+# Originally from https://github.com/hotchpotch/pry-clipboard
+# but modified since it is broken in Ruby 2.7
+module Pry::Clipboard
+  Command = Pry::CommandSet.new do
+    create_command "paste" do
+      description "Paste from clipboard"
+
+      banner <<-BANNER
+        Usage: paste [-q|--quiet]
+      BANNER
+
+      def options(opt)
+        opt.on :q, :quiet, "quiet output", optional: true
+      end
+
+      def process
+        str = Clipboard.paste
+        unless opts.present?(:q)
+          pry_instance.output.puts green("-*-*- Paste from clipboard -*-*-")
+          pry_instance.output.puts str
+        end
+        eval_string << str
+      end
+    end
+
+    create_command "copy-history" do
+      description "Copy history to clipboard"
+
+      banner <<-BANNER
+        Usage: copy-history [N] [-T|--tail N] [-H|--head N] [-R|--range N..M] [-G|--grep match] [-l] [-q|--quiet]
+        e.g: `copy-history`
+        e.g: `copy-history -l`
+        e.g: `copy-history 10`
+        e.g: `copy-history -H 10`
+        e.g: `copy-history -T 5`
+        e.g: `copy-history -R 5..10`
+      BANNER
+
+      def options(opt)
+        opt.on :l, "Copy history with last result", optional: true
+        opt.on :H, :head, "Copy the first N items.", optional: true, as: Integer
+        opt.on :T, :tail, "Copy the last N items.", optional: true, as: Integer
+        opt.on :R, :range, "Copy the given range of lines.", optional: true, as: Range
+        opt.on :G, :grep, "Copy lines matching the given pattern.", optional: true, as: String
+        opt.on :q, :quiet, "quiet output", optional: true
+      end
+
+      def process
+        history = Pry::Code(Pry.history.to_a)
+
+        history = if num_arg
+          history.take_lines(num_arg, 1)
+        else
+          history = history.grep(opts[:grep]) if opts.present?(:grep)
+          if opts.present?(:range)
+            history.between(opts[:range])
+          elsif opts.present?(:head)
+            history.take_lines(1, opts[:head] || 10)
+          elsif opts.present?(:tail) || opts.present?(:grep)
+            n = opts[:tail] || 10
+            n = history.lines.count if n > history.lines.count
+            history.take_lines(-n, n)
+          else
+            history.take_lines(-1, 1)
+          end
+        end
+
+        str = history.raw
+        str += "#=> #{pry_instance.last_result}\n" if opts.present?(:l)
+        Clipboard.copy str
+
+        return if opts.present?(:q)
+        pry_instance.output.puts green("-*-*- Copy history to clipboard -*-*-")
+        pry_instance.output.puts str
+      end
+
+      def num_arg
+        first = args[0]
+        first.to_i if first && first.to_i.to_s == first
+      end
+    end
+
+    create_command "copy-result" do
+      description "Copy result to clipboard."
+
+      banner <<-BANNER
+        Usage: copy-result [-q|--quiet]
+      BANNER
+
+      def options(opt)
+        opt.on :q, :quiet, "quiet output", optional: true
+      end
+
+      def process
+        res = "#{pry_instance.last_result}\n"
+        Clipboard.copy res
+
+        return if opts.present?(:q)
+        pry_instance.output.puts green("-*-*- Copy result to clipboard -*-*-")
+        pry_instance.output.print res
+      end
+    end
+  end
+end
+
+Pry.commands.import Pry::Clipboard::Command
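
Once imported, the three commands are available in any pry session. An illustrative session (the inputs are arbitrary; the banner lines come from the `process` methods above):

    [1] pry(main)> 6 * 7
    => 42
    [2] pry(main)> copy-result
    -*-*- Copy result to clipboard -*-*-
    42
    [3] pry(main)> copy-history -T 2 -q   # copy the last two input lines, no output

Note that `copy-history` with no arguments copies only the most recent line (`take_lines(-1, 1)`), and `-l` appends the last result after a `#=>` marker.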
data/lib/sequel/advisory_lock.rb
@@ -0,0 +1,65 @@
+# frozen_string_literal: true
+
+require "sequel"
+
+class Sequel::AdvisoryLock
+  def initialize(db, key_or_key1, key2=nil, shared: false, xact: false)
+    @db = db
+    xstr = xact ? "_xact" : ""
+    sharestr = shared ? "_shared" : ""
+    @locker = to_expr("pg_advisory#{xstr}_lock#{sharestr}", key_or_key1, key2)
+    @trylocker = to_expr("pg_try_advisory#{xstr}_lock#{sharestr}", key_or_key1, key2)
+    @unlocker = to_expr(shared ? "pg_advisory_unlock_shared" : "pg_advisory_unlock", key_or_key1, key2)
+    if key2
+      @cond = {classid: key_or_key1, objid: key2, objsubid: 2}
+    else
+      k2 = key_or_key1 & 0xFFFF_FFFF
+      @cond = {classid: 1, objid: k2, objsubid: 1}
+    end
+  end
+
+  private def to_expr(name, key1, key2)
+    return key2.nil? ? Sequel.function(name.to_sym, key1) : Sequel.function(name.to_sym, key1, key2)
+  end
+
+  def dataset(this: false)
+    ds = @db[:pg_locks]
+    ds = ds.where(@cond) if this
+    return ds
+  end
+
+  # pg_advisory_lock
+  # pg_advisory_lock_shared
+  # pg_advisory_xact_lock
+  # pg_advisory_xact_lock_shared
+  def with_lock
+    raise LocalJumpError unless block_given?
+    @db.get(@locker)
+    return yield
+  ensure
+    self.unlock
+  end
+
+  # pg_try_advisory_lock
+  # pg_try_advisory_lock_shared
+  # pg_try_advisory_xact_lock
+  # pg_try_advisory_xact_lock_shared
+  def with_lock?
+    raise LocalJumpError unless block_given?
+    acquired = @db.get(@trylocker)
+    return false, nil unless acquired
+    begin
+      return true, yield
+    ensure
+      self.unlock
+    end
+  end
+
+  def unlock
+    @db.get(@unlocker)
+  end
+
+  def unlock_all
+    @db.get(Sequel.function(:pg_advisory_unlock_all))
+  end
+end
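
A sketch of how this class might be used, assuming the gem's lib directory is on the load path and a reachable Postgres `DATABASE_URL`; the lock key and `expensive_refresh` are arbitrary illustrations:

    require "sequel"
    require "sequel/advisory_lock"

    DB = Sequel.connect(ENV.fetch("DATABASE_URL"))
    lock = Sequel::AdvisoryLock.new(DB, 5_551_212)

    # Blocking form: waits on pg_advisory_lock, always unlocks via ensure.
    lock.with_lock do
      # ...critical section...
    end

    # Try form: returns [acquired, block_result] without blocking.
    acquired, result = lock.with_lock? { expensive_refresh } # hypothetical work
    warn "another process holds the lock" unless acquired

    # Inspect this lock's rows in pg_locks:
    lock.dataset(this: true).all

Passing `xact: true` selects the `pg_advisory_xact_lock*` variants, which Postgres releases automatically at transaction end rather than on explicit unlock.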
data/lib/webhookdb/admin_api/auth.rb
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+require "grape"
+
+require "webhookdb/admin_api"
+
+class Webhookdb::AdminAPI::Auth < Webhookdb::AdminAPI::V1
+  resource :auth do
+    desc "Return the current administrator customer."
+    get do
+      present admin_customer, with: Webhookdb::AdminAPI::CurrentCustomerEntity, env:
+    end
+
+    resource :impersonate do
+      desc "Remove any active impersonation and return the admin customer."
+      delete do
+        Webhookdb::Service::Auth::Impersonation.new(env["warden"]).off(admin_customer)
+
+        status 200
+        present admin_customer, with: Webhookdb::AdminAPI::CurrentCustomerEntity, env:
+      end
+
+      route_param :customer_id, type: Integer do
+        desc "Impersonate a customer"
+        post do
+          (target = Webhookdb::Customer[params[:customer_id]]) or not_found!
+
+          Webhookdb::Service::Auth::Impersonation.new(env["warden"]).on(target)
+
+          status 200
+          present target, with: Webhookdb::AdminAPI::CurrentCustomerEntity, env:
+        end
+      end
+    end
+  end
+end
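
These Grape routes expose admin impersonation. A hypothetical client interaction, assuming the V1 admin API is mounted at `/admin/v1` (the mount point is defined elsewhere in the gem) and cookie-based auth from a prior admin login; only Ruby's standard library is used:

    require "net/http"
    require "uri"

    base = URI("https://api.example.com/admin/v1")
    http = Net::HTTP.new(base.host, base.port)
    http.use_ssl = true
    headers = {"Cookie" => ENV.fetch("ADMIN_COOKIE")} # hypothetical session cookie

    # POST /auth/impersonate/123: begin impersonating customer 123.
    res = http.request(Net::HTTP::Post.new("#{base.path}/auth/impersonate/123", headers))
    puts res.body # the impersonated customer, as CurrentCustomerEntity

    # DELETE /auth/impersonate: drop impersonation, back to the admin customer.
    res = http.request(Net::HTTP::Delete.new("#{base.path}/auth/impersonate", headers))
    puts res.body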
data/lib/webhookdb/admin_api/customers.rb
@@ -0,0 +1,63 @@
+# frozen_string_literal: true
+
+require "grape"
+
+require "webhookdb/admin_api"
+
+class Webhookdb::AdminAPI::Customers < Webhookdb::AdminAPI::V1
+  resource :customers do
+    desc "Return all customers, newest first"
+    params do
+      use :pagination
+      use :ordering, model: Webhookdb::Customer
+      use :searchable
+    end
+    get do
+      ds = Webhookdb::Customer.dataset
+      if (email_like = search_param_to_sql(params, :email))
+        name_like = search_param_to_sql(params, :name)
+        ds = ds.where(email_like | name_like)
+      end
+
+      ds = order(ds, params)
+      ds = paginate(ds, params)
+      present_collection ds, with: Webhookdb::AdminAPI::CustomerEntity
+    end
+
+    route_param :id, type: Integer do
+      desc "Return the customer"
+      get do
+        (customer = Webhookdb::Customer[params[:id]]) or not_found!
+        present customer, with: Webhookdb::AdminAPI::DetailedCustomerEntity
+      end
+
+      desc "Update the customer"
+      params do
+        optional :name, type: String
+        optional :note, type: String
+        optional :email, type: String
+        optional :roles, type: [String]
+      end
+      post do
+        fields = params
+        (customer = Webhookdb::Customer[fields[:id]]) or not_found!
+        customer.db.transaction do
+          if (roles = fields.delete(:roles))
+            customer.remove_all_roles
+            roles.uniq.each { |r| customer.add_role(Webhookdb::Role[name: r]) }
+          end
+          if fields.key?(:email_verified)
+            customer.email_verified_at = fields.delete(:email_verified) ? Time.now : nil
+          end
+          if fields.key?(:phone_verified)
+            customer.phone_verified_at = fields.delete(:phone_verified) ? Time.now : nil
+          end
+          set_declared(customer, params)
+          customer.save_changes
+        end
+        status 200
+        present customer, with: Webhookdb::AdminAPI::DetailedCustomerEntity
+      end
+    end
+  end
+end
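
The customer endpoints follow the same pattern; here is a hypothetical client sketch under the same assumptions as the auth example above. The `search` query-parameter name is a guess inferred from the `:searchable` helper and `search_param_to_sql`, not confirmed by this file:

    require "net/http"
    require "json"
    require "uri"

    base = URI("https://api.example.com/admin/v1")
    http = Net::HTTP.new(base.host, base.port)
    http.use_ssl = true
    headers = {"Cookie" => ENV.fetch("ADMIN_COOKIE"), "Content-Type" => "application/json"}

    # GET /customers: newest first, filtered by an email/name search.
    puts http.request(Net::HTTP::Get.new("#{base.path}/customers?search=jo", headers)).body

    # POST /customers/42: update the name and replace roles in one transaction.
    update = Net::HTTP::Post.new("#{base.path}/customers/42", headers)
    update.body = {name: "New Name", roles: ["admin"]}.to_json
    puts http.request(update).body

Note the update handler looks the customer up via `fields[:id]` (populated by `route_param :id`) and wraps role replacement, the verified-flag changes, and `save_changes` in a single database transaction.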