realtimex-crm 0.9.1 → 0.9.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/bin/realtimex-crm.js +56 -32
  2. package/dist/assets/{DealList-DnGVfS15.js → DealList-DbwJCRGl.js} +2 -2
  3. package/dist/assets/{DealList-DnGVfS15.js.map → DealList-DbwJCRGl.js.map} +1 -1
  4. package/dist/assets/index-C__S90Gb.css +1 -0
  5. package/dist/assets/index-mE-upBfc.js +166 -0
  6. package/dist/assets/{index-DPrpo5Xq.js.map → index-mE-upBfc.js.map} +1 -1
  7. package/dist/index.html +1 -1
  8. package/dist/stats.html +1 -1
  9. package/package.json +2 -1
  10. package/src/components/atomic-crm/activities/ActivitiesPage.tsx +16 -0
  11. package/src/components/atomic-crm/activities/ActivityFeed.tsx +212 -0
  12. package/src/components/atomic-crm/activities/FileUpload.tsx +359 -0
  13. package/src/components/atomic-crm/contacts/ContactShow.tsx +28 -10
  14. package/src/components/atomic-crm/integrations/CreateChannelDialog.tsx +139 -0
  15. package/src/components/atomic-crm/integrations/IngestionChannelsTab.tsx +188 -0
  16. package/src/components/atomic-crm/integrations/IntegrationsPage.tsx +15 -3
  17. package/supabase/fix_webhook_hardcoded.sql +34 -0
  18. package/supabase/functions/_shared/ingestionGuard.ts +128 -0
  19. package/supabase/functions/_shared/utils.ts +1 -1
  20. package/supabase/functions/ingest-activity/.well-known/supabase/config.toml +4 -0
  21. package/supabase/functions/ingest-activity/index.ts +261 -0
  22. package/supabase/migrations/20251219120100_webhook_triggers.sql +10 -5
  23. package/supabase/migrations/20251220120000_realtime_ingestion.sql +154 -0
  24. package/supabase/migrations/20251221000000_contact_matching.sql +94 -0
  25. package/supabase/migrations/20251221000001_fix_ingestion_providers_rls.sql +23 -0
  26. package/supabase/migrations/20251221000002_fix_contact_matching_jsonb.sql +67 -0
  27. package/supabase/migrations/20251221000003_fix_email_matching.sql +70 -0
  28. package/supabase/migrations/20251221000004_time_based_work_stealing.sql +99 -0
  29. package/supabase/migrations/20251221000005_realtime_functions.sql +73 -0
  30. package/supabase/migrations/20251221000006_enable_pg_net.sql +3 -0
  31. package/supabase/migrations/20251222075019_enable_extensions_and_configure_cron.sql +86 -0
  32. package/supabase/migrations/20251222094036_large_payload_storage.sql +213 -0
  33. package/supabase/migrations/20251222094247_large_payload_cron.sql +28 -0
  34. package/supabase/migrations/20251222220000_fix_large_payload_types.sql +72 -0
  35. package/supabase/migrations/20251223000000_enable_realtime_all_crm_tables.sql +50 -0
  36. package/supabase/migrations/20251223185638_remove_large_payload_storage.sql +54 -0
  37. package/dist/assets/index-DPrpo5Xq.js +0 -159
  38. package/dist/assets/index-kM1Og1AS.css +0 -1
package/supabase/migrations/20251221000003_fix_email_matching.sql
@@ -0,0 +1,70 @@
+ -- Migration: Fix email matching to be more robust with jsonb arrays
+
+ CREATE OR REPLACE FUNCTION auto_link_contact()
+ RETURNS TRIGGER
+ LANGUAGE plpgsql
+ AS $$
+ DECLARE
+   matched_contact_id bigint;
+ BEGIN
+   -- Only attempt matching if contact_id is not already set
+   IF NEW.contact_id IS NOT NULL THEN
+     RETURN NEW;
+   END IF;
+
+   -- Strategy 1: Try exact email match
+   -- Note: email is stored as email_jsonb (array of email objects)
+   -- Use jsonb_array_elements to check each email in the array
+   IF NEW.metadata ? 'from' AND NEW.metadata->>'from' LIKE '%@%' THEN
+     SELECT c.id INTO matched_contact_id
+     FROM contacts c,
+          jsonb_array_elements(c.email_jsonb) AS email
+     WHERE lower(email->>'email') = lower(NEW.metadata->>'from')
+     LIMIT 1;
+
+     IF matched_contact_id IS NOT NULL THEN
+       NEW.contact_id := matched_contact_id;
+       RETURN NEW;
+     END IF;
+   END IF;
+
+   -- Strategy 2: Try exact phone match (E.164 normalized)
+   -- Note: phones are stored as phone_jsonb (array of phone objects)
+   IF NEW.metadata ? 'from' AND NEW.metadata->>'from' LIKE '+%' THEN
+     SELECT c.id INTO matched_contact_id
+     FROM contacts c,
+          jsonb_array_elements(c.phone_jsonb) AS phone
+     WHERE normalize_phone(phone->>'number') = normalize_phone(NEW.metadata->>'from')
+     LIMIT 1;
+
+     IF matched_contact_id IS NOT NULL THEN
+       NEW.contact_id := matched_contact_id;
+       RETURN NEW;
+     END IF;
+   END IF;
+
+   -- Strategy 3: Try fuzzy phone match (last 10 digits for US numbers)
+   IF NEW.metadata ? 'from' AND length(regexp_replace(NEW.metadata->>'from', '[^0-9]', '', 'g')) >= 10 THEN
+     SELECT c.id INTO matched_contact_id
+     FROM contacts c,
+          jsonb_array_elements(c.phone_jsonb) AS phone
+     WHERE right(regexp_replace(phone->>'number', '[^0-9]', '', 'g'), 10) =
+           right(regexp_replace(NEW.metadata->>'from', '[^0-9]', '', 'g'), 10)
+     LIMIT 1;
+
+     IF matched_contact_id IS NOT NULL THEN
+       NEW.contact_id := matched_contact_id;
+       RETURN NEW;
+     END IF;
+   END IF;
+
+   -- No match found - activity will be created as "orphan" with contact_id = NULL
+   RETURN NEW;
+ END;
+ $$;
+
+ COMMENT ON FUNCTION auto_link_contact() IS
+ 'Automatically links activities to contacts based on email or phone number in metadata.from field.
+ Uses case-insensitive email matching and handles jsonb array structures correctly.
+ Priority: Email match > Phone E.164 match > Fuzzy phone match (last 10 digits).
+ Activities without matches become orphans (contact_id = NULL).';
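Note: as a reader aid, here is a minimal client-side sketch of how this trigger function is exercised, assuming the trigger created in the earlier 20251221000000_contact_matching.sql migration attaches auto_link_contact() to activities and a standard supabase-js v2 client; the project URL, key, and sample addresses are placeholders.

```ts
import { createClient } from "@supabase/supabase-js";

// Placeholder credentials -- a service-role key bypasses RLS for this sketch.
const supabase = createClient("https://your-project-ref.supabase.co", "service-role-key");

// Insert a raw activity whose metadata.from carries the sender's email.
// The BEFORE INSERT trigger calling auto_link_contact() should populate
// contact_id when some contact's email_jsonb contains that address.
const { data, error } = await supabase
  .from("activities")
  .insert({
    type: "email",
    direction: "inbound",
    processing_status: "raw",
    raw_data: { subject: "Quote request" },
    metadata: { from: "jane.doe@example.com" },
  })
  .select("id, contact_id")
  .single();

if (error) throw error;
console.log("linked contact:", data.contact_id); // null => orphan activity
```

If no contact has a matching email_jsonb or phone_jsonb entry, contact_id comes back NULL and the activity stays an orphan, exactly as the function comment describes.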
package/supabase/migrations/20251221000004_time_based_work_stealing.sql
@@ -0,0 +1,99 @@
+ -- Migration: Time-Based Work Stealing (No Coordination Table)
+ -- Removes the need for crm_processing_nodes by using activity age as prioritization signal
+
+ -- Function: Unlock stale locks
+ -- Activities that have been locked for >5 minutes get unlocked (crashed/offline agents)
+ CREATE OR REPLACE FUNCTION unlock_stale_locks()
+ RETURNS INTEGER
+ LANGUAGE plpgsql
+ AS $$
+ DECLARE
+   unlocked_count INTEGER;
+ BEGIN
+   UPDATE activities
+   SET
+     locked_by = NULL,
+     locked_at = NULL,
+     processing_status = 'raw'
+   WHERE
+     processing_status = 'processing'
+     AND locked_at < NOW() - INTERVAL '5 minutes';
+
+   GET DIAGNOSTICS unlocked_count = ROW_COUNT;
+   RETURN unlocked_count;
+ END;
+ $$;
+
+ COMMENT ON FUNCTION unlock_stale_locks() IS
+ 'Unlocks activities that have been in processing state for >5 minutes.
+ Should be called periodically (e.g., every minute via pg_cron or application logic).';
+
+ -- Function: Claim stale activity from any user
+ -- Returns activities that are >5 minutes old (timeout fallback for offline users)
+ CREATE OR REPLACE FUNCTION claim_stale_activity()
+ RETURNS TABLE (
+   id uuid,
+   type text,
+   direction text,
+   sales_id bigint,
+   contact_id bigint,
+   company_id bigint,
+   deal_id bigint,
+   raw_data jsonb,
+   processing_status text,
+   processed_data jsonb,
+   metadata jsonb,
+   locked_by text,
+   locked_at timestamptz,
+   created_at timestamptz,
+   updated_at timestamptz
+ )
+ LANGUAGE plpgsql
+ AS $$
+ BEGIN
+   RETURN QUERY
+   UPDATE activities
+   SET
+     locked_by = 'agent',
+     locked_at = NOW(),
+     processing_status = 'processing'
+   WHERE activities.id = (
+     SELECT a.id
+     FROM activities a
+     WHERE a.processing_status = 'raw'
+       AND a.created_at < NOW() - INTERVAL '5 minutes' -- Only stale activities
+       AND a.locked_by IS NULL
+     ORDER BY a.created_at ASC
+     FOR UPDATE SKIP LOCKED
+     LIMIT 1
+   )
+   RETURNING
+     activities.id,
+     activities.type,
+     activities.direction,
+     activities.sales_id,
+     activities.contact_id,
+     activities.company_id,
+     activities.deal_id,
+     activities.raw_data,
+     activities.processing_status,
+     activities.processed_data,
+     activities.metadata,
+     activities.locked_by,
+     activities.locked_at,
+     activities.created_at,
+     activities.updated_at;
+ END;
+ $$;
+
+ COMMENT ON FUNCTION claim_stale_activity() IS
+ 'Claims the oldest raw activity that has been waiting for >5 minutes.
+ Used by agents as fallback when no fresh work is available from their own queue.
+ Implements time-based work stealing without coordination table.';
+
+ -- Note: The existing claim_next_pending_activity(p_agent_sales_id) function
+ -- continues to work for claiming fresh work from a specific user's queue.
+ -- Agent prioritization logic:
+ -- 1. Try claim_next_pending_activity(my_sales_id) for my own fresh work
+ -- 2. Try claim_stale_activity() for anyone's stale work (>5 min old)
+ -- 3. Repeat every 5 seconds
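Note: the closing comment above sketches the agent loop; a hedged TypeScript sketch of that polling logic via supabase-js RPC follows. claim_next_pending_activity is only referenced here, so its exact return shape is an assumption; MY_SALES_ID, the credentials, and the processing step are placeholders.

```ts
import { createClient } from "@supabase/supabase-js";

// Placeholder credentials; an agent would normally run with a service-role key.
const supabase = createClient("https://your-project-ref.supabase.co", "service-role-key");
const MY_SALES_ID = 42; // hypothetical sales id for this agent

async function claimNext(): Promise<any | null> {
  // 1. Prefer fresh work from this agent's own queue.
  const own = await supabase.rpc("claim_next_pending_activity", {
    p_agent_sales_id: MY_SALES_ID,
  });
  if (own.error) throw own.error;
  if (own.data && own.data.length > 0) return own.data[0];

  // 2. Fall back to anyone's stale work (> 5 minutes old).
  const stale = await supabase.rpc("claim_stale_activity");
  if (stale.error) throw stale.error;
  return stale.data && stale.data.length > 0 ? stale.data[0] : null;
}

// 3. Repeat every 5 seconds, per the migration's note.
setInterval(async () => {
  try {
    const activity = await claimNext();
    if (activity) {
      // application-specific processing, then mark as processed (not shown in this diff)
      console.log("claimed activity", activity.id);
    }
  } catch (err) {
    console.error("claim failed", err);
  }
}, 5_000);
```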
package/supabase/migrations/20251221000005_realtime_functions.sql
@@ -0,0 +1,73 @@
+ -- Migration: Realtime Helper Functions
+ -- Purpose: Allow programmatic enabling of Realtime for tables (creating channels)
+ -- This supports the RealTimeX App integration which requires Realtime subscriptions.
+
+ -- 1. Create a function to safely add tables to the supabase_realtime publication
+ create or replace function enable_realtime_for_table(target_table text, target_schema text default 'public')
+ returns void
+ language plpgsql
+ security definer -- Required: Regular users can't ALTER PUBLICATION
+ set search_path = public, pg_catalog -- Security: Prevent search_path attacks
+ as $$
+ declare
+   publication_name text := 'supabase_realtime';
+ begin
+   -- Validate inputs
+   if target_table is null or target_table = '' then
+     raise exception 'target_table cannot be null or empty';
+   end if;
+
+   -- Ensure the publication exists (idempotent)
+   if not exists (select 1 from pg_publication where pubname = publication_name) then
+     execute format('create publication %I', publication_name);
+     raise notice 'Created publication %', publication_name;
+   end if;
+
+   -- Check if table is already in publication
+   if not exists (
+     select 1
+     from pg_publication_tables
+     where pubname = publication_name
+       and schemaname = target_schema
+       and tablename = target_table
+   ) then
+     execute format('alter publication %I add table %I.%I', publication_name, target_schema, target_table);
+     raise notice 'Added table %.% to publication %', target_schema, target_table, publication_name;
+   else
+     raise notice 'Table %.% is already in publication %', target_schema, target_table, publication_name;
+   end if;
+ end;
+ $$;
+
+ comment on function enable_realtime_for_table(text, text) is
+ 'Safely adds tables to supabase_realtime publication. Realtime respects RLS policies - clients only receive changes for rows they can SELECT.';
+
+ -- 2. Enable Realtime for core CRM tables
+ -- Note: All realtime subscriptions respect RLS policies - users only receive changes for rows they can SELECT
+
+ -- Activities: Real-time ingestion, activity feed updates
+ select enable_realtime_for_table('activities');
+
+ -- Tasks: Task assignments, status changes, team collaboration
+ select enable_realtime_for_table('tasks');
+
+ -- Contacts: Contact profile updates, new contact notifications
+ select enable_realtime_for_table('contacts');
+
+ -- Companies: Company data changes, relationship updates
+ select enable_realtime_for_table('companies');
+
+ -- Deals: Pipeline movement, stage changes, deal value updates (critical for sales teams!)
+ select enable_realtime_for_table('deals');
+
+ -- Contact Notes: Real-time note additions/updates
+ select enable_realtime_for_table('contactNotes');
+
+ -- Deal Notes: Deal discussion updates
+ select enable_realtime_for_table('dealNotes');
+
+ -- Sales: Team member changes, user status updates
+ select enable_realtime_for_table('sales');
+
+ -- Ingestion Providers: Channel configuration changes
+ select enable_realtime_for_table('ingestion_providers');
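Note: once a table is in the supabase_realtime publication, a client can subscribe to it with supabase-js. A minimal sketch, using the deals table enabled above (URL and key are placeholders; events are still filtered by RLS, as the migration's comments note):

```ts
import { createClient } from "@supabase/supabase-js";

// Placeholder anon key; a signed-in user only receives rows they can SELECT.
const supabase = createClient("https://your-project-ref.supabase.co", "anon-key");

// Subscribe to pipeline changes on the deals table.
const channel = supabase
  .channel("deal-pipeline")
  .on(
    "postgres_changes",
    { event: "UPDATE", schema: "public", table: "deals" },
    (payload) => {
      console.log("deal changed:", payload.new);
    }
  )
  .subscribe();

// Later, when the view unmounts: supabase.removeChannel(channel);
```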
package/supabase/migrations/20251221000006_enable_pg_net.sql
@@ -0,0 +1,3 @@
+ -- Enable pg_net extension for webhook dispatcher
+ -- This extension provides the net.http_post function used by the cron job
+ create extension if not exists pg_net with schema extensions;
package/supabase/migrations/20251222075019_enable_extensions_and_configure_cron.sql
@@ -0,0 +1,86 @@
+ -- Enable Extensions and Configure Cron for Webhook System
+ -- This migration ensures all required extensions are enabled for the webhook dispatcher
+
+ -- 1. Enable required extensions
+ CREATE EXTENSION IF NOT EXISTS pg_cron WITH SCHEMA extensions;
+ CREATE EXTENSION IF NOT EXISTS pg_net WITH SCHEMA extensions;
+
+ -- 2. Create a helper function to configure cron settings
+ CREATE OR REPLACE FUNCTION public.configure_webhook_cron_settings(
+   p_supabase_url text,
+   p_service_role_key text
+ ) RETURNS text
+ LANGUAGE plpgsql
+ SECURITY DEFINER
+ AS $$
+ BEGIN
+   -- Set the configuration at database level
+   EXECUTE format('ALTER DATABASE %I SET app.settings.supabase_url = %L',
+     current_database(), p_supabase_url);
+   EXECUTE format('ALTER DATABASE %I SET app.settings.service_role_key = %L',
+     current_database(), p_service_role_key);
+
+   -- Reload configuration for current session
+   PERFORM pg_reload_conf();
+
+   RETURN format('Configuration updated successfully. Please reconnect to see changes. URL: %s', p_supabase_url);
+ END;
+ $$;
+
+ -- Grant execute permission to authenticated users
+ GRANT EXECUTE ON FUNCTION public.configure_webhook_cron_settings(text, text) TO authenticated;
+ GRANT EXECUTE ON FUNCTION public.configure_webhook_cron_settings(text, text) TO service_role;
+
+ -- 3. Add a comment with usage instructions
+ COMMENT ON FUNCTION public.configure_webhook_cron_settings(text, text) IS
+ 'Configures the webhook cron dispatcher settings.
+
+ Usage:
+ SELECT configure_webhook_cron_settings(
+ ''your-project-ref.supabase.co'',
+ ''your-service-role-key-here''
+ );
+
+ You can find these values in your Supabase Dashboard:
+ - Project URL: Settings → API → Project URL (without https://)
+ - Service Role Key: Settings → API → service_role key
+
+ After running this function, reconnect to the database for changes to take effect.
+ The cron job scheduled in 20251219120200_webhook_cron.sql will then work properly.';
+
+ -- 4. Check if settings are already configured
+ DO $$
+ DECLARE
+   current_url text;
+   current_key text;
+ BEGIN
+   -- Try to read current settings
+   BEGIN
+     current_url := current_setting('app.settings.supabase_url', true);
+     current_key := current_setting('app.settings.service_role_key', true);
+
+     IF current_url IS NOT NULL AND current_key IS NOT NULL THEN
+       RAISE NOTICE 'Webhook cron settings already configured:';
+       RAISE NOTICE ' URL: %', current_url;
+       RAISE NOTICE ' Key: %', left(current_key, 20) || '...';
+     ELSE
+       RAISE NOTICE '=================================================================';
+       RAISE NOTICE 'IMPORTANT: Webhook cron settings NOT configured!';
+       RAISE NOTICE '=================================================================';
+       RAISE NOTICE 'The webhook dispatcher cron job requires configuration.';
+       RAISE NOTICE '';
+       RAISE NOTICE 'Run this SQL to configure (replace with your actual values):';
+       RAISE NOTICE '';
+       RAISE NOTICE ' SELECT configure_webhook_cron_settings(';
+       RAISE NOTICE ' ''your-project-ref.supabase.co'',';
+       RAISE NOTICE ' ''your-service-role-key-here''';
+       RAISE NOTICE ' );';
+       RAISE NOTICE '';
+       RAISE NOTICE 'Find these values in: Supabase Dashboard → Settings → API';
+       RAISE NOTICE '=================================================================';
+     END IF;
+   EXCEPTION WHEN OTHERS THEN
+     RAISE NOTICE 'Could not check webhook cron settings. This is normal for fresh deployments.';
+   END;
+ END;
+ $$;
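Note: the function comment above shows the SQL-editor route; because it is granted to authenticated and service_role, the same call can also be made from a setup script through supabase-js RPC. A minimal sketch, with placeholder values:

```ts
import { createClient } from "@supabase/supabase-js";

// Run once per environment; all values below are placeholders from your dashboard.
const supabase = createClient("https://your-project-ref.supabase.co", "service-role-key");

const { data, error } = await supabase.rpc("configure_webhook_cron_settings", {
  p_supabase_url: "your-project-ref.supabase.co", // without https://
  p_service_role_key: "your-service-role-key-here",
});

if (error) throw error;
console.log(data); // "Configuration updated successfully. Please reconnect to see changes. ..."
```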
package/supabase/migrations/20251222094036_large_payload_storage.sql
@@ -0,0 +1,213 @@
+ -- Large Payload Storage System
+ -- Automatically moves large raw_data payloads to Supabase Storage to prevent table bloat
+
+ -- 1. Create storage bucket for activity payloads
+ INSERT INTO storage.buckets (id, name, public, file_size_limit, allowed_mime_types)
+ VALUES (
+   'activity-payloads',
+   'activity-payloads',
+   false, -- Private bucket
+   NULL, -- No file size limit (supports large payloads of any size)
+   NULL -- Allow all MIME types (PDFs, images, videos, etc.)
+ )
+ ON CONFLICT (id) DO NOTHING;
+
+ -- 2. Create storage policy for service role access
+ CREATE POLICY "Service role can manage activity payloads"
+ ON storage.objects FOR ALL
+ TO service_role
+ USING (bucket_id = 'activity-payloads')
+ WITH CHECK (bucket_id = 'activity-payloads');
+
+ -- Allow authenticated users to read their own activity payloads
+ CREATE POLICY "Users can read their activity payloads"
+ ON storage.objects FOR SELECT
+ TO authenticated
+ USING (
+   bucket_id = 'activity-payloads' AND
+   (storage.foldername(name))[1] IN (
+     SELECT id::text FROM activities WHERE sales_id = (
+       SELECT id FROM sales WHERE user_id = auth.uid()
+     )
+   )
+ );
+
+ -- 3. Add columns to activities table for payload storage tracking
+ ALTER TABLE activities
+   ADD COLUMN IF NOT EXISTS payload_size_bytes INTEGER,
+   ADD COLUMN IF NOT EXISTS payload_storage_status TEXT DEFAULT 'inline' CHECK (payload_storage_status IN ('inline', 'pending_move', 'in_storage')),
+   ADD COLUMN IF NOT EXISTS storage_path TEXT;
+
+ -- Create index for finding activities that need storage migration
+ CREATE INDEX IF NOT EXISTS idx_activities_pending_storage
+   ON activities(payload_storage_status, created_at)
+   WHERE payload_storage_status = 'pending_move';
+
+ -- 4. Function to calculate payload size
+ CREATE OR REPLACE FUNCTION calculate_payload_size(data JSONB)
+ RETURNS INTEGER
+ LANGUAGE plpgsql
+ IMMUTABLE
+ AS $$
+ BEGIN
+   RETURN octet_length(data::text);
+ END;
+ $$;
+
+ -- 5. Trigger function to detect large payloads
+ CREATE OR REPLACE FUNCTION check_payload_size()
+ RETURNS TRIGGER
+ LANGUAGE plpgsql
+ AS $$
+ DECLARE
+   payload_size INTEGER;
+   size_threshold INTEGER := 102400; -- 100KB threshold
+ BEGIN
+   -- Calculate payload size
+   payload_size := calculate_payload_size(NEW.raw_data);
+   NEW.payload_size_bytes := payload_size;
+
+   -- If payload exceeds threshold, mark for storage migration
+   IF payload_size > size_threshold THEN
+     NEW.payload_storage_status := 'pending_move';
+
+     RAISE NOTICE 'Large payload detected (% bytes) for activity %. Will be moved to storage asynchronously.',
+       payload_size, NEW.id;
+   ELSE
+     NEW.payload_storage_status := 'inline';
+   END IF;
+
+   RETURN NEW;
+ END;
+ $$;
+
+ -- 6. Create trigger on activities table
+ DROP TRIGGER IF EXISTS trigger_check_payload_size ON activities;
+ CREATE TRIGGER trigger_check_payload_size
+   BEFORE INSERT OR UPDATE OF raw_data ON activities
+   FOR EACH ROW
+   EXECUTE FUNCTION check_payload_size();
+
+ -- 7. Function to move payload to storage (called by Edge Function)
+ CREATE OR REPLACE FUNCTION move_payload_to_storage(
+   p_activity_id BIGINT,
+   p_storage_path TEXT
+ )
+ RETURNS JSONB
+ LANGUAGE plpgsql
+ SECURITY DEFINER
+ AS $$
+ DECLARE
+   original_payload JSONB;
+   payload_metadata JSONB;
+   result JSONB;
+ BEGIN
+   -- Get the original payload
+   SELECT raw_data INTO original_payload
+   FROM activities
+   WHERE id = p_activity_id
+   FOR UPDATE;
+
+   IF original_payload IS NULL THEN
+     RETURN jsonb_build_object(
+       'success', false,
+       'error', 'Activity not found'
+     );
+   END IF;
+
+   -- Create metadata object to replace raw_data
+   payload_metadata := jsonb_build_object(
+     'payload_type', 'storage_ref',
+     'storage_path', p_storage_path,
+     'size_bytes', calculate_payload_size(original_payload),
+     'moved_at', now()
+   );
+
+   -- Update the activity record
+   UPDATE activities
+   SET
+     raw_data = payload_metadata,
+     payload_storage_status = 'in_storage',
+     storage_path = p_storage_path,
+     updated_at = now()
+   WHERE id = p_activity_id;
+
+   -- Return the original payload for upload
+   result := jsonb_build_object(
+     'success', true,
+     'activity_id', p_activity_id,
+     'storage_path', p_storage_path,
+     'payload', original_payload
+   );
+
+   RETURN result;
+ END;
+ $$;
+
+ -- Grant permissions
+ GRANT EXECUTE ON FUNCTION calculate_payload_size(JSONB) TO authenticated, service_role;
+ GRANT EXECUTE ON FUNCTION move_payload_to_storage(BIGINT, TEXT) TO service_role;
+
+ -- 8. Helper function to retrieve full payload (handles both inline and storage)
+ CREATE OR REPLACE FUNCTION get_activity_payload(p_activity_id BIGINT)
+ RETURNS JSONB
+ LANGUAGE plpgsql
+ SECURITY DEFINER
+ AS $$
+ DECLARE
+   activity_record RECORD;
+ BEGIN
+   SELECT
+     raw_data,
+     payload_storage_status,
+     storage_path
+   INTO activity_record
+   FROM activities
+   WHERE id = p_activity_id;
+
+   IF activity_record IS NULL THEN
+     RETURN jsonb_build_object('error', 'Activity not found');
+   END IF;
+
+   -- If payload is inline or pending move, return raw_data directly
+   IF activity_record.payload_storage_status IN ('inline', 'pending_move') THEN
+     RETURN activity_record.raw_data;
+   END IF;
+
+   -- If in storage, return metadata with instructions
+   -- (Actual retrieval from storage must be done via Storage API)
+   RETURN jsonb_build_object(
+     'payload_type', 'storage_ref',
+     'storage_path', activity_record.storage_path,
+     'message', 'Payload is in storage. Use Supabase Storage API to retrieve.',
+     'url', '/storage/v1/object/activity-payloads/' || activity_record.storage_path
+   );
+ END;
+ $$;
+
+ GRANT EXECUTE ON FUNCTION get_activity_payload(BIGINT) TO authenticated, service_role;
+
+ -- 9. Add comment with usage instructions
+ COMMENT ON COLUMN activities.payload_storage_status IS
+ 'Tracks where the activity payload is stored:
+ - inline: Payload is in raw_data column (< 100KB)
+ - pending_move: Payload is large and queued for storage migration
+ - in_storage: Payload has been moved to Supabase Storage, raw_data contains reference';
+
+ COMMENT ON FUNCTION move_payload_to_storage(BIGINT, TEXT) IS
+ 'Moves a large activity payload to Supabase Storage.
+ Called by the process-large-payloads Edge Function.
+
+ Usage:
+ SELECT move_payload_to_storage(12345, ''12345/1234567890.json'');
+
+ Returns the original payload for upload to storage.';
+
+ COMMENT ON FUNCTION get_activity_payload(BIGINT) IS
+ 'Retrieves activity payload, handling both inline and storage-based payloads.
+
+ Usage:
+ SELECT get_activity_payload(12345);
+
+ For storage-based payloads, returns metadata with storage path.
+ Use Supabase Storage API to download the actual content.';
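Note: for activities that have been migrated (payload_storage_status = 'in_storage'), raw_data only holds a storage_ref object; the comments above say to fetch the original content through the Storage API. A minimal sketch of that retrieval with supabase-js (URL and key are placeholders, and downloadStoredPayload is a hypothetical helper name):

```ts
import { createClient } from "@supabase/supabase-js";

const supabase = createClient("https://your-project-ref.supabase.co", "anon-key");

// Given an activity whose raw_data has been replaced by a storage reference
// ({ payload_type: 'storage_ref', storage_path: '...' }), fetch the original JSON.
async function downloadStoredPayload(storagePath: string) {
  const { data: file, error } = await supabase.storage
    .from("activity-payloads")
    .download(storagePath);
  if (error) throw error;
  return JSON.parse(await file.text());
}

// e.g. const payload = await downloadStoredPayload(activity.raw_data.storage_path);
```

The "Users can read their activity payloads" policy above scopes this download to payload folders belonging to activities owned by the signed-in user's sales record.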
package/supabase/migrations/20251222094247_large_payload_cron.sql
@@ -0,0 +1,28 @@
+ -- Schedule cron job to process large payloads
+ -- Runs every 5 minutes to move pending large payloads to storage
+
+ -- Schedule the large payload processor
+ SELECT cron.schedule(
+   'process-large-payloads',
+   '*/5 * * * *', -- Every 5 minutes
+   $$
+   SELECT
+     net.http_post(
+       url:='https://' || current_setting('app.settings.supabase_url', true) || '/functions/v1/process-large-payloads',
+       headers:=jsonb_build_object(
+         'Content-Type','application/json',
+         'Authorization', 'Bearer ' || current_setting('app.settings.service_role_key', true)
+       ),
+       body:=jsonb_build_object(
+         'limit', 20,
+         'max_age_minutes', 1
+       )
+     ) as request_id;
+   $$
+ );
+
+ -- Add comment
+ COMMENT ON EXTENSION pg_cron IS
+ 'Cron jobs:
+ - webhook-dispatcher: Runs every minute to dispatch webhooks
+ - process-large-payloads: Runs every 5 minutes to move large activity payloads to storage';
package/supabase/migrations/20251222220000_fix_large_payload_types.sql
@@ -0,0 +1,72 @@
+ -- Fix type mismatch in large payload storage system
+ -- activities.id is UUID but move_payload_to_storage expected BIGINT
+
+ -- Drop the old function
+ DROP FUNCTION IF EXISTS move_payload_to_storage(BIGINT, TEXT);
+
+ -- Recreate with correct UUID type
+ CREATE OR REPLACE FUNCTION move_payload_to_storage(
+   p_activity_id UUID, -- Changed from BIGINT to UUID
+   p_storage_path TEXT
+ )
+ RETURNS JSONB
+ LANGUAGE plpgsql
+ SECURITY DEFINER
+ AS $$
+ DECLARE
+   original_payload JSONB;
+   payload_metadata JSONB;
+   result JSONB;
+ BEGIN
+   -- Get the original payload
+   SELECT raw_data INTO original_payload
+   FROM activities
+   WHERE id = p_activity_id
+   FOR UPDATE;
+
+   IF original_payload IS NULL THEN
+     RETURN jsonb_build_object(
+       'success', false,
+       'error', 'Activity not found'
+     );
+   END IF;
+
+   -- Create metadata object to replace raw_data
+   payload_metadata := jsonb_build_object(
+     'payload_type', 'storage_ref',
+     'storage_path', p_storage_path,
+     'moved_at', NOW()
+   );
+
+   -- Update activity: replace raw_data with reference, update status
+   UPDATE activities
+   SET
+     raw_data = payload_metadata,
+     payload_storage_status = 'in_storage',
+     storage_path = p_storage_path
+   WHERE id = p_activity_id;
+
+   -- Return the original payload (to be uploaded by Edge Function)
+   result := jsonb_build_object(
+     'success', true,
+     'activity_id', p_activity_id,
+     'storage_path', p_storage_path,
+     'payload', original_payload
+   );
+
+   RETURN result;
+ END;
+ $$;
+
+ -- Update permissions
+ GRANT EXECUTE ON FUNCTION move_payload_to_storage(UUID, TEXT) TO service_role;
+
+ -- Update comment
+ COMMENT ON FUNCTION move_payload_to_storage(UUID, TEXT) IS
+ 'Moves a large activity payload to Supabase Storage.
+ Called by the process-large-payloads Edge Function.
+
+ Usage:
+ SELECT move_payload_to_storage(''25f160be-0e93-48c0-8a49-8eeda2e25762''::uuid, ''25f160be-0e93-48c0-8a49-8eeda2e25762/1234567890.json'');
+
+ Returns the original payload for upload to storage.';
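Note: the process-large-payloads Edge Function itself is not part of this diff, so the following is only a sketch of the flow these migrations imply — find a pending_move activity, call move_payload_to_storage (UUID signature, per the fix above), then upload the returned payload to the activity-payloads bucket. The URL, key, and the processOnePendingActivity helper name are placeholders.

```ts
import { createClient } from "@supabase/supabase-js";

// Service-role client: the move_payload_to_storage grant is service_role only.
const supabase = createClient("https://your-project-ref.supabase.co", "service-role-key");

async function processOnePendingActivity() {
  // Find one activity flagged by the check_payload_size trigger.
  const { data: pending, error } = await supabase
    .from("activities")
    .select("id")
    .eq("payload_storage_status", "pending_move")
    .limit(1);
  if (error) throw error;
  if (!pending || pending.length === 0) return;

  const activityId = pending[0].id as string; // UUID
  const storagePath = `${activityId}/${Date.now()}.json`;

  // Swap raw_data for a storage_ref and get the original payload back for upload.
  const { data: moved, error: rpcError } = await supabase.rpc("move_payload_to_storage", {
    p_activity_id: activityId,
    p_storage_path: storagePath,
  });
  if (rpcError || !moved?.success) {
    throw rpcError ?? new Error(String(moved?.error ?? "move failed"));
  }

  // Upload the original payload JSON to the private activity-payloads bucket.
  const { error: uploadError } = await supabase.storage
    .from("activity-payloads")
    .upload(storagePath, JSON.stringify(moved.payload), { contentType: "application/json" });
  if (uploadError) throw uploadError;
}
```

The cron job in 20251222094247_large_payload_cron.sql invokes the real Edge Function every 5 minutes with a body of { limit: 20, max_age_minutes: 1 }, so the actual implementation presumably batches this loop rather than handling one activity at a time.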