@gravito/zenith 0.1.0-beta.1

Files changed (63)
  1. package/ARCHITECTURE.md +88 -0
  2. package/BATCH_OPERATIONS_IMPLEMENTATION.md +159 -0
  3. package/DEMO.md +156 -0
  4. package/DEPLOYMENT.md +157 -0
  5. package/DOCS_INTERNAL.md +73 -0
  6. package/Dockerfile +46 -0
  7. package/Dockerfile.demo-worker +29 -0
  8. package/EVOLUTION_BLUEPRINT.md +112 -0
  9. package/JOBINSPECTOR_SCROLL_FIX.md +152 -0
  10. package/PULSE_IMPLEMENTATION_PLAN.md +111 -0
  11. package/QUICK_TEST_GUIDE.md +72 -0
  12. package/README.md +33 -0
  13. package/ROADMAP.md +85 -0
  14. package/TESTING_BATCH_OPERATIONS.md +252 -0
  15. package/bin/flux-console.ts +2 -0
  16. package/dist/bin.js +108196 -0
  17. package/dist/client/assets/index-DGYEwTDL.css +1 -0
  18. package/dist/client/assets/index-oyTdySX0.js +421 -0
  19. package/dist/client/index.html +13 -0
  20. package/dist/server/index.js +108191 -0
  21. package/docker-compose.yml +40 -0
  22. package/docs/integrations/LARAVEL.md +207 -0
  23. package/package.json +50 -0
  24. package/postcss.config.js +6 -0
  25. package/scripts/flood-logs.ts +21 -0
  26. package/scripts/seed.ts +213 -0
  27. package/scripts/verify-throttle.ts +45 -0
  28. package/scripts/worker.ts +123 -0
  29. package/src/bin.ts +6 -0
  30. package/src/client/App.tsx +70 -0
  31. package/src/client/Layout.tsx +644 -0
  32. package/src/client/Sidebar.tsx +102 -0
  33. package/src/client/ThroughputChart.tsx +135 -0
  34. package/src/client/WorkerStatus.tsx +170 -0
  35. package/src/client/components/ConfirmDialog.tsx +103 -0
  36. package/src/client/components/JobInspector.tsx +524 -0
  37. package/src/client/components/LogArchiveModal.tsx +383 -0
  38. package/src/client/components/NotificationBell.tsx +203 -0
  39. package/src/client/components/Toaster.tsx +80 -0
  40. package/src/client/components/UserProfileDropdown.tsx +177 -0
  41. package/src/client/contexts/AuthContext.tsx +93 -0
  42. package/src/client/contexts/NotificationContext.tsx +103 -0
  43. package/src/client/index.css +174 -0
  44. package/src/client/index.html +12 -0
  45. package/src/client/main.tsx +15 -0
  46. package/src/client/pages/LoginPage.tsx +153 -0
  47. package/src/client/pages/MetricsPage.tsx +408 -0
  48. package/src/client/pages/OverviewPage.tsx +511 -0
  49. package/src/client/pages/QueuesPage.tsx +372 -0
  50. package/src/client/pages/SchedulesPage.tsx +531 -0
  51. package/src/client/pages/SettingsPage.tsx +449 -0
  52. package/src/client/pages/WorkersPage.tsx +316 -0
  53. package/src/client/pages/index.ts +7 -0
  54. package/src/client/utils.ts +6 -0
  55. package/src/server/index.ts +556 -0
  56. package/src/server/middleware/auth.ts +127 -0
  57. package/src/server/services/AlertService.ts +160 -0
  58. package/src/server/services/QueueService.ts +828 -0
  59. package/tailwind.config.js +73 -0
  60. package/tests/placeholder.test.ts +7 -0
  61. package/tsconfig.json +38 -0
  62. package/tsconfig.node.json +12 -0
  63. package/vite.config.ts +27 -0
package/docker-compose.yml ADDED
@@ -0,0 +1,40 @@
+ version: '3.8'
+
+ services:
+   # Main Persistence for Archive
+   mysql:
+     image: mysql:8.0
+     container_name: flux-mysql
+     ports:
+       - "3306:3306"
+     environment:
+       MYSQL_ROOT_PASSWORD: root
+       MYSQL_DATABASE: flux
+     healthcheck:
+       test: [ "CMD", "mysqladmin", "ping", "-h", "localhost" ]
+       timeout: 20s
+       retries: 10
+
+   # Real-time state store
+   redis:
+     image: redis:7-alpine
+     container_name: flux-redis
+     ports:
+       - "6379:6379"
+   # Flux Console (Optional: run locally via npm dev instead)
+   # console:
+   #   build: .
+   #   ports:
+   #     - "3000:3000"
+   #   environment:
+   #     - REDIS_URL=redis://redis:6379
+   #     - DB_DRIVER=mysql
+   #     - DB_HOST=mysql
+   #     - DB_USER=root
+   #     - DB_PASSWORD=root
+   #     - DB_NAME=flux
+   #   depends_on:
+   #     mysql:
+   #       condition: service_healthy
+   #     redis:
+   #       condition: service_started
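With this file in place, `docker compose up -d` brings up MySQL and Redis for local development; the commented-out `console` service can be re-enabled to run Zenith inside the same Compose network instead of via npm.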
package/docs/integrations/LARAVEL.md ADDED
@@ -0,0 +1,207 @@
+ # Laravel Integration Guide for Gravito Zenith
+
+ This guide outlines the architecture and implementation details for integrating Laravel applications with **Gravito Zenith**, enabling centralized monitoring, logging, and auditing for Laravel queues.
+
+ ## Architecture Overview
+
+ +----------------+       +--------------+       +----------------+
+ |  Laravel App   |       | Redis Broker |       | Gravito Zenith |
+ | (Horizon/Queue)| ----> |   (Shared)   | <---- | Control Plane  |
+ +----------------+       +--------------+       +----------------+
+         |                        ^
+         |                        | Redis Pub/Sub & Lists
+         +--- [Zenith Connector] -+
+
+ The **Zenith Connector** acts as a bridge, translating Laravel's internal queue events into the Zenith Protocol.
+
+ ---
+
+ ## 1. Protocol Specification
+
+ To be visible in Zenith, the Laravel connector must implement the following Redis interactions:
+
+ ### Namespace
+ Default Prefix: `flux:` (configurable). Ensure this matches your Zenith configuration.
+
+ ### A. Worker Heartbeat (Process Discovery)
+ The connector must run a background process (or scheduled command) to announce the worker's presence.
+
+ - **Key**: `flux_console:worker:<worker-id>`
+ - **TTL**: 60 seconds (refresh every 30s)
+ - **Format**:
+ ```json
+ {
+   "id": "laravel-worker-supervisor-1",
+   "hostname": "app-server-01",
+   "pid": 1234,
+   "uptime": 3600,
+   "queues": ["default", "emails"],
+   "concurrency": 10,
+   "memory": {
+     "rss": "100MB",
+     "heapTotal": "N/A",
+     "heapUsed": "N/A"
+   },
+   "timestamp": "ISO-8601 String"
+ }
+ ```
+
+ ### B. Real-time Logs (Event Stream)
50
+ The connector listens to Laravel Queue events and publishes them to Zenith.
51
+
52
+ - **Channel**: `flux_console:logs`
53
+ - **Format**:
54
+ ```json
55
+ {
56
+ "level": "info", // info | warn | error | success
57
+ "message": "Processing Job: App\\Jobs\\SendWelcomeEmail",
58
+ "workerId": "laravel-worker-supervisor-1",
59
+ "queue": "emails",
60
+ "timestamp": "ISO-8601 String",
61
+ "jobId": "uuid-..." // Optional, enables specific tracing
62
+ }
63
+ ```
64
+
65
+ ### C. Job Auditing (Time Travel)
66
+ (Optional) For "Time Travel Audit" features, the connector should write to the persistent store if configured, or rely on Zenith's Redis scanning if utilizing standard Flux queue structures. Since Laravel uses its own queue structure, **Real-time Logs** are the primary integration point for v1.
67
+
68
+ ---
69
+
70
+ ## 2. Implementation Blueprint (PHP)
71
+
72
+ This section provides the reference implementation for the `gravito/zenith-laravel` composer package.
73
+
74
+ ### Service Provider: `ZenithServiceProvider.php`
75
+
76
+ This provider hooks into Laravel's Queue events.
77
+
78
+ ```php
79
+ <?php
80
+
81
+ namespace Gravito\Zenith\Laravel;
82
+
83
+ use Illuminate\Support\ServiceProvider;
84
+ use Illuminate\Support\Facades\Queue;
85
+ use Illuminate\Support\Facades\Redis;
86
+ use Illuminate\Queue\Events\JobProcessing;
87
+ use Illuminate\Queue\Events\JobProcessed;
88
+ use Illuminate\Queue\Events\JobFailed;
89
+
90
+ class ZenithServiceProvider extends ServiceProvider
91
+ {
92
+ public function boot()
93
+ {
94
+ // 1. Job Started
95
+ Queue::before(function (JobProcessing $event) {
96
+ $this->publishLog('info', $event);
97
+ });
98
+
99
+ // 2. Job Success
100
+ Queue::after(function (JobProcessed $event) {
101
+ $this->publishLog('success', $event);
102
+ });
103
+
104
+ // 3. Job Failed
105
+ Queue::failing(function (JobFailed $event) {
106
+ $this->publishLog('error', $event, $event->exception->getMessage());
107
+ });
108
+ }
109
+
110
+ protected function publishLog($level, $event, $extraMessage = '')
111
+ {
112
+ $payload = $event->job->payload();
113
+ $jobName = $payload['displayName'] ?? 'Unknown Job';
114
+
115
+ // Simplify Job Name (remove namespace for display)
116
+ $shortName = class_basename($jobName);
117
+
118
+ $message = match($level) {
119
+ 'info' => "Processing {$shortName}",
120
+ 'success' => "Completed {$shortName}",
121
+ 'error' => "Failed {$shortName}: {$extraMessage}",
122
+ };
123
+
124
+ $log = [
125
+ 'level' => $level,
126
+ 'message' => $message,
127
+ 'workerId' => gethostname() . '-' . getmypid(), // Simple ID generation
128
+ 'queue' => $event->job->getQueue(),
129
+ 'timestamp' => now()->toIso8601String(),
130
+ 'jobId' => $event->job->getJobId()
131
+ ];
132
+
133
+ // Fire and forget to Redis
134
+ try {
135
+ Redis::connection('zenith')->publish('flux_console:logs', json_encode($log));
136
+ } catch (\Exception $e) {
137
+ // Silently fail to not disrupt main application
138
+ }
139
+ }
140
+ }
141
+ ```
142
+
143
+ ### Heartbeat Command: `zenith:heartbeat`
144
+
145
+ This command should be run as a daemon (Supervisor) or scheduled every minute (less precise). For best results, run as a sidecar process.
146
+
147
+ ```php
148
+ <?php
149
+
150
+ namespace Gravito\Zenith\Laravel\Console;
151
+
152
+ use Illuminate\Console\Command;
153
+ use Illuminate\Support\Facades\Redis;
154
+
155
+ class ZenithHeartbeat extends Command
156
+ {
157
+ protected $signature = 'zenith:heartbeat';
158
+ protected $description = 'Send heartbeat to Gravito Zenith';
159
+
160
+ public function handle()
161
+ {
162
+ $this->info('Starting Zenith Heartbeat...');
163
+
164
+ while (true) {
165
+ $workerId = gethostname() . '-' . getmypid();
166
+
167
+ $payload = [
168
+ 'id' => $workerId,
169
+ 'hostname' => gethostname(),
170
+ 'pid' => getmypid(),
171
+ 'uptime' => 0, // Calculate real uptime if needed
172
+ 'queues' => config('queue.connections.redis.queue', ['default']),
173
+ 'memory' => [
174
+ 'rss' => round(memory_get_usage() / 1024 / 1024, 2) . ' MB',
175
+ 'heapUsed' => 'N/A',
176
+ 'heapTotal' => 'N/A'
177
+ ],
178
+ 'timestamp' => now()->toIso8601String()
179
+ ];
180
+
181
+ Redis::connection('zenith')->setex(
182
+ "flux_console:worker:{$workerId}",
183
+ 30, // 30s TTL
184
+ json_encode($payload)
185
+ );
186
+
187
+ sleep(5);
188
+ }
189
+ }
190
+ }
191
+ ```
192
+
193
+ ## 3. Configuration
194
+
195
+ Users will need to configure a dedicated Redis connection for Zenith in `config/database.php` to avoid prefix collisions if they modify their default Redis prefix.
196
+
197
+ ```php
198
+ 'redis' => [
199
+ 'zenith' => [
200
+ 'host' => env('ZENITH_REDIS_HOST', '127.0.0.1'),
201
+ 'password' => env('ZENITH_REDIS_PASSWORD', null),
202
+ 'port' => env('ZENITH_REDIS_PORT', '6379'),
203
+ 'database' => env('ZENITH_REDIS_DB', '0'),
204
+ 'prefix' => '', // Ensure no prefix or match Zenith's expectation
205
+ ],
206
+ ],
207
+ ```
package/package.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "name": "@gravito/zenith",
+   "version": "0.1.0-beta.1",
+   "description": "Gravito Zenith: Zero-config control plane for Gravito Flux & Stream",
+   "type": "module",
+   "bin": {
+     "zenith": "./dist/bin.js",
+     "flux-console": "./dist/bin.js"
+   },
+   "main": "./dist/index.js",
+   "types": "./dist/index.d.ts",
+   "scripts": {
+     "dev:server": "bun run --watch src/server/index.ts",
+     "dev:client": "vite",
+     "build": "vite build && bun build ./src/server/index.ts ./src/bin.ts --outdir ./dist --target bun",
+     "start": "bun ./dist/bin.js",
+     "test": "bun test",
+     "seed": "bun scripts/seed.ts",
+     "worker": "bun scripts/worker.ts"
+   },
+   "dependencies": {
+     "@gravito/atlas": "workspace:*",
+     "@gravito/photon": "workspace:*",
+     "@gravito/stream": "workspace:*",
+     "@tanstack/react-query": "^5.0.0",
+     "clsx": "^2.1.1",
+     "date-fns": "^4.1.0",
+     "framer-motion": "^12.23.26",
+     "ioredis": "^5.0.0",
+     "lucide-react": "^0.562.0",
+     "react": "^19.0.0",
+     "react-dom": "^19.0.0",
+     "react-router-dom": "^7.11.0",
+     "recharts": "^3.6.0",
+     "tailwind-merge": "^3.4.0"
+   },
+   "devDependencies": {
+     "@types/react": "^19.0.0",
+     "@types/react-dom": "^19.0.0",
+     "@vitejs/plugin-react": "^5.1.2",
+     "autoprefixer": "^10.4.0",
+     "postcss": "^8.4.0",
+     "tailwindcss": "^3.4.0",
+     "typescript": "^5.0.0",
+     "vite": "^6.0.0"
+   },
+   "publishConfig": {
+     "access": "public"
+   }
+ }
package/postcss.config.js ADDED
@@ -0,0 +1,6 @@
+ export default {
+   plugins: {
+     tailwindcss: {},
+     autoprefixer: {},
+   },
+ }
package/scripts/flood-logs.ts ADDED
@@ -0,0 +1,21 @@
+ import { QueueService } from '../src/server/services/QueueService'
+
+ const svc = new QueueService(process.env.REDIS_URL || 'redis://localhost:6379')
+ await svc.connect()
+
+ console.log('🌊 Flooding 500 logs in burst...')
+ const start = Date.now()
+ const promises = []
+ for (let i = 0; i < 500; i++) {
+   promises.push(
+     svc.publishLog({
+       level: 'info',
+       message: `Flood log ${i} - ${Date.now()}`,
+       workerId: 'flood-bot',
+       queue: 'test-flood',
+     })
+   )
+ }
+ await Promise.all(promises)
+ console.log(`āœ… Sent 500 logs in ${Date.now() - start}ms`)
+ process.exit(0)
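The script assumes a reachable Redis instance (override with `REDIS_URL`); run it directly with `bun scripts/flood-logs.ts` to generate a burst for throttle testing.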
package/scripts/seed.ts ADDED
@@ -0,0 +1,213 @@
+ #!/usr/bin/env bun
+ import { Job, QueueManager } from '@gravito/stream'
+ import Redis from 'ioredis'
+
+ /**
+  * Flux Console Unified Seed Script
+  *
+  * Usage:
+  *   bun scripts/seed.ts [mode]
+  *
+  * Modes:
+  *   standard - Small set of diverse jobs (Waiting, Delayed, Failed)
+  *   stress   - Many queues and many jobs for performance testing
+  *   batch    - Setup for batch operation testing (100+ jobs)
+  *   cron     - Register recurring schedules
+  *   cleanup  - Flush Redis and clear logs
+  *   all      - Cleanup, then run every seeder
+  */
+
+ const mode = process.argv[2] || 'standard'
+ const redis = new Redis('redis://localhost:6379')
+ const prefix = 'queue:'
+
+ // Simple Job class for testing
+ class GenericJob extends Job {
+   constructor(
+     id: any = null,
+     public data: any = {}
+   ) {
+     super()
+     this.id = id
+   }
+   async handle() {}
+ }
+
+ const manager = new QueueManager({
+   default: 'redis',
+   connections: {
+     redis: {
+       driver: 'redis',
+       client: redis,
+       prefix,
+     },
+   },
+ })
+
+ manager.registerJobClasses([GenericJob])
+
+ async function cleanup() {
+   console.log('🧹 Cleaning up Redis...')
+   const keys = await redis.keys(`${prefix}*`)
+   const internalKeys = await redis.keys('flux_console:*')
+   const allKeys = [...keys, ...internalKeys]
+
+   if (allKeys.length > 0) {
+     await redis.del(...allKeys)
+   }
+   console.log(`āœ… Removed ${allKeys.length} keys.`)
+ }
+
+ async function seedStandard() {
+   console.log('šŸš€ Seeding standard data...')
+
+   // Orders Queue
+   for (let i = 1; i <= 5; i++) {
+     const job = new GenericJob(`ORD-${1000 + i}`, { amount: Math.random() * 100 })
+     job.queueName = 'orders'
+     await manager.push(job)
+   }
+
+   // Failed Jobs
+   for (let i = 1; i <= 3; i++) {
+     const jobInstance = new GenericJob(`FAIL-${i}`, { error: 'Payment Timeout' })
+     jobInstance.queueName = 'orders'
+     const serialized = manager.getSerializer().serialize(jobInstance)
+
+     await redis.lpush(
+       `${prefix}orders:failed`,
+       JSON.stringify({
+         ...serialized,
+         status: 'failed',
+         failedReason: 'Payment Timeout',
+         failedAt: Date.now(),
+       })
+     )
+   }
+
+   // Delayed Jobs
+   for (let i = 1; i <= 3; i++) {
+     const job = new GenericJob(`DLY-${i}`, { type: 'reminder' })
+     job.queueName = 'notifications'
+     job.delay(3600 * 1000) // 1 hour
+     await manager.push(job)
+   }
+ }
+
+ async function seedStress() {
+   console.log('šŸ”„ Stress Mode: Creating 15 queues with jobs...')
+   const queues = [
+     'billing',
+     'shipping',
+     'inventory',
+     'marketing',
+     'crm',
+     'auth',
+     'logs',
+     'backups',
+     'indexing',
+     'cache',
+     'sync',
+     'webhooks',
+     'api',
+     'metrics',
+     'events',
+   ]
+
+   for (const q of queues) {
+     const count = 10 + Math.floor(Math.random() * 40)
+     for (let i = 0; i < count; i++) {
+       const job = new GenericJob(`JOB-${q}-${i}`, { timestamp: Date.now() })
+       job.queueName = q
+       await manager.push(job)
+     }
+     console.log(`  - ${q}: ${count} jobs`)
+   }
+ }
+
+ async function seedBatch() {
+   console.log('šŸ“¦ Batch Mode: Setting up specialized data for batch testing...')
+
+   // 100 Waiting jobs
+   for (let i = 1; i <= 100; i++) {
+     const job = new GenericJob(`BATCH-WAIT-${i}`)
+     job.queueName = 'test-batch'
+     await manager.push(job)
+   }
+
+   // 50 Failed jobs
+   for (let i = 1; i <= 50; i++) {
+     const jobInstance = new GenericJob(`BATCH-FAIL-${i}`, { error: 'Database Connection Lost' })
+     jobInstance.queueName = 'test-batch-fail'
+     const serialized = manager.getSerializer().serialize(jobInstance)
+
+     await redis.lpush(
+       `${prefix}test-batch-fail:failed`,
+       JSON.stringify({
+         ...serialized,
+         status: 'failed',
+         attempts: 3,
+         failedAt: Date.now(),
+       })
+     )
+   }
+ }
+
+ async function seedCron() {
+   console.log('ā° Cron Mode: Registering recurring schedules...')
+   const scheduler = manager.getScheduler()
+   const serializer = manager.getSerializer()
+
+   const rawSchedules = [
+     { id: 'cleanup-tmp', cron: '*/1 * * * *', queue: 'system', name: 'CleanupTmp' },
+     { id: 'daily-report', cron: '0 0 * * *', queue: 'reports', name: 'DailyReport' },
+     { id: 'health-check', cron: '*/5 * * * *', queue: 'monitoring', name: 'HealthCheck' },
+     { id: 'high-frequency', cron: '*/1 * * * *', queue: 'fast', name: 'Pulse' },
+   ]
+
+   for (const s of rawSchedules) {
+     const jobInstance = new GenericJob(s.id, { task: s.name })
+     jobInstance.queueName = s.queue
+     const serialized = serializer.serialize(jobInstance)
+
+     await scheduler.register({
+       id: s.id,
+       cron: s.cron,
+       queue: s.queue,
+       job: serialized,
+     })
+     console.log(`  - Registered: ${s.id} (${s.cron})`)
+   }
+ }
+
+ async function main() {
+   try {
+     if (mode === 'cleanup') {
+       await cleanup()
+     } else if (mode === 'standard') {
+       await cleanup()
+       await seedStandard()
+     } else if (mode === 'stress') {
+       await seedStress()
+     } else if (mode === 'batch') {
+       await seedBatch()
+     } else if (mode === 'cron') {
+       await seedCron()
+     } else if (mode === 'all') {
+       await cleanup()
+       await seedStandard()
+       await seedStress()
+       await seedBatch()
+       await seedCron()
+     } else {
+       console.log('āŒ Unknown mode. Try: standard, stress, batch, cron, cleanup, all')
+     }
+   } catch (err) {
+     console.error('šŸ’„ Error:', err)
+   } finally {
+     redis.disconnect()
+     console.log('\nšŸ Done.')
+     process.exit(0)
+   }
+ }
+
+ main()
package/scripts/verify-throttle.ts ADDED
@@ -0,0 +1,45 @@
+ console.log('šŸŽ§ Connecting to log stream...')
+ const res = await fetch('http://localhost:3000/api/logs/stream')
+ if (!res.body) throw new Error('No body')
+
+ const reader = res.body.getReader()
+ const decoder = new TextDecoder()
+
+ let logCount = 0
+ const start = Date.now()
+
+ // Count for 2 seconds
+ const DURATION = 2000
+
+ console.log(`ā³ Measuring received logs for ${DURATION}ms...`)
+
+ async function readStream() {
+   while (true) {
+     const { done, value } = await reader.read()
+     if (done) break
+
+     const chunk = decoder.decode(value)
+     // SSE format: event: log\ndata: ...\n\n
+     const matches = chunk.match(/event: log/g)
+     if (matches) {
+       logCount += matches.length
+     }
+
+     if (Date.now() - start > DURATION) {
+       break
+     }
+   }
+
+   console.log(`šŸ“Š Result: Received ${logCount} logs in ${(Date.now() - start) / 1000}s`)
+   console.log(`ā„¹ļø Expected max: ~100-120 logs (50/sec limit over 2s + potential buffer/history)`)
+
+   if (logCount > 150) {
+     console.error('āŒ Throttling FAILED! Too many logs received.')
+     process.exit(1)
+   } else {
+     console.log('āœ… Throttling PASSED!')
+     process.exit(0)
+   }
+ }
+
+ readStream()
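A plausible end-to-end check, assuming the console is serving `/api/logs/stream` on port 3000: start this script, then run `bun scripts/flood-logs.ts` in a second terminal so the 500-log burst hits the throttle while the stream is being measured.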