@qlucent/fishi-core 0.12.0 → 0.14.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -8826,6 +8826,149 @@ if (isMain) {
8826
8826
  `;
8827
8827
  }
8828
8828
 
8829
+ // src/templates/hooks/file-lock-hook.ts
8830
+ function getFileLockHookScript() {
8831
+ return `#!/usr/bin/env node
8832
+ // file-lock-hook.mjs \u2014 FISHI File Lock Manager
8833
+ // Prevents worktree conflicts by locking files before agent assignment.
8834
+ // Usage:
8835
+ // node .fishi/scripts/file-lock-hook.mjs check --files "src/a.ts,src/b.ts" --agent backend-agent --task auth
8836
+ // node .fishi/scripts/file-lock-hook.mjs lock --files "src/a.ts" --agent backend-agent --task auth --coordinator dev-lead
8837
+ // node .fishi/scripts/file-lock-hook.mjs release --agent backend-agent [--task auth]
8838
+ // node .fishi/scripts/file-lock-hook.mjs status
8839
+ // node .fishi/scripts/file-lock-hook.mjs agent-locks --agent backend-agent
8840
+
8841
+ import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs';
8842
+ import { join, dirname } from 'path';
8843
+
8844
+ const ROOT = process.env.FISHI_PROJECT_ROOT || process.cwd();
8845
+ const LOCK_FILE = join(ROOT, '.fishi', 'state', 'file-locks.yaml');
8846
+
8847
+ function out(obj) {
8848
+ process.stdout.write(JSON.stringify(obj, null, 2) + '\\n');
8849
+ }
8850
+
8851
+ function fail(msg) {
8852
+ out({ error: msg });
8853
+ process.exit(1);
8854
+ }
8855
+
8856
+ function readLocks() {
8857
+ if (!existsSync(LOCK_FILE)) return [];
8858
+ const content = readFileSync(LOCK_FILE, 'utf-8');
8859
+ const locks = [];
8860
+ const blocks = content.split(/\\n\\s*-\\s+file:\\s*/).slice(1);
8861
+ for (const block of blocks) {
8862
+ const lines = ('file: ' + block).split('\\n');
8863
+ const lock = {};
8864
+ for (const line of lines) {
8865
+ const fm = line.match(/^\\s*file:\\s*["']?(.+?)["']?\\s*$/);
8866
+ const am = line.match(/^\\s*agent:\\s*["']?(.+?)["']?\\s*$/);
8867
+ const tm = line.match(/^\\s*task:\\s*["']?(.+?)["']?\\s*$/);
8868
+ const cm = line.match(/^\\s*coordinator:\\s*["']?(.+?)["']?\\s*$/);
8869
+ const lm = line.match(/^\\s*locked_at:\\s*["']?(.+?)["']?\\s*$/);
8870
+ if (fm) lock.file = fm[1];
8871
+ if (am) lock.agent = am[1];
8872
+ if (tm) lock.task = tm[1];
8873
+ if (cm) lock.coordinator = cm[1];
8874
+ if (lm) lock.lockedAt = lm[1];
8875
+ }
8876
+ if (lock.file && lock.agent) locks.push(lock);
8877
+ }
8878
+ return locks;
8879
+ }
8880
+
8881
+ function writeLocks(locks) {
8882
+ const dir = dirname(LOCK_FILE);
8883
+ if (!existsSync(dir)) mkdirSync(dir, { recursive: true });
8884
+ if (locks.length === 0) { writeFileSync(LOCK_FILE, 'locks: []\\n', 'utf-8'); return; }
8885
+ let yaml = 'locks:\\n';
8886
+ for (const l of locks) {
8887
+ yaml += ' - file: "' + l.file + '"\\n';
8888
+ yaml += ' agent: "' + l.agent + '"\\n';
8889
+ yaml += ' task: "' + l.task + '"\\n';
8890
+ yaml += ' coordinator: "' + (l.coordinator || '') + '"\\n';
8891
+ yaml += ' locked_at: "' + l.lockedAt + '"\\n';
8892
+ }
8893
+ writeFileSync(LOCK_FILE, yaml, 'utf-8');
8894
+ }
8895
+
8896
+ function parseArgs() {
8897
+ const args = process.argv.slice(2);
8898
+ const cmd = args[0];
8899
+ const opts = {};
8900
+ for (let i = 1; i < args.length; i += 2) {
8901
+ const key = args[i]?.replace(/^--/, '');
8902
+ opts[key] = args[i + 1];
8903
+ }
8904
+ return { cmd, opts };
8905
+ }
8906
+
8907
+ const { cmd, opts } = parseArgs();
8908
+
8909
+ if (cmd === 'check') {
8910
+ if (!opts.files || !opts.agent || !opts.task) fail('Usage: check --files "a,b" --agent X --task Y');
8911
+ const files = opts.files.split(',').map(f => f.trim());
8912
+ const locks = readLocks();
8913
+ const conflicts = [];
8914
+ for (const file of files) {
8915
+ const locked = locks.find(l => l.file === file && l.agent !== opts.agent);
8916
+ if (locked) conflicts.push({ file, lockedBy: locked.agent, lockedTask: locked.task, lockedAt: locked.lockedAt });
8917
+ }
8918
+ out({ conflicts, hasConflicts: conflicts.length > 0 });
8919
+
8920
+ } else if (cmd === 'lock') {
8921
+ if (!opts.files || !opts.agent || !opts.task) fail('Usage: lock --files "a,b" --agent X --task Y --coordinator Z');
8922
+ const files = opts.files.split(',').map(f => f.trim());
8923
+ const locks = readLocks();
8924
+ // Check conflicts first
8925
+ const conflicts = [];
8926
+ for (const f of files) {
8927
+ const locked = locks.find(l => l.file === f && l.agent !== opts.agent);
8928
+ if (locked) conflicts.push({ file: f, lockedBy: locked.agent, lockedTask: locked.task });
8929
+ }
8930
+ if (conflicts.length > 0) {
8931
+ out({ success: false, conflicts });
8932
+ process.exit(1);
8933
+ }
8934
+ const now = new Date().toISOString();
8935
+ for (const f of files) {
8936
+ if (!locks.some(l => l.file === f && l.agent === opts.agent)) {
8937
+ locks.push({ file: f, agent: opts.agent, task: opts.task, coordinator: opts.coordinator || '', lockedAt: now });
8938
+ }
8939
+ }
8940
+ writeLocks(locks);
8941
+ out({ success: true, locked: files, agent: opts.agent, task: opts.task });
8942
+
8943
+ } else if (cmd === 'release') {
8944
+ if (!opts.agent) fail('Usage: release --agent X [--task Y]');
8945
+ const locks = readLocks();
8946
+ const released = [];
8947
+ const remaining = [];
8948
+ for (const l of locks) {
8949
+ if (l.agent === opts.agent && (!opts.task || l.task === opts.task)) released.push(l.file);
8950
+ else remaining.push(l);
8951
+ }
8952
+ writeLocks(remaining);
8953
+ out({ released, agent: opts.agent, remaining: remaining.length });
8954
+
8955
+ } else if (cmd === 'status') {
8956
+ const locks = readLocks();
8957
+ const byAgent = {};
8958
+ for (const l of locks) byAgent[l.agent] = (byAgent[l.agent] || 0) + 1;
8959
+ out({ totalLocked: locks.length, byAgent, locks });
8960
+
8961
+ } else if (cmd === 'agent-locks') {
8962
+ if (!opts.agent) fail('Usage: agent-locks --agent X');
8963
+ const locks = readLocks().filter(l => l.agent === opts.agent);
8964
+ out({ agent: opts.agent, lockCount: locks.length, locks });
8965
+
8966
+ } else {
8967
+ fail('Unknown command: ' + cmd + '. Use: check, lock, release, status, agent-locks');
8968
+ }
8969
+ `;
8970
+ }
8971
+
8829
8972
  // src/templates/commands/init-command.ts
8830
8973
  function getInitCommand() {
8831
8974
  return `# /fishi-init \u2014 Launch the FISHI Orchestration Pipeline
@@ -11368,7 +11511,8 @@ async function generateScaffold(targetDir, options) {
11368
11511
  await write(".fishi/scripts/learnings-manager.mjs", getLearningsManagerScript());
11369
11512
  await write(".fishi/scripts/doc-checker.mjs", getDocCheckerScript());
11370
11513
  await write(".fishi/scripts/monitor-emitter.mjs", getMonitorEmitterScript());
11371
- const hookCount = 16;
11514
+ await write(".fishi/scripts/file-lock-hook.mjs", getFileLockHookScript());
11515
+ const hookCount = 17;
11372
11516
  const todoTemplate = (name) => `# TODO \u2014 ${name}
11373
11517
 
11374
11518
  ## Active
@@ -11453,6 +11597,7 @@ async function generateScaffold(targetDir, options) {
11453
11597
  projectType: options.projectType
11454
11598
  }));
11455
11599
  await write(".fishi/state/agent-registry.yaml", getAgentRegistryTemplate());
11600
+ await write(".fishi/state/file-locks.yaml", "locks: []\n");
11456
11601
  await write(".fishi/state/task-graph.yaml", "tasks: []\ndependencies: []\n");
11457
11602
  await write(".fishi/state/gates.yaml", "gates: []\n");
11458
11603
  await write(".fishi/state/monitor.json", JSON.stringify({
@@ -11718,7 +11863,7 @@ async function createBackup(targetDir, conflictingFiles) {
11718
11863
  manifestFiles.push({ path: relPath, size: stat.size });
11719
11864
  }
11720
11865
  }
11721
- const fishiVersion = "0.12.0";
11866
+ const fishiVersion = "0.14.0";
11722
11867
  const manifest = {
11723
11868
  timestamp: now.toISOString(),
11724
11869
  fishi_version: fishiVersion,
@@ -13016,6 +13161,2005 @@ function findScanFiles(projectDir) {
13016
13161
  return files.slice(0, 500);
13017
13162
  }
13018
13163
 
13164
+ // src/generators/pattern-marketplace.ts
13165
+ import { existsSync as existsSync10, readFileSync as readFileSync7, writeFileSync as writeFileSync4, mkdirSync as mkdirSync4 } from "fs";
13166
+ import { join as join10 } from "path";
13167
+ var PATTERN_CATEGORIES = [
13168
+ // 1. Authentication
13169
+ {
13170
+ id: "authentication",
13171
+ name: "Authentication",
13172
+ description: "User authentication and identity management solutions",
13173
+ patterns: [
13174
+ {
13175
+ id: "auth0",
13176
+ name: "Auth0",
13177
+ category: "authentication",
13178
+ description: "Enterprise-grade identity platform with SSO, MFA, and social login",
13179
+ tools: ["Auth0", "auth0-nextjs", "auth0-spa-js"],
13180
+ guide: `### Setup
13181
+ - Install: \`pnpm add @auth0/nextjs-auth0\` (Next.js) or \`@auth0/auth0-spa-js\` (SPA)
13182
+ - Env vars: AUTH0_SECRET, AUTH0_BASE_URL, AUTH0_ISSUER_BASE_URL, AUTH0_CLIENT_ID, AUTH0_CLIENT_SECRET
13183
+
13184
+ ### Architecture
13185
+ - Use Auth0 Universal Login for authentication flows \u2014 avoid custom login forms
13186
+ - Server-side: \`handleAuth()\` creates /api/auth/* routes (login, logout, callback, me)
13187
+ - Middleware: \`withMiddlewareAuthRequired\` to protect routes at the edge
13188
+ - Client-side: \`useUser()\` hook for user state, \`withPageAuthRequired\` for page protection
13189
+ - Store Auth0 user_id in your database users table for linking
13190
+
13191
+ ### Key Patterns
13192
+ - Use Auth0 Actions for post-login hooks (sync user to DB, add custom claims)
13193
+ - Configure RBAC in Auth0 Dashboard \u2014 add roles/permissions as JWT claims
13194
+ - Use refresh token rotation for long-lived sessions
13195
+
13196
+ ### Pitfalls
13197
+ - Never validate JWTs manually \u2014 use the SDK's built-in verification
13198
+ - Set allowed callback URLs in Auth0 Dashboard for every environment
13199
+ - Auth0 rate limits: 300 requests/min for Management API`
13200
+ },
13201
+ {
13202
+ id: "clerk",
13203
+ name: "Clerk",
13204
+ category: "authentication",
13205
+ description: "Modern authentication with prebuilt UI components and user management",
13206
+ tools: ["Clerk", "@clerk/nextjs", "@clerk/clerk-react"],
13207
+ guide: `### Setup
13208
+ - Install: \`pnpm add @clerk/nextjs\` (Next.js) or \`@clerk/clerk-react\` (React)
13209
+ - Env vars: NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY, CLERK_SECRET_KEY
13210
+
13211
+ ### Architecture
13212
+ - Wrap app with \`<ClerkProvider>\` \u2014 provides auth context to all components
13213
+ - Use \`<SignIn />\`, \`<SignUp />\`, \`<UserButton />\` prebuilt components
13214
+ - Middleware: \`clerkMiddleware()\` in middleware.ts protects routes automatically
13215
+ - Server-side: \`auth()\` in Server Components, \`getAuth()\` in API routes
13216
+ - Webhooks: POST /api/webhooks/clerk \u2014 handle user.created, user.updated events
13217
+
13218
+ ### Key Patterns
13219
+ - Use Clerk Organizations for multi-tenant apps
13220
+ - Store Clerk user_id in your DB, sync via webhooks (not on every request)
13221
+ - Use \`auth().protect()\` for role-based access in server code
13222
+
13223
+ ### Pitfalls
13224
+ - Always verify webhook signatures with svix
13225
+ - Prebuilt components use Clerk's domain \u2014 configure for custom domains in production
13226
+ - Free tier: 10,000 MAUs \u2014 plan for pricing at scale`
13227
+ },
13228
+ {
13229
+ id: "nextauth",
13230
+ name: "NextAuth.js",
13231
+ category: "authentication",
13232
+ description: "Open-source authentication for Next.js with 50+ providers",
13233
+ tools: ["NextAuth.js", "next-auth", "Auth.js"],
13234
+ guide: `### Setup
13235
+ - Install: \`pnpm add next-auth\` (v4) or \`pnpm add next-auth@beta\` (v5/Auth.js)
13236
+ - Env vars: NEXTAUTH_SECRET, NEXTAUTH_URL, provider-specific client IDs/secrets
13237
+
13238
+ ### Architecture
13239
+ - API Route: /api/auth/[...nextauth] \u2014 handles all auth flows
13240
+ - Configure providers in auth config: Google, GitHub, Credentials, Email, etc.
13241
+ - Database adapter: Prisma, Drizzle, or Supabase for session/user persistence
13242
+ - Client: \`useSession()\` hook, \`<SessionProvider>\` wrapper
13243
+ - Server: \`getServerSession()\` in API routes, \`auth()\` in v5
13244
+
13245
+ ### Key Patterns
13246
+ - Use JWT strategy for serverless, database strategy for traditional servers
13247
+ - Extend session with custom fields via callbacks.session and callbacks.jwt
13248
+ - Use middleware matcher for route protection (v5)
13249
+
13250
+ ### Pitfalls
13251
+ - NEXTAUTH_SECRET must be set in production \u2014 generate with \`openssl rand -base64 32\`
13252
+ - Credentials provider doesn't support session persistence without a database adapter
13253
+ - v4 to v5 migration: significant API changes \u2014 check migration guide`
13254
+ },
13255
+ {
13256
+ id: "supabase-auth",
13257
+ name: "Supabase Auth",
13258
+ category: "authentication",
13259
+ description: "PostgreSQL-backed auth with Row Level Security integration",
13260
+ tools: ["Supabase", "@supabase/supabase-js", "@supabase/auth-helpers-nextjs"],
13261
+ guide: `### Setup
13262
+ - Install: \`pnpm add @supabase/supabase-js @supabase/ssr\`
13263
+ - Env vars: NEXT_PUBLIC_SUPABASE_URL, NEXT_PUBLIC_SUPABASE_ANON_KEY
13264
+
13265
+ ### Architecture
13266
+ - Auth is built into Supabase \u2014 no separate service needed
13267
+ - Client-side: \`supabase.auth.signInWithOAuth()\`, \`signInWithPassword()\`
13268
+ - Server-side: Create server client with cookie-based sessions
13269
+ - RLS: auth.uid() in PostgreSQL policies ties data access to authenticated user
13270
+ - Middleware: Refresh sessions on every request with \`updateSession()\`
13271
+
13272
+ ### Key Patterns
13273
+ - Enable RLS on all tables \u2014 use \`auth.uid() = user_id\` policies
13274
+ - Use Supabase Edge Functions for server-side auth logic
13275
+ - Configure OAuth providers in Supabase Dashboard > Authentication > Providers
13276
+ - Use auth.users table \u2014 don't create a separate users table for auth data
13277
+
13278
+ ### Pitfalls
13279
+ - Anon key is public but RLS must be enabled \u2014 without RLS, data is exposed
13280
+ - Cookie-based auth requires middleware setup for server components
13281
+ - Email confirmation enabled by default \u2014 disable in dev for faster iteration`
13282
+ },
13283
+ {
13284
+ id: "custom-jwt",
13285
+ name: "Custom JWT",
13286
+ category: "authentication",
13287
+ description: "Roll your own JWT-based auth for full control",
13288
+ tools: ["jsonwebtoken", "jose", "bcrypt"],
13289
+ guide: `### Setup
13290
+ - Install: \`pnpm add jose bcryptjs\` (jose for Edge-compatible JWT)
13291
+ - Env vars: JWT_SECRET (min 256-bit), JWT_EXPIRES_IN
13292
+
13293
+ ### Architecture
13294
+ - Auth endpoints: POST /api/auth/register, /api/auth/login, /api/auth/refresh
13295
+ - Hash passwords with bcrypt (cost factor 12+), store in users table
13296
+ - Issue short-lived access tokens (15min) + long-lived refresh tokens (7d)
13297
+ - Store refresh tokens in httpOnly cookies, access tokens in memory (not localStorage)
13298
+ - Middleware: Verify JWT on every protected request, extract user claims
13299
+
13300
+ ### Key Patterns
13301
+ - Use RS256 (asymmetric) for microservices, HS256 (symmetric) for monoliths
13302
+ - Implement token rotation: new refresh token on each refresh, invalidate old one
13303
+ - Add jti (JWT ID) claim for token revocation support
13304
+ - Include minimal claims: sub, role, iat, exp \u2014 not sensitive data
13305
+
13306
+ ### Pitfalls
13307
+ - Never store JWTs in localStorage \u2014 XSS vulnerable. Use httpOnly cookies
13308
+ - Implement refresh token reuse detection (if reused, revoke all family)
13309
+ - jose library works in Edge runtimes; jsonwebtoken does not`
13310
+ }
13311
+ ]
13312
+ },
13313
+ // 2. Payments
13314
+ {
13315
+ id: "payments",
13316
+ name: "Payments",
13317
+ description: "Payment processing and subscription management",
13318
+ patterns: [
13319
+ {
13320
+ id: "stripe",
13321
+ name: "Stripe",
13322
+ category: "payments",
13323
+ description: "Full-featured payment platform with subscriptions, invoicing, and Connect",
13324
+ tools: ["Stripe", "stripe", "@stripe/stripe-js"],
13325
+ guide: `### Setup
13326
+ - Install: \`pnpm add stripe @stripe/stripe-js\`
13327
+ - Env vars: STRIPE_SECRET_KEY, NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY, STRIPE_WEBHOOK_SECRET
13328
+
13329
+ ### Architecture
13330
+ - Server-side: Create checkout sessions, manage subscriptions via Stripe API
13331
+ - Client-side: Use @stripe/stripe-js for Elements (card input, payment form)
13332
+ - Webhooks: POST /api/webhooks/stripe \u2014 verify signature, handle checkout.session.completed, invoice.paid, customer.subscription.updated
13333
+ - Store customer_id and subscription_id in your users table
13334
+
13335
+ ### Key Patterns
13336
+ - Always verify webhook signatures with stripe.webhooks.constructEvent()
13337
+ - Use Stripe Customer Portal for self-service billing management
13338
+ - Implement idempotency keys for payment operations
13339
+ - Handle subscription states: active, past_due, canceled, trialing
13340
+
13341
+ ### Pitfalls
13342
+ - Never expose STRIPE_SECRET_KEY to client
13343
+ - Always handle webhook retries (idempotent handlers)
13344
+ - Test with Stripe CLI: \`stripe listen --forward-to localhost:3000/api/webhooks/stripe\``
13345
+ },
13346
+ {
13347
+ id: "paypal",
13348
+ name: "PayPal",
13349
+ category: "payments",
13350
+ description: "Global payment platform with buyer protection and Express Checkout",
13351
+ tools: ["PayPal", "@paypal/react-paypal-js", "paypal-rest-sdk"],
13352
+ guide: `### Setup
13353
+ - Install: \`pnpm add @paypal/react-paypal-js @paypal/paypal-js\`
13354
+ - Env vars: PAYPAL_CLIENT_ID, PAYPAL_CLIENT_SECRET, PAYPAL_WEBHOOK_ID
13355
+
13356
+ ### Architecture
13357
+ - Client-side: \`<PayPalScriptProvider>\` + \`<PayPalButtons>\` for checkout UI
13358
+ - Server-side: Create orders via PayPal REST API, capture payments on approval
13359
+ - Flow: createOrder \u2192 buyer approves \u2192 onApprove \u2192 captureOrder on server
13360
+ - Webhooks: PAYMENT.CAPTURE.COMPLETED, BILLING.SUBSCRIPTION.ACTIVATED
13361
+
13362
+ ### Key Patterns
13363
+ - Use PayPal Smart Buttons \u2014 auto-detect buyer's preferred payment method
13364
+ - Implement both one-time payments and subscriptions (PayPal Billing Plans)
13365
+ - Store PayPal order_id and subscription_id in your database
13366
+ - Use sandbox environment for development (separate sandbox credentials)
13367
+
13368
+ ### Pitfalls
13369
+ - PayPal SDK is loaded externally \u2014 handle script loading states
13370
+ - Always capture payments server-side, never trust client-side confirmation alone
13371
+ - Webhook verification requires PayPal's certificate chain validation`
13372
+ },
13373
+ {
13374
+ id: "lemonsqueezy",
13375
+ name: "LemonSqueezy",
13376
+ category: "payments",
13377
+ description: "Merchant of record \u2014 handles tax, billing, and compliance for digital products",
13378
+ tools: ["LemonSqueezy", "@lemonsqueezy/lemonsqueezy.js"],
13379
+ guide: `### Setup
13380
+ - Install: \`pnpm add @lemonsqueezy/lemonsqueezy.js\`
13381
+ - Env vars: LEMONSQUEEZY_API_KEY, LEMONSQUEEZY_STORE_ID, LEMONSQUEEZY_WEBHOOK_SECRET
13382
+
13383
+ ### Architecture
13384
+ - LemonSqueezy is the merchant of record \u2014 they handle tax, VAT, and compliance
13385
+ - Create products/variants in LemonSqueezy Dashboard, reference by ID in your app
13386
+ - Checkout: Generate checkout URLs via API, redirect users to LemonSqueezy-hosted page
13387
+ - Webhooks: order_created, subscription_created, subscription_updated, license_key_created
13388
+
13389
+ ### Key Patterns
13390
+ - Use checkout overlays for in-app purchase experience (no redirect)
13391
+ - Map LemonSqueezy customer_id to your user_id via webhook on first purchase
13392
+ - License keys for desktop/CLI apps \u2014 validate via API
13393
+ - Use test mode for development (separate test API key)
13394
+
13395
+ ### Pitfalls
13396
+ - As merchant of record, LemonSqueezy receives funds first, then pays you \u2014 expect delay
13397
+ - Webhook payload structure differs from Stripe \u2014 use their SDK for type safety
13398
+ - No direct card element integration \u2014 checkout is always LemonSqueezy-hosted or overlay`
13399
+ }
13400
+ ]
13401
+ },
13402
+ // 3. Email
13403
+ {
13404
+ id: "email",
13405
+ name: "Email",
13406
+ description: "Transactional and marketing email delivery",
13407
+ patterns: [
13408
+ {
13409
+ id: "sendgrid",
13410
+ name: "SendGrid",
13411
+ category: "email",
13412
+ description: "Scalable email delivery with templates and analytics",
13413
+ tools: ["SendGrid", "@sendgrid/mail"],
13414
+ guide: `### Setup
13415
+ - Install: \`pnpm add @sendgrid/mail\`
13416
+ - Env vars: SENDGRID_API_KEY, SENDGRID_FROM_EMAIL
13417
+
13418
+ ### Architecture
13419
+ - Use SendGrid Dynamic Templates for transactional emails (welcome, reset password, receipts)
13420
+ - Create templates in SendGrid Dashboard with Handlebars variables
13421
+ - API: \`sgMail.send({ to, from, templateId, dynamicTemplateData })\`
13422
+ - Inbound Parse: Route incoming emails to your webhook for reply handling
13423
+
13424
+ ### Key Patterns
13425
+ - Use template IDs, not inline HTML \u2014 keeps email design out of code
13426
+ - Batch sending: \`sgMail.sendMultiple()\` for bulk transactional emails
13427
+ - Set up domain authentication (SPF, DKIM) for deliverability
13428
+ - Use categories and custom_args for email analytics tracking
13429
+
13430
+ ### Pitfalls
13431
+ - Free tier: 100 emails/day \u2014 insufficient for production
13432
+ - Always set reply-to address separately from the from address
13433
+ - Implement exponential backoff for 429 rate limit responses`
13434
+ },
13435
+ {
13436
+ id: "resend",
13437
+ name: "Resend",
13438
+ category: "email",
13439
+ description: "Modern email API built for developers with React Email support",
13440
+ tools: ["Resend", "resend", "react-email"],
13441
+ guide: `### Setup
13442
+ - Install: \`pnpm add resend\` + \`pnpm add react-email @react-email/components -D\`
13443
+ - Env vars: RESEND_API_KEY, RESEND_FROM_EMAIL
13444
+
13445
+ ### Architecture
13446
+ - Write email templates as React components using @react-email/components
13447
+ - Send: \`resend.emails.send({ from, to, subject, react: <WelcomeEmail /> })\`
13448
+ - Preview templates: \`npx email dev\` \u2014 opens browser preview at localhost:3000
13449
+ - Supports attachments, scheduling, and batch sending
13450
+
13451
+ ### Key Patterns
13452
+ - Build email templates in /emails directory as React components
13453
+ - Use Resend's domain verification for custom from addresses
13454
+ - Batch API for sending to multiple recipients efficiently
13455
+ - Webhook events: email.sent, email.delivered, email.bounced, email.complained
13456
+
13457
+ ### Pitfalls
13458
+ - React Email renders to HTML server-side \u2014 don't use client-only hooks
13459
+ - Free tier: 3,000 emails/month, 100/day \u2014 verify limits before launch
13460
+ - Domain DNS setup required for custom from addresses (not just verified email)`
13461
+ },
13462
+ {
13463
+ id: "aws-ses",
13464
+ name: "AWS SES",
13465
+ category: "email",
13466
+ description: "High-volume email service with excellent deliverability",
13467
+ tools: ["AWS SES", "@aws-sdk/client-ses"],
13468
+ guide: `### Setup
13469
+ - Install: \`pnpm add @aws-sdk/client-ses\`
13470
+ - Env vars: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, SES_FROM_EMAIL
13471
+
13472
+ ### Architecture
13473
+ - Use SES v2 API via @aws-sdk/client-sesv2 for modern interface
13474
+ - Send raw emails for full control or templated emails for consistency
13475
+ - Configuration sets: Group sending config (tracking, reputation, suppression)
13476
+ - Receipt rules: Process incoming email (store in S3, trigger Lambda)
13477
+
13478
+ ### Key Patterns
13479
+ - Start in sandbox mode (verified recipients only), request production access early
13480
+ - Use SES Templates for transactional emails \u2014 manage via API or CloudFormation
13481
+ - Implement bounce/complaint handling via SNS notifications
13482
+ - Use dedicated IPs for high-volume sending (reputation isolation)
13483
+
13484
+ ### Pitfalls
13485
+ - Sandbox mode is default \u2014 you can only send to verified addresses until approved
13486
+ - SES charges per email ($0.10/1000) \u2014 monitor costs for high volume
13487
+ - Set up DKIM, SPF, and DMARC before going to production`
13488
+ },
13489
+ {
13490
+ id: "mailgun",
13491
+ name: "Mailgun",
13492
+ category: "email",
13493
+ description: "Email API with powerful routing, validation, and analytics",
13494
+ tools: ["Mailgun", "mailgun.js", "form-data"],
13495
+ guide: `### Setup
13496
+ - Install: \`pnpm add mailgun.js form-data\`
13497
+ - Env vars: MAILGUN_API_KEY, MAILGUN_DOMAIN, MAILGUN_FROM_EMAIL
13498
+
13499
+ ### Architecture
13500
+ - Initialize: \`new Mailgun(formData).client({ username: 'api', key })\`
13501
+ - Send: \`mg.messages.create(domain, { from, to, subject, html })\`
13502
+ - Routes: Define inbound email routing rules (forward, store, webhook)
13503
+ - Templates: Manage via API or Mailgun Dashboard with Handlebars
13504
+
13505
+ ### Key Patterns
13506
+ - Use Mailgun's email validation API to verify addresses before sending
13507
+ - Mailing lists for group communications and marketing
13508
+ - Tags for categorizing and tracking email campaigns
13509
+ - Webhooks for delivery events: delivered, opened, clicked, bounced
13510
+
13511
+ ### Pitfalls
13512
+ - API key has two types: primary (full access) and sending (limited) \u2014 use sending key in app
13513
+ - Free tier: 100 emails/day for first 3 months \u2014 plan accordingly
13514
+ - EU region requires different API endpoint (api.eu.mailgun.net)`
13515
+ }
13516
+ ]
13517
+ },
13518
+ // 4. Analytics
13519
+ {
13520
+ id: "analytics",
13521
+ name: "Analytics",
13522
+ description: "Product analytics, user behavior tracking, and insights",
13523
+ patterns: [
13524
+ {
13525
+ id: "posthog",
13526
+ name: "PostHog",
13527
+ category: "analytics",
13528
+ description: "Open-source product analytics with session recording and feature flags",
13529
+ tools: ["PostHog", "posthog-js", "posthog-node"],
13530
+ guide: `### Setup
13531
+ - Install: \`pnpm add posthog-js\` (client) + \`pnpm add posthog-node\` (server)
13532
+ - Env vars: NEXT_PUBLIC_POSTHOG_KEY, NEXT_PUBLIC_POSTHOG_HOST
13533
+
13534
+ ### Architecture
13535
+ - Client: Initialize PostHog in app layout, auto-captures pageviews and clicks
13536
+ - Server: Use posthog-node for server-side event tracking and feature flags
13537
+ - Feature flags: Evaluate server-side for SSR, client-side for interactive UI
13538
+ - Session recording: Automatic with posthog-js, configure sampling rate
13539
+
13540
+ ### Key Patterns
13541
+ - Use \`posthog.capture('event_name', { properties })\` for custom events
13542
+ - Feature flags: \`posthog.isFeatureEnabled('flag-name')\` with fallback values
13543
+ - Group analytics: \`posthog.group('company', company_id)\` for B2B
13544
+ - Use PostHog Toolbar for no-code event creation (click-to-track)
13545
+
13546
+ ### Pitfalls
13547
+ - Self-hosted PostHog requires significant infrastructure \u2014 start with Cloud
13548
+ - Client-side capture sends data on page unload \u2014 some events may be lost
13549
+ - Ad blockers block PostHog by default \u2014 consider reverse proxy setup`
13550
+ },
13551
+ {
13552
+ id: "plausible",
13553
+ name: "Plausible",
13554
+ category: "analytics",
13555
+ description: "Privacy-friendly, lightweight analytics \u2014 no cookies, GDPR compliant",
13556
+ tools: ["Plausible", "next-plausible", "plausible-tracker"],
13557
+ guide: `### Setup
13558
+ - Install: \`pnpm add next-plausible\` (Next.js) or add script tag manually
13559
+ - Env vars: NEXT_PUBLIC_PLAUSIBLE_DOMAIN
13560
+
13561
+ ### Architecture
13562
+ - Script-based: Add Plausible script to document head \u2014 auto-tracks pageviews
13563
+ - Next.js: \`<PlausibleProvider domain="...">\` in layout
13564
+ - Custom events: \`plausible('Signup', { props: { plan: 'pro' } })\`
13565
+ - API: Query stats programmatically via Plausible Stats API
13566
+
13567
+ ### Key Patterns
13568
+ - Goal conversions: Define in Plausible Dashboard, track with custom events
13569
+ - Custom properties: Add metadata to events for segmentation
13570
+ - Self-hosting: Plausible Community Edition on your own server for full control
13571
+ - Use 404 tracking with custom events for broken link detection
13572
+
13573
+ ### Pitfalls
13574
+ - No session recording or heatmaps \u2014 Plausible is pageview/event analytics only
13575
+ - Custom properties limited to string values
13576
+ - Self-hosted requires ClickHouse \u2014 resource-intensive for small teams`
13577
+ },
13578
+ {
13579
+ id: "mixpanel",
13580
+ name: "Mixpanel",
13581
+ category: "analytics",
13582
+ description: "Advanced product analytics with funnels, cohorts, and A/B testing",
13583
+ tools: ["Mixpanel", "mixpanel-browser", "mixpanel"],
13584
+ guide: `### Setup
13585
+ - Install: \`pnpm add mixpanel-browser\` (client) + \`pnpm add mixpanel\` (server)
13586
+ - Env vars: NEXT_PUBLIC_MIXPANEL_TOKEN
13587
+
13588
+ ### Architecture
13589
+ - Client: \`mixpanel.init(token)\` in app layout, track events with \`mixpanel.track()\`
13590
+ - Server: Use mixpanel Node SDK for backend events (payments, signups)
13591
+ - Identity: \`mixpanel.identify(user_id)\` after login, \`mixpanel.alias()\` on signup
13592
+ - User profiles: \`mixpanel.people.set({ plan, company })\` for segmentation
13593
+
13594
+ ### Key Patterns
13595
+ - Define funnels in Mixpanel for conversion analysis (signup \u2192 activate \u2192 pay)
13596
+ - Use super properties for persistent event metadata: \`mixpanel.register({ plan })\`
13597
+ - Cohort analysis for retention tracking
13598
+ - Use Mixpanel's Warehouse Connectors for raw data export
13599
+
13600
+ ### Pitfalls
13601
+ - Identity management is critical \u2014 incorrect alias/identify calls corrupt user data
13602
+ - Free tier: 20M events/month \u2014 generous but watch for event volume
13603
+ - Don't track PII in event properties \u2014 use user profiles instead`
13604
+ },
13605
+ {
13606
+ id: "google-analytics",
13607
+ name: "Google Analytics",
13608
+ category: "analytics",
13609
+ description: "Industry-standard web analytics with GA4 event-based model",
13610
+ tools: ["Google Analytics", "@next/third-parties", "gtag.js"],
13611
+ guide: `### Setup
13612
+ - Install: \`pnpm add @next/third-parties\` (Next.js) or add gtag.js script
13613
+ - Env vars: NEXT_PUBLIC_GA_MEASUREMENT_ID (G-XXXXXXXXXX)
13614
+
13615
+ ### Architecture
13616
+ - Next.js: \`<GoogleAnalytics gaId="G-..." />\` in layout \u2014 auto-tracks pageviews
13617
+ - Custom events: \`gtag('event', 'purchase', { value: 29.99, currency: 'USD' })\`
13618
+ - Ecommerce: Enhanced ecommerce events (view_item, add_to_cart, purchase)
13619
+ - Server-side: Measurement Protocol for backend event tracking
13620
+
13621
+ ### Key Patterns
13622
+ - Use GA4 recommended events for automatic reporting (sign_up, login, purchase)
13623
+ - Configure conversions in GA4 Dashboard for key business events
13624
+ - Use UTM parameters for campaign attribution tracking
13625
+ - BigQuery export for raw event data analysis
13626
+
13627
+ ### Pitfalls
13628
+ - GA4 is event-based (not session-based like UA) \u2014 different mental model
13629
+ - Data processing delay: up to 24-48 hours for some reports
13630
+ - Cookie consent required in EU \u2014 implement consent mode for GDPR compliance`
13631
+ }
13632
+ ]
13633
+ },
13634
+ // 5. Database
13635
+ {
13636
+ id: "database",
13637
+ name: "Database",
13638
+ description: "Database ORMs and managed database services",
13639
+ patterns: [
13640
+ {
13641
+ id: "prisma-postgresql",
13642
+ name: "Prisma + PostgreSQL",
13643
+ category: "database",
13644
+ description: "Type-safe ORM with auto-generated client and migrations",
13645
+ tools: ["Prisma", "PostgreSQL", "@prisma/client"],
13646
+ guide: `### Setup
13647
+ - Install: \`pnpm add prisma -D && pnpm add @prisma/client\`
13648
+ - Init: \`npx prisma init\` \u2014 creates prisma/schema.prisma
13649
+ - Env vars: DATABASE_URL (postgresql://user:pass@host:5432/db)
13650
+
13651
+ ### Architecture
13652
+ - Schema-first: Define models in schema.prisma, generate client with \`npx prisma generate\`
13653
+ - Migrations: \`npx prisma migrate dev\` for development, \`migrate deploy\` for production
13654
+ - Client: Singleton pattern \u2014 create one PrismaClient instance, reuse across requests
13655
+ - Relations: Use Prisma's relation fields for type-safe joins and nested queries
13656
+
13657
+ ### Key Patterns
13658
+ - Use \`prisma.$transaction()\` for multi-step operations
13659
+ - Soft deletes: Add deletedAt field, use middleware to filter automatically
13660
+ - Use \`prisma db seed\` for development/test data
13661
+ - Index frequently queried fields with \`@@index\` in schema
13662
+
13663
+ ### Pitfalls
13664
+ - PrismaClient in serverless: Use connection pooling (PgBouncer or Prisma Accelerate)
13665
+ - Hot reload creates multiple clients \u2014 use global singleton pattern in dev
13666
+ - N+1 queries: Use \`include\` or \`select\` to eager-load relations`
13667
+ },
13668
+ {
13669
+ id: "drizzle",
13670
+ name: "Drizzle ORM",
13671
+ category: "database",
13672
+ description: "Lightweight TypeScript ORM with SQL-like syntax and zero overhead",
13673
+ tools: ["Drizzle", "drizzle-orm", "drizzle-kit"],
13674
+ guide: `### Setup
13675
+ - Install: \`pnpm add drizzle-orm postgres\` + \`pnpm add drizzle-kit -D\`
13676
+ - Create schema in src/db/schema.ts using Drizzle table builders
13677
+ - Env vars: DATABASE_URL
13678
+
13679
+ ### Architecture
13680
+ - Schema-as-code: Define tables with \`pgTable()\`, columns as TypeScript
13681
+ - Queries: SQL-like syntax \u2014 \`db.select().from(users).where(eq(users.id, id))\`
13682
+ - Migrations: \`npx drizzle-kit generate\` then \`npx drizzle-kit migrate\`
13683
+ - Relational queries: \`db.query.users.findMany({ with: { posts: true } })\`
13684
+
13685
+ ### Key Patterns
13686
+ - Use \`drizzle-zod\` to generate Zod schemas from Drizzle tables
13687
+ - Prepared statements for performance: \`db.select().from(users).prepare('getUsers')\`
13688
+ - Use \`db.transaction(async (tx) => { ... })\` for atomic operations
13689
+ - Drizzle Studio: \`npx drizzle-kit studio\` for database browsing
13690
+
13691
+ ### Pitfalls
13692
+ - Drizzle is SQL-first \u2014 if you prefer object-oriented, use Prisma instead
13693
+ - No auto-migration like Prisma \u2014 you must generate and review migration files
13694
+ - Connection pooling still needed for serverless (use @neondatabase/serverless or pg-pool)`
13695
+ },
13696
+ {
13697
+ id: "supabase-db",
13698
+ name: "Supabase Database",
13699
+ category: "database",
13700
+ description: "Managed PostgreSQL with auto-generated REST and realtime APIs",
13701
+ tools: ["Supabase", "@supabase/supabase-js", "PostgreSQL"],
13702
+ guide: `### Setup
13703
+ - Install: \`pnpm add @supabase/supabase-js\`
13704
+ - Env vars: NEXT_PUBLIC_SUPABASE_URL, NEXT_PUBLIC_SUPABASE_ANON_KEY, SUPABASE_SERVICE_ROLE_KEY
13705
+
13706
+ ### Architecture
13707
+ - Auto-generated REST API: \`supabase.from('users').select('*')\` \u2014 no ORM needed
13708
+ - Realtime: Subscribe to database changes with \`supabase.channel().on('postgres_changes', ...)\`
13709
+ - RLS: Row Level Security policies control data access per-user
13710
+ - Edge Functions: Server-side logic in Deno, deployed on Supabase infrastructure
13711
+
13712
+ ### Key Patterns
13713
+ - Use RLS policies on every table \u2014 anon key is public, RLS is your access control
13714
+ - Service role key bypasses RLS \u2014 only use server-side, never expose to client
13715
+ - Use database functions for complex queries and business logic
13716
+ - Supabase CLI for local development: \`supabase start\`
13717
+
13718
+ ### Pitfalls
13719
+ - Anon key without RLS = public database \u2014 always enable RLS
13720
+ - Supabase client caches auth state \u2014 create separate clients for different auth contexts
13721
+ - Free tier: 500MB database, 2 projects \u2014 plan for paid tier in production`
13722
+ },
13723
+ {
13724
+ id: "mongodb",
13725
+ name: "MongoDB",
13726
+ category: "database",
13727
+ description: "Document database with flexible schema and Atlas managed service",
13728
+ tools: ["MongoDB", "mongoose", "mongodb"],
13729
+ guide: `### Setup
13730
+ - Install: \`pnpm add mongoose\` (ODM) or \`pnpm add mongodb\` (native driver)
13731
+ - Env vars: MONGODB_URI (mongodb+srv://user:pass@cluster.mongodb.net/db)
13732
+
13733
+ ### Architecture
13734
+ - Mongoose: Define schemas with validation, middleware, virtuals, and methods
13735
+ - Native driver: Direct MongoDB queries for maximum flexibility
13736
+ - Atlas: Managed service with built-in search, serverless, and edge functions
13737
+ - Aggregation pipeline for complex queries and data transformations
13738
+
13739
+ ### Key Patterns
13740
+ - Design schemas for your access patterns \u2014 denormalize for read performance
13741
+ - Use Mongoose middleware (pre/post hooks) for validation and side effects
13742
+ - Indexes: Create compound indexes for common query patterns
13743
+ - Use MongoDB Atlas Search for full-text search capabilities
13744
+
13745
+ ### Pitfalls
13746
+ - No joins \u2014 use embedding or $lookup (expensive). Design for denormalized reads
13747
+ - Mongoose models are cached \u2014 use \`mongoose.models.User || mongoose.model('User', schema)\`
13748
+ - ObjectId comparison requires .toString() or .equals() \u2014 not ===`
13749
+ }
13750
+ ]
13751
+ },
13752
+ // 6. Storage
13753
+ {
13754
+ id: "storage",
13755
+ name: "Storage",
13756
+ description: "File and media storage solutions",
13757
+ patterns: [
13758
+ {
13759
+ id: "s3",
13760
+ name: "AWS S3",
13761
+ category: "storage",
13762
+ description: "Object storage with CDN integration and fine-grained access control",
13763
+ tools: ["AWS S3", "@aws-sdk/client-s3", "@aws-sdk/s3-request-presigner"],
13764
+ guide: `### Setup
13765
+ - Install: \`pnpm add @aws-sdk/client-s3 @aws-sdk/s3-request-presigner\`
13766
+ - Env vars: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, S3_BUCKET_NAME
13767
+
13768
+ ### Architecture
13769
+ - Upload flow: Client requests presigned URL \u2192 uploads directly to S3 \u2192 webhook/callback
13770
+ - Server-side: Generate presigned URLs with expiration for secure uploads/downloads
13771
+ - CDN: CloudFront distribution in front of S3 for global performance
13772
+ - Lifecycle rules: Auto-archive to Glacier, auto-delete temporary files
13773
+
13774
+ ### Key Patterns
13775
+ - Use presigned URLs for direct client upload \u2014 avoids routing files through your server
13776
+ - Organize keys with prefixes: \`users/{userId}/avatars/{filename}\`
13777
+ - Use S3 event notifications (SNS/SQS/Lambda) for post-upload processing
13778
+ - Enable versioning for critical files, lifecycle rules for cost management
13779
+
13780
+ ### Pitfalls
13781
+ - S3 bucket names are globally unique \u2014 use a naming convention
13782
+ - Public buckets are a security risk \u2014 use presigned URLs or CloudFront OAC
13783
+ - Large file uploads: Use multipart upload for files > 100MB`
13784
+ },
13785
+ {
13786
+ id: "cloudinary",
13787
+ name: "Cloudinary",
13788
+ category: "storage",
13789
+ description: "Image and video management with on-the-fly transformations",
13790
+ tools: ["Cloudinary", "cloudinary", "next-cloudinary"],
13791
+ guide: `### Setup
13792
+ - Install: \`pnpm add cloudinary next-cloudinary\`
13793
+ - Env vars: NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME, CLOUDINARY_API_KEY, CLOUDINARY_API_SECRET
13794
+
13795
+ ### Architecture
13796
+ - Upload: Direct from client using unsigned upload presets, or server-side via SDK
13797
+ - Transformations: URL-based \u2014 resize, crop, format conversion, effects on-the-fly
13798
+ - Delivery: Cloudinary CDN with automatic format (WebP/AVIF) and quality optimization
13799
+ - \`<CldImage>\` component: Next.js Image with Cloudinary transformations built-in
13800
+
13801
+ ### Key Patterns
13802
+ - Use upload presets for client-side uploads (unsigned for public, signed for private)
13803
+ - URL transformations: \`/image/upload/w_400,h_300,c_fill/v1234/photo.jpg\`
13804
+ - Use eager transformations for critical sizes (thumbnails) on upload
13805
+ - Tag assets for organization and bulk operations
13806
+
13807
+ ### Pitfalls
13808
+ - Free tier: 25 credits/month \u2014 transformations consume credits quickly
13809
+ - Unsigned uploads allow anyone with your cloud name to upload \u2014 set size/type limits
13810
+ - Original files are stored forever unless explicitly deleted \u2014 monitor storage usage`
13811
+ },
13812
+ {
13813
+ id: "supabase-storage",
13814
+ name: "Supabase Storage",
13815
+ category: "storage",
13816
+ description: "S3-compatible storage with RLS policies and CDN",
13817
+ tools: ["Supabase Storage", "@supabase/supabase-js"],
13818
+ guide: `### Setup
13819
+ - Install: \`pnpm add @supabase/supabase-js\` (storage is built into the client)
13820
+ - Env vars: NEXT_PUBLIC_SUPABASE_URL, NEXT_PUBLIC_SUPABASE_ANON_KEY
13821
+
13822
+ ### Architecture
13823
+ - Buckets: Create via Dashboard or API \u2014 public (CDN) or private (signed URLs)
13824
+ - Upload: \`supabase.storage.from('bucket').upload(path, file)\`
13825
+ - Download: \`supabase.storage.from('bucket').getPublicUrl(path)\` or \`createSignedUrl()\`
13826
+ - RLS: Storage policies use auth.uid() \u2014 same as database RLS
13827
+
13828
+ ### Key Patterns
13829
+ - Use public buckets for avatars/media, private buckets for documents
13830
+ - Organize files: \`{userId}/{category}/{filename}\`
13831
+ - Image transformations: \`getPublicUrl(path, { transform: { width: 200 } })\`
13832
+ - Use \`upsert: true\` option to replace existing files without errors
13833
+
13834
+ ### Pitfalls
13835
+ - File size limit: 50MB default \u2014 increase via Dashboard for larger files
13836
+ - Public bucket URLs are predictable \u2014 don't store sensitive files in public buckets
13837
+ - RLS policies are separate from database policies \u2014 configure both`
13838
+ },
13839
+ {
13840
+ id: "r2",
13841
+ name: "Cloudflare R2",
13842
+ category: "storage",
13843
+ description: "S3-compatible storage with zero egress fees",
13844
+ tools: ["Cloudflare R2", "@aws-sdk/client-s3", "wrangler"],
13845
+ guide: `### Setup
13846
+ - Install: \`pnpm add @aws-sdk/client-s3\` (R2 is S3-compatible)
13847
+ - Env vars: R2_ACCESS_KEY_ID, R2_SECRET_ACCESS_KEY, R2_ENDPOINT, R2_BUCKET_NAME
13848
+
13849
+ ### Architecture
13850
+ - S3-compatible API: Use AWS SDK with R2 endpoint
13851
+ - Workers integration: Access R2 directly from Cloudflare Workers (no SDK needed)
13852
+ - Custom domains: Serve files from your domain via R2 public buckets
13853
+ - Zero egress: No bandwidth charges \u2014 ideal for media-heavy applications
13854
+
13855
+ ### Key Patterns
13856
+ - Use presigned URLs for direct client uploads (same as S3)
13857
+ - Workers binding for server-side: \`env.MY_BUCKET.put(key, body)\`
13858
+ - Enable public access per-bucket for CDN-served static assets
13859
+ - Use R2 lifecycle rules for automatic object expiration
13860
+
13861
+ ### Pitfalls
13862
+ - R2 doesn't support all S3 features \u2014 check compatibility for advanced operations
13863
+ - No built-in image transformations \u2014 use Cloudflare Images or Workers for processing
13864
+ - Public bucket URLs use r2.dev domain \u2014 configure custom domain for production`
13865
+ }
13866
+ ]
13867
+ },
13868
+ // 7. Search
13869
+ {
13870
+ id: "search",
13871
+ name: "Search",
13872
+ description: "Full-text search and search-as-a-service solutions",
13873
+ patterns: [
13874
+ {
13875
+ id: "algolia",
13876
+ name: "Algolia",
13877
+ category: "search",
13878
+ description: "Hosted search with typo tolerance, faceting, and instant results",
13879
+ tools: ["Algolia", "algoliasearch", "react-instantsearch"],
13880
+ guide: `### Setup
13881
+ - Install: \`pnpm add algoliasearch react-instantsearch\`
13882
+ - Env vars: NEXT_PUBLIC_ALGOLIA_APP_ID, NEXT_PUBLIC_ALGOLIA_SEARCH_KEY, ALGOLIA_ADMIN_KEY
13883
+
13884
+ ### Architecture
13885
+ - Index data: Push records to Algolia using admin API (server-side only)
13886
+ - Search: Client-side with react-instantsearch widgets or algoliasearch client
13887
+ - Sync strategy: Webhook on data change \u2192 update Algolia index
13888
+ - Widgets: \`<InstantSearch>\`, \`<SearchBox>\`, \`<Hits>\`, \`<RefinementList>\`
13889
+
13890
+ ### Key Patterns
13891
+ - Use searchable attributes ranking to prioritize title > description > content
13892
+ - Configure facets for filtering (category, price range, tags)
13893
+ - Use Algolia's query rules for merchandising and custom result boosting
13894
+ - Implement "search as you type" with debounced queries
13895
+
13896
+ ### Pitfalls
13897
+ - Search-only API key is public \u2014 admin key must never be exposed to client
13898
+ - Keep index in sync with database \u2014 stale data degrades search experience
13899
+ - Free tier: 10,000 search requests/month \u2014 monitor usage closely`
13900
+ },
13901
+ {
13902
+ id: "meilisearch",
13903
+ name: "Meilisearch",
13904
+ category: "search",
13905
+ description: "Open-source, fast search engine with typo tolerance and easy setup",
13906
+ tools: ["Meilisearch", "meilisearch"],
13907
+ guide: `### Setup
13908
+ - Install: \`pnpm add meilisearch\`
13909
+ - Run: \`docker run -p 7700:7700 getmeili/meilisearch\` or use Meilisearch Cloud
13910
+ - Env vars: MEILISEARCH_HOST, MEILISEARCH_API_KEY
13911
+
13912
+ ### Architecture
13913
+ - Self-hosted or Meilisearch Cloud \u2014 REST API with SDKs for all languages
13914
+ - Index documents: \`client.index('products').addDocuments(products)\`
13915
+ - Search: \`client.index('products').search('query', { filter, sort, limit })\`
13916
+ - Filterable/sortable attributes must be configured before use
13917
+
13918
+ ### Key Patterns
13919
+ - Use \`filterableAttributes\` for faceted search (category, price, etc.)
13920
+ - Typo tolerance and ranking rules are configured per-index
13921
+ - Implement multi-index search for searching across different content types
13922
+ - Use webhooks or CRON jobs to keep Meilisearch in sync with your database
13923
+
13924
+ ### Pitfalls
13925
+ - Meilisearch stores all data in memory \u2014 plan RAM based on dataset size
13926
+ - No partial updates \u2014 re-index the full document on any change
13927
+ - Self-hosted requires maintenance \u2014 use Meilisearch Cloud for production simplicity`
13928
+ },
13929
+ {
13930
+ id: "typesense",
13931
+ name: "Typesense",
13932
+ category: "search",
13933
+ description: "Open-source, typo-tolerant search with automatic schema detection",
13934
+ tools: ["Typesense", "typesense"],
13935
+ guide: `### Setup
13936
+ - Install: \`pnpm add typesense\`
13937
+ - Run: \`docker run -p 8108:8108 typesense/typesense:latest --api-key=xyz\`
13938
+ - Env vars: TYPESENSE_HOST, TYPESENSE_API_KEY, TYPESENSE_PORT
13939
+
13940
+ ### Architecture
13941
+ - Define collection schema with field types and faceting configuration
13942
+ - Index: \`client.collections('products').documents().create(document)\`
13943
+ - Search: \`client.collections('products').documents().search(searchParameters)\`
13944
+ - Curation: Pin/hide specific results for marketing queries
13945
+
13946
+ ### Key Patterns
13947
+ - Use \`query_by\` to specify which fields to search across
13948
+ - Faceting: \`facet_by: 'category,brand'\` for filter panels
13949
+ - Geo search: Add \`geopoint\` fields for location-based search
13950
+ - Use scoped API keys for client-side search (restrict to specific collections)
13951
+
13952
+ ### Pitfalls
13953
+ - Collection schema must be defined upfront \u2014 auto-schema detection is limited
13954
+ - All data must fit in RAM \u2014 not suitable for very large datasets without clustering
13955
+ - Self-hosted HA requires Typesense Cloud or manual clustering setup`
13956
+ },
13957
+ {
13958
+ id: "pgvector",
13959
+ name: "pgvector",
13960
+ category: "search",
13961
+ description: "Vector similarity search as a PostgreSQL extension",
13962
+         tools: ["pgvector", "PostgreSQL"],
13963
+ guide: `### Setup
13964
+ - Install extension: \`CREATE EXTENSION vector;\` in PostgreSQL
13965
+ - Install: \`pnpm add pgvector\` (Node.js client) or use Prisma/Drizzle with raw queries
13966
+ - Requires PostgreSQL 11+ with pgvector extension installed
13967
+
13968
+ ### Architecture
13969
+ - Add vector column: \`ALTER TABLE items ADD COLUMN embedding vector(1536);\`
13970
+ - Store embeddings from OpenAI, Cohere, or local models alongside your data
13971
+ - Query: \`SELECT * FROM items ORDER BY embedding <=> '[...]' LIMIT 10;\`
13972
+ - Index: Create IVFFlat or HNSW index for fast similarity search
13973
+
13974
+ ### Key Patterns
13975
+ - Use HNSW index for best recall/speed tradeoff: \`CREATE INDEX ON items USING hnsw (embedding vector_cosine_ops)\`
13976
+ - Combine vector search with SQL filters: \`WHERE category = 'tech' ORDER BY embedding <=> $1\`
13977
+ - Batch insert embeddings with COPY for large datasets
13978
+ - Use inner product (\`<#>\`) for normalized vectors, cosine (\`<=>\`) for general use
13979
+
13980
+ ### Pitfalls
13981
+ - pgvector indexes require tuning \u2014 default parameters may give poor recall
13982
+ - Embedding dimensions must match your model (OpenAI ada-002 = 1536, text-embedding-3-small = 1536)
13983
+ - Large vector columns increase table size significantly \u2014 consider separate table`
13984
+ },
13985
+ {
13986
+ id: "elasticsearch",
13987
+ name: "Elasticsearch",
13988
+ category: "search",
13989
+ description: "Distributed search and analytics engine for large-scale data",
13990
+ tools: ["Elasticsearch", "@elastic/elasticsearch"],
13991
+ guide: `### Setup
13992
+ - Install: \`pnpm add @elastic/elasticsearch\`
13993
+ - Run: Elastic Cloud (managed) or Docker: \`docker run -p 9200:9200 elasticsearch:8\`
13994
+ - Env vars: ELASTICSEARCH_URL, ELASTICSEARCH_API_KEY
13995
+
13996
+ ### Architecture
13997
+ - Index documents with typed mappings \u2014 define analyzers for text fields
13998
+ - Query DSL: \`client.search({ index, body: { query: { match: { title: 'search term' } } } })\`
13999
+ - Aggregations: Facets, histograms, stats computed during search
14000
+ - Ingest pipelines: Transform documents on indexing (extract, enrich, normalize)
14001
+
14002
+ ### Key Patterns
14003
+ - Use bulk API for indexing large datasets: \`client.bulk({ body: operations })\`
14004
+ - Multi-match queries for searching across multiple fields
14005
+ - Use index aliases for zero-downtime reindexing
14006
+ - Implement search-as-you-type with completion suggesters
14007
+
14008
+ ### Pitfalls
14009
+ - Elasticsearch is resource-intensive \u2014 requires dedicated infrastructure
14010
+ - Mapping changes require reindexing \u2014 plan schema carefully upfront
14011
+ - JVM heap size must be configured properly \u2014 default is often too small`
14012
+ }
14013
+ ]
14014
+ },
14015
+ // 8. Vector DB
14016
+ {
14017
+ id: "vectordb",
14018
+ name: "Vector Database",
14019
+ description: "Purpose-built vector databases for AI/ML embeddings",
14020
+ patterns: [
14021
+ {
14022
+ id: "qdrant",
14023
+ name: "Qdrant",
14024
+ category: "vectordb",
14025
+ description: "High-performance vector database with filtering and payload storage",
14026
+ tools: ["Qdrant", "@qdrant/js-client-rest"],
14027
+ guide: `### Setup
14028
+ - Install: \`pnpm add @qdrant/js-client-rest\`
14029
+ - Run: \`docker run -p 6333:6333 qdrant/qdrant\` or use Qdrant Cloud
14030
+ - Env vars: QDRANT_URL, QDRANT_API_KEY
14031
+
14032
+ ### Architecture
14033
+ - Collections: Create with vector size and distance metric (cosine, dot, euclidean)
14034
+ - Upsert points with vectors + payload metadata
14035
+ - Search: \`client.search(collection, { vector, filter, limit })\`
14036
+ - Filtering: Payload-based filters during vector search (no post-filtering)
14037
+
14038
+ ### Key Patterns
14039
+ - Use payload indexes for frequent filter fields (category, userId, timestamp)
14040
+ - Batch upsert for bulk ingestion: \`client.upsert(collection, { points: [...] })\`
14041
+ - Use named vectors for multi-model embeddings in same collection
14042
+ - Snapshots for backup and migration between environments
14043
+
14044
+ ### Pitfalls
14045
+ - Collection must be created before upserting \u2014 handle initialization in startup
14046
+ - Vector dimension is immutable after collection creation
14047
+ - Self-hosted requires persistent storage volume \u2014 don't use ephemeral containers`
14048
+ },
14049
+ {
14050
+ id: "milvus",
14051
+ name: "Milvus",
14052
+ category: "vectordb",
14053
+ description: "Cloud-native vector database designed for billion-scale similarity search",
14054
+ tools: ["Milvus", "@zilliz/milvus2-sdk-node"],
14055
+ guide: `### Setup
14056
+ - Install: \`pnpm add @zilliz/milvus2-sdk-node\`
14057
+ - Run: Zilliz Cloud (managed) or Docker: \`docker compose up milvus-standalone\`
14058
+ - Env vars: MILVUS_ADDRESS, MILVUS_TOKEN
14059
+
14060
+ ### Architecture
14061
+ - Collections with schema: Define fields (vector, scalar) and index types
14062
+ - Insert: Batch insert vectors with metadata fields
14063
+ - Search: \`client.search({ collection_name, vector, limit, filter })\`
14064
+ - Index types: IVF_FLAT, HNSW, ANNOY \u2014 choose based on dataset size and speed needs
14065
+
14066
+ ### Key Patterns
14067
+ - Use HNSW index for datasets under 10M vectors, IVF for larger
14068
+ - Partition by a high-cardinality field (tenant_id) for multi-tenant isolation
14069
+ - Use hybrid search: combine vector similarity with scalar filtering
14070
+ - Flush after batch inserts to make data searchable
14071
+
14072
+ ### Pitfalls
14073
+ - Milvus requires etcd and MinIO for standalone \u2014 complex Docker setup
14074
+ - Schema changes require dropping and recreating collection
14075
+ - Zilliz Cloud simplifies operations significantly \u2014 recommended for production`
14076
+ },
14077
+ {
14078
+ id: "pinecone",
14079
+ name: "Pinecone",
14080
+ category: "vectordb",
14081
+ description: "Fully managed vector database with serverless and pod-based deployment",
14082
+ tools: ["Pinecone", "@pinecone-database/pinecone"],
14083
+ guide: `### Setup
14084
+ - Install: \`pnpm add @pinecone-database/pinecone\`
14085
+ - Env vars: PINECONE_API_KEY, PINECONE_INDEX_NAME
14086
+
14087
+ ### Architecture
14088
+ - Serverless indexes: Auto-scale, pay-per-query \u2014 recommended for most use cases
14089
+ - Upsert: \`index.upsert([{ id, values, metadata }])\` \u2014 vectors with metadata
14090
+ - Query: \`index.query({ vector, topK, filter })\`
14091
+ - Namespaces: Logical partitions within an index for multi-tenancy
14092
+
14093
+ ### Key Patterns
14094
+ - Use namespaces to separate data by tenant/user/project
14095
+ - Metadata filtering: \`filter: { category: { $eq: 'tech' } }\` during query
14096
+ - Batch upsert in chunks of 100 for optimal performance
14097
+ - Use sparse-dense vectors for hybrid search (keyword + semantic)
14098
+
14099
+ ### Pitfalls
14100
+ - Serverless has cold start latency \u2014 first query after idle may be slower
14101
+ - Metadata values have size limits \u2014 don't store large text in metadata
14102
+ - Index deletion is permanent \u2014 no undo, no snapshots on serverless`
14103
+ },
14104
+ {
14105
+ id: "chroma",
14106
+ name: "Chroma",
14107
+ category: "vectordb",
14108
+ description: "Open-source embedding database, great for prototyping and local development",
14109
+ tools: ["Chroma", "chromadb"],
14110
+ guide: `### Setup
14111
+ - Install: \`pip install chromadb\` (Python) or use Chroma's REST API with fetch
14112
+ - Run: \`chroma run --host 0.0.0.0 --port 8000\` or in-memory for dev
14113
+ - Env vars: CHROMA_HOST, CHROMA_PORT
14114
+
14115
+ ### Architecture
14116
+ - Collections: Create with optional embedding function (auto-embed documents)
14117
+ - Add: \`collection.add({ ids, documents, embeddings, metadatas })\`
14118
+ - Query: \`collection.query({ queryTexts, nResults, where })\`
14119
+ - Built-in embedding: Pass documents without embeddings \u2014 Chroma auto-embeds
14120
+
14121
+ ### Key Patterns
14122
+ - Use in-memory mode for prototyping, persistent mode for development
14123
+ - Built-in embedding functions: sentence-transformers, OpenAI, Cohere
14124
+ - Where filters on metadata during query for hybrid search
14125
+ - Use \`collection.update()\` to modify existing documents without re-adding
14126
+
14127
+ ### Pitfalls
14128
+ - Chroma is best for prototyping \u2014 consider Qdrant/Pinecone for production scale
14129
+ - Node.js client is community-maintained \u2014 Python SDK is primary
14130
+ - In-memory collections are lost on restart \u2014 use persistent directory for dev`
14131
+ }
14132
+ ]
14133
+ },
14134
+ // 9. Monitoring
14135
+ {
14136
+ id: "monitoring",
14137
+ name: "Monitoring",
14138
+ description: "Error tracking, performance monitoring, and observability",
14139
+ patterns: [
14140
+ {
14141
+ id: "sentry",
14142
+ name: "Sentry",
14143
+ category: "monitoring",
14144
+ description: "Error tracking and performance monitoring with source maps",
14145
+ tools: ["Sentry", "@sentry/nextjs", "@sentry/node"],
14146
+ guide: `### Setup
14147
+ - Install: \`npx @sentry/wizard@latest -i nextjs\` (auto-configures Next.js)
14148
+ - Manual: \`pnpm add @sentry/nextjs\` and configure sentry.client.config.ts + sentry.server.config.ts
14149
+ - Env vars: SENTRY_DSN, SENTRY_AUTH_TOKEN, SENTRY_ORG, SENTRY_PROJECT
14150
+
14151
+ ### Architecture
14152
+ - Client: Auto-captures unhandled errors, performance transactions, web vitals
14153
+ - Server: Captures API route errors, SSR errors, and server-side exceptions
14154
+ - Source maps: Upload during build with Sentry webpack plugin for readable stack traces
14155
+ - Releases: Tag deployments for error regression tracking
14156
+
14157
+ ### Key Patterns
14158
+ - Use \`Sentry.captureException(error)\` for manually caught errors
14159
+ - Add context: \`Sentry.setUser({ id, email })\`, \`Sentry.setTag('feature', 'checkout')\`
14160
+ - Performance: Custom transactions with \`Sentry.startTransaction()\`
14161
+ - Use Sentry's Issues page to triage, assign, and resolve errors
14162
+
14163
+ ### Pitfalls
14164
+ - Source map upload requires SENTRY_AUTH_TOKEN \u2014 set in CI environment
14165
+ - Free tier: 5,000 errors/month \u2014 noisy errors can exhaust quota quickly
14166
+ - Don't capture sensitive data \u2014 configure beforeSend to scrub PII`
14167
+ },
14168
+ {
14169
+ id: "logrocket",
14170
+ name: "LogRocket",
14171
+ category: "monitoring",
14172
+ description: "Session replay with error tracking and performance monitoring",
14173
+ tools: ["LogRocket", "logrocket", "logrocket-react"],
14174
+ guide: `### Setup
14175
+ - Install: \`pnpm add logrocket logrocket-react\`
14176
+ - Env vars: NEXT_PUBLIC_LOGROCKET_APP_ID
14177
+
14178
+ ### Architecture
14179
+ - Initialize: \`LogRocket.init('app-id')\` in app entry point
14180
+ - Auto-captures: DOM changes, network requests, console logs, JS errors
14181
+ - Session replay: Pixel-perfect video replay of user sessions
14182
+ - Identify users: \`LogRocket.identify(userId, { name, email })\`
14183
+
14184
+ ### Key Patterns
14185
+ - Use \`LogRocket.track('event')\` for custom event tracking
14186
+ - Filter sessions by error, URL, user, or custom event in dashboard
14187
+ - Integrate with Sentry: Attach LogRocket session URL to Sentry errors
14188
+ - Use Redux/Vuex middleware to capture state changes in session replay
14189
+
14190
+ ### Pitfalls
14191
+ - LogRocket records all DOM \u2014 sanitize sensitive fields with \`data-log-rocket-mask\`
14192
+ - Recording increases page weight \u2014 configure sampling rate for high-traffic apps
14193
+ - Free tier: 1,000 sessions/month \u2014 use sampling in production`
14194
+ },
14195
+ {
14196
+ id: "datadog",
14197
+ name: "Datadog",
14198
+ category: "monitoring",
14199
+ description: "Full-stack observability platform with APM, logs, and infrastructure monitoring",
14200
+ tools: ["Datadog", "dd-trace", "@datadog/browser-rum"],
14201
+ guide: `### Setup
14202
+ - Install: \`pnpm add dd-trace\` (APM) + \`pnpm add @datadog/browser-rum\` (browser)
14203
+ - APM: \`require('dd-trace').init()\` \u2014 must be first line in app entry
14204
+ - Env vars: DD_API_KEY, DD_APP_KEY, DD_SITE, DD_SERVICE, DD_ENV
14205
+
14206
+ ### Architecture
14207
+ - APM: Automatic tracing of HTTP, database, and cache operations
14208
+ - RUM: Real User Monitoring \u2014 performance, errors, and user journeys in browser
14209
+ - Logs: Correlate logs with traces using dd-trace log injection
14210
+ - Custom metrics: \`tracer.dogstatsd.increment('api.requests', 1, { endpoint: '/users' })\`
14211
+
14212
+ ### Key Patterns
14213
+ - Use unified service tagging: DD_SERVICE, DD_ENV, DD_VERSION on all telemetry
14214
+ - Trace ID correlation: Connect frontend RUM sessions to backend APM traces
14215
+ - Custom spans: \`tracer.trace('operation', () => { ... })\` for business logic
14216
+ - Dashboards: Build custom dashboards for SLIs/SLOs
14217
+
14218
+ ### Pitfalls
14219
+ - dd-trace must be initialized before any other imports \u2014 use --require flag
14220
+ - Datadog pricing is per-host and per-ingested-data \u2014 costs can escalate quickly
14221
+ - RUM sampling: Default 100% \u2014 reduce for high-traffic sites to control costs`
14222
+ }
14223
+ ]
14224
+ },
14225
+ // 10. CI/CD
14226
+ {
14227
+ id: "cicd",
14228
+ name: "CI/CD",
14229
+ description: "Continuous integration and deployment pipelines",
14230
+ patterns: [
14231
+ {
14232
+ id: "github-actions",
14233
+ name: "GitHub Actions",
14234
+ category: "cicd",
14235
+ description: "Native CI/CD for GitHub repositories with reusable workflows",
14236
+ tools: ["GitHub Actions", "GitHub"],
14237
+ guide: `### Setup
14238
+ - Create .github/workflows/ci.yml for CI pipeline
14239
+ - No installation needed \u2014 runs on GitHub-hosted runners
14240
+ - Env vars: Set secrets in GitHub repo Settings > Secrets and Variables
14241
+
14242
+ ### Architecture
14243
+ - Workflow triggers: push, pull_request, schedule, workflow_dispatch
14244
+ - Jobs: Lint \u2192 Test \u2192 Build \u2192 Deploy (with dependency chain)
14245
+ - Caching: actions/cache for node_modules, pnpm store, build artifacts
14246
+ - Matrix strategy: Test across Node versions and OS
14247
+
14248
+ ### Key Patterns
14249
+ - Use pnpm with \`pnpm/action-setup\` for fast installs
14250
+ - Reusable workflows: \`.github/workflows/reusable-*.yml\` for shared logic
14251
+ - Branch protection: Require CI pass before merge
14252
+ - Use \`concurrency\` to cancel in-progress runs on new pushes
14253
+
14254
+ ### Pitfalls
14255
+ - Free tier: 2,000 minutes/month for private repos \u2014 cache aggressively
14256
+ - Secrets are not available in PRs from forks \u2014 design workflows accordingly
14257
+ - Always pin action versions with SHA, not just tags (supply chain security)`
14258
+ },
14259
+ {
14260
+ id: "vercel-cicd",
14261
+ name: "Vercel",
14262
+ category: "cicd",
14263
+ description: "Zero-config deployment platform for frontend and full-stack apps",
14264
+ tools: ["Vercel", "vercel"],
14265
+ guide: `### Setup
14266
+ - Install: \`pnpm add -g vercel\` or connect GitHub repo in Vercel Dashboard
14267
+ - Link: \`vercel link\` to connect local project to Vercel
14268
+ - Env vars: Set in Vercel Dashboard > Project > Settings > Environment Variables
14269
+
14270
+ ### Architecture
14271
+ - Git integration: Auto-deploy on push to main (production) and PRs (preview)
14272
+ - Preview deployments: Every PR gets a unique URL for testing
14273
+ - Serverless functions: API routes auto-deploy as serverless functions
14274
+ - Edge functions: Use edge runtime for low-latency middleware and routes
14275
+
14276
+ ### Key Patterns
14277
+ - Use vercel.json for custom build settings, redirects, and headers
14278
+ - Environment variables: Separate production, preview, and development values
14279
+ - Deploy hooks: Trigger deploys from external events (CMS publish, etc.)
14280
+ - Monorepo: Configure root directory per project in Vercel Dashboard
14281
+
14282
+ ### Pitfalls
14283
+ - Serverless function timeout: 10s (Hobby), 60s (Pro) \u2014 optimize long-running tasks
14284
+ - Free tier: 100 deployments/day \u2014 sufficient for most development
14285
+ - Build cache is per-branch \u2014 first deploy on new branch is always full build`
14286
+ },
14287
+ {
14288
+ id: "docker",
14289
+ name: "Docker",
14290
+ category: "cicd",
14291
+ description: "Containerization for consistent builds and deployments",
14292
+ tools: ["Docker", "docker-compose"],
14293
+ guide: `### Setup
14294
+ - Create Dockerfile in project root with multi-stage build
14295
+ - Create docker-compose.yml for local development with services
14296
+ - No npm packages needed \u2014 Docker CLI and Docker Desktop
14297
+
14298
+ ### Architecture
14299
+ - Multi-stage builds: Stage 1 (build), Stage 2 (production) \u2014 minimize image size
14300
+ - Docker Compose: Define app + database + redis + other services
14301
+ - Volume mounts: Persist data for databases, mount source for hot reload in dev
14302
+ - Networks: Isolate services, name-based DNS resolution between containers
14303
+
14304
+ ### Key Patterns
14305
+ - Use .dockerignore to exclude node_modules, .env, .git
14306
+ - Layer caching: Copy package.json first, install deps, then copy source
14307
+ - Health checks: \`HEALTHCHECK CMD curl -f http://localhost:3000/health\`
14308
+ - Use build args for environment-specific configuration
14309
+
14310
+ ### Pitfalls
14311
+ - Don't run as root in containers \u2014 add \`USER node\` after install
14312
+ - Node.js alpine images are smaller but may lack native deps \u2014 test thoroughly
14313
+ - Docker Desktop licensing: Free for personal/small business, paid for enterprise`
14314
+ },
14315
+ {
14316
+ id: "railway",
14317
+ name: "Railway",
14318
+ category: "cicd",
14319
+ description: "Infrastructure platform with instant deploys and built-in databases",
14320
+ tools: ["Railway", "railway"],
14321
+ guide: `### Setup
14322
+ - Install: \`pnpm add -g @railway/cli\`
14323
+ - Link: \`railway link\` to connect to Railway project
14324
+ - Env vars: Set in Railway Dashboard or \`railway variables set KEY=value\`
14325
+
14326
+ ### Architecture
14327
+ - Auto-detect: Railway detects framework and configures build/start commands
14328
+ - Services: Deploy multiple services (API, worker, cron) in one project
14329
+ - Databases: One-click PostgreSQL, MySQL, Redis, MongoDB provisioning
14330
+ - Networking: Private networking between services, public domains for external access
14331
+
14332
+ ### Key Patterns
14333
+ - Use Railway templates for common stacks (Next.js + PostgreSQL, etc.)
14334
+ - Reference variables across services: \`\${{Postgres.DATABASE_URL}}\`
14335
+ - Use \`railway run\` to execute commands with production env vars locally
14336
+ - Deploy from GitHub: Auto-deploy on push, preview environments for PRs
14337
+
14338
+ ### Pitfalls
14339
+ - Free tier: $5/month credit, limited to 500 hours \u2014 not for always-on services
14340
+ - Build logs: Check for memory issues \u2014 Railway has build-time memory limits
14341
+ - Custom domains require paid plan \u2014 use .up.railway.app for development`
14342
+ }
14343
+ ]
14344
+ },
14345
+ // 11. Realtime
14346
+ {
14347
+ id: "realtime",
14348
+ name: "Realtime",
14349
+ description: "Real-time communication and live data synchronization",
14350
+ patterns: [
14351
+ {
14352
+ id: "websocket",
14353
+ name: "WebSocket",
14354
+ category: "realtime",
14355
+ description: "Native WebSocket implementation for custom real-time communication",
14356
+ tools: ["WebSocket", "ws", "socket.io"],
14357
+ guide: `### Setup
14358
+ - Install: \`pnpm add ws\` (server) or \`pnpm add socket.io socket.io-client\` (full framework)
14359
+ - Native WebSocket API available in browsers \u2014 no client package needed for basic use
14360
+
14361
+ ### Architecture
14362
+ - Server: Create WebSocket server alongside HTTP server on same port
14363
+ - Rooms/channels: Implement pub/sub pattern for topic-based messaging
14364
+ - Protocol: Define message types as JSON: \`{ type: 'chat', payload: { ... } }\`
14365
+ - Scaling: Use Redis adapter (socket.io-redis) for multi-server deployments
14366
+
14367
+ ### Key Patterns
14368
+ - Heartbeat/ping-pong: Detect stale connections and auto-reconnect
14369
+ - Authentication: Verify JWT on connection upgrade, attach user to socket
14370
+ - Rate limiting: Implement per-connection message rate limits
14371
+ - Binary data: Use ArrayBuffer for file transfer, JSON for structured messages
14372
+
14373
+ ### Pitfalls
14374
+ - Serverless platforms don't support persistent WebSocket connections \u2014 use Pusher/Ably
14375
+ - Connection limits: Typical server handles ~10K concurrent connections
14376
+ - Always handle reconnection logic on client \u2014 connections drop frequently on mobile`
14377
+ },
14378
+ {
14379
+ id: "pusher",
14380
+ name: "Pusher",
14381
+ category: "realtime",
14382
+ description: "Hosted real-time messaging with channels and presence",
14383
+ tools: ["Pusher", "pusher", "pusher-js"],
14384
+ guide: `### Setup
14385
+ - Install: \`pnpm add pusher\` (server) + \`pnpm add pusher-js\` (client)
14386
+ - Env vars: PUSHER_APP_ID, PUSHER_KEY, PUSHER_SECRET, PUSHER_CLUSTER, NEXT_PUBLIC_PUSHER_KEY
14387
+
14388
+ ### Architecture
14389
+ - Server: Trigger events on channels \u2014 \`pusher.trigger('channel', 'event', data)\`
14390
+ - Client: Subscribe to channels \u2014 \`pusher.subscribe('channel').bind('event', callback)\`
14391
+ - Channel types: Public, private (auth required), presence (who's online)
14392
+ - Auth endpoint: POST /api/pusher/auth \u2014 verify user can access private/presence channels
14393
+
14394
+ ### Key Patterns
14395
+ - Use private channels for user-specific events: \`private-user-{userId}\`
14396
+ - Presence channels for collaborative features: show online users, typing indicators
14397
+ - Client events: Direct client-to-client messaging on private channels
14398
+ - Batch trigger: Send to multiple channels in one API call
14399
+
14400
+ ### Pitfalls
14401
+ - Free tier: 200K messages/day, 100 concurrent connections \u2014 limited for production
14402
+ - Message size limit: 10KB \u2014 compress large payloads or use S3 for file transfers
14403
+ - Pusher doesn't persist messages \u2014 implement your own history/replay`
14404
+ },
14405
+ {
14406
+ id: "ably",
14407
+ name: "Ably",
14408
+ category: "realtime",
14409
+ description: "Enterprise real-time messaging with guaranteed delivery and history",
14410
+ tools: ["Ably", "ably"],
14411
+ guide: `### Setup
14412
+ - Install: \`pnpm add ably\`
14413
+ - Env vars: ABLY_API_KEY, NEXT_PUBLIC_ABLY_API_KEY (publishable)
14414
+
14415
+ ### Architecture
14416
+ - Publish/subscribe: \`channel.publish('event', data)\` and \`channel.subscribe(callback)\`
14417
+ - Presence: Track who's connected to a channel with automatic enter/leave
14418
+ - History: Retrieve past messages from channels (configurable retention)
14419
+ - Token auth: Generate short-lived tokens server-side for client authentication
14420
+
14421
+ ### Key Patterns
14422
+ - Use token authentication for client-side \u2014 never expose full API key
14423
+ - Channel namespaces: \`chat:room-123\`, \`notifications:user-456\`
14424
+ - Message ordering and exactly-once delivery guarantees
14425
+ - Use Ably Reactor for server-side event processing (webhooks, queues, functions)
14426
+
14427
+ ### Pitfalls
14428
+ - Free tier: 6M messages/month, 200 peak connections \u2014 generous for development
14429
+ - Full API key has publish+subscribe+admin \u2014 always use token auth for clients
14430
+ - Message size limit: 64KB \u2014 use for signaling, not file transfer`
14431
+ },
14432
+ {
14433
+ id: "supabase-realtime",
14434
+ name: "Supabase Realtime",
14435
+ category: "realtime",
14436
+ description: "PostgreSQL change data capture with WebSocket delivery",
14437
+ tools: ["Supabase Realtime", "@supabase/supabase-js"],
14438
+ guide: `### Setup
14439
+ - Install: \`pnpm add @supabase/supabase-js\` (realtime is built into the client)
14440
+ - Enable realtime on tables in Supabase Dashboard > Database > Replication
14441
+ - Env vars: NEXT_PUBLIC_SUPABASE_URL, NEXT_PUBLIC_SUPABASE_ANON_KEY
14442
+
14443
+ ### Architecture
14444
+ - Postgres Changes: Subscribe to INSERT, UPDATE, DELETE events on tables
14445
+ - Broadcast: Send arbitrary messages to channels (not tied to database)
14446
+ - Presence: Track online users and shared state across clients
14447
+ - Channel: \`supabase.channel('room').on('postgres_changes', { event, schema, table }, callback)\`
14448
+
14449
+ ### Key Patterns
14450
+ - Filter subscriptions: \`.on('postgres_changes', { filter: 'user_id=eq.123' })\`
14451
+ - Combine Broadcast + Presence for collaborative features (cursors, typing)
14452
+ - Use RLS with realtime \u2014 users only receive changes they're authorized to see
14453
+ - Unsubscribe on cleanup: \`supabase.removeChannel(channel)\`
14454
+
14455
+ ### Pitfalls
14456
+ - Realtime must be explicitly enabled per-table in Dashboard
14457
+ - RLS applies to realtime \u2014 test policies to ensure correct data delivery
14458
+ - High-frequency updates may cause performance issues \u2014 debounce where possible`
14459
+ }
14460
+ ]
14461
+ },
14462
+ // 12. Project Management
14463
+ {
14464
+ id: "project-management",
14465
+ name: "Project Management",
14466
+ description: "Project tracking and team collaboration tools",
14467
+ patterns: [
14468
+ {
14469
+ id: "jira",
14470
+ name: "Jira",
14471
+ category: "project-management",
14472
+ description: "Enterprise project management with Agile boards, sprints, and automation",
14473
+ tools: ["Jira", "jira-client", "Atlassian API"],
14474
+ guide: `### Setup
14475
+ - Install: \`pnpm add jira-client\` or use Atlassian REST API directly
14476
+ - Env vars: JIRA_HOST, JIRA_EMAIL, JIRA_API_TOKEN
14477
+
14478
+ ### Architecture
14479
+ - REST API: Create/update issues, manage sprints, query with JQL
14480
+ - Webhooks: Issue created, updated, transitioned \u2014 POST to your endpoint
14481
+ - Automation: Jira Automation rules for workflow triggers (on PR merge, auto-transition)
14482
+ - Integration: Link commits/PRs to Jira issues via branch naming (PROJ-123)
14483
+
14484
+ ### Key Patterns
14485
+ - Smart commits: \`git commit -m "PROJ-123 fix login bug #done"\` auto-transitions issues
14486
+ - Use JQL for complex queries: \`project = PROJ AND sprint in openSprints()\`
14487
+ - Webhooks for syncing Jira state with your app (status dashboards, notifications)
14488
+ - Custom fields for project-specific metadata
14489
+
14490
+ ### Pitfalls
14491
+ - API rate limits: 100 requests per 10 seconds per user \u2014 implement request queuing
14492
+ - Jira Cloud API differs from Server/Data Center \u2014 verify endpoint compatibility
14493
+ - API tokens are tied to user accounts \u2014 use service accounts for integrations`
14494
+ }
14495
+ ]
14496
+ },
14497
+ // 13. Communication
14498
+ {
14499
+ id: "communication",
14500
+ name: "Communication",
14501
+ description: "Team communication and notification integrations",
14502
+ patterns: [
14503
+ {
14504
+ id: "slack",
14505
+ name: "Slack",
14506
+ category: "communication",
14507
+ description: "Team messaging with bots, webhooks, and workflow integrations",
14508
+ tools: ["Slack", "@slack/web-api", "@slack/bolt"],
14509
+ guide: `### Setup
14510
+ - Install: \`pnpm add @slack/bolt\` (framework) or \`@slack/web-api\` (API client only)
14511
+ - Create Slack App in api.slack.com/apps \u2014 configure bot scopes and event subscriptions
14512
+ - Env vars: SLACK_BOT_TOKEN, SLACK_SIGNING_SECRET, SLACK_APP_TOKEN (for Socket Mode)
14513
+
14514
+ ### Architecture
14515
+ - Bolt framework: \`app.message('pattern', handler)\`, \`app.command('/cmd', handler)\`
14516
+ - Incoming webhooks: Simple POST to send messages to a channel (no bot needed)
14517
+ - Events API: Receive events (message, reaction, member joined) via HTTP or Socket Mode
14518
+ - Block Kit: Rich message layouts with interactive components
14519
+
14520
+ ### Key Patterns
14521
+ - Use Socket Mode for development \u2014 no public URL needed
14522
+ - Block Kit Builder (app.slack.com/block-kit-builder) to design rich messages
14523
+ - Slash commands for user-initiated actions: \`/deploy\`, \`/status\`
14524
+ - Schedule messages: \`chat.scheduleMessage\` for delayed notifications
14525
+
14526
+ ### Pitfalls
14527
+ - Bot tokens are channel-specific \u2014 bot must be invited to channels to post
14528
+ - Event subscriptions require URL verification (challenge response)
14529
+ - Rate limits: 1 message per second per channel \u2014 queue messages for bulk sending`
14530
+ }
14531
+ ]
14532
+ },
14533
+ // 14. E-commerce
14534
+ {
14535
+ id: "ecommerce",
14536
+ name: "E-commerce",
14537
+ description: "E-commerce platforms and storefront integrations",
14538
+ patterns: [
14539
+ {
14540
+ id: "shopify",
14541
+ name: "Shopify",
14542
+ category: "ecommerce",
14543
+ description: "Full-featured e-commerce platform with Storefront and Admin APIs",
14544
+ tools: ["Shopify", "@shopify/shopify-api", "@shopify/hydrogen"],
14545
+ guide: `### Setup
14546
+ - Install: \`pnpm add @shopify/shopify-api\` (custom app) or \`npx create-hydrogen-app\` (Hydrogen)
14547
+ - Create app in Shopify Partners Dashboard \u2014 get API credentials
14548
+ - Env vars: SHOPIFY_API_KEY, SHOPIFY_API_SECRET, SHOPIFY_STORE_DOMAIN, SHOPIFY_ACCESS_TOKEN
14549
+
14550
+ ### Architecture
14551
+ - Storefront API: Public, client-safe \u2014 product listings, cart, checkout (GraphQL)
14552
+ - Admin API: Server-side only \u2014 orders, inventory, customers, fulfillment (REST/GraphQL)
14553
+ - Hydrogen: Shopify's React framework for custom storefronts (built on Remix)
14554
+ - Webhooks: Order created, product updated, checkout completed \u2014 mandatory for data sync
14555
+
14556
+ ### Key Patterns
14557
+ - Use Storefront API for headless commerce \u2014 cart and checkout without Shopify theme
14558
+ - GraphQL for Storefront API, REST or GraphQL for Admin API
14559
+ - Implement webhook HMAC verification for all incoming webhooks
14560
+ - Use metafields for custom product/order data that doesn't fit standard schema
14561
+
14562
+ ### Pitfalls
14563
+ - Storefront API requires storefront access token (different from admin token)
14564
+ - Checkout is Shopify-hosted unless you're on Shopify Plus (checkout extensibility)
14565
+ - API versioning: Shopify deprecates API versions quarterly \u2014 pin and upgrade regularly`
14566
+ }
14567
+ ]
14568
+ },
14569
+ // 15. Design
14570
+ {
14571
+ id: "design",
14572
+ name: "Design",
14573
+ description: "Design tools and creative platform integrations",
14574
+ patterns: [
14575
+ {
14576
+ id: "framer",
14577
+ name: "Framer",
14578
+ category: "design",
14579
+ description: "Design-to-production website builder with React component support",
14580
+ tools: ["Framer", "framer-motion"],
14581
+ guide: `### Setup
14582
+ - Install: \`pnpm add framer-motion\` for animation library in your React app
14583
+ - Framer Sites: Design and publish directly from Framer \u2014 no code export needed
14584
+ - For custom components in Framer: Use Framer's component API
14585
+
14586
+ ### Architecture
14587
+ - Framer Motion: Animation library \u2014 \`<motion.div animate={{ opacity: 1 }}>\`
14588
+ - Framer Sites: Visual editor that publishes to production (hosting included)
14589
+ - Code components: Write React components that work inside Framer editor
14590
+ - CMS: Built-in CMS for dynamic content in Framer Sites
14591
+
14592
+ ### Key Patterns
14593
+ - Use \`AnimatePresence\` for exit animations (page transitions, modals)
14594
+ - Layout animations: \`layout\` prop for automatic smooth transitions on DOM changes
14595
+ - Scroll-linked animations: \`useScroll()\` hook for parallax and progress indicators
14596
+ - Variants: Define animation states and orchestrate child animations
14597
+
14598
+ ### Pitfalls
14599
+ - framer-motion bundle size is significant \u2014 use lazy loading or \`m\` component for tree-shaking
14600
+ - Framer Sites are separate from your codebase \u2014 not suitable for app UIs
14601
+ - Code components in Framer have limitations \u2014 no hooks from external state libraries`
14602
+ },
14603
+ {
14604
+ id: "figma",
14605
+ name: "Figma",
14606
+ category: "design",
14607
+ description: "Collaborative design platform with developer handoff and plugins",
14608
+ tools: ["Figma", "Figma API", "figma-js"],
14609
+ guide: `### Setup
14610
+ - Install: \`pnpm add figma-js\` or use Figma REST API directly
14611
+ - Generate Personal Access Token in Figma > Settings > Account
14612
+ - Env vars: FIGMA_ACCESS_TOKEN
14613
+
14614
+ ### Architecture
14615
+ - REST API: Read files, components, styles, and images from Figma
14616
+ - Webhooks: File updated, comment added \u2014 trigger design-to-code pipelines
14617
+ - Plugins: Build custom Figma plugins for team-specific workflows
14618
+ - Dev Mode: Developers inspect designs, copy CSS, and export assets directly
14619
+
14620
+ ### Key Patterns
14621
+ - Use Figma's component library as source of truth for design tokens
14622
+ - Export design tokens via API \u2192 generate CSS variables / Tailwind config
14623
+ - Automate asset export: Fetch SVGs/PNGs from Figma API in CI pipeline
14624
+ - Use Figma Variants for component states (hover, active, disabled)
14625
+
14626
+ ### Pitfalls
14627
+ - Figma API rate limits: 30 requests/minute \u2014 cache responses aggressively
14628
+ - File structure varies by team \u2014 normalize node traversal logic
14629
+ - Personal access tokens expire \u2014 use OAuth for production integrations`
14630
+ },
14631
+ {
14632
+ id: "canva",
14633
+ name: "Canva",
14634
+ category: "design",
14635
+ description: "Visual design platform with template marketplace and brand kit",
14636
+ tools: ["Canva", "Canva Connect API"],
14637
+ guide: `### Setup
14638
+ - Register app in Canva Developers portal (canva.com/developers)
14639
+ - Use Canva Connect API for integration \u2014 REST-based
14640
+ - Env vars: CANVA_CLIENT_ID, CANVA_CLIENT_SECRET
14641
+
14642
+ ### Architecture
14643
+ - Connect API: Create designs, manage brand assets, export designs programmatically
14644
+ - OAuth 2.0: User authorization flow for accessing their Canva designs
14645
+ - Design import/export: Create designs from templates, export as PNG/PDF
14646
+ - Brand Kit API: Manage brand colors, fonts, logos, and templates
14647
+
14648
+ ### Key Patterns
14649
+ - Use Canva Connect to embed design creation in your app
14650
+ - Template-based generation: Pre-configure templates, fill with dynamic data
14651
+ - Asset management: Upload and organize brand assets via API
14652
+ - Export designs in multiple formats for different channels (social, print, web)
14653
+
14654
+ ### Pitfalls
14655
+ - Canva API is relatively new \u2014 some features may have limited documentation
14656
+ - OAuth flow requires redirect URI setup per environment
14657
+ - Rate limits apply \u2014 implement retry logic with exponential backoff`
14658
+ }
14659
+ ]
14660
+ },
14661
+ // 16. Customer Support
14662
+ {
14663
+ id: "customer-support",
14664
+ name: "Customer Support",
14665
+ description: "Help desk and customer support platform integrations",
14666
+ patterns: [
14667
+ {
14668
+ id: "freshdesk",
14669
+ name: "Freshdesk",
14670
+ category: "customer-support",
14671
+ description: "Customer support platform with ticketing, knowledge base, and automation",
14672
+ tools: ["Freshdesk", "Freshdesk API"],
14673
+ guide: `### Setup
14674
+ - Use Freshdesk REST API v2 \u2014 no official Node.js SDK (use fetch or axios)
14675
+ - Auth: API key as username with \`X\` as password (Basic Auth)
14676
+ - Env vars: FRESHDESK_DOMAIN, FRESHDESK_API_KEY
14677
+
14678
+ ### Architecture
14679
+ - Tickets: Create, update, list, filter via REST API
14680
+ - Contacts: Manage customer profiles linked to tickets
14681
+ - Knowledge base: Articles and categories for self-service support
14682
+ - Webhooks: Ticket created, updated, resolved \u2014 trigger custom workflows
14683
+
14684
+ ### Key Patterns
14685
+ - Create tickets from your app: POST /api/v2/tickets with requester info
14686
+ - Use custom fields for app-specific metadata on tickets
14687
+ - Automation rules in Freshdesk for routing, SLA, and escalation
14688
+ - Embed Freshdesk widget in your app for in-app support
14689
+
14690
+ ### Pitfalls
14691
+ - API rate limit: varies by plan (free: 50/min, paid: higher) \u2014 implement throttling
14692
+ - Pagination: Use \`page\` and \`per_page\` params \u2014 default 30 items
14693
+ - Webhook payloads are not signed \u2014 verify source IP or implement shared secret`
14694
+ },
14695
+ {
14696
+ id: "zendesk",
14697
+ name: "Zendesk",
14698
+ category: "customer-support",
14699
+ description: "Enterprise customer service platform with omnichannel support",
14700
+ tools: ["Zendesk", "node-zendesk", "Zendesk API"],
14701
+ guide: `### Setup
14702
+ - Install: \`pnpm add node-zendesk\` or use Zendesk REST API with fetch
14703
+ - Auth: API token (\`email/token:api_key\`) or OAuth
14704
+ - Env vars: ZENDESK_SUBDOMAIN, ZENDESK_EMAIL, ZENDESK_API_TOKEN
14705
+
14706
+ ### Architecture
14707
+ - Tickets API: Create, update, search, and manage support tickets
14708
+ - Users API: Manage end-users, agents, and organizations
14709
+ - Help Center API: Articles, sections, categories for self-service
14710
+ - Chat/Messaging: Real-time customer communication via Zendesk Widget
14711
+
14712
+ ### Key Patterns
14713
+ - Use ticket triggers for automated responses and routing
14714
+ - Zendesk Widget: Embed support chat/help center in your app with one script tag
14715
+ - Search API with Zendesk Query Language for complex ticket queries
14716
+ - Webhooks + Zendesk Triggers for real-time ticket event processing
14717
+
14718
+ ### Pitfalls
14719
+ - API rate limit: 700 requests/minute (Enterprise) \u2014 lower on smaller plans
14720
+ - Zendesk has multiple products (Support, Chat, Guide) \u2014 each has separate APIs
14721
+ - Sandbox environment requires Enterprise plan \u2014 test carefully on trial accounts`
14722
+ }
14723
+ ]
14724
+ },
14725
+ // 17. Crawler
14726
+ {
14727
+ id: "crawler",
14728
+ name: "Crawler",
14729
+ description: "Web crawling and content extraction services",
14730
+ patterns: [
14731
+ {
14732
+ id: "firecrawl",
14733
+ name: "Firecrawl",
14734
+ category: "crawler",
14735
+ description: "AI-powered web crawler that extracts clean markdown from any URL",
14736
+ tools: ["Firecrawl", "@mendable/firecrawl-js"],
14737
+ guide: `### Setup
14738
+ - Install: \`pnpm add @mendable/firecrawl-js\`
14739
+ - Env vars: FIRECRAWL_API_KEY
14740
+
14741
+ ### Architecture
14742
+ - Scrape: Single URL \u2192 clean markdown/HTML extraction with metadata
14743
+ - Crawl: Start from URL, follow links, extract content from entire site
14744
+ - Map: Get all URLs from a website without extracting content
14745
+ - Async crawl: Submit job, poll for completion, get results
14746
+
14747
+ ### Key Patterns
14748
+ - Use scrape for single-page extraction: \`app.scrapeUrl(url, { formats: ['markdown'] })\`
14749
+ - Crawl for site-wide content: \`app.crawlUrl(url, { limit: 100 })\`
14750
+ - Use \`includePaths\` and \`excludePaths\` to filter crawled URLs
14751
+ - Extract structured data with LLM extraction mode (schema-based)
14752
+
14753
+ ### Pitfalls
14754
+ - Crawl jobs can be slow for large sites \u2014 use async mode with webhooks
14755
+ - Respect robots.txt \u2014 Firecrawl follows it by default
14756
+ - API rate limits depend on plan \u2014 implement retry logic for 429 responses`
14757
+ }
14758
+ ]
14759
+ },
14760
+ // 18. Web Scraping
14761
+ {
14762
+ id: "web-scraping",
14763
+ name: "Web Scraping",
14764
+ description: "Web scraping and browser automation tools",
14765
+ patterns: [
14766
+ {
14767
+ id: "scrapling",
14768
+ name: "Scrapling",
14769
+ category: "web-scraping",
14770
+ description: "Lightweight web scraping library with CSS selector-based extraction",
14771
+ tools: ["Scrapling", "cheerio", "puppeteer"],
14772
+ guide: `### Setup
14773
+ - Install: \`pnpm add cheerio\` (HTML parsing) + \`pnpm add puppeteer\` (browser automation)
14774
+ - For static pages: Use fetch + cheerio \u2014 fast and lightweight
14775
+ - For JS-rendered pages: Use puppeteer for full browser rendering
14776
+
14777
+ ### Architecture
14778
+ - Static scraping: Fetch HTML \u2192 parse with cheerio \u2192 extract with CSS selectors
14779
+ - Dynamic scraping: Launch puppeteer browser \u2192 navigate \u2192 wait for content \u2192 extract
14780
+ - Pipeline: URL queue \u2192 fetch/render \u2192 parse \u2192 transform \u2192 store
14781
+ - Scheduling: CRON jobs for periodic scraping (price monitoring, content aggregation)
14782
+
14783
+ ### Key Patterns
14784
+ - Use cheerio for static HTML: \`$(selector).text()\`, \`$(selector).attr('href')\`
14785
+ - Puppeteer for SPAs: \`page.waitForSelector('.data')\` then \`page.evaluate()\`
14786
+ - Implement request delays and rotation to avoid rate limits
14787
+ - Cache responses to avoid re-scraping unchanged pages (ETags, Last-Modified)
14788
+
14789
+ ### Pitfalls
14790
+ - Respect robots.txt and terms of service \u2014 scraping may violate ToS
14791
+ - Puppeteer requires Chromium \u2014 significant resource usage in production
14792
+ - Anti-bot measures (Cloudflare, captchas) may block scrapers \u2014 consider using proxies`
14793
+ }
14794
+ ]
14795
+ },
14796
+ // 19. Hosting
14797
+ {
14798
+ id: "hosting",
14799
+ name: "Hosting",
14800
+ description: "Web hosting and deployment platforms",
14801
+ patterns: [
14802
+ {
14803
+ id: "netlify",
14804
+ name: "Netlify",
14805
+ category: "hosting",
14806
+ description: "Web hosting with serverless functions, forms, and edge handlers",
14807
+ tools: ["Netlify", "netlify-cli", "@netlify/functions"],
14808
+ guide: `### Setup
14809
+ - Install: \`pnpm add -g netlify-cli\` + \`pnpm add @netlify/functions\`
14810
+ - Link: \`netlify link\` or connect GitHub repo in Netlify Dashboard
14811
+ - Env vars: Set in Netlify Dashboard > Site > Environment variables
14812
+
14813
+ ### Architecture
14814
+ - Static hosting: Deploy built assets to Netlify CDN (automatic from Git)
14815
+ - Serverless functions: \`netlify/functions/\` directory \u2014 auto-deployed as AWS Lambda
14816
+ - Edge functions: \`netlify/edge-functions/\` \u2014 run at CDN edge with Deno runtime
14817
+ - Forms: HTML forms with \`data-netlify="true"\` \u2014 submissions stored/forwarded automatically
14818
+
14819
+ ### Key Patterns
14820
+ - Use \`netlify.toml\` for build config, redirects, and headers
14821
+ - Branch deploys: Every branch gets a deploy URL for testing
14822
+ - Deploy previews: PRs get preview URLs with Netlify comment on GitHub
14823
+ - Use Netlify Identity for quick auth or Netlify Graph for API integrations
14824
+
14825
+ ### Pitfalls
14826
+ - Serverless function timeout: 10s (free), 26s (paid) \u2014 not for long-running tasks
14827
+ - Build minutes: 300/month on free tier \u2014 optimize build times
14828
+ - Netlify Functions use AWS Lambda \u2014 cold starts may impact latency`
14829
+ },
14830
+ {
14831
+ id: "vercel-hosting",
14832
+ name: "Vercel",
14833
+ category: "hosting",
14834
+ description: "Edge-first hosting platform optimized for Next.js and React frameworks",
14835
+ tools: ["Vercel", "vercel"],
14836
+ guide: `### Setup
14837
+ - Install: \`pnpm add -g vercel\`
14838
+ - Deploy: \`vercel\` from project root \u2014 auto-detects framework
14839
+ - Env vars: \`vercel env add\` or set in Dashboard > Project > Settings
14840
+
14841
+ ### Architecture
14842
+ - Framework detection: Auto-configures build for Next.js, Remix, Astro, etc.
14843
+ - Edge Network: Static assets served from 100+ global locations
14844
+ - Serverless functions: API routes auto-deploy, scale to zero when idle
14845
+ - ISR: Incremental Static Regeneration for dynamic content with static performance
14846
+
14847
+ ### Key Patterns
14848
+ - Use preview deployments for every PR \u2014 share URLs with team for review
14849
+ - Edge middleware: Run logic before request hits your app (auth, redirects, A/B tests)
14850
+ - Cron jobs: Define in vercel.json for scheduled serverless function execution
14851
+ - Speed Insights: Built-in web vitals monitoring
14852
+
14853
+ ### Pitfalls
14854
+ - Hobby plan: 100 deployments/day, 100GB bandwidth \u2014 sufficient for small projects
14855
+ - Serverless function regions: Choose region close to your database for lowest latency
14856
+ - No persistent file system \u2014 use external storage (S3, Supabase) for uploads`
14857
+ }
14858
+ ]
14859
+ },
14860
+ // 20. Domain
14861
+ {
14862
+ id: "domain",
14863
+ name: "Domain",
14864
+ description: "Domain registration and DNS management",
14865
+ patterns: [
14866
+ {
14867
+ id: "godaddy",
14868
+ name: "GoDaddy",
14869
+ category: "domain",
14870
+ description: "Domain registrar with DNS management and API access",
14871
+ tools: ["GoDaddy", "GoDaddy API"],
14872
+ guide: `### Setup
14873
+ - Get API credentials from developer.godaddy.com
14874
+ - Use GoDaddy REST API \u2014 no official Node.js SDK (use fetch)
14875
+ - Env vars: GODADDY_API_KEY, GODADDY_API_SECRET
14876
+
14877
+ ### Architecture
14878
+ - Domains API: Check availability, register, renew, transfer domains
14879
+ - DNS API: Manage A, CNAME, MX, TXT records programmatically
14880
+ - Certificates API: SSL certificate provisioning and management
14881
+ - Auth: API key + secret in Authorization header
14882
+
14883
+ ### Key Patterns
14884
+ - DNS automation: Update records on deployment (point to new server IP)
14885
+ - Domain availability check before registration: GET /v1/domains/available
14886
+ - Bulk DNS updates: PATCH /v1/domains/{domain}/records for batch changes
14887
+ - Use TXT records for domain verification (SSL, email, Stripe, etc.)
14888
+
14889
+ ### Pitfalls
14890
+ - API has separate OTE (test) and production environments \u2014 use OTE for development
14891
+ - Rate limits: 60 requests/minute \u2014 implement throttling for batch operations
14892
+ - DNS propagation: Changes take 15min to 48hrs \u2014 TTL affects propagation speed`
14893
+ },
14894
+ {
14895
+ id: "google-domains",
14896
+ name: "Google Domains",
14897
+ category: "domain",
14898
+ description: "Domain registration with Google Cloud DNS integration (now Squarespace)",
14899
+ tools: ["Google Domains", "Squarespace Domains", "Cloud DNS"],
14900
+ guide: `### Setup
14901
+ - Google Domains transferred to Squarespace Domains \u2014 manage at domains.squarespace.com
14902
+ - For DNS management with Google Cloud: Use Cloud DNS API
14903
+ - Env vars: GOOGLE_CLOUD_PROJECT, GOOGLE_APPLICATION_CREDENTIALS
14904
+
14905
+ ### Architecture
14906
+ - Squarespace Domains: Web-based management for registration and basic DNS
14907
+ - Cloud DNS: Programmable DNS management via Google Cloud API
14908
+ - Integration: Use Cloud DNS managed zones for high-availability DNS
14909
+ - DNSSEC: Automatic DNSSEC signing with Cloud DNS
14910
+
14911
+ ### Key Patterns
14912
+ - Use Cloud DNS for programmable DNS management with IaC (Terraform, Pulumi)
14913
+ - Managed zones for each domain with record sets for A, CNAME, MX, TXT
14914
+ - Health checks: Route traffic based on backend health (with Cloud Load Balancing)
14915
+ - DNS peering: Private DNS zones for internal service discovery
14916
+
14917
+ ### Pitfalls
14918
+ - Google Domains is now Squarespace \u2014 migration may affect API access
14919
+ - Cloud DNS charges per managed zone ($0.20/month) and per million queries
14920
+ - DNS changes via Cloud DNS API take effect immediately but propagation varies`
14921
+ }
14922
+ ]
14923
+ },
14924
+ // 21. Cloud
14925
+ {
14926
+ id: "cloud",
14927
+ name: "Cloud",
14928
+ description: "Cloud infrastructure platforms and services",
14929
+ patterns: [
14930
+ {
14931
+ id: "gcp",
14932
+ name: "Google Cloud Platform",
14933
+ category: "cloud",
14934
+ description: "Full cloud platform with compute, storage, AI/ML, and managed services",
14935
+ tools: ["GCP", "@google-cloud/storage", "@google-cloud/firestore", "gcloud"],
14936
+ guide: `### Setup
14937
+ - Install: \`pnpm add @google-cloud/storage @google-cloud/firestore\` (per service)
14938
+ - Auth: Service account JSON key or Application Default Credentials
14939
+ - Env vars: GOOGLE_CLOUD_PROJECT, GOOGLE_APPLICATION_CREDENTIALS
14940
+
14941
+ ### Architecture
14942
+ - Compute: Cloud Run (containers), Cloud Functions (serverless), GKE (Kubernetes)
14943
+ - Storage: Cloud Storage (objects), Firestore (NoSQL), Cloud SQL (relational)
14944
+ - AI/ML: Vertex AI, Gemini API, Cloud Vision, Natural Language
14945
+ - Networking: Cloud Load Balancing, Cloud CDN, VPC
14946
+
14947
+ ### Key Patterns
14948
+ - Use Cloud Run for containerized apps \u2014 auto-scales, pay per request
14949
+ - Application Default Credentials: Works locally with gcloud CLI, in production with service account
14950
+ - IAM: Principle of least privilege \u2014 create service accounts per service
14951
+ - Use Secret Manager for sensitive configuration (API keys, credentials)
14952
+
14953
+ ### Pitfalls
14954
+ - GCP billing can surprise \u2014 set budget alerts and quotas early
14955
+ - Service account key files are security-sensitive \u2014 use Workload Identity where possible
14956
+ - Region selection matters for latency and compliance \u2014 choose closest to users`
14957
+ },
14958
+ {
14959
+ id: "aws",
14960
+ name: "Amazon Web Services",
14961
+ category: "cloud",
14962
+ description: "Comprehensive cloud platform with 200+ services",
14963
+ tools: ["AWS", "@aws-sdk/client-s3", "@aws-sdk/client-lambda", "aws-cdk"],
14964
+ guide: `### Setup
14965
+ - Install: \`pnpm add @aws-sdk/client-s3\` (per service) or AWS CDK for infrastructure
14966
+ - Auth: IAM credentials, AWS SSO, or IAM roles (EC2/Lambda)
14967
+ - Env vars: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION
14968
+
14969
+ ### Architecture
14970
+ - Compute: Lambda (serverless), ECS/Fargate (containers), EC2 (VMs)
14971
+ - Storage: S3 (objects), DynamoDB (NoSQL), RDS (relational), ElastiCache (Redis)
14972
+ - Networking: ALB, CloudFront (CDN), Route 53 (DNS), API Gateway
14973
+ - IaC: AWS CDK (TypeScript), CloudFormation, or Terraform
14974
+
14975
+ ### Key Patterns
14976
+ - Use AWS CDK for infrastructure as TypeScript code \u2014 type-safe and composable
14977
+ - Lambda + API Gateway for serverless APIs \u2014 auto-scale with zero management
14978
+ - Use IAM roles (not access keys) for service-to-service communication
14979
+ - S3 + CloudFront for static site hosting with global CDN
14980
+
14981
+ ### Pitfalls
14982
+ - AWS pricing is complex \u2014 use AWS Cost Explorer and set billing alerts
14983
+ - IAM is powerful but complex \u2014 start with AWS managed policies, customize later
14984
+ - Region lock-in: Some services are regional \u2014 design for multi-region if needed`
14985
+ }
14986
+ ]
14987
+ }
14988
+ ];
14989
// Returns the full built-in catalogue of integration-pattern categories.
function getPatternCategories() {
  return PATTERN_CATEGORIES;
}
14992
// Looks up one category by id and returns its patterns; unknown ids yield [].
function getPatternsByCategory(categoryId) {
  const match = PATTERN_CATEGORIES.find((category) => category.id === categoryId);
  if (!match) return [];
  return match.patterns || [];
}
14996
// Finds a single pattern by id across every category.
// Returns undefined when no category contains the id.
function getPattern(patternId) {
  for (const { patterns } of PATTERN_CATEGORIES) {
    const found = patterns.find((candidate) => candidate.id === patternId);
    if (found) return found;
  }
  return undefined;
}
15003
// Case-insensitive substring search over every pattern's name, description,
// tool list, and category id. Returns all matching patterns in catalogue order.
function searchPatterns(query) {
  const needle = query.toLowerCase();
  const matches = (text) => text.toLowerCase().includes(needle);
  const hits = [];
  for (const category of PATTERN_CATEGORIES) {
    for (const pattern of category.patterns) {
      const isHit =
        matches(pattern.name) ||
        matches(pattern.description) ||
        pattern.tools.some(matches) ||
        matches(pattern.category);
      if (isHit) hits.push(pattern);
    }
  }
  return hits;
}
15015
+ function saveSelectedPatterns(projectDir, patternIds) {
15016
+ const dir = join10(projectDir, ".fishi");
15017
+ if (!existsSync10(dir)) mkdirSync4(dir, { recursive: true });
15018
+ const data = { patterns: patternIds, savedAt: (/* @__PURE__ */ new Date()).toISOString() };
15019
+ writeFileSync4(join10(dir, "patterns.json"), JSON.stringify(data, null, 2) + "\n", "utf-8");
15020
+ }
15021
+ function readSelectedPatterns(projectDir) {
15022
+ const p = join10(projectDir, ".fishi", "patterns.json");
15023
+ if (!existsSync10(p)) return [];
15024
+ try {
15025
+ const data = JSON.parse(readFileSync7(p, "utf-8"));
15026
+ return data.patterns || [];
15027
+ } catch {
15028
+ return [];
15029
+ }
15030
+ }
15031
+ function generatePatternGuide(patternIds) {
15032
+ let guide = "# Integration Patterns \u2014 Architecture Guide\n\n";
15033
+ guide += "> Auto-generated by FISHI Pattern Marketplace.\n";
15034
+ guide += "> Use these patterns as architectural reference during the design phase.\n\n";
15035
+ for (const id of patternIds) {
15036
+ const pattern = getPattern(id);
15037
+ if (!pattern) continue;
15038
+ guide += `## ${pattern.name} (${pattern.category})
15039
+
15040
+ `;
15041
+ guide += `**Tools:** ${pattern.tools.join(", ")}
15042
+
15043
+ `;
15044
+ guide += pattern.guide + "\n\n---\n\n";
15045
+ }
15046
+ return guide;
15047
+ }
15048
+
15049
+ // src/generators/file-lock-manager.ts
15050
+ import { existsSync as existsSync11, readFileSync as readFileSync8, writeFileSync as writeFileSync5, mkdirSync as mkdirSync5 } from "fs";
15051
+ import { join as join11, dirname as dirname4 } from "path";
15052
+ function lockFilePath(projectDir) {
15053
+ return join11(projectDir, ".fishi", "state", "file-locks.yaml");
15054
+ }
15055
+ function readFileLocks(projectDir) {
15056
+ const p = lockFilePath(projectDir);
15057
+ if (!existsSync11(p)) return [];
15058
+ const content = readFileSync8(p, "utf-8");
15059
+ const locks = [];
15060
+ const lockBlocks = content.split(/\n\s*-\s+file:\s*/).slice(1);
15061
+ for (const block of lockBlocks) {
15062
+ const lines = ("file: " + block).split("\n");
15063
+ const lock = {};
15064
+ for (const line of lines) {
15065
+ const fileMatch = line.match(/^\s*file:\s*['"]?(.+?)['"]?\s*$/);
15066
+ const agentMatch = line.match(/^\s*agent:\s*['"]?(.+?)['"]?\s*$/);
15067
+ const taskMatch = line.match(/^\s*task:\s*['"]?(.+?)['"]?\s*$/);
15068
+ const coordMatch = line.match(/^\s*coordinator:\s*['"]?(.+?)['"]?\s*$/);
15069
+ const timeMatch = line.match(/^\s*locked_at:\s*['"]?(.+?)['"]?\s*$/);
15070
+ if (fileMatch) lock.file = fileMatch[1];
15071
+ if (agentMatch) lock.agent = agentMatch[1];
15072
+ if (taskMatch) lock.task = taskMatch[1];
15073
+ if (coordMatch) lock.coordinator = coordMatch[1];
15074
+ if (timeMatch) lock.lockedAt = timeMatch[1];
15075
+ }
15076
+ if (lock.file && lock.agent) {
15077
+ locks.push(lock);
15078
+ }
15079
+ }
15080
+ return locks;
15081
+ }
15082
+ function writeLocks(projectDir, locks) {
15083
+ const dir = dirname4(lockFilePath(projectDir));
15084
+ if (!existsSync11(dir)) mkdirSync5(dir, { recursive: true });
15085
+ if (locks.length === 0) {
15086
+ writeFileSync5(lockFilePath(projectDir), "locks: []\n", "utf-8");
15087
+ return;
15088
+ }
15089
+ let yaml = "locks:\n";
15090
+ for (const lock of locks) {
15091
+ yaml += ` - file: "${lock.file}"
15092
+ `;
15093
+ yaml += ` agent: "${lock.agent}"
15094
+ `;
15095
+ yaml += ` task: "${lock.task}"
15096
+ `;
15097
+ yaml += ` coordinator: "${lock.coordinator}"
15098
+ `;
15099
+ yaml += ` locked_at: "${lock.lockedAt}"
15100
+ `;
15101
+ }
15102
+ writeFileSync5(lockFilePath(projectDir), yaml, "utf-8");
15103
+ }
15104
+ function checkLockConflicts(projectDir, files, agent, task) {
15105
+ const existing = readFileLocks(projectDir);
15106
+ const conflicts = [];
15107
+ for (const file of files) {
15108
+ const locked = existing.find((l) => l.file === file && l.agent !== agent);
15109
+ if (locked) {
15110
+ conflicts.push({
15111
+ file,
15112
+ requestingAgent: agent,
15113
+ requestingTask: task,
15114
+ lockedBy: locked.agent,
15115
+ lockedTask: locked.task,
15116
+ lockedAt: locked.lockedAt
15117
+ });
15118
+ }
15119
+ }
15120
+ return conflicts;
15121
+ }
15122
+ function acquireLocks(projectDir, files, agent, task, coordinator) {
15123
+ const conflicts = checkLockConflicts(projectDir, files, agent, task);
15124
+ if (conflicts.length > 0) {
15125
+ return { success: false, locked: [], conflicts };
15126
+ }
15127
+ const existing = readFileLocks(projectDir);
15128
+ const now = (/* @__PURE__ */ new Date()).toISOString();
15129
+ const newLocks = [];
15130
+ for (const file of files) {
15131
+ if (existing.some((l) => l.file === file && l.agent === agent)) continue;
15132
+ newLocks.push({ file, agent, task, coordinator, lockedAt: now });
15133
+ }
15134
+ writeLocks(projectDir, [...existing, ...newLocks]);
15135
+ return { success: true, locked: files, conflicts: [] };
15136
+ }
15137
+ function releaseLocks(projectDir, agent, task) {
15138
+ const existing = readFileLocks(projectDir);
15139
+ const released = [];
15140
+ const remaining = [];
15141
+ for (const lock of existing) {
15142
+ if (lock.agent === agent && (!task || lock.task === task)) {
15143
+ released.push(lock.file);
15144
+ } else {
15145
+ remaining.push(lock);
15146
+ }
15147
+ }
15148
+ writeLocks(projectDir, remaining);
15149
+ return released;
15150
+ }
15151
+ function getAgentLocks(projectDir, agent) {
15152
+ return readFileLocks(projectDir).filter((l) => l.agent === agent);
15153
+ }
15154
+ function getLockSummary(projectDir) {
15155
+ const locks = readFileLocks(projectDir);
15156
+ const byAgent = {};
15157
+ for (const lock of locks) {
15158
+ byAgent[lock.agent] = (byAgent[lock.agent] || 0) + 1;
15159
+ }
15160
+ return { totalLocked: locks.length, byAgent };
15161
+ }
15162
+
13019
15163
  // src/templates/configs/sandbox-policy.ts
13020
15164
  function getSandboxPolicyTemplate() {
13021
15165
  return `# FISHI Sandbox Policy
@@ -13409,9 +15553,11 @@ function getDashboardHtml() {
13409
15553
  }
13410
15554
  export {
13411
15555
  DOMAIN_INFO,
15556
+ acquireLocks,
13412
15557
  architectAgentTemplate,
13413
15558
  backendAgentTemplate,
13414
15559
  buildSandboxEnv,
15560
+ checkLockConflicts,
13415
15561
  createBackup,
13416
15562
  detectComponentRegistry,
13417
15563
  detectConflicts,
@@ -13426,12 +15572,14 @@ export {
13426
15572
  fullstackAgentTemplate,
13427
15573
  generateDefaultTokens,
13428
15574
  generateDesignSystemConfig,
15575
+ generatePatternGuide,
13429
15576
  generatePermissionBlock,
13430
15577
  generateScaffold,
13431
15578
  generateSecurityReport,
13432
15579
  getAdaptiveTaskGraphSkill,
13433
15580
  getAgentCompleteHook,
13434
15581
  getAgentFactoryTemplate,
15582
+ getAgentLocks,
13435
15583
  getAgentRegistryTemplate,
13436
15584
  getAgentSummary,
13437
15585
  getAgentsMdTemplate,
@@ -13456,12 +15604,14 @@ export {
13456
15604
  getDockerfileTemplate,
13457
15605
  getDocumentationSkill,
13458
15606
  getDomainConfigYaml,
15607
+ getFileLockHookScript,
13459
15608
  getFishiYamlTemplate,
13460
15609
  getGateCommand,
13461
15610
  getGateManagerScript,
13462
15611
  getGitignoreAdditions,
13463
15612
  getInitCommand,
13464
15613
  getLearningsManagerScript,
15614
+ getLockSummary,
13465
15615
  getMarketplaceArchitectTemplate,
13466
15616
  getMasterOrchestratorTemplate,
13467
15617
  getMcpJsonTemplate,
@@ -13469,6 +15619,9 @@ export {
13469
15619
  getMobileArchitectTemplate,
13470
15620
  getModelRoutingReference,
13471
15621
  getMonitorEmitterScript,
15622
+ getPattern,
15623
+ getPatternCategories,
15624
+ getPatternsByCategory,
13472
15625
  getPermissionsForRole,
13473
15626
  getPhaseRunnerScript,
13474
15627
  getPostEditHook,
@@ -13505,15 +15658,20 @@ export {
13505
15658
  planningLeadTemplate,
13506
15659
  qualityLeadTemplate,
13507
15660
  readDomainConfig,
15661
+ readFileLocks,
13508
15662
  readMonitorState,
13509
15663
  readSandboxConfig,
13510
15664
  readSandboxPolicy,
15665
+ readSelectedPatterns,
15666
+ releaseLocks,
13511
15667
  researchAgentTemplate,
13512
15668
  runBrandGuardian,
13513
15669
  runInDockerSandbox,
13514
15670
  runInProcessSandbox,
13515
15671
  runInSandbox,
13516
15672
  runSecurityScan,
15673
+ saveSelectedPatterns,
15674
+ searchPatterns,
13517
15675
  securityAgentTemplate,
13518
15676
  startDevServer,
13519
15677
  testingAgentTemplate,