webpeel 0.20.1 → 0.20.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. package/dist/core/llm-extract.d.ts +17 -1
  2. package/dist/core/llm-extract.js +255 -11
  3. package/dist/mcp/handlers/extract.d.ts +1 -0
  4. package/dist/mcp/handlers/extract.js +31 -0
  5. package/dist/server/app.d.ts +14 -0
  6. package/dist/server/app.js +384 -0
  7. package/dist/server/auth-store.d.ts +27 -0
  8. package/dist/server/auth-store.js +88 -0
  9. package/dist/server/email-service.d.ts +21 -0
  10. package/dist/server/email-service.js +79 -0
  11. package/dist/server/job-queue.d.ts +100 -0
  12. package/dist/server/job-queue.js +145 -0
  13. package/dist/server/logger.d.ts +10 -0
  14. package/dist/server/logger.js +37 -0
  15. package/dist/server/middleware/auth.d.ts +28 -0
  16. package/dist/server/middleware/auth.js +221 -0
  17. package/dist/server/middleware/rate-limit.d.ts +24 -0
  18. package/dist/server/middleware/rate-limit.js +167 -0
  19. package/dist/server/middleware/url-validator.d.ts +15 -0
  20. package/dist/server/middleware/url-validator.js +186 -0
  21. package/dist/server/openapi.yaml +6418 -0
  22. package/dist/server/pg-auth-store.d.ts +132 -0
  23. package/dist/server/pg-auth-store.js +472 -0
  24. package/dist/server/pg-job-queue.d.ts +59 -0
  25. package/dist/server/pg-job-queue.js +375 -0
  26. package/dist/server/premium/domain-intel.d.ts +16 -0
  27. package/dist/server/premium/domain-intel.js +133 -0
  28. package/dist/server/premium/index.d.ts +17 -0
  29. package/dist/server/premium/index.js +35 -0
  30. package/dist/server/premium/swr-cache.d.ts +14 -0
  31. package/dist/server/premium/swr-cache.js +34 -0
  32. package/dist/server/routes/activity.d.ts +6 -0
  33. package/dist/server/routes/activity.js +74 -0
  34. package/dist/server/routes/answer.d.ts +5 -0
  35. package/dist/server/routes/answer.js +125 -0
  36. package/dist/server/routes/ask.d.ts +28 -0
  37. package/dist/server/routes/ask.js +229 -0
  38. package/dist/server/routes/batch.d.ts +6 -0
  39. package/dist/server/routes/batch.js +493 -0
  40. package/dist/server/routes/cli-usage.d.ts +6 -0
  41. package/dist/server/routes/cli-usage.js +127 -0
  42. package/dist/server/routes/compat.d.ts +23 -0
  43. package/dist/server/routes/compat.js +652 -0
  44. package/dist/server/routes/deep-fetch.d.ts +8 -0
  45. package/dist/server/routes/deep-fetch.js +57 -0
  46. package/dist/server/routes/demo.d.ts +24 -0
  47. package/dist/server/routes/demo.js +517 -0
  48. package/dist/server/routes/do.d.ts +8 -0
  49. package/dist/server/routes/do.js +72 -0
  50. package/dist/server/routes/extract.d.ts +8 -0
  51. package/dist/server/routes/extract.js +235 -0
  52. package/dist/server/routes/fetch.d.ts +7 -0
  53. package/dist/server/routes/fetch.js +999 -0
  54. package/dist/server/routes/health.d.ts +7 -0
  55. package/dist/server/routes/health.js +19 -0
  56. package/dist/server/routes/jobs.d.ts +7 -0
  57. package/dist/server/routes/jobs.js +573 -0
  58. package/dist/server/routes/mcp.d.ts +14 -0
  59. package/dist/server/routes/mcp.js +141 -0
  60. package/dist/server/routes/oauth.d.ts +9 -0
  61. package/dist/server/routes/oauth.js +396 -0
  62. package/dist/server/routes/playground.d.ts +17 -0
  63. package/dist/server/routes/playground.js +283 -0
  64. package/dist/server/routes/screenshot.d.ts +22 -0
  65. package/dist/server/routes/screenshot.js +816 -0
  66. package/dist/server/routes/search.d.ts +6 -0
  67. package/dist/server/routes/search.js +303 -0
  68. package/dist/server/routes/session.d.ts +15 -0
  69. package/dist/server/routes/session.js +397 -0
  70. package/dist/server/routes/stats.d.ts +6 -0
  71. package/dist/server/routes/stats.js +71 -0
  72. package/dist/server/routes/stripe.d.ts +15 -0
  73. package/dist/server/routes/stripe.js +294 -0
  74. package/dist/server/routes/users.d.ts +8 -0
  75. package/dist/server/routes/users.js +1671 -0
  76. package/dist/server/routes/watch.d.ts +15 -0
  77. package/dist/server/routes/watch.js +309 -0
  78. package/dist/server/routes/webhooks.d.ts +26 -0
  79. package/dist/server/routes/webhooks.js +170 -0
  80. package/dist/server/routes/youtube.d.ts +6 -0
  81. package/dist/server/routes/youtube.js +130 -0
  82. package/dist/server/sentry.d.ts +13 -0
  83. package/dist/server/sentry.js +38 -0
  84. package/dist/server/types.d.ts +15 -0
  85. package/dist/server/types.js +7 -0
  86. package/dist/server/utils/response.d.ts +44 -0
  87. package/dist/server/utils/response.js +69 -0
  88. package/dist/server/utils/sse.d.ts +22 -0
  89. package/dist/server/utils/sse.js +38 -0
  90. package/package.json +2 -1
@@ -2,8 +2,14 @@
2
2
  * LLM-based extraction: sends markdown/text content to an LLM
3
3
  * with instructions to extract structured data.
4
4
  *
5
- * Supports OpenAI-compatible APIs (OpenAI, Anthropic via proxy, local models).
5
+ * Supports:
6
+ * - OpenAI-compatible APIs (OpenAI, custom models via baseUrl)
7
+ * - Anthropic (Claude Haiku, Sonnet, Opus)
8
+ * - Google (Gemini Flash, Pro)
6
9
  */
10
+ export type LLMProvider = 'openai' | 'anthropic' | 'google';
11
+ /** Default models per provider (cheapest/fastest) */
12
+ export declare const DEFAULT_PROVIDER_MODELS: Record<LLMProvider, string>;
7
13
  export interface LLMExtractionOptions {
8
14
  content: string;
9
15
  instruction?: string;
@@ -12,6 +18,11 @@ export interface LLMExtractionOptions {
12
18
  baseUrl?: string;
13
19
  model?: string;
14
20
  maxTokens?: number;
21
+ url?: string;
22
+ prompt?: string;
23
+ llmProvider?: LLMProvider;
24
+ llmApiKey?: string;
25
+ llmModel?: string;
15
26
  }
16
27
  export interface LLMExtractionResult {
17
28
  items: Array<Record<string, any>>;
@@ -21,6 +32,7 @@ export interface LLMExtractionResult {
21
32
  };
22
33
  model: string;
23
34
  cost?: number;
35
+ provider?: LLMProvider;
24
36
  }
25
37
  /**
26
38
  * Detect if schema is a "full" JSON Schema (has type:"object" and properties).
@@ -51,5 +63,9 @@ export declare function estimateCost(model: string, inputTokens: number, outputT
51
63
  export declare function parseItems(text: string, _schema?: object): Array<Record<string, any>>;
52
64
  /**
53
65
  * Extract structured data from content using an LLM.
66
+ *
67
+ * Supports OpenAI (default), Anthropic, and Google providers.
68
+ * Pass `llmProvider` + `llmApiKey` to select a provider.
69
+ * Falls back to OpenAI-compatible path when no provider is specified.
54
70
  */
55
71
  export declare function extractWithLLM(options: LLMExtractionOptions): Promise<LLMExtractionResult>;
@@ -2,8 +2,17 @@
2
2
  * LLM-based extraction: sends markdown/text content to an LLM
3
3
  * with instructions to extract structured data.
4
4
  *
5
- * Supports OpenAI-compatible APIs (OpenAI, Anthropic via proxy, local models).
5
+ * Supports:
6
+ * - OpenAI-compatible APIs (OpenAI, custom models via baseUrl)
7
+ * - Anthropic (Claude Haiku, Sonnet, Opus)
8
+ * - Google (Gemini Flash, Pro)
6
9
  */
10
+ /** Default models per provider (cheapest/fastest) */
11
+ export const DEFAULT_PROVIDER_MODELS = {
12
+ openai: 'gpt-4o-mini',
13
+ anthropic: 'claude-haiku-4-5',
14
+ google: 'gemini-2.0-flash',
15
+ };
7
16
  // Cost per 1M tokens (input, output) for known models
8
17
  const MODEL_COSTS = {
9
18
  'gpt-4o-mini': [0.15, 0.60],
@@ -197,16 +206,250 @@ function buildResponseFormat(schema) {
197
206
  // For simple example schemas, fall back to json_object
198
207
  return { type: 'json_object' };
199
208
  }
209
+ // ─── Multi-provider helpers ────────────────────────────────────────────────
210
+ /**
211
+ * Strip markdown code block wrappers from LLM output.
212
+ * Handles ```json...``` or ```...``` patterns.
213
+ */
214
+ function stripMarkdownCodeBlocks(text) {
215
+ // Match ```json ... ``` or ``` ... ``` (possibly multiline)
216
+ const stripped = text.replace(/^```(?:json)?\s*\n?([\s\S]*?)\n?```\s*$/m, '$1').trim();
217
+ return stripped || text.trim();
218
+ }
219
+ /**
220
+ * Attempt to fix common JSON issues: comments, trailing commas.
221
+ */
222
+ function fixJsonString(text) {
223
+ return text
224
+ .replace(/\/\/[^\n]*/g, '') // single-line comments
225
+ .replace(/\/\*[\s\S]*?\*\//g, '') // multi-line comments
226
+ .replace(/,(\s*[}\]])/g, '$1') // trailing commas
227
+ .trim();
228
+ }
229
+ /**
230
+ * Parse a raw LLM response into a JSON value (object or array).
231
+ * Strips markdown code blocks and attempts to fix invalid JSON.
232
+ * Returns the parsed value, or throws with `rawOutput` attached.
233
+ */
234
+ function parseJsonSafe(text) {
235
+ const cleaned = stripMarkdownCodeBlocks(text);
236
+ // 1. Direct parse
237
+ try {
238
+ return JSON.parse(cleaned);
239
+ }
240
+ catch { /* continue */ }
241
+ // 2. Fix comments/trailing commas
242
+ try {
243
+ return JSON.parse(fixJsonString(cleaned));
244
+ }
245
+ catch { /* continue */ }
246
+ // 3. Extract JSON object or array from surrounding text
247
+ const objMatch = cleaned.match(/\{[\s\S]*\}/);
248
+ const arrMatch = cleaned.match(/\[[\s\S]*\]/);
249
+ if (objMatch) {
250
+ try {
251
+ return JSON.parse(objMatch[0]);
252
+ }
253
+ catch { /* continue */ }
254
+ try {
255
+ return JSON.parse(fixJsonString(objMatch[0]));
256
+ }
257
+ catch { /* continue */ }
258
+ }
259
+ if (arrMatch) {
260
+ try {
261
+ return JSON.parse(arrMatch[0]);
262
+ }
263
+ catch { /* continue */ }
264
+ try {
265
+ return JSON.parse(fixJsonString(arrMatch[0]));
266
+ }
267
+ catch { /* continue */ }
268
+ }
269
+ const err = new Error(`Failed to parse LLM response as JSON: ${text.slice(0, 200)}`);
270
+ err.rawOutput = text;
271
+ throw err;
272
+ }
273
+ /**
274
+ * Normalize a parsed JSON value into an items array.
275
+ */
276
+ function normalizeToItems(parsed) {
277
+ if (Array.isArray(parsed))
278
+ return parsed;
279
+ if (parsed && typeof parsed === 'object') {
280
+ const obj = parsed;
281
+ if (Array.isArray(obj['items']))
282
+ return obj['items'];
283
+ if (Array.isArray(obj['data']))
284
+ return obj['data'];
285
+ if (Array.isArray(obj['results']))
286
+ return obj['results'];
287
+ return [obj];
288
+ }
289
+ return [];
290
+ }
291
+ /**
292
+ * Call the Anthropic Messages API for extraction.
293
+ */
294
+ async function callAnthropicExtract(params) {
295
+ const { content, schema, prompt, llmApiKey, llmModel } = params;
296
+ const model = llmModel || DEFAULT_PROVIDER_MODELS.anthropic;
297
+ const truncated = content.slice(0, 30_000);
298
+ const userContent = `Extract data from this webpage content according to the JSON schema.\n\n` +
299
+ `Schema: ${JSON.stringify(schema)}\n` +
300
+ (prompt ? `Instructions: ${prompt}\n` : '') +
301
+ `\nWebpage content:\n${truncated}\n\n` +
302
+ `Return ONLY valid JSON matching the schema. No explanation.`;
303
+ const response = await fetch('https://api.anthropic.com/v1/messages', {
304
+ method: 'POST',
305
+ headers: {
306
+ 'x-api-key': llmApiKey,
307
+ 'anthropic-version': '2023-06-01',
308
+ 'content-type': 'application/json',
309
+ },
310
+ body: JSON.stringify({
311
+ model,
312
+ max_tokens: 4096,
313
+ messages: [{ role: 'user', content: userContent }],
314
+ }),
315
+ });
316
+ if (!response.ok) {
317
+ const body = await response.text().catch(() => '');
318
+ if (response.status === 401)
319
+ throw new Error('LLM API authentication failed (401). Check your Anthropic API key.');
320
+ if (response.status === 429)
321
+ throw new Error('LLM API rate limit exceeded (429). Please wait and retry.');
322
+ throw new Error(`Anthropic API error: HTTP ${response.status}${body ? ` — ${body.slice(0, 200)}` : ''}`);
323
+ }
324
+ const data = await response.json();
325
+ const text = (data.content ?? []).filter(b => b.type === 'text').map(b => b.text).join('');
326
+ let parsed;
327
+ try {
328
+ parsed = parseJsonSafe(text);
329
+ }
330
+ catch (err) {
331
+ const e = new Error('llm_parse_error');
332
+ e.rawOutput = text;
333
+ throw e;
334
+ }
335
+ return {
336
+ items: normalizeToItems(parsed),
337
+ tokens: {
338
+ input: data.usage?.input_tokens ?? 0,
339
+ output: data.usage?.output_tokens ?? 0,
340
+ },
341
+ model: data.model || model,
342
+ };
343
+ }
344
+ /**
345
+ * Call the Google Gemini API for extraction.
346
+ */
347
+ async function callGoogleExtract(params) {
348
+ const { content, schema, prompt, llmApiKey, llmModel } = params;
349
+ const model = llmModel || DEFAULT_PROVIDER_MODELS.google;
350
+ const truncated = content.slice(0, 30_000);
351
+ const userText = `Extract data from this webpage content according to the JSON schema.\n\n` +
352
+ `Schema: ${JSON.stringify(schema)}\n` +
353
+ (prompt ? `Instructions: ${prompt}\n` : '') +
354
+ `\nWebpage content:\n${truncated}\n\n` +
355
+ `Return ONLY valid JSON matching the schema. No explanation.`;
356
+ const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${llmApiKey}`, {
357
+ method: 'POST',
358
+ headers: { 'content-type': 'application/json' },
359
+ body: JSON.stringify({
360
+ contents: [{ parts: [{ text: userText }] }],
361
+ generationConfig: { responseMimeType: 'application/json' },
362
+ }),
363
+ });
364
+ if (!response.ok) {
365
+ const body = await response.text().catch(() => '');
366
+ if (response.status === 401 || response.status === 403)
367
+ throw new Error('LLM API authentication failed. Check your Google API key.');
368
+ if (response.status === 429)
369
+ throw new Error('LLM API rate limit exceeded (429). Please wait and retry.');
370
+ throw new Error(`Google API error: HTTP ${response.status}${body ? ` — ${body.slice(0, 200)}` : ''}`);
371
+ }
372
+ const data = await response.json();
373
+ const text = (data.candidates?.[0]?.content?.parts ?? []).map(p => p.text).join('');
374
+ let parsed;
375
+ try {
376
+ parsed = parseJsonSafe(text);
377
+ }
378
+ catch (err) {
379
+ const e = new Error('llm_parse_error');
380
+ e.rawOutput = text;
381
+ throw e;
382
+ }
383
+ return {
384
+ items: normalizeToItems(parsed),
385
+ tokens: {
386
+ input: data.usageMetadata?.promptTokenCount ?? 0,
387
+ output: data.usageMetadata?.candidatesTokenCount ?? 0,
388
+ },
389
+ model: data.modelVersion || model,
390
+ };
391
+ }
392
+ // ─── Main export ───────────────────────────────────────────────────────────
200
393
  /**
201
394
  * Extract structured data from content using an LLM.
395
+ *
396
+ * Supports OpenAI (default), Anthropic, and Google providers.
397
+ * Pass `llmProvider` + `llmApiKey` to select a provider.
398
+ * Falls back to OpenAI-compatible path when no provider is specified.
202
399
  */
203
400
  export async function extractWithLLM(options) {
204
- const { content, instruction, baseUrl = 'https://api.openai.com/v1', model = 'gpt-4o-mini', maxTokens = 4000, } = options;
205
- const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
206
- if (!apiKey) {
401
+ // Resolve aliases: new-style params take precedence over old-style
402
+ const resolvedProvider = (options.llmProvider || 'openai');
403
+ const resolvedApiKey = options.llmApiKey || options.apiKey || process.env.OPENAI_API_KEY;
404
+ const resolvedModel = options.llmModel || options.model;
405
+ const resolvedInstruction = options.prompt || options.instruction;
406
+ const { content, baseUrl = 'https://api.openai.com/v1', maxTokens = 4000, } = options;
407
+ if (!resolvedApiKey) {
207
408
  throw new Error('LLM extraction requires an API key.\n' +
208
- 'Set OPENAI_API_KEY environment variable or use --llm-key <key>');
409
+ 'Set OPENAI_API_KEY environment variable or provide llmApiKey in the request.');
410
+ }
411
+ // ── Anthropic path ────────────────────────────────────────────────────────
412
+ if (resolvedProvider === 'anthropic') {
413
+ const schema = options.schema || {};
414
+ const result = await callAnthropicExtract({
415
+ content,
416
+ schema,
417
+ prompt: resolvedInstruction,
418
+ llmApiKey: resolvedApiKey,
419
+ llmModel: resolvedModel || DEFAULT_PROVIDER_MODELS.anthropic,
420
+ });
421
+ if (options.schema) {
422
+ validateSchemaShape(result.items, options.schema);
423
+ }
424
+ return {
425
+ items: result.items,
426
+ tokensUsed: result.tokens,
427
+ model: result.model,
428
+ provider: 'anthropic',
429
+ };
209
430
  }
431
+ // ── Google path ───────────────────────────────────────────────────────────
432
+ if (resolvedProvider === 'google') {
433
+ const schema = options.schema || {};
434
+ const result = await callGoogleExtract({
435
+ content,
436
+ schema,
437
+ prompt: resolvedInstruction,
438
+ llmApiKey: resolvedApiKey,
439
+ llmModel: resolvedModel || DEFAULT_PROVIDER_MODELS.google,
440
+ });
441
+ if (options.schema) {
442
+ validateSchemaShape(result.items, options.schema);
443
+ }
444
+ return {
445
+ items: result.items,
446
+ tokensUsed: result.tokens,
447
+ model: result.model,
448
+ provider: 'google',
449
+ };
450
+ }
451
+ // ── OpenAI path (default, backward-compatible) ────────────────────────────
452
+ const finalModel = resolvedModel || DEFAULT_PROVIDER_MODELS.openai;
210
453
  // Resolve schema: convert simple schemas to full JSON Schema if needed
211
454
  let resolvedSchema = options.schema;
212
455
  if (resolvedSchema && !isFullJsonSchema(resolvedSchema)) {
@@ -214,16 +457,16 @@ export async function extractWithLLM(options) {
214
457
  }
215
458
  // Choose system prompt based on whether a schema is provided
216
459
  const systemPrompt = resolvedSchema ? SCHEMA_SYSTEM_PROMPT : GENERIC_SYSTEM_PROMPT;
217
- const userMessage = buildUserMessage(content, instruction, resolvedSchema ?? options.schema);
460
+ const userMessage = buildUserMessage(content, resolvedInstruction, resolvedSchema ?? options.schema);
218
461
  const responseFormat = buildResponseFormat(resolvedSchema);
219
462
  const response = await fetch(`${baseUrl}/chat/completions`, {
220
463
  method: 'POST',
221
464
  headers: {
222
465
  'Content-Type': 'application/json',
223
- 'Authorization': `Bearer ${apiKey}`,
466
+ 'Authorization': `Bearer ${resolvedApiKey}`,
224
467
  },
225
468
  body: JSON.stringify({
226
- model,
469
+ model: finalModel,
227
470
  messages: [
228
471
  { role: 'system', content: systemPrompt },
229
472
  { role: 'user', content: userMessage },
@@ -252,12 +495,13 @@ export async function extractWithLLM(options) {
252
495
  }
253
496
  const inputTokens = data.usage?.prompt_tokens ?? 0;
254
497
  const outputTokens = data.usage?.completion_tokens ?? 0;
255
- const resolvedModel = data.model ?? model;
256
- const cost = estimateCost(resolvedModel, inputTokens, outputTokens);
498
+ const resolvedFinalModel = data.model ?? finalModel;
499
+ const cost = estimateCost(resolvedFinalModel, inputTokens, outputTokens);
257
500
  return {
258
501
  items,
259
502
  tokensUsed: { input: inputTokens, output: outputTokens },
260
- model: resolvedModel,
503
+ model: resolvedFinalModel,
261
504
  cost,
505
+ provider: 'openai',
262
506
  };
263
507
  }
@@ -1,6 +1,7 @@
1
1
  /**
2
2
  * handleExtract — extract structured data from a URL.
3
3
  * Supports auto-detection, field lists, schema, and brand presets.
4
+ * Supports LLM-based extraction via llmProvider + llmApiKey.
4
5
  */
5
6
  import { type McpHandler } from './types.js';
6
7
  export declare const handleExtract: McpHandler;
@@ -1,9 +1,11 @@
1
1
  /**
2
2
  * handleExtract — extract structured data from a URL.
3
3
  * Supports auto-detection, field lists, schema, and brand presets.
4
+ * Supports LLM-based extraction via llmProvider + llmApiKey.
4
5
  */
5
6
  import { peel } from '../../index.js';
6
7
  import { textResult, safeStringify, timeout } from './types.js';
8
+ import { extractWithLLM } from '../../core/llm-extract.js';
7
9
  function extractColorsFromContent(content) {
8
10
  const hexRegex = /#[0-9A-Fa-f]{6}|#[0-9A-Fa-f]{3}/g;
9
11
  const matches = content.match(hexRegex);
@@ -27,6 +29,35 @@ export const handleExtract = async (args, _ctx) => {
27
29
  const schema = args['schema'];
28
30
  const fields = args['fields'];
29
31
  const render = args['render'] || false;
32
+ const llmApiKey = args['llmApiKey'];
33
+ const llmProvider = args['llmProvider'];
34
+ const llmModel = args['llmModel'];
35
+ const prompt = args['prompt'];
36
+ // LLM-based extraction: when llmApiKey (and optionally llmProvider) are provided
37
+ if (llmApiKey && (schema || prompt)) {
38
+ const peelResult = await Promise.race([
39
+ peel(url, { format: 'markdown', render }),
40
+ timeout(60000, 'LLM extract fetch'),
41
+ ]);
42
+ const extractResult = await extractWithLLM({
43
+ content: peelResult.content,
44
+ schema: schema,
45
+ prompt,
46
+ llmApiKey,
47
+ llmProvider: llmProvider || 'openai',
48
+ llmModel,
49
+ });
50
+ return textResult(safeStringify({
51
+ success: true,
52
+ url: peelResult.url,
53
+ data: extractResult.items.length === 1 ? extractResult.items[0] : extractResult.items,
54
+ llm: {
55
+ provider: extractResult.provider || llmProvider || 'openai',
56
+ model: extractResult.model,
57
+ tokens: extractResult.tokensUsed,
58
+ },
59
+ }));
60
+ }
30
61
  // Brand preset: fields=['name','logo','colors','fonts','socials'] or _brand flag
31
62
  const isBrandPreset = args['_brand'] ||
32
63
  (Array.isArray(fields) &&
@@ -0,0 +1,14 @@
1
+ /**
2
+ * WebPeel API Server
3
+ * Express-based REST API for hosted deployments
4
+ */
5
+ import { Express } from 'express';
6
+ import './types.js';
7
+ export interface ServerConfig {
8
+ port?: number;
9
+ corsOrigins?: string[];
10
+ rateLimitWindowMs?: number;
11
+ usePostgres?: boolean;
12
+ }
13
+ export declare function createApp(config?: ServerConfig): Express;
14
+ export declare function startServer(config?: ServerConfig): void;