mallmaverick-store-scraper 0.1.3 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mallmaverick-store-scraper",
3
- "version": "0.1.3",
3
+ "version": "0.1.5",
4
4
  "description": "MCP server + CLI for scraping shopping mall store directories. Hours-first layered pipeline + image classification.",
5
5
  "main": "src/main.js",
6
6
  "type": "commonjs",
package/src/mcp-server.js CHANGED
@@ -60,11 +60,11 @@ const TOOLS = [
60
60
  '(name, hours, phone, logo, brand image, categories, etc.). Use this ' +
61
61
  'when the user wants to capture a directory like ' +
62
62
  'https://grasslands.ca/store-directory/.\n\n' +
63
- 'AFTER RUNNING THIS TOOL: give a short summary — how many stores were ' +
64
- 'extracted, hours-layer breakdown, and the saved file path. The CSV is ' +
65
- 'attached as a resource in the tool response (the user can download/' +
66
- 'preview it from there) — do NOT paste the CSV text into your reply. ' +
67
- 'Keep your text reply short.',
63
+ 'AFTER RUNNING THIS TOOL: reply with ONE short sentence — the count ' +
64
+ 'and the file path. The CSV file is attached as a resource_link in the ' +
65
+ 'tool response; do NOT paste CSV text into your reply, do NOT print the ' +
66
+ 'JSON, do NOT summarize each store. If the tool response includes an ' +
67
+ 'error block, surface that error verbatim to the user.',
68
68
  inputSchema: {
69
69
  type: 'object',
70
70
  properties: {
@@ -113,6 +113,15 @@ const TOOLS = [
113
113
  required: ['store_url'],
114
114
  },
115
115
  },
116
+ {
117
+ name: 'check_status',
118
+ description:
119
+ 'Returns the running mall-scraper-mcp version, auth mode, and Worker ' +
120
+ 'connectivity. Use this BEFORE running scrape_directory to verify the ' +
121
+ 'tool is wired up correctly — confirms version, that the OpenAI proxy ' +
122
+ 'is reachable, and that the shared secret is valid.',
123
+ inputSchema: { type: 'object', properties: {} },
124
+ },
116
125
  {
117
126
  name: 'validate_image_url',
118
127
  description:
@@ -129,7 +138,7 @@ const TOOLS = [
129
138
  },
130
139
  ];
131
140
 
132
- const PACKAGE_VERSION = '0.1.3';
141
+ const PACKAGE_VERSION = '0.1.5';
133
142
 
134
143
  const server = new Server(
135
144
  { name: 'mall-scraper-mcp', version: PACKAGE_VERSION },
@@ -145,6 +154,7 @@ server.setRequestHandler(CallToolRequestSchema, async (req) => {
145
154
  case 'scrape_directory': return await handleScrapeDirectory(args || {});
146
155
  case 'get_store_hours': return await handleGetStoreHours(args || {});
147
156
  case 'validate_image_url': return await handleValidateImageUrl(args || {});
157
+ case 'check_status': return await handleCheckStatus(args || {});
148
158
  default:
149
159
  return errorResult(`Unknown tool: ${name}`);
150
160
  }
@@ -225,52 +235,82 @@ async function handleScrapeDirectory({ directory_url, max_stores = 10, concurren
225
235
  stores_extracted: stores.length,
226
236
  hours_layer_breakdown: bySource,
227
237
  llm_usage: usage,
238
+ llm_failed: usage.errors > 0
239
+ ? `⚠ ${usage.errors} LLM calls failed (description/categories/etc. fields will be empty). Last error: ${usage.lastError}. Run check_status to diagnose.`
240
+ : null,
228
241
  written_files: writtenPaths,
229
242
  write_error: writeError,
230
243
  auth_mode: creds.mode,
231
244
  mcp_version: PACKAGE_VERSION,
232
245
  };
233
246
 
234
- // Build a short brief + return the CSV as an embedded resource so
235
- // Claude Desktop can render it as an attachment card instead of inline
236
- // text. Falls back to inline-rendering if the client doesn't support
237
- // resources, but most clients (including Claude Desktop) do.
247
+ // Response design:
248
+ // 1. Brief status line (always) — what the user sees in the chat reply
249
+ // 2. resource_link to the CSV file attachment with user-priority annotations
250
+ // 3. ONLY on error: a loud error block so the user knows something failed
251
+ //
252
+ // No JSON dump / no inline CSV preview when things succeed — keeps the chat
253
+ // reply minimal.
238
254
  const host = new URL(directory_url).hostname.replace(/^www\./, '');
239
255
  const csvFilename = writtenPaths
240
256
  ? path.basename(writtenPaths.csv)
241
257
  : `stores_v5_${host}.csv`;
242
258
  const csvUri = writtenPaths
243
259
  ? `file://${writtenPaths.csv}`
244
- : `file:///tmp/${csvFilename}`;
260
+ : null;
261
+
262
+ const hasLlmFailure = usage.errors > 0;
263
+ const hasWriteFailure = !!writeError;
264
+ const anyFailure = hasLlmFailure || hasWriteFailure;
245
265
 
246
266
  const brief =
247
- `mall-scraper-mcp v${PACKAGE_VERSION}\n` +
248
- `Scraped ${stores.length} store${stores.length === 1 ? '' : 's'} from ${host}.\n` +
249
- `Hours-layer breakdown: ${Object.entries(bySource).map(([k, v]) => `${k}=${v}`).join(', ')}.\n` +
250
- (writtenPaths
251
- ? `Saved to: ${writtenPaths.csv}`
252
- : `⚠ disk write failed (${writeError}); CSV is in the attached resource only.`);
253
-
254
- return {
255
- content: [
256
- { type: 'text', text: brief },
257
- {
258
- type: 'resource',
259
- resource: {
260
- uri: csvUri,
261
- name: csvFilename,
262
- mimeType: 'text/csv',
263
- text: csvText,
264
- },
265
- },
266
- // Keep the JSON summary at the end for any debugging the user asks for,
267
- // but it's far enough down that it doesn't dominate the chat.
268
- {
269
- type: 'text',
270
- text: '\n--- Run summary ---\n' + JSON.stringify(summary, null, 2),
271
- },
272
- ],
273
- };
267
+ `✅ mall-scraper-mcp v${PACKAGE_VERSION}\n` +
268
+ `${stores.length} store${stores.length === 1 ? '' : 's'} from ${host}\n` +
269
+ (writtenPaths ? `📄 ${writtenPaths.csv}` : '⚠ Disk write failed');
270
+
271
+ const content = [
272
+ {
273
+ type: 'text',
274
+ text: brief,
275
+ annotations: { audience: ['user'], priority: 1.0 },
276
+ },
277
+ ];
278
+
279
+ // resource_link only if we have a real file path (file:// URI must point at
280
+ // an existing file for the client to do anything useful with it).
281
+ if (csvUri) {
282
+ content.push({
283
+ type: 'resource_link',
284
+ uri: csvUri,
285
+ name: csvFilename,
286
+ description: `Store directory scrape — ${stores.length} stores from ${host}`,
287
+ mimeType: 'text/csv',
288
+ annotations: { audience: ['user'], priority: 0.9 },
289
+ });
290
+ }
291
+
292
+ // Loud error block — only when something failed. The user explicitly asked
293
+ // for nothing other than a status rundown UNLESS something broke.
294
+ if (anyFailure) {
295
+ const errLines = [];
296
+ if (hasLlmFailure) {
297
+ errLines.push(
298
+ `❌ ${usage.errors} LLM call${usage.errors === 1 ? '' : 's'} failed: ${usage.lastError}`,
299
+ ' → description / categories / location_type fields will be empty.',
300
+ ' → Run check_status to diagnose (most likely the Worker token doesn\'t match the SHARED_SECRET).',
301
+ );
302
+ }
303
+ if (hasWriteFailure) {
304
+ errLines.push(`❌ Disk write failed: ${writeError}`);
305
+ }
306
+ content.push({
307
+ type: 'text',
308
+ text: '\n' + errLines.join('\n'),
309
+ annotations: { audience: ['user'], priority: 1.0 },
310
+ });
311
+ }
312
+
313
+ return { content };
274
314
  } finally {
275
315
  try { await browser.close(); } catch (_) {}
276
316
  }
@@ -321,6 +361,86 @@ async function handleGetStoreHours({ store_url, mall_root_url }) {
321
361
  }
322
362
  }
323
363
 
364
+ async function handleCheckStatus() {
365
+ const creds = describeCredentials();
366
+ const status = {
367
+ mcp_version: PACKAGE_VERSION,
368
+ node_version: process.version,
369
+ auth_mode: creds.mode,
370
+ auth_endpoint: creds.endpoint,
371
+ worker_reachable: null,
372
+ worker_health: null,
373
+ worker_auth_ok: null,
374
+ };
375
+
376
+ // If we're in proxy mode, ping the Worker /health endpoint and probe auth.
377
+ if (creds.mode === 'proxy' && creds.endpoint) {
378
+ try {
379
+ const healthUrl = creds.endpoint.replace(/\/+$/, '') + '/health';
380
+ const health = await new Promise((resolve) => {
381
+ const req = https.get(healthUrl, { timeout: 6000 }, (res) => {
382
+ let body = '';
383
+ res.on('data', (c) => { body += c; });
384
+ res.on('end', () => resolve({ status: res.statusCode, body }));
385
+ });
386
+ req.on('error', () => resolve(null));
387
+ req.on('timeout', () => { req.destroy(); resolve(null); });
388
+ });
389
+ if (health) {
390
+ status.worker_reachable = true;
391
+ status.worker_health = health.body.slice(0, 200);
392
+ } else {
393
+ status.worker_reachable = false;
394
+ }
395
+
396
+ // Probe auth: a tiny POST to /v1/models with the shared secret.
397
+ // OpenAI's /v1/models is a cheap, no-tokens endpoint that proves the
398
+ // Worker is forwarding and the key works.
399
+ const token = process.env.MALL_SCRAPER_TOKEN || '';
400
+ const modelsUrl = creds.endpoint.replace(/\/+$/, '') + '/v1/models';
401
+ const auth = await new Promise((resolve) => {
402
+ const req = https.get(modelsUrl, {
403
+ timeout: 8000,
404
+ headers: { 'X-Mall-Scraper-Token': token },
405
+ }, (res) => {
406
+ let body = '';
407
+ res.on('data', (c) => { body += c; });
408
+ res.on('end', () => resolve({ status: res.statusCode, body }));
409
+ });
410
+ req.on('error', () => resolve(null));
411
+ req.on('timeout', () => { req.destroy(); resolve(null); });
412
+ });
413
+ if (auth) {
414
+ status.worker_auth_ok = auth.status === 200;
415
+ if (auth.status !== 200) {
416
+ status.worker_auth_error = auth.body.slice(0, 200);
417
+ }
418
+ }
419
+ } catch (err) {
420
+ status.worker_probe_error = err.message;
421
+ }
422
+ }
423
+
424
+ // Verdict line for the user
425
+ let verdict;
426
+ if (creds.mode === 'none') {
427
+ verdict = '⚠ No credentials configured. Set MALL_SCRAPER_PROXY_URL+MALL_SCRAPER_TOKEN or OPENAI_API_KEY.';
428
+ } else if (creds.mode === 'proxy') {
429
+ if (status.worker_reachable && status.worker_auth_ok) verdict = '✅ All good — version, Worker, and auth all working.';
430
+ else if (!status.worker_reachable) verdict = '⚠ Worker is unreachable. Check MALL_SCRAPER_PROXY_URL.';
431
+ else if (!status.worker_auth_ok) verdict = '⚠ Worker is reachable but rejected the token. MALL_SCRAPER_TOKEN does not match the SHARED_SECRET on the Worker.';
432
+ else verdict = '⚠ Partial — see fields below.';
433
+ } else {
434
+ verdict = `✅ Direct mode (using OPENAI_API_KEY env var). Version ${PACKAGE_VERSION}.`;
435
+ }
436
+
437
+ return {
438
+ content: [
439
+ { type: 'text', text: verdict + '\n\n' + JSON.stringify(status, null, 2) },
440
+ ],
441
+ };
442
+ }
443
+
324
444
  function handleValidateImageUrl({ url }) {
325
445
  if (!url) return Promise.resolve(errorResult('url is required'));
326
446
  return new Promise((resolve) => {
@@ -128,6 +128,8 @@ class StoreExtractor {
128
128
  this.totalTokensOutput = 0;
129
129
  this.totalCost = 0;
130
130
  this.extractionCount = 0;
131
+ this.errorCount = 0;
132
+ this.lastError = null;
131
133
  }
132
134
 
133
135
  async extract(pageData, hoursCanonical) {
@@ -152,6 +154,8 @@ class StoreExtractor {
152
154
  this._trackUsage(resp);
153
155
  raw = JSON.parse(resp.choices[0].message.content);
154
156
  } catch (err) {
157
+ this.errorCount++;
158
+ this.lastError = err.message;
155
159
  if (this.logger) this.logger.warn(` ⚠ Store LLM extract failed: ${err.message}`);
156
160
  return { fields: {}, confidence: 0 };
157
161
  }
@@ -218,6 +222,8 @@ class StoreExtractor {
218
222
  return {
219
223
  model: this.model,
220
224
  extractions: this.extractionCount,
225
+ errors: this.errorCount,
226
+ lastError: this.lastError,
221
227
  totalInputTokens: this.totalTokensInput,
222
228
  totalOutputTokens: this.totalTokensOutput,
223
229
  estimatedCost: `$${this.totalCost.toFixed(4)}`,