clean-web-scraper 3.3.1 → 3.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/example-usage.js CHANGED
@@ -76,14 +76,14 @@ async function bdsmovement ()
 
 void async function main ()
 {
-    // const khameneiIrFreePalestineTagScraper = await khameneiIrFreePalestineTag();
-    // const decolonizepalestineScraper = await decolonizepalestine();
-    const bdsmovementScraper = await bdsmovement();
-    // await WebScraper.combineResults( "./dataset/combined", [
-    //     khameneiIrFreePalestineTagScraper,
-    //     decolonizepalestineScraper,
-    //     bdsmovementScraper
-    // ] );
+    const khameneiIrFreePalestineTagScraper = await khameneiIrFreePalestineTag();
+    const decolonizepalestineScraper = await decolonizepalestine();
+    // const bdsmovementScraper = await bdsmovement();
+    await WebScraper.combineResults( "./dataset/combined", [
+        khameneiIrFreePalestineTagScraper,
+        decolonizepalestineScraper,
+        // bdsmovementScraper
+    ] );
 
     // 4
     // https://electronicintifada.net/
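
Net effect of this hunk: main() now runs the khameneiIrFreePalestineTag and decolonizepalestine scrapers and merges their datasets, while the bdsmovement run is parked behind a comment. A minimal sketch of the resulting flow, assuming each helper returns a configured WebScraper instance whose per-site output already exists on disk:

    void async function main ()
    {
        // Both helpers are assumed to scrape and write their per-site
        // JSONL/CSV output before returning a WebScraper instance.
        const khameneiIrFreePalestineTagScraper = await khameneiIrFreePalestineTag();
        const decolonizepalestineScraper = await decolonizepalestine();

        // combineResults concatenates each instance's JSONL/CSV files
        // into a single dataset under ./dataset/combined.
        await WebScraper.combineResults( "./dataset/combined", [
            khameneiIrFreePalestineTagScraper,
            decolonizepalestineScraper
        ] );
    }();
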
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "clean-web-scraper",
-  "version": "3.3.1",
+  "version": "3.3.3",
   "main": "main.js",
   "scripts": {
     "start": "node main.js",
package/src/WebScraper.js CHANGED
@@ -19,7 +19,11 @@ class WebScraper
         csvOutputPath,
         includeMetadata = false,
         metadataFields = [], // ['title', 'description', 'author', 'lastModified', etc.]
-        userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:134.0) Gecko/20100101 Firefox/134.0"
+        headers = {
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:134.0) Gecko/20100101 Firefox/134.0",
+            "Cache-Control": "private",
+            "Accept": "application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5"
+        }
     })
     {
         this.baseURL = baseURL;
@@ -31,7 +35,7 @@ class WebScraper
         this.csvOutputPath = csvOutputPath || path.join( this.scrapResultPath, "train.csv" );
         this.jsonlOutputPathWithMeta = jsonlOutputPath.replace( ".jsonl", "_with_metadata.jsonl" );
         this.csvOutputPathWithMeta = csvOutputPath.replace( ".csv", "_with_metadata.csv" );
-        this.userAgent = userAgent;
+        this.headers = headers;
         this.includeMetadata = includeMetadata;
         this.metadataFields = new Set( metadataFields );
         this.visited = new Set();
@@ -60,9 +64,7 @@ class WebScraper
         try
         {
             const { data, headers } = await axios.get( url, {
-                headers: {
-                    "user-agent": this.userAgent
-                }
+                headers: this.headers,
             });
             const dom = new JSDOM( data, { url });
             const { document } = dom.window;
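
Together, these hunks replace the single userAgent option with a full headers object: the constructor now takes a headers map with browser-like defaults, stores it, and passes it straight through to axios.get. A minimal sketch of the new configuration surface; the exact set of required options follows the constructor shown above, and note that a caller-supplied headers object replaces the defaults wholesale (default parameters do not merge), so restate anything you still need:

    const WebScraper = require( "./src/WebScraper" );

    // Hypothetical configuration: overriding headers discards the default
    // user-agent/Cache-Control/Accept trio entirely.
    const scraper = new WebScraper({
        baseURL: "https://example.com",
        jsonlOutputPath: "./dataset/example/train.jsonl", // assumed required by the path setup above
        csvOutputPath: "./dataset/example/train.csv",
        headers: {
            "user-agent": "my-crawler/1.0",
            "Accept-Language": "en-US,en;q=0.8"
        }
    });
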
@@ -74,9 +76,17 @@ class WebScraper
 
             if ( article )
             {
-                const metadata = this.metadataextractor( url, document, headers );
-                metadata.depth = depth;
-                this.saveArticle( url, article.textContent, metadata );
+                if ( this.isValidContent( article.textContent ) )
+                {
+
+                    const metadata = this.metadataextractor( url, document, headers );
+                    metadata.depth = depth;
+                    this.saveArticle( url, article.textContent, metadata );
+                }
+                else
+                {
+                    console.error( `Invalid content found at ${url}` );
+                }
             }
             else
             {
@@ -373,12 +383,55 @@ class WebScraper
         if ( fs.existsSync( path.join( __dirname, this.scrapResultPath ) ) )
         {
             fs.rmSync( path.join( __dirname, this.scrapResultPath ), { recursive: true, force: true });
+        }
+        if ( fs.existsSync( path.join( __dirname, this.textOutputPath ) ) )
+        {
             fs.rmSync( path.join( __dirname, this.textOutputPath ), { recursive: true, force: true });
         }
+        if ( fs.existsSync( path.join( __dirname, this.csvOutputPath ) ) )
+        {
+            fs.rmSync( path.join( __dirname, this.csvOutputPath ), { recursive: true, force: true });
+        }
+        if ( fs.existsSync( path.join( __dirname, this.csvOutputPathWithMeta ) ) )
+        {
+            fs.rmSync( path.join( __dirname, this.csvOutputPathWithMeta ), { recursive: true, force: true });
+        }
+        if ( fs.existsSync( path.join( __dirname, this.jsonlOutputPath ) ) )
+        {
+            fs.rmSync( path.join( __dirname, this.jsonlOutputPath ), { recursive: true, force: true });
+        }
+        if ( fs.existsSync( path.join( __dirname, this.jsonlOutputPathWithMeta ) ) )
+        {
+            fs.rmSync( path.join( __dirname, this.jsonlOutputPathWithMeta ), { recursive: true, force: true });
+        }
         fs.mkdirSync( path.join( __dirname, this.scrapResultPath ), { recursive: true });
         fs.mkdirSync( path.join( __dirname, this.textOutputPath ), { recursive: true });
     }
 
+    isValidContent ( content )
+    {
+        // Remove whitespace and newlines for checking
+        const cleanContent = content.replace( /\s+/g, " " ).trim().toLowerCase();
+
+        // List of phrases that indicate invalid content
+        const invalidPhrases = [
+            "verifying that you are not a robot",
+            "checking if the site connection is secure",
+            "please wait while we verify",
+            "please enable javascript",
+            "access denied",
+            "captcha verification"
+        ];
+
+        const hasInvalidPhrases = invalidPhrases.some( phrase => { return cleanContent.includes( phrase ) });
+        // Check content length
+        if ( cleanContent.length < 100 && hasInvalidPhrases )
+        {
+            return false;
+        }
+        return true;
+    }
+
     static sleep ( ms )
     {
         return new Promise( resolve => { return setTimeout( resolve, ms ) });
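
The new isValidContent gate only rejects a page when both conditions hold: the whitespace-collapsed text is shorter than 100 characters and it contains one of the known interstitial phrases. Long articles that merely quote such a phrase still pass. A quick illustration, assuming scraper is a fully configured WebScraper instance (the method itself reads no instance state):

    // Short Cloudflare-style interstitial: under 100 chars AND matches a
    // known phrase, so it is rejected.
    scraper.isValidContent( "Checking if the site connection is secure..." ); // false

    // A long page that merely contains "access denied" still passes,
    // because the length condition no longer holds.
    scraper.isValidContent( `${"real article text ".repeat( 20 )}access denied` ); // true
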
@@ -419,12 +472,18 @@ class WebScraper
         for ( const website of websites )
         {
             const jsonlContent = fs.readFileSync( path.join( __dirname, website.jsonlOutputPath ), "utf-8" );
-            jsonlOutput.write( jsonlContent );
+            if ( jsonlContent )
+            {
+                jsonlOutput.write( jsonlContent );
+            }
 
             if ( website.includeMetadata )
             {
                 const jsonlMetaContent = fs.readFileSync( path.join( __dirname, website.jsonlOutputPathWithMeta ), "utf-8" );
-                jsonlMetaOutput.write( jsonlMetaContent );
+                if ( jsonlMetaContent )
+                {
+                    jsonlMetaOutput.write( jsonlMetaContent );
+                }
             }
         }
 
@@ -451,7 +510,10 @@ class WebScraper
             .split( "\n" )
             .slice( 1 )
             .filter( line => { return line.trim() });
-        csvOutput.write( `${csvContent.join( "\n" )}\n` );
+        if ( csvContent.length > 0 )
+        {
+            csvOutput.write( `${csvContent.join( "\n" )}\n` );
+        }
 
         if ( website.includeMetadata )
         {
@@ -459,7 +521,10 @@ class WebScraper
             .split( "\n" )
             .slice( 1 )
             .filter( line => { return line.trim() });
-            csvMetaOutput.write( `${csvMetaContent.join( "\n" )}\n` );
+            if ( csvMetaContent.length > 0 )
+            {
+                csvMetaOutput.write( `${csvMetaContent.join( "\n" )}\n` );
+            }
         }
     }
 
@@ -500,7 +565,6 @@ class WebScraper
             }
         }
     }
-
 }
 
 module.exports = WebScraper;
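
The remaining combineResults hunks all apply the same defensive pattern: fs.readFileSync returns an empty string for an empty file, and "" is falsy; on the CSV side, a header-only file reduces to an empty array once slice( 1 ) strips the header row and blank lines are filtered out. Each write is therefore skipped when a site produced nothing to merge. The pattern in isolation, with hypothetical inputs:

    const fs = require( "fs" );

    // Hypothetical empty per-site JSONL output.
    fs.writeFileSync( "/tmp/empty.jsonl", "" );
    const jsonlContent = fs.readFileSync( "/tmp/empty.jsonl", "utf-8" );
    if ( jsonlContent ) // "" is falsy, so empty files no longer reach write()
    {
        process.stdout.write( jsonlContent );
    }

    // Hypothetical per-site CSV containing only its header row.
    const csvContent = "text\n"
        .split( "\n" )
        .slice( 1 )                                // drop the per-site header
        .filter( line => { return line.trim() }); // drop blank lines
    console.log( csvContent.length > 0 ); // false: the guarded write is skipped,
                                          // instead of emitting a lone "\n"
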