clean-web-scraper 3.3.0 → 3.3.1

This diff shows the changes between publicly available package versions as they appear in their public registry, and is provided for informational purposes only.
package/example-usage.js CHANGED
@@ -3,15 +3,12 @@ const WebScraper = require( "./src/WebScraper" );
 
 async function khameneiIrFreePalestineTag ()
 {
-	// 1
 	// https://english.khamenei.ir/Opinions/FreePalestine
 	// https://english.khamenei.ir/page/search.xhtml?topicid=0&period=0&q=FreePalestine&pageSize=100#
 	const scraper = new WebScraper({
 		baseURL: "https://english.khamenei.ir/news",
 		startURL: "https://english.khamenei.ir/page/search.xhtml?topicid=0&period=0&q=FreePalestine&pageSize=100#",
 		maxDepth: 1,
-		excludeList: [
-		],
 		exactExcludeList: [
 			"https://english.khamenei.ir/page/search.xhtml?topicid=0&period=0&q=FreePalestine&pageSize=100#"
 		],
@@ -20,7 +17,7 @@ async function khameneiIrFreePalestineTag ()
 		textOutputPath: "./dataset/khamenei-ir-free-palestine-tag/texts",
 		csvOutputPath: "./dataset/khamenei-ir-free-palestine-tag/train.csv",
 		includeMetadata: true,
-		metadataFields: ["title", "description", "author", "lastModified", "language"]
+		metadataFields: ["title", "description", "author"]
 	});
 	await scraper.start();
 	return scraper;
@@ -28,7 +25,6 @@ async function khameneiIrFreePalestineTag ()
 
 async function decolonizepalestine ()
 {
-	// 2
 	// https://decolonizepalestine.com
 	const scraper = new WebScraper({
 		baseURL: "https://decolonizepalestine.com",
@@ -48,23 +44,46 @@ async function decolonizepalestine ()
 		textOutputPath: "./dataset/decolonizepalestine/texts",
 		csvOutputPath: "./dataset/decolonizepalestine/train.csv",
 		includeMetadata: true,
-		metadataFields: ["title", "description", "author", "lastModified", "language"]
+		metadataFields: ["title", "description", "author"]
 	});
 	await scraper.start();
 	return scraper;
 }
 
-void async function main ()
+async function bdsmovement ()
 {
-	const khameneiIrFreePalestineTagScraper = await khameneiIrFreePalestineTag();
-	const decolonizepalestineScraper = await decolonizepalestine();
-	await WebScraper.combineResults( "./dataset/combined", [
-		khameneiIrFreePalestineTagScraper,
-		decolonizepalestineScraper
-	] );
+	// https://bdsmovement.org
+	const scraper = new WebScraper({
+		baseURL: "https://bdsmovement.org",
+		excludeList: [
+			"https://bdsmovement.net/press-area",
+			"https://bdsmovement.net/privacy-policy",
+			"https://bdsmovement.net/get-involved/join-a-bds-campaign",
+			"https://bdsmovement.net/donate_",
+			"https://bdsmovement.net/user",
+			"https://bdsmovement.net/admin"
+		],
+		scrapResultPath: "./dataset/bdsmovement/website",
+		jsonlOutputPath: "./dataset/bdsmovement/train.jsonl",
+		textOutputPath: "./dataset/bdsmovement/texts",
+		csvOutputPath: "./dataset/bdsmovement/train.csv",
+		includeMetadata: true,
+		metadataFields: ["title", "description", "author"]
+	});
+	await scraper.start();
+	return scraper;
+}
 
-	// 3
-	// https://bdsmovement.net
+void async function main ()
+{
+	// const khameneiIrFreePalestineTagScraper = await khameneiIrFreePalestineTag();
+	// const decolonizepalestineScraper = await decolonizepalestine();
+	const bdsmovementScraper = await bdsmovement();
+	// await WebScraper.combineResults( "./dataset/combined", [
+	// 	khameneiIrFreePalestineTagScraper,
+	// 	decolonizepalestineScraper,
+	// 	bdsmovementScraper
+	// ] );
 
 	// 4
 	// https://electronicintifada.net/
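In this release, main() in example-usage.js runs only the new bdsmovement scraper; the other two scrapers and the WebScraper.combineResults call are left commented out. A minimal sketch of a main() that re-enables all three scrapers and the combined output, reconstructed from the commented-out lines above (not part of the published example):

void async function main ()
{
	// Each helper returns its WebScraper instance after start() finishes
	const khameneiIrFreePalestineTagScraper = await khameneiIrFreePalestineTag();
	const decolonizepalestineScraper = await decolonizepalestine();
	const bdsmovementScraper = await bdsmovement();
	// Merge the per-site outputs into one combined dataset directory
	await WebScraper.combineResults( "./dataset/combined", [
		khameneiIrFreePalestineTagScraper,
		decolonizepalestineScraper,
		bdsmovementScraper
	] );
}();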
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "clean-web-scraper",
-  "version": "3.3.0",
+  "version": "3.3.1",
   "main": "main.js",
   "scripts": {
     "start": "node main.js",
package/src/WebScraper.js CHANGED
@@ -18,7 +18,8 @@ class WebScraper
 		textOutputPath,
 		csvOutputPath,
 		includeMetadata = false,
-		metadataFields = [] // ['title', 'description', 'author', 'lastModified', etc.]
+		metadataFields = [], // ['title', 'description', 'author', 'lastModified', etc.]
+		userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:134.0) Gecko/20100101 Firefox/134.0"
 	})
 	{
 		this.baseURL = baseURL;
@@ -30,10 +31,11 @@ class WebScraper
 		this.csvOutputPath = csvOutputPath || path.join( this.scrapResultPath, "train.csv" );
 		this.jsonlOutputPathWithMeta = jsonlOutputPath.replace( ".jsonl", "_with_metadata.jsonl" );
 		this.csvOutputPathWithMeta = csvOutputPath.replace( ".csv", "_with_metadata.csv" );
+		this.userAgent = userAgent;
 		this.includeMetadata = includeMetadata;
 		this.metadataFields = new Set( metadataFields );
 		this.visited = new Set();
-		this.excludeList = new Set( excludeList );
+		this.excludeList = this.normalizeExcludeList( excludeList );
 		this.exactExcludeList = this.normalizeExcludeList( exactExcludeList );
 		this.allProcessedContent = [];
 	}
@@ -57,7 +59,11 @@ class WebScraper
 		this.visited.add( url );
 		try
 		{
-			const { data, headers } = await axios.get( url );
+			const { data, headers } = await axios.get( url, {
+				headers: {
+					"user-agent": this.userAgent
+				}
+			});
 			const dom = new JSDOM( data, { url });
 			const { document } = dom.window;
 
@@ -334,7 +340,7 @@ class WebScraper
 		};
 	}
 
-	normalizeExcludeList ( list )
+	normalizeExcludeList ( list = [] )
 	{
 		const normalizedSet = new Set();
 		for ( let i = 0; i < list.length; i++ )
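The WebScraper changes in this release add a configurable user agent (the constructor's new userAgent option defaults to a Firefox 134 string and is sent as the user-agent header on the axios.get page fetch shown above) and make the exclude lists optional, since both excludeList and exactExcludeList now pass through normalizeExcludeList, whose argument defaults to an empty array. A minimal usage sketch mirroring the config shape from example-usage.js; the target site, output paths, and user-agent string below are placeholders rather than values from the package:

const WebScraper = require( "./src/WebScraper" );

async function exampleSite ()
{
	const scraper = new WebScraper({
		baseURL: "https://example.com", // placeholder target site
		scrapResultPath: "./dataset/example/website",
		jsonlOutputPath: "./dataset/example/train.jsonl",
		textOutputPath: "./dataset/example/texts",
		csvOutputPath: "./dataset/example/train.csv",
		includeMetadata: true,
		metadataFields: ["title", "description", "author"],
		// excludeList / exactExcludeList omitted: both now default to empty
		userAgent: "example-research-bot/1.0" // overrides the bundled Firefox user agent
	});
	await scraper.start();
	return scraper;
}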