warn-scraper 1.2.57-py3-none-any.whl → 1.2.59-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
warn/scrapers/hi.py CHANGED
@@ -1,6 +1,7 @@
 import datetime
 import logging
 from pathlib import Path
+from time import sleep
 from urllib.parse import quote
 
 from bs4 import BeautifulSoup
@@ -29,23 +30,34 @@ def scrape(
     cache_dir -- the Path where results can be cached (default WARN_CACHE_DIR)
     Returns: the Path where the file is written
     """
-    cacheprefix = "https://webcache.googleusercontent.com/search?q=cache%3A"  # Use Google Cache, per #600
+    # Google Cache is a backup if the state re-implements its JS-enabled browser equivalent
+    usegooglecache = False
+    cacheprefix = "https://webcache.googleusercontent.com/search?q=cache%3A"
 
-    firstpage = utils.get_url(cacheprefix + quote("https://labor.hawaii.gov/wdc/real-time-warn-updates/"))
+    firstpageurl = "https://labor.hawaii.gov/wdc/real-time-warn-updates/"
+    if usegooglecache:
+        firstpageurl = cacheprefix + quote(firstpageurl)
+
+    firstpage = utils.get_url(firstpageurl)
     soup = BeautifulSoup(firstpage.text, features="html5lib")
     pagesection = soup.select("div.primary-content")[0]
     subpageurls = []
     for atag in pagesection.find_all("a"):
         href = atag["href"]
         if href.endswith("/"):
-            href = href # [:-1]
-        subpageurls.append(cacheprefix + quote(href))
+            href = href  # [:-1]
+        subpageurl = href
+        if usegooglecache:
+            subpageurl = cacheprefix + quote(subpageurl)
+        subpageurls.append(subpageurl)
 
+    masterlist = []
     headers = ["Company", "Date", "PDF url", "location", "jobs"]
-    data = [headers]
+    # data = [headers]
     # lastdateseen = "2099-12-31"
 
     for subpageurl in reversed(subpageurls):
+        sleep(2)
         # Conditionally here, we want to check and see if we have the old cached files, or if the year is current or previous.
         # Only need to download if it's current or previous year.
         # But do we care enough to implement right now?
@@ -53,47 +65,67 @@ def scrape(
         logger.debug(f"Parsing page {subpageurl}")
         page = utils.get_url(subpageurl)
         soup = BeautifulSoup(page.text, features="html5lib")
+        if subpageurl.endswith("/"):
+            subpageurl = subpageurl[:-1]  # Trim off the final slash, if there is one
         pageyear = subpageurl.split("/")[-1][:4]
-        tags = soup.select("p a[href*=pdf]")
-        p_tags = [i.parent.get_text().replace("\xa0", " ").split("\n") for i in tags]
-        clean_p_tags = [j for i in p_tags for j in i]
 
-        dates = [k.split("–")[0].strip() for k in clean_p_tags]
-        for i in range(len(dates)):
+        # There are at least two formats for Hawaii. In some years, each individual layoff is in a paragraph tag.
+        # In others, all the layoffs are grouped under a single paragraph tag, separated by <br>,
+        # which BeautifulSoup converts to a <br/>.
+        # But the call to parent also repeats a bunch of entries, so we need to ensure they're not duplicated.
+        # In more recent years, finding the parent of the "p a" selection finds essentially the row of data.
+        # In the older years, the parent is ... all the rows of data, which gets repeated.
+        # So take each chunk of data, find the parent, do some quality checks, clean up the text,
+        # and don't engage with duplicates.
+
+        selection = soup.select("p a[href*=pdf]")
+        rows = []
+        for child in selection:
+            parent = child.parent
+            for subitem in parent.prettify().split("<br/>"):
+                if len(subitem.strip()) > 5 and ".pdf" in subitem:
+                    subitem = subitem.replace("\xa0", " ").replace("\n", "").strip()
+                    row = BeautifulSoup(subitem, features="html5lib")
+                    if row not in rows:
+                        rows.append(row)
+
+        for row in rows:
+            line: dict = {}
+            for item in headers:
+                line[item] = None
+            graftext = row.get_text().strip()
+            tempdate = graftext
+
+            # Check that it's not an amendment and doesn't have the 3/17/2022 date format.
+            # Most dates should look like "March 17, 2022".
+            if pageyear in tempdate and f"/{pageyear}" not in tempdate:
+                try:
+                    tempdate = (
+                        graftext.strip().split(pageyear)[0].strip() + f" {pageyear}"
+                    )
+                except ValueError:
+                    print(f"Date conversion failed on row: {row}")
+
+            line["Date"] = tempdate
+
             try:
-                tempdate = dates[i].split(pageyear)[0].strip() + f" {pageyear}"
                 parsed_date = datetime.datetime.strptime(
                     tempdate, "%B %d, %Y"
                 ).strftime("%Y-%m-%d")
-                dates[i] = parsed_date
-                # lastdateseen = parsed_date
-
-                # Disabling amendment automation to shift fixes into warn-transformer instead.
-                # If this needs to come back, uncomment the lastseendate references
-                # then rebuild the below section as an else
+                line["Date"] = parsed_date
             except ValueError:
-                logger.debug(f"Date error: {dates[i]}, leaving intact")
-                # if "*" in dates[i]:
-                #     logger.debug(
-                #         f"Date error: {dates[i]} as apparent amendment; saving as {lastdateseen}"
-                #     )
-                #     dates[i] = lastdateseen
-                # else:
-
-        for i in range(len(tags)):
-            row = []
-            url = tags[i].get("href")
-            row.append(tags[i].get_text())
-
-            row.append(dates[i])
-
-            row.append(url)
-            row.append(None)  # location
-            row.append(None)  # jobs
-            data.append(row)
+                logger.debug(f"Date error: '{tempdate}', leaving intact")
+
+            line["PDF url"] = row.select("a")[0].get("href")
+            line["Company"] = row.select("a")[0].get_text().strip()
+            masterlist.append(line)
 
+    if len(masterlist) == 0:
+        logger.error(
+            "No data scraped -- anti-scraping mechanism may be back in play -- try Google Cache?"
+        )
     output_csv = data_dir / "hi.csv"
-    utils.write_rows_to_csv(output_csv, data)
+    utils.write_dict_rows_to_csv(output_csv, headers, masterlist)
     return output_csv
 
 
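The Google Cache fetch that 1.2.57 used unconditionally is now kept behind a usegooglecache flag that defaults to False, so pages are fetched directly from labor.hawaii.gov. If the flag is flipped back on, the cache URL is built by percent-encoding the target URL and appending it to the cache prefix. A minimal standalone sketch of that URL construction, using the same prefix and page URL as the scraper:

from urllib.parse import quote

# How the scraper builds a Google Cache URL when usegooglecache is True.
cacheprefix = "https://webcache.googleusercontent.com/search?q=cache%3A"
url = "https://labor.hawaii.gov/wdc/real-time-warn-updates/"
print(cacheprefix + quote(url))
# https://webcache.googleusercontent.com/search?q=cache%3Ahttps%3A//labor.hawaii.gov/wdc/real-time-warn-updates/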
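The core of the new parsing logic is the <br/>-splitting loop: each PDF link's parent paragraph is prettified, split on <br/>, and each chunk containing a PDF link is re-parsed into its own BeautifulSoup row, with duplicates dropped (in older page layouts every link shares the same parent paragraph, so the same chunks come around once per link). A minimal, self-contained sketch of that approach, run against a made-up HTML snippet in the older single-paragraph layout; the company names and PDF paths are illustrative only:

from bs4 import BeautifulSoup

# Made-up example of the older Hawaii layout: one <p>, one layoff per <br>.
html = """
<div class="primary-content"><p>
March 17, 2022 – <a href="/files/acme.pdf">Acme Corp</a><br>
April 2, 2022 – <a href="/files/widgets.pdf">Widgets LLC</a>
</p></div>
"""

soup = BeautifulSoup(html, features="html5lib")
rows = []
for child in soup.select("p a[href*=pdf]"):
    parent = child.parent  # in this layout: the whole paragraph, once per link
    for subitem in parent.prettify().split("<br/>"):
        if len(subitem.strip()) > 5 and ".pdf" in subitem:
            subitem = subitem.replace("\xa0", " ").replace("\n", "").strip()
            row = BeautifulSoup(subitem, features="html5lib")
            if row not in rows:  # drop the repeats produced by shared parents
                rows.append(row)

for row in rows:
    print(row.select("a")[0].get("href"), "|", row.get_text().strip())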
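Dates are then normalized per row: when the row text contains the page's year in long form (and not a 3/17/2022-style slash date), everything after the year is trimmed off and the remainder is re-parsed into ISO format; anything that will not parse is left intact. A small sketch with illustrative row text:

import datetime

pageyear = "2022"  # taken from the sub-page URL in the scraper
for graftext in ["March 17, 2022 – Acme Corp", "3/17/2022 – Widgets LLC (amendment)"]:
    tempdate = graftext
    # Long-form dates get trimmed to "Month day, year"; slash dates are skipped.
    if pageyear in tempdate and f"/{pageyear}" not in tempdate:
        tempdate = graftext.split(pageyear)[0].strip() + f" {pageyear}"
    try:
        tempdate = datetime.datetime.strptime(tempdate, "%B %d, %Y").strftime("%Y-%m-%d")
    except ValueError:
        pass  # leave the raw text intact, as the scraper does
    print(tempdate)
# -> 2022-03-17
# -> 3/17/2022 – Widgets LLC (amendment)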
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: warn-scraper
-Version: 1.2.57
+Version: 1.2.59
 Summary: Command-line interface for downloading WARN Act notices of qualified plant closings and mass layoffs from state government websites
 Home-page: https://github.com/biglocalnews/warn-scraper
 Author: Big Local News
@@ -36,7 +36,7 @@ warn/scrapers/dc.py,sha256=kAWmERzEIOtGHla9tn8hK0NfP4B-aP4jknNGUiPw4C4,4493
 warn/scrapers/de.py,sha256=yMpCFAAlIO2f4DVUQpPKKLzm52_Zpn9IuPPFBNX1pjQ,1386
 warn/scrapers/fl.py,sha256=mHymxjwNGtYXRxAwjUSIG1qeSp4Y_zzr4XDxPz9LAfY,9560
 warn/scrapers/ga.py,sha256=EuqBrMlBojH6eXOHisNqJAQcsnb8FPHDwWomNopw9Ys,7285
-warn/scrapers/hi.py,sha256=IrwgUMNPqsHExiLZ8dFM25am7KTtVRrLDjIesNtJIsk,3736
+warn/scrapers/hi.py,sha256=vpX-F82e_CYhxY7EAPLTMXG9cLCmPAprdDfsx4GziNo,5226
 warn/scrapers/ia.py,sha256=zOncaA9M0d6paT4pB7UU_4D_yxUgeUiGRcnpKi9DsRA,1999
 warn/scrapers/id.py,sha256=rRkE9g9ZOL7JgTdIz46kyGOeetzSla3e1Xr6gJ1v_74,5443
 warn/scrapers/il.py,sha256=sygdvsNuB_Gvu3o_HidtpSP4FLz0szKb1zEHqGxVtlI,1563
@@ -66,9 +66,9 @@ warn/scrapers/va.py,sha256=DDuR4_2Jpaxg9nVmuM6PAR8v8xz3VgxTBG5sWJgz2q0,1582
 warn/scrapers/vt.py,sha256=d-bo4WK2hkrk4BhCCmLpEovcoZltlvdIUB6O0uaMx5A,1186
 warn/scrapers/wa.py,sha256=Il3RmJpKr7SbwUBxHxlhEFLoxy7zSiduyo8F2EddB2Y,4021
 warn/scrapers/wi.py,sha256=ClEzXkwZbop0W4fkQgsb5oHAPUrb4luUPGV-jOKwkcg,4855
-warn_scraper-1.2.57.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-warn_scraper-1.2.57.dist-info/METADATA,sha256=Z__kggmqalQaYaWaynUtNFXqNiBW9X8u7SDh0Y3qjkI,2025
-warn_scraper-1.2.57.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-warn_scraper-1.2.57.dist-info/entry_points.txt,sha256=poh_oSweObGlBSs1_2qZmnTodlOYD0KfO7-h7W2UQIw,47
-warn_scraper-1.2.57.dist-info/top_level.txt,sha256=gOhHgNEkrUvajlzoKkVOo-TlQht9MoXnKOErjzqLGHo,11
-warn_scraper-1.2.57.dist-info/RECORD,,
+warn_scraper-1.2.59.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+warn_scraper-1.2.59.dist-info/METADATA,sha256=WspYx_YFC0ojeHKwqV2SXwIAVzNGay3_ChH2j35sCA8,2025
+warn_scraper-1.2.59.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+warn_scraper-1.2.59.dist-info/entry_points.txt,sha256=poh_oSweObGlBSs1_2qZmnTodlOYD0KfO7-h7W2UQIw,47
+warn_scraper-1.2.59.dist-info/top_level.txt,sha256=gOhHgNEkrUvajlzoKkVOo-TlQht9MoXnKOErjzqLGHo,11
+warn_scraper-1.2.59.dist-info/RECORD,,