uk_bin_collection-0.117.0-py3-none-any.whl → uk_bin_collection-0.119.0-py3-none-any.whl

Files changed (20)
  1. uk_bin_collection/tests/input.json +75 -2
  2. uk_bin_collection/uk_bin_collection/councils/AberdeenCityCouncil.py +122 -0
  3. uk_bin_collection/uk_bin_collection/councils/BaberghDistrictCouncil.py +62 -0
  4. uk_bin_collection/uk_bin_collection/councils/BraintreeDistrictCouncil.py +70 -0
  5. uk_bin_collection/uk_bin_collection/councils/BurnleyBoroughCouncil.py +88 -0
  6. uk_bin_collection/uk_bin_collection/councils/CopelandBoroughCouncil.py +93 -0
  7. uk_bin_collection/uk_bin_collection/councils/CrawleyBoroughCouncil.py +91 -40
  8. uk_bin_collection/uk_bin_collection/councils/EdinburghCityCouncil.py +98 -0
  9. uk_bin_collection/uk_bin_collection/councils/ExeterCityCouncil.py +52 -0
  10. uk_bin_collection/uk_bin_collection/councils/MidSuffolkDistrictCouncil.py +62 -0
  11. uk_bin_collection/uk_bin_collection/councils/RotherDistrictCouncil.py +84 -0
  12. uk_bin_collection/uk_bin_collection/councils/SouthHamsDistrictCouncil.py +90 -0
  13. uk_bin_collection/uk_bin_collection/councils/StevenageBoroughCouncil.py +101 -0
  14. uk_bin_collection/uk_bin_collection/councils/ThanetDistrictCouncil.py +51 -0
  15. uk_bin_collection/uk_bin_collection/councils/WolverhamptonCityCouncil.py +57 -0
  16. {uk_bin_collection-0.117.0.dist-info → uk_bin_collection-0.119.0.dist-info}/METADATA +1 -1
  17. {uk_bin_collection-0.117.0.dist-info → uk_bin_collection-0.119.0.dist-info}/RECORD +20 -9
  18. {uk_bin_collection-0.117.0.dist-info → uk_bin_collection-0.119.0.dist-info}/LICENSE +0 -0
  19. {uk_bin_collection-0.117.0.dist-info → uk_bin_collection-0.119.0.dist-info}/WHEEL +0 -0
  20. {uk_bin_collection-0.117.0.dist-info → uk_bin_collection-0.119.0.dist-info}/entry_points.txt +0 -0
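Every council module added in this release follows the same contract, visible across the diffs below: subclass `AbstractGetBinDataClass`, implement `parse_data(page, **kwargs)`, and return a dict with a `"bins"` list. A minimal sketch of that shape (the literal bin type and date are placeholders, not values from this release):

```python
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass


class CouncilClass(AbstractGetBinDataClass):
    def parse_data(self, page: str, **kwargs) -> dict:
        # kwargs carries address identifiers such as "uprn", "paon", "postcode"
        return {
            "bins": [
                {"type": "Refuse", "collectionDate": "01/01/2025"},  # placeholder
            ]
        }
```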
@@ -1,4 +1,6 @@
-from bs4 import BeautifulSoup
+import time
+
+import requests
 from dateutil.relativedelta import relativedelta
 
 from uk_bin_collection.uk_bin_collection.common import *
@@ -19,43 +21,92 @@ class CouncilClass(AbstractGetBinDataClass):
         usrn = kwargs.get("paon")
         check_uprn(uprn)
         check_usrn(usrn)
+        bindata = {"bins": []}
+
+        SESSION_URL = "https://crawleybc-self.achieveservice.com/authapi/isauthenticated?uri=https%253A%252F%252Fcrawleybc-self.achieveservice.com%252Fen%252FAchieveForms%252F%253Fform_uri%253Dsandbox-publish%253A%252F%252FAF-Process-fb73f73e-e8f5-4441-9f83-8b5d04d889d6%252FAF-Stage-ec9ada91-d2d9-43bc-9730-597d15fc8108%252Fdefinition.json%2526redirectlink%253D%252Fen%2526cancelRedirectLink%253D%252Fen%2526noLoginPrompt%253D1%2526accept%253Dyes&hostname=crawleybc-self.achieveservice.com&withCredentials=true"
+
+        API_URL = "https://crawleybc-self.achieveservice.com/apibroker/"
+
+        currentdate = datetime.now().strftime("%d/%m/%Y")
+
+        data = {
+            "formValues": {
+                "Address": {
+                    "address": {
+                        "value": {
+                            "Address": {
+                                "usrn": {
+                                    "value": usrn,
+                                },
+                                "uprn": {
+                                    "value": uprn,
+                                },
+                            }
+                        },
+                    },
+                    "dayConverted": {
+                        "value": currentdate,
+                    },
+                    "getCollection": {
+                        "value": "true",
+                    },
+                    "getWorksheets": {
+                        "value": "false",
+                    },
+                },
+            },
+        }
+
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+            "User-Agent": "Mozilla/5.0",
+            "X-Requested-With": "XMLHttpRequest",
+            "Referer": "https://crawleybc-self.achieveservice.com/fillform/?iframe_id=fillform-frame-1&db_id=",
+        }
+        s = requests.session()
+        r = s.get(SESSION_URL)
+        r.raise_for_status()
+        session_data = r.json()
+        sid = session_data["auth-session"]
+        params = {
+            "api": "RunLookup",
+            "id": "5b4f0ec5f13f4",
+            "repeat_against": "",
+            "noRetry": "true",
+            "getOnlyTokens": "undefined",
+            "log_id": "",
+            "app_name": "AF-Renderer::Self",
+            # unix_timestamp
+            "_": str(int(time.time() * 1000)),
+            "sid": sid,
+        }
+
+        r = s.post(API_URL, json=data, headers=headers, params=params)
+        r.raise_for_status()
+
+        data = r.json()
+        rows_data = data["integration"]["transformed"]["rows_data"]["0"]
+        if not isinstance(rows_data, dict):
+            raise ValueError("Invalid data returned from API")
+
+        # Extract each service's relevant details for the bin schedule
+        for key, value in rows_data.items():
+            if key.endswith("DateNext"):
+                BinType = key.replace("DateNext", "Service")
+                for key2, value2 in rows_data.items():
+                    if key2 == BinType:
+                        BinType = value2
+                next_collection = datetime.strptime(value, "%A %d %B").replace(
+                    year=datetime.now().year
+                )
+                if datetime.now().month == 12 and next_collection.month == 1:
+                    next_collection = next_collection + relativedelta(years=1)
+
+                dict_data = {
+                    "type": BinType,
+                    "collectionDate": next_collection.strftime(date_format),
+                }
+                bindata["bins"].append(dict_data)
 
-        day = datetime.now().date().strftime("%d")
-        month = datetime.now().date().strftime("%m")
-        year = datetime.now().date().strftime("%Y")
-
-        api_url = (
-            f"https://my.crawley.gov.uk/appshost/firmstep/self/apps/custompage/waste?language=en&uprn={uprn}"
-            f"&usrn={usrn}&day={day}&month={month}&year={year}"
-        )
-        response = requests.get(api_url)
-
-        soup = BeautifulSoup(response.text, features="html.parser")
-        soup.prettify()
-
-        data = {"bins": []}
-
-        titles = [title.text for title in soup.select(".block-title")]
-        collection_tag = soup.body.find_all(
-            "div", {"class": "col-md-6 col-sm-6 col-xs-6"}, string="Next collection"
-        )
-        bin_index = 0
-        for tag in collection_tag:
-            for item in tag.next_elements:
-                if (
-                    str(item).startswith('<div class="date text-right text-grey">')
-                    and str(item) != ""
-                ):
-                    collection_date = datetime.strptime(item.text, "%A %d %B")
-                    next_collection = collection_date.replace(year=datetime.now().year)
-                    if datetime.now().month == 12 and next_collection.month == 1:
-                        next_collection = next_collection + relativedelta(years=1)
-
-                    dict_data = {
-                        "type": titles[bin_index].strip(),
-                        "collectionDate": next_collection.strftime(date_format),
-                    }
-                    data["bins"].append(dict_data)
-                    bin_index += 1
-                    break
-        return data
+        return bindata
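The rewritten Crawley scraper drops the old `my.crawley.gov.uk` HTML page in favour of the council's AchieveService API broker: fetch a session id from the `isauthenticated` endpoint, then POST the form payload to `RunLookup` and read `rows_data` from the JSON response. A condensed sketch of that handshake (endpoint and lookup id copied from the diff; the session URL's query string and the form payload are abbreviated here):

```python
import time

import requests

SESSION_URL = "https://crawleybc-self.achieveservice.com/authapi/isauthenticated?uri=..."  # full query string as in the diff
API_URL = "https://crawleybc-self.achieveservice.com/apibroker/"


def run_lookup(payload: dict) -> dict:
    s = requests.session()
    # Step 1: the isauthenticated endpoint hands back an anonymous session id.
    sid = s.get(SESSION_URL).json()["auth-session"]
    # Step 2: call RunLookup with the session id and a cache-busting timestamp.
    r = s.post(
        API_URL,
        json=payload,
        params={
            "api": "RunLookup",
            "id": "5b4f0ec5f13f4",  # lookup id from the diff
            "_": str(int(time.time() * 1000)),
            "sid": sid,
        },
    )
    r.raise_for_status()
    return r.json()["integration"]["transformed"]["rows_data"]["0"]
```

The `*DateNext` keys in `rows_data` carry dates without a year, which is why the new code pins `datetime.now().year` and rolls January dates forward by a year when run in December.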
@@ -0,0 +1,98 @@
+import re
+import time
+
+import requests
+from bs4 import BeautifulSoup
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import Select
+from selenium.webdriver.support.wait import WebDriverWait
+
+from uk_bin_collection.uk_bin_collection.common import *
+from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass
+
+
+# import the wonderful Beautiful Soup and the URL grabber
+class CouncilClass(AbstractGetBinDataClass):
+    """
+    Concrete classes have to implement all abstract operations of the
+    base class. They can also override some operations with a default
+    implementation.
+    """
+
+    def parse_data(self, page: str, **kwargs) -> dict:
+
+        collection_day = kwargs.get("paon")
+        collection_week = kwargs.get("postcode")
+        bindata = {"bins": []}
+
+        days_of_week = [
+            "Monday",
+            "Tuesday",
+            "Wednesday",
+            "Thursday",
+            "Friday",
+            "Saturday",
+            "Sunday",
+        ]
+
+        collection_weeks = ["Week 1", "Week 2"]
+        collection_week = collection_weeks.index(collection_week)
+
+        offset_days = days_of_week.index(collection_day)
+
+        if collection_week == 0:
+            recyclingstartDate = datetime(2024, 11, 4)
+            glassstartDate = datetime(2024, 11, 4)
+            refusestartDate = datetime(2024, 11, 11)
+        elif collection_week == 1:
+            recyclingstartDate = datetime(2024, 11, 11)
+            glassstartDate = datetime(2024, 11, 11)
+            refusestartDate = datetime(2024, 11, 4)
+
+        refuse_dates = get_dates_every_x_days(refusestartDate, 14, 28)
+        glass_dates = get_dates_every_x_days(glassstartDate, 14, 28)
+        recycling_dates = get_dates_every_x_days(recyclingstartDate, 14, 28)
+
+        for refuseDate in refuse_dates:
+
+            collection_date = (
+                datetime.strptime(refuseDate, "%d/%m/%Y") + timedelta(days=offset_days)
+            ).strftime("%d/%m/%Y")
+
+            dict_data = {
+                "type": "Grey Bin",
+                "collectionDate": collection_date,
+            }
+            bindata["bins"].append(dict_data)
+
+        for recyclingDate in recycling_dates:
+
+            collection_date = (
+                datetime.strptime(recyclingDate, "%d/%m/%Y")
+                + timedelta(days=offset_days)
+            ).strftime("%d/%m/%Y")
+
+            dict_data = {
+                "type": "Green Bin",
+                "collectionDate": collection_date,
+            }
+            bindata["bins"].append(dict_data)
+
+        for glassDate in glass_dates:
+
+            collection_date = (
+                datetime.strptime(glassDate, "%d/%m/%Y") + timedelta(days=offset_days)
+            ).strftime("%d/%m/%Y")
+
+            dict_data = {
+                "type": "Glass Box",
+                "collectionDate": collection_date,
+            }
+            bindata["bins"].append(dict_data)
+
+        bindata["bins"].sort(
+            key=lambda x: datetime.strptime(x.get("collectionDate"), "%d/%m/%Y")
+        )
+
+        return bindata
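Rather than scraping a site, this module computes the schedule locally from fixed anchor dates. `get_dates_every_x_days` comes from `common`; judging by how its output is fed straight into `strptime(..., "%d/%m/%Y")`, it appears to return formatted date strings at a fixed interval. A rough sketch of that assumed behaviour:

```python
from datetime import datetime, timedelta


def get_dates_every_x_days(start: datetime, step: int, count: int) -> list[str]:
    """Assumed behaviour of the common helper: 'count' dates,
    'step' days apart, formatted as %d/%m/%Y."""
    return [(start + timedelta(days=step * i)).strftime("%d/%m/%Y") for i in range(count)]


# e.g. the first three refuse dates for a Week 1 property:
print(get_dates_every_x_days(datetime(2024, 11, 11), 14, 3))
# ['11/11/2024', '25/11/2024', '09/12/2024']
```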
@@ -0,0 +1,52 @@
+import time
+
+import requests
+from bs4 import BeautifulSoup
+
+from uk_bin_collection.uk_bin_collection.common import *
+from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass
+
+
+# import the wonderful Beautiful Soup and the URL grabber
+class CouncilClass(AbstractGetBinDataClass):
+    """
+    Concrete classes have to implement all abstract operations of the
+    base class. They can also override some operations with a default
+    implementation.
+    """
+
+    def parse_data(self, page: str, **kwargs) -> dict:
+
+        user_uprn = kwargs.get("uprn")
+        check_uprn(user_uprn)
+        bindata = {"bins": []}
+
+        URI = f"https://exeter.gov.uk/repositories/hidden-pages/address-finder/?qsource=UPRN&qtype=bins&term={user_uprn}"
+
+        response = requests.get(URI)
+        response.raise_for_status()
+
+        data = response.json()
+
+        soup = BeautifulSoup(data[0]["Results"], "html.parser")
+        soup.prettify()
+
+        # Extract bin schedule
+        for section in soup.find_all("h2"):
+            bin_type = section.text.strip()
+            collection_date = section.find_next("h3").text.strip()
+
+            dict_data = {
+                "type": bin_type,
+                "collectionDate": datetime.strptime(
+                    remove_ordinal_indicator_from_date_string(collection_date),
+                    "%A, %d %B %Y",
+                ).strftime(date_format),
+            }
+            bindata["bins"].append(dict_data)
+
+        bindata["bins"].sort(
+            key=lambda x: datetime.strptime(x.get("collectionDate"), date_format)
+        )
+
+        return bindata
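Exeter's endpoint returns JSON whose `Results` field is an HTML fragment, so the scraper parses `h2`/`h3` pairs out of it. The dates carry ordinal suffixes ("29th November"), which `remove_ordinal_indicator_from_date_string` (from `common`) strips before parsing. A sketch of what that helper presumably does:

```python
import re
from datetime import datetime


def remove_ordinal_indicator_from_date_string(date_string: str) -> str:
    """Assumed behaviour of the common helper: drop 'st', 'nd',
    'rd', 'th' after day numbers so strptime can parse the date."""
    return re.sub(r"(\d{1,2})(st|nd|rd|th)", r"\1", date_string)


# e.g. the kind of text Exeter renders inside each <h3>:
print(datetime.strptime(
    remove_ordinal_indicator_from_date_string("Friday, 29th November 2024"),
    "%A, %d %B %Y",
))
```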
@@ -23,6 +23,7 @@ class CouncilClass(AbstractGetBinDataClass):
     def parse_data(self, page: str, **kwargs) -> dict:
 
         collection_day = kwargs.get("paon")
+        garden_collection_week = kwargs.get("postcode")
         bindata = {"bins": []}
 
         days_of_week = [
@@ -35,10 +36,14 @@ class CouncilClass(AbstractGetBinDataClass):
             "Sunday",
         ]
 
+        garden_week = ["Week 1", "Week 2"]
+
         refusestartDate = datetime(2024, 11, 11)
         recyclingstartDate = datetime(2024, 11, 4)
 
         offset_days = days_of_week.index(collection_day)
+        if garden_collection_week:
+            garden_collection = garden_week.index(garden_collection_week)
 
         refuse_dates = get_dates_every_x_days(refusestartDate, 14, 28)
         recycling_dates = get_dates_every_x_days(recyclingstartDate, 14, 28)
@@ -125,6 +130,63 @@ class CouncilClass(AbstractGetBinDataClass):
             }
             bindata["bins"].append(dict_data)
 
+        if garden_collection_week:
+            if garden_collection == 0:
+                gardenstartDate = datetime(2024, 11, 11)
+            elif garden_collection == 1:
+                gardenstartDate = datetime(2024, 11, 4)
+
+            garden_dates = get_dates_every_x_days(gardenstartDate, 14, 28)
+
+            garden_bank_holidays = [
+                ("23/12/2024", 1),
+                ("24/12/2024", 1),
+                ("25/12/2024", 1),
+                ("26/12/2024", 1),
+                ("27/12/2024", 1),
+                ("30/12/2024", 1),
+                ("31/12/2024", 1),
+                ("01/01/2025", 1),
+                ("02/01/2025", 1),
+                ("03/01/2025", 1),
+            ]
+
+            for gardenDate in garden_dates:
+
+                collection_date = (
+                    datetime.strptime(gardenDate, "%d/%m/%Y")
+                    + timedelta(days=offset_days)
+                ).strftime("%d/%m/%Y")
+
+                garden_holiday = next(
+                    (
+                        value
+                        for date, value in garden_bank_holidays
+                        if date == collection_date
+                    ),
+                    0,
+                )
+
+                if garden_holiday > 0:
+                    continue
+
+                holiday_offset = next(
+                    (value for date, value in bank_holidays if date == collection_date),
+                    0,
+                )
+
+                if holiday_offset > 0:
+                    collection_date = (
+                        datetime.strptime(collection_date, "%d/%m/%Y")
+                        + timedelta(days=holiday_offset)
+                    ).strftime("%d/%m/%Y")
+
+                dict_data = {
+                    "type": "Garden Bin",
+                    "collectionDate": collection_date,
+                }
+                bindata["bins"].append(dict_data)
+
         bindata["bins"].sort(
             key=lambda x: datetime.strptime(x.get("collectionDate"), "%d/%m/%Y")
         )
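The garden-waste block reuses one lookup pattern twice: `next()` over a generator with a default of 0, first against `garden_bank_holidays` to skip suspended dates, then against `bank_holidays` (a list defined elsewhere in the file, outside this hunk) to shift collections that fall on a bank holiday. The pattern in isolation, with illustrative values:

```python
from datetime import datetime, timedelta

# Same lookup pattern as the diff: find a (date, offset) pair, default to 0.
bank_holidays = [("25/12/2024", 2), ("01/01/2025", 1)]  # illustrative values

collection_date = "25/12/2024"
holiday_offset = next(
    (value for date, value in bank_holidays if date == collection_date), 0
)
if holiday_offset > 0:
    collection_date = (
        datetime.strptime(collection_date, "%d/%m/%Y") + timedelta(days=holiday_offset)
    ).strftime("%d/%m/%Y")
print(collection_date)  # 27/12/2024
```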
@@ -0,0 +1,84 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from dateutil.relativedelta import relativedelta
+
+from uk_bin_collection.uk_bin_collection.common import *
+from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass
+
+
+class CouncilClass(AbstractGetBinDataClass):
+    """
+    Concrete classes have to implement all abstract operations of the
+    base class. They can also override some operations with a default
+    implementation.
+    """
+
+    def parse_data(self, page: str, **kwargs) -> dict:
+        # Get and check UPRN
+        user_uprn = kwargs.get("uprn")
+        check_uprn(user_uprn)
+        bindata = {"bins": []}
+
+        uri = "https://www.rother.gov.uk/wp-admin/admin-ajax.php"
+        params = {
+            "action": "get_address_data",
+            "uprn": user_uprn,
+            "context": "full-page",
+        }
+
+        headers = {
+            "Content-Type": "application/x-www-form-urlencoded",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
+        }
+
+        # Send a POST request with form data and headers
+        r = requests.post(uri, data=params, headers=headers, verify=False)
+
+        result = r.json()
+
+        if result["success"]:
+            # Parse the HTML with BeautifulSoup
+            soup = BeautifulSoup(result["data"], "html.parser")
+            soup.prettify()
+
+            # print(soup)
+
+            # Find the div elements with class "bindays-item"
+            bin_days = soup.find_all("div", class_="bindays-item")
+
+            # Loop through each bin item and extract type and date
+            for bin_day in bin_days:
+                # Extract bin type from the <h3> tag
+                bin_type = bin_day.find("h3").get_text(strip=True).replace(":", "")
+
+                # Extract date (or check if it's a subscription link for Garden Waste)
+                date_span = bin_day.find("span", class_="find-my-nearest-bindays-date")
+                if date_span:
+                    if date_span.find("a"):
+                        # If there is a link, this is the Garden bin signup link
+                        continue
+                    else:
+                        # Otherwise, get the date text directly
+                        date = date_span.get_text(strip=True)
+                else:
+                    date = None
+
+                date = datetime.strptime(
+                    remove_ordinal_indicator_from_date_string(date),
+                    "%A %d %B",
+                ).replace(year=datetime.now().year)
+                if datetime.now().month == 12 and date.month == 1:
+                    date = date + relativedelta(years=1)
+
+                dict_data = {
+                    "type": bin_type,
+                    "collectionDate": date.strftime(date_format),
+                }
+                bindata["bins"].append(dict_data)
+
+        bindata["bins"].sort(
+            key=lambda x: datetime.strptime(x.get("collectionDate"), "%d/%m/%Y")
+        )
+        return bindata
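Note the `verify=False` on the Rother request: TLS certificate verification is disabled, so urllib3 will emit an `InsecureRequestWarning` on every call. If the warning needs suppressing, the usual idiom is:

```python
import requests
import urllib3

# Silence the warning that verify=False otherwise triggers on each request.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

r = requests.post(
    "https://www.rother.gov.uk/wp-admin/admin-ajax.php",
    data={"action": "get_address_data", "uprn": "0000000000", "context": "full-page"},  # placeholder UPRN
    verify=False,
)
```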
@@ -0,0 +1,90 @@
+from datetime import datetime
+
+import requests
+from bs4 import BeautifulSoup
+from dateutil.relativedelta import relativedelta
+
+from uk_bin_collection.uk_bin_collection.common import *
+from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass
+
+
+class CouncilClass(AbstractGetBinDataClass):
+    """
+    Concrete classes have to implement all abstract operations of the
+    base class. They can also override some operations with a default
+    implementation.
+    """
+
+    def parse_data(self, page: str, **kwargs) -> dict:
+        # Get and check UPRN
+        user_uprn = kwargs.get("uprn")
+        check_uprn(user_uprn)
+        bindata = {"bins": []}
+
+        uri = "https://waste.southhams.gov.uk/mycollections"
+
+        s = requests.session()
+        r = s.get(uri)
+        for cookie in r.cookies:
+            if cookie.name == "fcc_session_cookie":
+                fcc_session_token = cookie.value
+
+        uri = "https://waste.southhams.gov.uk/mycollections/getcollectiondetails"
+
+        params = {
+            "fcc_session_token": fcc_session_token,
+            "uprn": user_uprn,
+        }
+
+        headers = {
+            "Content-Type": "application/x-www-form-urlencoded",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
+            "Referer": "https://waste.southhams.gov.uk/mycollections",
+            "X-Requested-With": "XMLHttpRequest",
+        }
+
+        # Send a POST request with form data and headers
+        r = s.post(uri, data=params, headers=headers)
+
+        result = r.json()
+
+        for collection in result["binCollections"]["tile"]:
+
+            # Parse the HTML with BeautifulSoup
+            soup = BeautifulSoup(collection[0], "html.parser")
+            soup.prettify()
+
+            # Find all collectionDiv elements
+            collections = soup.find_all("div", class_="collectionDiv")
+
+            # Process each collectionDiv
+            for collection in collections:
+                # Extract the service name
+                service_name = collection.find("h3").text.strip()
+
+                # Extract collection frequency and day
+                details = collection.find("div", class_="detWrap").text.strip()
+
+                # Extract the next collection date
+                next_collection = details.split("Your next scheduled collection is ")[
+                    1
+                ].split(".")[0]
+
+                if next_collection.startswith("today"):
+                    next_collection = next_collection.split("today, ")[1]
+                elif next_collection.startswith("tomorrow"):
+                    next_collection = next_collection.split("tomorrow, ")[1]
+
+                dict_data = {
+                    "type": service_name,
+                    "collectionDate": datetime.strptime(
+                        next_collection, "%A, %d %B %Y"
+                    ).strftime(date_format),
+                }
+                bindata["bins"].append(dict_data)
+
+        bindata["bins"].sort(
+            key=lambda x: datetime.strptime(x.get("collectionDate"), "%d/%m/%Y")
+        )
+
+        return bindata
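South Hams bootstraps by lifting the `fcc_session_cookie` from the landing page and posting it back as `fcc_session_token`. The collection date then has to be dug out of prose, including an optional "today,"/"tomorrow," prefix; the extraction logic from the diff, pulled out into a standalone function for clarity:

```python
from datetime import datetime


def parse_next_collection(details: str) -> datetime:
    """Mirror of the parsing in the diff: pull the date out of the
    'Your next scheduled collection is ...' sentence, dropping the
    optional 'today,'/'tomorrow,' prefix."""
    next_collection = details.split("Your next scheduled collection is ")[1].split(".")[0]
    for prefix in ("today, ", "tomorrow, "):
        if next_collection.startswith(prefix):
            next_collection = next_collection.split(prefix)[1]
    return datetime.strptime(next_collection, "%A, %d %B %Y")


print(parse_next_collection(
    "Your next scheduled collection is tomorrow, Friday, 29 November 2024."
))
```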
@@ -0,0 +1,101 @@
+import time
+
+import requests
+from dateutil.relativedelta import relativedelta
+
+from uk_bin_collection.uk_bin_collection.common import *
+from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass
+
+
+# import the wonderful Beautiful Soup and the URL grabber
+class CouncilClass(AbstractGetBinDataClass):
+    """
+    Concrete classes have to implement all abstract operations of the
+    base class. They can also override some operations with a default
+    implementation.
+    """
+
+    def parse_data(self, page: str, **kwargs) -> dict:
+        # Make a BS4 object
+        uprn = kwargs.get("uprn")
+        check_uprn(uprn)
+        bindata = {"bins": []}
+
+        SESSION_URL = "https://stevenage-self.achieveservice.com/authapi/isauthenticated?uri=https%253A%252F%252Fstevenage-self.achieveservice.com%252Fservice%252Fmy_bin_collection_schedule&hostname=stevenage-self.achieveservice.com&withCredentials=true"
+        TOKEN_URL = "https://stevenage-self.achieveservice.com/apibroker/runLookup?id=5e55337a540d4"
+        API_URL = "https://stevenage-self.achieveservice.com/apibroker/runLookup"
+
+        data = {
+            "formValues": {
+                "Section 1": {
+                    "token": {"value": ""},
+                    "LLPGUPRN": {
+                        "value": uprn,
+                    },
+                    "MinimumDateLookAhead": {
+                        "value": time.strftime("%Y-%m-%d"),
+                    },
+                    "MaximumDateLookAhead": {
+                        "value": str(int(time.strftime("%Y")) + 1)
+                        + time.strftime("-%m-%d"),
+                    },
+                },
+            },
+        }
+
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+            "User-Agent": "Mozilla/5.0",
+            "X-Requested-With": "XMLHttpRequest",
+            "Referer": "https://stevenage-self.achieveservice.com/fillform/?iframe_id=fillform-frame-1&db_id=",
+        }
+        s = requests.session()
+        r = s.get(SESSION_URL)
+        r.raise_for_status()
+        session_data = r.json()
+        sid = session_data["auth-session"]
+
+        t = s.get(TOKEN_URL)
+        t.raise_for_status()
+        token_data = t.json()
+        data["formValues"]["Section 1"]["token"]["value"] = token_data["integration"][
+            "transformed"
+        ]["rows_data"]["0"]["token"]
+
+        params = {
+            "id": "64ba8cee353e6",
+            "repeat_against": "",
+            "noRetry": "false",
+            "getOnlyTokens": "undefined",
+            "log_id": "",
+            "app_name": "AF-Renderer::Self",
+            # unix_timestamp
+            "_": str(int(time.time() * 1000)),
+            "sid": sid,
+        }
+
+        r = s.post(API_URL, json=data, headers=headers, params=params)
+        r.raise_for_status()
+
+        data = r.json()
+        rows_data = data["integration"]["transformed"]["rows_data"]
+        if not isinstance(rows_data, dict):
+            raise ValueError("Invalid data returned from API")
+
+        for key in rows_data:
+            value = rows_data[key]
+            bin_type = value["bintype"].strip()
+
+            try:
+                date = datetime.strptime(value["collectiondate"], "%A %d %B %Y").date()
+            except ValueError:
+                continue
+
+            dict_data = {
+                "type": bin_type,
+                "collectionDate": date.strftime(date_format),
+            }
+            bindata["bins"].append(dict_data)
+
+        return bindata
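Stevenage talks to the same AchieveService broker as Crawley but adds a token step: a first `runLookup` call returns a one-time form token that must be injected into the payload before the schedule lookup will answer. The sequence, condensed (payload trimmed to the token field; the real request also sends the UPRN and a date window):

```python
import time

import requests

SESSION_URL = "https://stevenage-self.achieveservice.com/authapi/isauthenticated?uri=https%253A%252F%252Fstevenage-self.achieveservice.com%252Fservice%252Fmy_bin_collection_schedule&hostname=stevenage-self.achieveservice.com&withCredentials=true"
TOKEN_URL = "https://stevenage-self.achieveservice.com/apibroker/runLookup?id=5e55337a540d4"
API_URL = "https://stevenage-self.achieveservice.com/apibroker/runLookup"

s = requests.session()
sid = s.get(SESSION_URL).json()["auth-session"]

# First lookup: returns the one-time form token.
token = s.get(TOKEN_URL).json()["integration"]["transformed"]["rows_data"]["0"]["token"]

# Second lookup: the schedule query, with the token injected into the payload.
payload = {"formValues": {"Section 1": {"token": {"value": token}}}}  # trimmed
r = s.post(
    API_URL,
    json=payload,
    params={"id": "64ba8cee353e6", "sid": sid, "_": str(int(time.time() * 1000))},
)
r.raise_for_status()
```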