cs2tracker 2.1.1-py3-none-any.whl → 2.1.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cs2tracker might be problematic.
- cs2tracker/__main__.py +6 -1
- cs2tracker/_version.py +2 -2
- cs2tracker/application.py +52 -57
- cs2tracker/constants.py +173 -172
- cs2tracker/data/config.ini +12 -12
- cs2tracker/main.py +11 -4
- cs2tracker/scraper.py +333 -282
- {cs2tracker-2.1.1.dist-info → cs2tracker-2.1.3.dist-info}/METADATA +12 -5
- cs2tracker-2.1.3.dist-info/RECORD +14 -0
- cs2tracker-2.1.1.dist-info/RECORD +0 -14
- {cs2tracker-2.1.1.dist-info → cs2tracker-2.1.3.dist-info}/WHEEL +0 -0
- {cs2tracker-2.1.1.dist-info → cs2tracker-2.1.3.dist-info}/entry_points.txt +0 -0
- {cs2tracker-2.1.1.dist-info → cs2tracker-2.1.3.dist-info}/licenses/LICENSE.md +0 -0
- {cs2tracker-2.1.1.dist-info → cs2tracker-2.1.3.dist-info}/top_level.txt +0 -0
cs2tracker/scraper.py
CHANGED
@@ -1,338 +1,389 @@
-import configparser
 import csv
-import datetime
 import os
+import sys
 import time
+from configparser import ConfigParser
+from datetime import datetime
+from subprocess import DEVNULL, call
 
-import requests
 from bs4 import BeautifulSoup
+from bs4.element import Tag
 from currency_converter import CurrencyConverter
+from requests import RequestException, Session
 from requests.adapters import HTTPAdapter, Retry
 from rich.console import Console
-from tenacity import retry, stop_after_attempt
+from tenacity import RetryError, retry, stop_after_attempt
 
-from .constants import (
-    [... removed code not recoverable from this diff view ...]
-    CAPSULE_NAMES_GENERIC,
-    CAPSULE_PAGES,
+from cs2tracker.constants import (
+    BATCH_FILE,
+    CAPSULE_INFO,
     CASE_HREFS,
-    CASE_NAMES,
     CASE_PAGES,
     CONFIG_FILE,
     OUTPUT_FILE,
+    PROJECT_DIR,
+    PYTHON_EXECUTABLE,
 )
 
 MAX_LINE_LEN = 72
+SEPARATOR = "-"
+PRICE_INFO = "Owned: {} Steam market price: ${} Total: ${}\n"
+BACKGROUND_TASK_NAME = "CS2Tracker Daily Calculation"
+BACKGROUND_TASK_TIME = "12:00"
 
 
 class Scraper:
     def __init__(self):
-        [... removed code not recoverable from this diff view ...]
-        self.session = requests.Session()
+        """Initialize the Scraper class."""
+        self.console = Console()
+        self.parse_config()
+        self._start_session()
+
+        self.usd_total = 0
+        self.eur_total = 0
+
+    def parse_config(self):
+        """Parse the configuration file to read settings and user-owned items."""
+        self.config = ConfigParser()
+        self.config.read(CONFIG_FILE)
+
+    def _start_session(self):
+        """Start a requests session with custom headers and retry logic."""
+        self.session = Session()
         self.session.headers.update(
             {
                 "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
             }
         )
-        retries = Retry(
-            total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504, 520]
-        )
+        retries = Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504, 520])
         self.session.mount("http://", HTTPAdapter(max_retries=retries))
         self.session.mount("https://", HTTPAdapter(max_retries=retries))
 
-        self.console = Console()
-
-        config = self.parse_config()
-        self.set_config(config)
-
     def scrape_prices(self):
-        [... removed code not recoverable from this diff view ...]
-            elif "stockholm" in capsule_page_url:
-                capsule_name = "Stockholm"
-                capsule_quantities = self.stockholm_quantities
-                capsule_hrefs = CAPSULE_HREFS[3:8]
-                capsule_names_generic = CAPSULE_NAMES_GENERIC[0:4] + [
-                    CAPSULE_NAMES_GENERIC[-1]
-                ]
-            elif "antwerp" in capsule_page_url:
-                capsule_name = "Antwerp"
-                capsule_quantities = self.antwerp_quantities
-                capsule_hrefs = CAPSULE_HREFS[8:15]
-                capsule_names_generic = CAPSULE_NAMES_GENERIC[0:7]
-            elif "rio" in capsule_page_url:
-                capsule_name = "Rio"
-                capsule_quantities = self.rio_quantities
-                capsule_hrefs = CAPSULE_HREFS[15:22]
-                capsule_names_generic = CAPSULE_NAMES_GENERIC[0:7]
-            elif "paris" in capsule_page_url:
-                capsule_name = "Paris"
-                capsule_quantities = self.paris_quantities
-                capsule_hrefs = CAPSULE_HREFS[22:29]
-                capsule_names_generic = CAPSULE_NAMES_GENERIC[0:7]
-            elif "copenhagen" in capsule_page_url:
-                capsule_name = "Copenhagen"
-                capsule_quantities = self.copenhagen_quantities
-                capsule_hrefs = CAPSULE_HREFS[29:36]
-                capsule_names_generic = CAPSULE_NAMES_GENERIC[0:7]
-            elif "shanghai" in capsule_page_url:
-                capsule_name = "Shanghai"
-                capsule_quantities = self.shanghai_quantities
-                capsule_hrefs = CAPSULE_HREFS[36:43]
-                capsule_names_generic = CAPSULE_NAMES_GENERIC[0:7]
-            elif "austin" in capsule_page_url:
-                capsule_name = "Austin"
-                capsule_quantities = self.austin_quantities
-                capsule_hrefs = CAPSULE_HREFS[43:50]
-                capsule_names_generic = CAPSULE_NAMES_GENERIC[0:7]
-
-            self._scrape_prices_capsule(
-                capsule_page_url,
-                capsule_hrefs,
-                capsule_name,
-                capsule_names_generic,
-                capsule_quantities,
+        """Scrape prices for capsules and cases, calculate totals in USD and EUR, and
+        print/save the results.
+        """
+        capsule_usd_total = 0
+        try:
+            capsule_usd_total = self.scrape_capsule_section_prices()
+        except (RequestException, AttributeError, RetryError, ValueError):
+            self.console.print(
+                "[bold red][!] Failed to scrape capsule prices. (Consider using proxies to prevent rate limiting)\n"
             )
 
-        [... removed code not recoverable from this diff view ...]
+        case_usd_total = 0
+        try:
+            case_usd_total = self._scrape_case_prices()
+        except (RequestException, AttributeError, RetryError, ValueError):
+            self.console.print(
+                "[bold red][!] Failed to scrape case prices. (Consider using proxies to prevent rate limiting)\n"
+            )
 
-        [... removed code not recoverable from this diff view ...]
-        self.console.print(f"${self.total_price:.2f}")
+        self.usd_total += capsule_usd_total
+        self.usd_total += case_usd_total
+        self.eur_total = CurrencyConverter().convert(self.usd_total, "USD", "EUR")
 
-        [... removed code not recoverable from this diff view ...]
-        )
-        eur_string = "EUR Total".center(MAX_LINE_LEN, "-")
-        self.console.print(f"[bold green]{eur_string}")
-        self.console.print(f"€{self.total_price_euro:.2f}")
-        end_string = "-" * MAX_LINE_LEN
-        self.console.print(f"[bold green]{end_string}\n")
+        self._print_total()
+        self._save_price_log()
 
-        [... removed code not recoverable from this diff view ...]
-        date = now.strftime("%Y-%m-%d")
+        # Reset totals for next run
+        self.usd_total, self.eur_total = 0, 0
 
-        [... removed code not recoverable from this diff view ...]
+    def _print_total(self):
+        """Print the total prices in USD and EUR, formatted with titles and
+        separators.
+        """
+        usd_title = "USD Total".center(MAX_LINE_LEN, SEPARATOR)
+        self.console.print(f"[bold green]{usd_title}")
+        self.console.print(f"${self.usd_total:.2f}")
 
-        [... removed code not recoverable from this diff view ...]
-            for row in reader:
-                last_row = row
-            if last_row:
-                last_date_str = last_row[0][:10]
-            else:
-                last_date_str = ""
+        eur_title = "EUR Total".center(MAX_LINE_LEN, SEPARATOR)
+        self.console.print(f"[bold green]{eur_title}")
+        self.console.print(f"€{self.eur_total:.2f}")
 
-        [... removed code not recoverable from this diff view ...]
-        total = f"{self.total_price:.2f}$"
-        total_euro = f"{self.total_price_euro:.2f}€"
-        with open(OUTPUT_FILE, "a", newline="", encoding="utf-8") as csvfile:
-            writer = csv.writer(csvfile)
-            writer.writerow([today, total])
-            writer.writerow([today, total_euro])
+        end_string = SEPARATOR * MAX_LINE_LEN
+        self.console.print(f"[bold green]{end_string}\n")
 
-        [... removed code not recoverable from this diff view ...]
+    def _save_price_log(self):
+        """
+        Save the current date and total prices in USD and EUR to a CSV file.
 
-        [... removed code not recoverable from this diff view ...]
+        This will append a new entry to the output file if no entry has been made for
+        today.
+        """
+        if not os.path.isfile(OUTPUT_FILE):
+            open(OUTPUT_FILE, "w", encoding="utf-8").close()
 
-        [... removed code not recoverable from this diff view ...]
-                )
-        [... removed code not recoverable from this diff view ...]
-                    int(config.get("Copenhagen", config_capsule_name))
-                )
-            elif "Shanghai" in capsule_name:
-                self.shanghai_quantities.append(
-                    int(config.get("Shanghai", config_capsule_name))
-                )
-            elif "Austin" in capsule_name:
-                self.austin_quantities.append(
-                    int(config.get("Austin", config_capsule_name))
-                )
-
-        for case_name in CASE_NAMES:
-            config_case_name = case_name.replace(" ", "_")
-            self.case_quantities.append(int(config.get("Cases", config_case_name)))
+        with open(OUTPUT_FILE, "r", encoding="utf-8") as price_logs:
+            price_logs_reader = csv.reader(price_logs)
+            last_log_date = ""
+            for row in price_logs_reader:
+                last_log_date, _ = row
+
+        today = datetime.now().strftime("%Y-%m-%d")
+        if last_log_date != today:
+            with open(OUTPUT_FILE, "a", newline="", encoding="utf-8") as price_logs:
+                price_logs_writer = csv.writer(price_logs)
+                price_logs_writer.writerow([today, f"{self.usd_total:.2f}$"])
+                price_logs_writer.writerow([today, f"{self.eur_total:.2f}€"])
+
+    def read_price_log(self):
+        """
+        Parse the output file to extract dates, dollar prices, and euro prices. This
+        data is used for drawing the plot of past prices.
+
+        :return: A tuple containing three lists: dates, dollar prices, and euro prices.
+        """
+        if not os.path.isfile(OUTPUT_FILE):
+            open(OUTPUT_FILE, "w", encoding="utf-8").close()
+
+        dates, dollars, euros = [], [], []
+        with open(OUTPUT_FILE, "r", encoding="utf-8") as price_logs:
+            price_logs_reader = csv.reader(price_logs)
+            for row in price_logs_reader:
+                date, price_with_currency = row
+                date = datetime.strptime(date, "%Y-%m-%d")
+                price = float(price_with_currency.rstrip("$€"))
+                if price_with_currency.endswith("€"):
+                    euros.append(price)
+                else:
+                    dollars.append(price)
+                    # Only append every second date since the dates are the same for euros and dollars
+                    # and we want the length of dates to match the lengths of dollars and euros
+                    dates.append(date)
+
+        return dates, dollars, euros
 
     @retry(stop=stop_after_attempt(10))
     def _get_page(self, url):
-        [... removed code not recoverable from this diff view ...]
+        """
+        Get the page content from the given URL, using a proxy if configured. If the
+        request fails, it will retry up to 10 times.
+
+        :param url: The URL to fetch the page from.
+        :return: The HTTP response object containing the page content.
+        :raises RequestException: If the request fails.
+        :raises RetryError: If the retry limit is reached.
+        """
+        use_proxy = self.config.getboolean("Settings", "Use_Proxy", fallback=False)
+        api_key = self.config.get("Settings", "API_Key", fallback=None)
+        if use_proxy and api_key:
             page = self.session.get(
                 url=url,
                 proxies={
-                    "http": f"http://{
-                    "https": f"http://{
+                    "http": f"http://{api_key}:@smartproxy.crawlbase.com:8012",
+                    "https": f"http://{api_key}:@smartproxy.crawlbase.com:8012",
                 },
                 verify=False,
             )
         else:
            page = self.session.get(url)
 
+        if not page.ok or not page.content:
+            status = page.status_code
+            self.console.print(f"[bold red][!] Failed to load page ({status}). Retrying...\n")
+            raise RequestException(f"Failed to load page: {url}")
+
         return page
 
-    [... removed code not recoverable from this diff view ...]
+    def _parse_capsule_price(self, capsule_page, capsule_href):
+        """
+        Parse the price of a capsule from the given page and href.
+
+        :param capsule_page: The HTTP response object containing the capsule page
+            content.
+        :param capsule_href: The href of the capsule listing to find the price for.
+        :return: The price of the capsule as a float.
+        :raises ValueError: If the capsule listing or price span cannot be found.
+        """
+        capsule_soup = BeautifulSoup(capsule_page.content, "html.parser")
+        capsule_listing = capsule_soup.find("a", attrs={"href": f"{capsule_href}"})
+        if not isinstance(capsule_listing, Tag):
+            raise ValueError(f"Failed to find capsule listing: {capsule_href}")
+
+        price_span = capsule_listing.find("span", attrs={"class": "normal_price"})
+        if not isinstance(price_span, Tag):
+            raise ValueError(f"Failed to find price span in capsule listing: {capsule_href}")
+
+        price_str = price_span.text.split()[2]
+        price = float(price_str.replace("$", ""))
+
+        return price
+
+    def _scrape_capsule_prices(
         self,
-        [... removed code not recoverable from this diff view ...]
-        capsule_name,
-        capsule_names_generic,
-        capsule_quantities,
+        capsule_section,
+        capsule_info,
     ):
-        [... removed code not recoverable from this diff view ...]
+        """
+        Scrape prices for a specific capsule section, printing the details to the
+        console.
+
+        :param capsule_section: The section name in the config for the capsule.
+        :param capsule_info: A dictionary containing information about the capsule page,
+            hrefs, and names.
+        """
+        capsule_title = capsule_section.center(MAX_LINE_LEN, SEPARATOR)
+        self.console.print(f"[bold magenta]{capsule_title}")
+
+        capsule_usd_total = 0
+        capsule_page = self._get_page(capsule_info["page"])
+        for capsule_name, capsule_href in zip(capsule_info["names"], capsule_info["items"]):
+            config_capsule_name = capsule_name.replace(" ", "_")
+            owned = self.config.getint(capsule_section, config_capsule_name, fallback=0)
+            if owned == 0:
+                continue
+
+            price_usd = self._parse_capsule_price(capsule_page, capsule_href)
+            price_usd_owned = round(float(owned * price_usd), 2)
+
+            self.console.print(f"[bold deep_sky_blue4]{capsule_name}")
+            self.console.print(PRICE_INFO.format(owned, price_usd, price_usd_owned))
+            capsule_usd_total += price_usd_owned
+
+        return capsule_usd_total
+
+    def scrape_capsule_section_prices(self):
+        """Scrape prices for all capsule sections defined in the configuration."""
+        capsule_usd_total = 0
+        for capsule_section, capsule_info in CAPSULE_INFO.items():
+            # Only scrape capsule sections where the user owns at least one item
+            if any(int(owned) > 0 for _, owned in self.config.items(capsule_section)):
+                capsule_usd_total += self._scrape_capsule_prices(capsule_section, capsule_info)
+
+        return capsule_usd_total
+
+    def _parse_case_price(self, case_page, case_href):
+        """
+        Parse the price of a case from the given page and href.
+
+        :param case_page: The HTTP response object containing the case page content.
+        :param case_href: The href of the case listing to find the price for.
+        :return: The price of the case as a float.
+        :raises ValueError: If the case listing or price span cannot be found.
+        """
+        case_soup = BeautifulSoup(case_page.content, "html.parser")
+        case_listing = case_soup.find("a", attrs={"href": case_href})
+        if not isinstance(case_listing, Tag):
+            raise ValueError(f"Failed to find case listing: {case_href}")
+
+        price_class = case_listing.find("span", attrs={"class": "normal_price"})
+        if not isinstance(price_class, Tag):
+            raise ValueError(f"Failed to find price class in case listing: {case_href}")
+
+        price_str = price_class.text.split()[2]
+        price = float(price_str.replace("$", ""))
+
+        return price
+
+    def _scrape_case_prices(self):
+        """
+        Scrape prices for all cases defined in the configuration.
+
+        For each case, it prints the case name, owned count, price per item, and total
+        price for owned items.
+        """
+        case_usd_total = 0
+        for case_index, (config_case_name, owned) in enumerate(self.config.items("Cases")):
+            if int(owned) == 0:
+                continue
+
+            case_name = config_case_name.replace("_", " ").title()
+            case_title = case_name.center(MAX_LINE_LEN, SEPARATOR)
+            self.console.print(f"[bold magenta]{case_title}")
+
+            case_page = self._get_page(CASE_PAGES[case_index])
+            price_usd = self._parse_case_price(case_page, CASE_HREFS[case_index])
+            price_usd_owned = round(float(int(owned) * price_usd), 2)
+
+            self.console.print(PRICE_INFO.format(owned, price_usd, price_usd_owned))
+            case_usd_total += price_usd_owned
+
+            if not self.config.getboolean("Settings", "Use_Proxy", fallback=False):
+                time.sleep(1)
+
+        return case_usd_total
+
+    def identify_background_task(self):
+        """
+        Search the OS for a daily background task that runs the scraper.
+
+        :return: True if a background task is found, False otherwise.
+        """
+        if sys.platform.startswith("win"):
+            cmd = ["schtasks", "/query", "/tn", BACKGROUND_TASK_NAME]
+            return_code = call(cmd, stdout=DEVNULL, stderr=DEVNULL)
+            found = return_code == 0
+            return found
+        else:
+            # TODO: implement finder for cron jobs
+            return False
+
+    def _toggle_task_batch_file(self, enabled: bool):
+        """
+        Create or delete a batch file that runs the scraper.
+
+        :param enabled: If True, the batch file will be created; if False, the batch
+            file will be deleted.
+        """
+        if enabled:
+            with open(BATCH_FILE, "w", encoding="utf-8") as batch_file:
+                batch_file.write(f"cd {PROJECT_DIR}\n")
+                batch_file.write(f"{PYTHON_EXECUTABLE} -m cs2tracker.scraper\n")
+        else:
+            if os.path.exists(BATCH_FILE):
+                os.remove(BATCH_FILE)
+
+    def _toggle_background_task_windows(self, enabled: bool):
+        """
+        Create or delete a daily background task that runs the scraper on Windows.
+
+        :param enabled: If True, the task will be created; if False, the task will be
+            deleted.
+        """
+        self._toggle_task_batch_file(enabled)
+        if enabled:
+            cmd = [
+                "schtasks",
+                "/create",
+                "/tn",
+                BACKGROUND_TASK_NAME,
+                "/tr",
+                BATCH_FILE,
+                "/sc",
+                "DAILY",
+                "/st",
+                BACKGROUND_TASK_TIME,
+            ]
+            return_code = call(cmd, stdout=DEVNULL, stderr=DEVNULL)
+            if return_code == 0:
+                self.console.print("[bold green][+] Background task enabled.")
+            else:
+                self.console.print("[bold red][!] Failed to enable background task.")
+        else:
+            cmd = ["schtasks", "/delete", "/tn", BACKGROUND_TASK_NAME, "/f"]
+            return_code = call(cmd, stdout=DEVNULL, stderr=DEVNULL)
+            if return_code == 0:
+                self.console.print("[bold green][-] Background task disabled.")
+            else:
+                self.console.print("[bold red][!] Failed to disable background task.")
+
+    def toggle_background_task(self, enabled: bool):
+        """
+        Create or delete a daily background task that runs the scraper.
+
+        :param enabled: If True, the task will be created; if False, the task will be
+            deleted.
+        """
+        if sys.platform.startswith("win"):
+            self._toggle_background_task_windows(enabled)
+        else:
+            # TODO: implement toggle for cron jobs
+            pass
+
+
+if __name__ == "__main__":
+    # If this file is run as a script, create a Scraper instance and run the
+    # scrape_prices method.
+    scraper = Scraper()
+    scraper.scrape_prices()
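
For orientation, a minimal usage sketch of the new module as it appears in this diff. It mirrors the `__main__` block above; the trailing `read_price_log` call is an illustrative extra step, which per its docstring supplies the data for the price-history plot:

    from cs2tracker.scraper import Scraper

    scraper = Scraper()      # reads config.ini and opens a retrying requests session
    scraper.scrape_prices()  # prints per-item prices and appends today's totals to the CSV log

    # Illustrative follow-up: read the accumulated CSV log back for plotting.
    dates, dollars, euros = scraper.read_price_log()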