esprit_py-0.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
esprit/__init__.py ADDED
@@ -0,0 +1 @@
+ from .esprit import Esprit
esprit/absence.py ADDED
@@ -0,0 +1,52 @@
+ from bs4 import BeautifulSoup
+
+
+ class Absence:
+     """
+     A class used to represent an Absence.
+
+     ...
+
+     Attributes
+     ----------
+     url : str
+         a formatted string that represents the URL of the absence page
+     session : requests.Session
+         a Session object from the requests library
+
+     Methods
+     -------
+     get_absences():
+         Returns a list of absences for the student.
+     """
+
+     def __init__(self, session):
+         self.url = "https://esprit-tn.com/ESPOnline/Etudiants/absenceetud.aspx"
+         self.session = session
+
+     def get_absences(self):
+         """
+         Returns a list of absences for the student.
+
+         Returns
+         -------
+         list
+             a list of absences, each represented as a list of strings. The first list is the headers.
+             Returns None if the page does not contain the expected text.
+         """
+
+         response = self.session.get(self.url)
+         soup = BeautifulSoup(response.text, 'html.parser')
+
+         # Check if the <strong> tag with the text "Absence" exists
+         strong_tag = soup.find('strong', string='Absence')
+         if strong_tag is None:
+             print("The page does not contain the expected text.")
+             return None
+
+         table = soup.find('table', {'id': 'ContentPlaceHolder1_GridView2'})
+         rows = table.find_all('tr')
+         headers = [cell.text.strip() for cell in rows[0].find_all('th')]
+         absences = [headers] + [[cell.text.strip() for cell in row.find_all('td')]
+                                 for row in rows[1:]]  # Skip header row
+         return absences
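For orientation, here is a minimal sketch of how `Absence` could be driven on its own. It assumes `session` already carries the cookies of an authenticated ESPRIT session (normally obtained via `Auth.login` or the `Esprit` facade); the snippet is illustrative and not part of the package.

```python
import requests

from esprit.absence import Absence

# Assumption: this session holds valid login cookies (see Auth.login below).
session = requests.Session()

absence = Absence(session)
rows = absence.get_absences()

if rows is None:
    print("No absence data (not logged in, or unexpected page layout).")
else:
    headers, records = rows[0], rows[1:]  # the first list is the header row
    print(" | ".join(headers))
    for record in records:
        print(" | ".join(record))
```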
esprit/auth.py ADDED
@@ -0,0 +1,432 @@
+ import requests
+ from bs4 import BeautifulSoup
+
+
+ class Auth:
+     """
+     A class used to represent an Authentication process
+
+     ...
+
+     Attributes
+     ----------
+     login_url : str
+         a string representing the login URL
+     debug : bool
+         a boolean indicating if debug mode is on
+     session : requests.Session
+         a requests Session object to maintain cookies and state
+
+     Methods
+     -------
+     login(id, password)
+         Logs in to the website using the provided id and password
+     """
+
+     def __init__(self, driver_path=None, driver=None, debug=False, headless=True):
+         """
+         Constructs all the necessary attributes for the Auth object.
+
+         Parameters
+         ----------
+         driver_path : str, optional
+             Path to the WebDriver executable (deprecated, kept for compatibility)
+         driver : WebDriver, optional
+             Existing WebDriver instance (deprecated, kept for compatibility)
+         debug : bool, optional
+             Debug mode flag (default is False)
+         headless : bool, optional
+             Headless mode flag (deprecated, kept for compatibility)
+         """
+
+         self.login_url = "https://esprit-tn.com/esponline/online/default.aspx"
+         self.home_url = "https://esprit-tn.com/esponline/Etudiants/Accueil.aspx"
+         self.logout_urls = [
+             "https://esprit-tn.com/esponline/Etudiants/Deconnexion.aspx",
+             "https://esprit-tn.com/esponline/online/Deconnexion.aspx",
+         ]
+         self.debug = debug
+         self.session = requests.Session()
+         # Set user agent to mimic a real browser
+         self.session.headers.update({
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
+         })
+
+     def _extract_viewstate(self, html):
+         """Extract ASP.NET ViewState and EventValidation from HTML"""
+         soup = BeautifulSoup(html, 'html.parser')
+         viewstate = soup.find('input', {'id': '__VIEWSTATE'})
+         viewstate_generator = soup.find('input', {'id': '__VIEWSTATEGENERATOR'})
+         event_validation = soup.find('input', {'id': '__EVENTVALIDATION'})
+
+         data = {}
+         if viewstate:
+             data['__VIEWSTATE'] = viewstate.get('value', '')
+         if viewstate_generator:
+             data['__VIEWSTATEGENERATOR'] = viewstate_generator.get('value', '')
+         if event_validation:
+             data['__EVENTVALIDATION'] = event_validation.get('value', '')
+
+         return data
+
+     def login(self, id, password):
+         """
+         Logs in to the website using the provided id and password.
+
+         Parameters
+         ----------
+         id : str
+             User's id
+         password : str
+             User's password
+
+         Returns
+         -------
+         list
+             List of cookies if login is successful, None otherwise
+         """
+
+         # Step 1: Get the initial login page
+         response = self.session.get(self.login_url)
+         if response.status_code != 200:
+             print('Failed to load login page')
+             return None
+
+         # Extract ViewState and EventValidation
+         form_data = self._extract_viewstate(response.text)
+
+         # Step 2: Find the ID input field and checkbox on initial page
+         soup = BeautifulSoup(response.text, 'html.parser')
+         # Try to find ID field by various methods
+         id_input = (soup.find('input', {'id': lambda x: x and 'textbox3' in x.lower()}) or
+                     soup.find('input', {'id': lambda x: x and 'textbox1' in x.lower()}) or
+                     soup.find('input', {'type': 'text', 'id': lambda x: x and 'textbox' in x.lower()}))
+         id_field_name = None
+         if id_input:
+             id_field_name = id_input.get('name') or id_input.get('id', '')
+             if self.debug:
+                 print(f'Found ID field: id={id_input.get("id")}, name={id_field_name}')
+
+         # Find the checkbox on the initial page (before clicking Suivant)
+         checkbox = soup.find('input', {'type': 'checkbox'})
+         checkbox_name = None
+         if checkbox:
+             checkbox_name = checkbox.get('name') or checkbox.get('id', '')
+             if self.debug:
+                 print(f'Found checkbox on initial page: id={checkbox.get("id")}, name={checkbox_name}')
+
+         # Use dynamic name if found, otherwise try both with and without ctl00$ prefix
+         if id_field_name:
+             form_data[id_field_name] = id
+         else:
+             form_data['ctl00$ContentPlaceHolder1$TextBox3'] = id
+             form_data['ContentPlaceHolder1$TextBox3'] = id
+
+         # First, check the checkbox to reveal continue button
+         if checkbox_name:
+             form_data_checkbox = self._extract_viewstate(response.text)
+             form_data_checkbox[id_field_name] = id
+             form_data_checkbox[checkbox_name] = 'on'
+             form_data_checkbox['__EVENTTARGET'] = checkbox_name
+             form_data_checkbox['__EVENTARGUMENT'] = ''
+
+             if self.debug:
+                 print('Checking checkbox to reveal continue button...')
+
+             response_checkbox = self.session.post(self.login_url, data=form_data_checkbox)
+             if response_checkbox.status_code == 200:
+                 # Check if continue button appeared
+                 soup_checkbox = BeautifulSoup(response_checkbox.text, 'html.parser')
+                 continue_button = (soup_checkbox.find('input', {'type': 'submit', 'value': lambda x: x and ('continuer' in x.lower() or 'continue' in x.lower())}) or
+                                    soup_checkbox.find('input', {'type': 'button', 'value': lambda x: x and ('continuer' in x.lower() or 'continue' in x.lower())}) or
+                                    soup_checkbox.find('button', string=lambda x: x and ('continuer' in x.lower() or 'continue' in x.lower())))
+
+                 # Debug: Check all buttons on the page after checking checkbox
+                 all_buttons = soup_checkbox.find_all('input', {'type': ['submit', 'button']})
+                 if self.debug:
+                     print(f'Found {len(all_buttons)} buttons on page after checking checkbox')
+                     for btn in all_buttons:
+                         btn_id = btn.get('id', '')
+                         btn_name = btn.get('name', '')
+                         btn_value = btn.get('value', '')
+                         print(f'  Button: id={btn_id}, name={btn_name}, value={btn_value}')
+
+                 if continue_button:
+                     if self.debug:
+                         print('Continue button appeared, clicking it...')
+                     # Click continue button
+                     form_data_continue = self._extract_viewstate(response_checkbox.text)
+                     form_data_continue[id_field_name] = id
+                     form_data_continue[checkbox_name] = 'on'
+                     continue_button_name = continue_button.get('name') or continue_button.get('id', '')
+                     form_data_continue['__EVENTTARGET'] = continue_button_name
+                     form_data_continue['__EVENTARGUMENT'] = ''
+
+                     response_continue = self.session.post(self.login_url, data=form_data_continue)
+                     if response_continue.status_code == 200:
+                         response = response_continue
+                         form_data = self._extract_viewstate(response_continue.text)
+                         form_data[id_field_name] = id
+                         form_data[checkbox_name] = 'on'
+                     else:
+                         response = response_checkbox
+                         form_data = self._extract_viewstate(response_checkbox.text)
+                         form_data[id_field_name] = id
+                         form_data[checkbox_name] = 'on'
+                 else:
+                     # No continue button found by text, but maybe it's the Suivant button that's now enabled?
+                     # Or maybe we can proceed directly with Suivant
+                     if self.debug:
+                         print('Continue button not found, proceeding with Suivant')
+                     response = response_checkbox
+                     form_data = self._extract_viewstate(response_checkbox.text)
+                     form_data[id_field_name] = id
+                     form_data[checkbox_name] = 'on'
+             else:
+                 # If checkbox interaction fails, proceed normally
+                 if self.debug:
+                     print('Checkbox interaction failed, proceeding normally')
+                 form_data[id_field_name] = id
+
+         # Find the Suivant button dynamically
+         # After checking checkbox, Button3 becomes visible (Button1 is hidden)
+         soup_current = BeautifulSoup(response.text, 'html.parser')
+         suivant_button = (soup_current.find('input', {'id': lambda x: x and 'button3' in x.lower()}) or
+                           soup_current.find('input', {'name': lambda x: x and 'button3' in x.lower()}) or
+                           soup_current.find('input', {'type': 'submit', 'value': lambda x: x and ('suivant' in x.lower() or 'next' in x.lower())}) or
+                           soup_current.find('input', {'id': lambda x: x and 'button1' in x.lower()}) or
+                           soup_current.find('input', {'name': lambda x: x and 'button1' in x.lower()}))
+
+         if suivant_button:
+             button_name = suivant_button.get('name') or suivant_button.get('id', '')
+             if button_name:
+                 form_data['__EVENTTARGET'] = button_name
+                 if self.debug:
+                     print(f'Found suivant button: {button_name}')
+         else:
+             # Fall back to Button3 (after checkbox) or Button1
+             form_data['__EVENTTARGET'] = 'ctl00$ContentPlaceHolder1$Button3'
+             if self.debug:
+                 print('Using default suivant button name (Button3)')
+
+         form_data['__EVENTARGUMENT'] = ''
+
+         # Step 3: Submit the ID with checkbox checked (click 'Suivant' button)
+         response = self.session.post(self.login_url, data=form_data)
+         if response.status_code != 200:
+             print('Failed to submit ID')
+             return None
+
+         if self.debug:
+             print('After submitting ID with checkbox checked, now on password page...')
+
+         # Step 4: Extract ViewState again after clicking Suivant (now on password page)
+         form_data = self._extract_viewstate(response.text)
+
+         # Step 5: Submit the password (click 'Connexion' button)
+         # Use the latest response (after continue button click)
+         # Re-parse the latest response to find password field
+         soup = BeautifulSoup(response.text, 'html.parser')
+
+         if self.debug:
+             # Check what's on the current page
+             all_inputs = soup.find_all('input')
+             print(f'Current page has {len(all_inputs)} input fields')
+             for inp in all_inputs:
+                 inp_type = inp.get('type', '')
+                 inp_id = inp.get('id', '')
+                 inp_name = inp.get('name', '')
+                 inp_value = inp.get('value', '')
+                 if inp_type == 'password' or 'password' in inp_id.lower() or 'password' in inp_name.lower():
+                     print(f'  Password input: id={inp_id}, name={inp_name}, type={inp_type}, disabled={inp.get("disabled")}')
+                 elif inp_type == 'text':
+                     print(f'  Text input: id={inp_id}, name={inp_name}, value={inp_value[:30] if inp_value else ""}')
+                 elif inp_type == 'hidden':
+                     print(f'  Hidden input: id={inp_id}, name={inp_name}, value={inp_value[:30] if inp_value else ""}')
+                 elif inp_type in ['submit', 'button']:
+                     print(f'  Button: id={inp_id}, name={inp_name}, value={inp_value}')
+
+         # Find password field for student (TextBox7, not TextBox2!)
+         password_input = (soup.find('input', {'id': lambda x: x and 'textbox7' in x.lower()}) or
+                           soup.find('input', {'name': lambda x: x and 'textbox7' in x.lower()}) or
+                           soup.find('input', {'type': 'password'}))
+         password_field_name = None
+         if password_input:
+             password_field_name = password_input.get('name') or password_input.get('id', '')
+             if self.debug:
+                 print(f'Found password field: {password_field_name}')
+                 # Check if it's disabled
+                 if password_input.get('disabled'):
+                     print('WARNING: Password field is disabled!')
+
+         # Use dynamic name if found, otherwise fall back to default
+         if password_field_name:
+             form_data[password_field_name] = password
+         else:
+             # Try both with and without ctl00$ prefix
+             form_data['ctl00$ContentPlaceHolder1$TextBox7'] = password
+             form_data['ContentPlaceHolder1$TextBox7'] = password
+
+         # DO NOT include ID or checkbox - they don't exist on the password page after Suivant!
+
+         # Find the connection button for student (ButtonEtudiant, not Button2!)
+         connexion_button = (soup.find('input', {'id': lambda x: x and 'buttonetudiant' in x.lower()}) or
+                             soup.find('input', {'name': lambda x: x and 'buttonetudiant' in x.lower()}) or
+                             soup.find('input', {'type': 'submit', 'value': lambda x: x and ('connexion' in x.lower() or 'connect' in x.lower())}) or
+                             soup.find('input', {'id': lambda x: x and 'button2' in x.lower()}) or
+                             soup.find('input', {'name': lambda x: x and 'button2' in x.lower()}))
+
+         if connexion_button:
+             button_name = connexion_button.get('name') or connexion_button.get('id', '')
+             if button_name:
+                 # For ASP.NET, use __EVENTTARGET with the button name
+                 form_data['__EVENTTARGET'] = button_name
+                 form_data['__EVENTARGUMENT'] = ''
+                 if self.debug:
+                     print(f'Found connexion button: {button_name}, using __EVENTTARGET')
+         else:
+             # Fall back to default (ButtonEtudiant for student login)
+             form_data['__EVENTTARGET'] = 'ctl00$ContentPlaceHolder1$ButtonEtudiant'
+             form_data['__EVENTARGUMENT'] = ''
+             if self.debug:
+                 print('Using default connexion button name (ButtonEtudiant)')
+
+         if self.debug:
+             print('Submitting password (without ID or checkbox)')
+
+         if self.debug:
+             print(f'Submitting password with form data keys: {list(form_data.keys())}')
+
+         # Use the current page URL (password page) instead of login URL
+         current_url = response.url if hasattr(response, 'url') else self.login_url
+         if self.debug:
+             print(f'Posting to URL: {current_url}')
+
+         response = self.session.post(current_url, data=form_data, allow_redirects=True)
+
+         if self.debug:
+             print(f'After password submission:')
+             print(f'  Status code: {response.status_code}')
+             print(f'  Final URL: {response.url}')
+             print(f'  Redirected: {response.url != self.login_url}')
+
+             # Check what's in the response
+             if 'error' in response.text.lower() or 'erreur' in response.text.lower():
+                 print('Response contains error/erreur')
+             if 'invalid' in response.text.lower() or 'invalide' in response.text.lower():
+                 print('Response contains invalid/invalide')
+
+         # Check if login was successful
+         # After successful login, we should be redirected to Accueil.aspx (home page)
+         # Check if we're on the login page (default.aspx) or home page (Accueil.aspx)
+         is_on_login_page = 'default.aspx' in response.url.lower()
+         is_on_home_page = 'accueil.aspx' in response.url.lower() or 'Accueil.aspx' in response.url
+
+         # Look for multiple success indicators
+         success_indicators = [
+             'Vous pouvez consulter dans cet espace :',
+             'Espace Etudiant',
+             'Accueil.aspx',
+             'Label2',  # Student name label
+             'Label3'  # Student class label
+         ]
+
+         login_successful = (not is_on_login_page) and (is_on_home_page or any(indicator in response.text for indicator in success_indicators))
+
+         if self.debug:
+             print(f'Checking login success...')
+             print(f'  On login page: {is_on_login_page}')
+             print(f'  On home page: {is_on_home_page}')
+             for indicator in success_indicators:
+                 if indicator in response.text:
+                     print(f'  Found success indicator: {indicator}')
+
+         if login_successful:
+             print('''
+             -----------------
+             Login successful!
+             -----------------
+             ''')
+             # Convert requests cookies to Selenium-like format for compatibility
+             cookies = []
+             for cookie in self.session.cookies:
+                 cookies.append({
+                     'name': cookie.name,
+                     'value': cookie.value,
+                     'domain': cookie.domain,
+                     'path': cookie.path
+                 })
+             return cookies
+         else:
+             print('''
+             -------------
+             Login failed!
+             -------------
+             ''')
+             return None
+
+     def logout(self):
+         """
+         Logout from the ESPRIT website.
+
+         Returns:
+             bool: True if logout was successful, False otherwise.
+         """
+         if self.debug:
+             print('Starting logout...')
+
+         # Method 1: Try ASP.NET postback mechanism
+         try:
+             # Get the home page to retrieve ViewState
+             response = self.session.get(self.home_url, allow_redirects=True)
+
+             if response.status_code == 200 and 'default.aspx' not in response.url.lower():
+                 # We're logged in, now perform logout postback
+                 soup = BeautifulSoup(response.text, 'html.parser')
+
+                 # Extract ViewState for the postback
+                 form_data = self._extract_viewstate(response.text)
+
+                 # Set up the postback to trigger LinkButton1 (logout button)
+                 form_data['__EVENTTARGET'] = 'ctl00$LinkButton1'
+                 form_data['__EVENTARGUMENT'] = ''
+
+                 if self.debug:
+                     print('Sending logout postback...')
+
+                 # Submit the logout postback
+                 logout_response = self.session.post(self.home_url, data=form_data, allow_redirects=True)
+
+                 # Check if we're redirected to login page
+                 if 'default.aspx' in logout_response.url or logout_response.status_code == 200:
+                     if self.debug:
+                         print('Logout successful (postback method)')
+                     return True
+         except Exception as e:
+             if self.debug:
+                 print(f'Postback logout failed: {e}')
+
+         # Method 2: Try direct logout URLs
+         for logout_url in self.logout_urls:
+             try:
+                 if self.debug:
+                     print(f'Trying logout URL: {logout_url}')
+
+                 response = self.session.get(logout_url, allow_redirects=True)
+
+                 # Check if redirected to login page or session cleared
+                 if 'default.aspx' in response.url or response.status_code == 200:
+                     if self.debug:
+                         print(f'Logout successful via URL: {logout_url}')
+                     return True
+             except Exception as e:
+                 if self.debug:
+                     print(f'Logout URL {logout_url} failed: {e}')
+                 continue
+
+         # Method 3: Clear session cookies as fallback
+         if self.debug:
+             print('Clearing session cookies...')
+         self.session.cookies.clear()
+
+         return True
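A minimal sketch of using `Auth` directly (rather than through the `Esprit` facade). The credentials are placeholders; `debug=True` simply prints each step of the ASP.NET postback flow shown above.

```python
from esprit.auth import Auth

# Placeholders - replace with real credentials before running.
STUDENT_ID = "ID"
PASSWORD = "PASSWORD"

auth = Auth(debug=True)
cookies = auth.login(STUDENT_ID, PASSWORD)

if cookies is not None:
    # auth.session now carries the authenticated cookies and can be handed
    # to Grade, Absence, Credit, TimeSchedule or Utils.
    for cookie in cookies:
        print(cookie["name"], "->", cookie["domain"])
    auth.logout()
```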
esprit/credit.py ADDED
@@ -0,0 +1,91 @@
+ from bs4 import BeautifulSoup
+
+
+ class Credit:
+     """
+     A class used to represent a Credit.
+
+     ...
+
+     Attributes
+     ----------
+     url : str
+         a formatted string that represents the URL of the credit page
+     session : requests.Session
+         a Session object from the requests library
+
+     Methods
+     -------
+     get_credits():
+         Returns a list of credits for the student.
+     """
+
+     def __init__(self, session):
+         self.url = "https://esprit-tn.com/ESPOnline/Etudiants/Historique_Cr%C3%A9dit.aspx"
+         self.session = session
+
+     def get_credits(self):
+         """
+         Returns a list of credits for the student.
+
+         Returns
+         -------
+         list
+             a list of credits, each represented as a list of strings. The first list is the headers.
+             Returns None if the page does not contain the expected text.
+         """
+         response = self.session.get(self.url, allow_redirects=False)
+         if response.status_code == 302 or response.status_code == 301:
+             # Follow redirect
+             response = self.session.get(self.url, allow_redirects=True)
+
+         if response.status_code != 200:
+             print("Failed to load credits page.")
+             return None
+
+         # Check if we were redirected to login page
+         if 'default.aspx' in response.url or 'login' in response.url.lower():
+             print("Session expired or invalid - redirected to login page.")
+             return None
+
+         soup = BeautifulSoup(response.text, 'html.parser')
+
+         # Find the table with id='ContentPlaceHolder1_GridView1'
+         table = soup.find('table', {'id': 'ContentPlaceHolder1_GridView1'})
+         if table is None:
+             # Try to find table by structure (has headers like "Année universitaire")
+             all_tables = soup.find_all('table')
+             for tbl in all_tables:
+                 rows = tbl.find_all('tr')
+                 if len(rows) > 0:
+                     first_row = rows[0]
+                     ths = first_row.find_all('th')
+                     if len(ths) > 0:
+                         headers = [th.text.strip() for th in ths]
+                         if 'Année universitaire' in headers or 'Unité d\'enseignement' in headers:
+                             table = tbl
+                             break
+
+         if table is None:
+             print("Credits table not found on page.")
+             return None
+
+         rows = table.find_all('tr')
+         if len(rows) == 0:
+             print("No rows found in credits table.")
+             return None
+
+         # Extract headers from first row
+         headers = [cell.text.strip() for cell in rows[0].find_all('th')]
+         if len(headers) == 0:
+             print("No headers found in credits table.")
+             return None
+
+         # Extract data rows
+         credits = [headers]
+         for row in rows[1:]:  # Skip header row
+             cells = [cell.text.strip() for cell in row.find_all('td')]
+             if len(cells) > 0:
+                 credits.append(cells)
+
+         return credits
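A short sketch of consuming the header-plus-rows structure returned by `get_credits()`; as above, the authenticated `session` is assumed to come from `Auth.login` and the snippet is illustrative only.

```python
import requests

from esprit.credit import Credit

# Assumption: this session holds valid login cookies (see Auth.login above).
session = requests.Session()

credit = Credit(session)
table = credit.get_credits()

if table:
    headers, rows = table[0], table[1:]
    width = max(len(h) for h in headers)
    for row in rows:
        for name, value in zip(headers, row):
            print(f"{name:<{width}} : {value}")
        print("-" * 30)
else:
    print("No credit history returned.")
```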
esprit/esprit.py ADDED
@@ -0,0 +1,66 @@
+ import requests
+ from .auth import Auth
+ from .grade import Grade
+ from .absence import Absence
+ from .time_schedule import TimeSchedule
+ from .credit import Credit
+ from .utils import Utils
+ # from .exceptions import EspritException #TODO
+
+
+ class Esprit:
+     def __init__(self, driver_path=None, driver=None, debug=False, headless=True):
+         self.auth = Auth(driver_path, driver, debug, headless)
+         # Use the same session from Auth to maintain cookies
+         self.session = self.auth.session
+         self.grade_scrape = Grade(self.session)
+         self.absence_scrape = Absence(self.session)
+         self.time_schedule_scrape = TimeSchedule(self.session)
+         self.credit = Credit(self.session)
+         self.utils = Utils(self.session)
+
+     def login(self, username, password):
+         cookies = self.auth.login(username, password)
+         # Session is already updated in Auth, so we just need to check if login was successful
+         if cookies is None:
+             return False
+         return True
+
+     def logout(self):
+         """
+         Logout from the ESPRIT website.
+
+         Returns:
+             bool: True if logout was successful, False otherwise.
+         """
+         return self.auth.logout()
+
+     def get_grades(self):
+         return self.grade_scrape.get_grades()
+
+     def calculate_average(self, grades):
+         return self.grade_scrape.calculate_average(grades)
+
+     def get_absences(self):
+         return self.absence_scrape.get_absences()
+
+     def get_table_schedules(self):
+         return self.time_schedule_scrape.get_table_schedules()
+
+     def get_last_week_schedule(self):
+         return self.time_schedule_scrape.get_last_week_schedule()
+
+     def download_files(self, schedule, download_path):
+         return self.time_schedule_scrape.download_files(schedule, download_path)
+
+     def get_class_week_schedule(self, file_path, class_name, result_path):
+         return self.time_schedule_scrape.get_class_week_schedule(file_path, class_name, result_path)
+
+     def get_credits(self):
+         return self.credit.get_credits()
+
+     def get_student_name(self):
+         return self.utils.get_student_name()
+
+     def get_student_class(self):
+         return self.utils.get_student_class()
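The `Esprit` class is the facade that ties the scrapers to a single authenticated session. A minimal end-to-end sketch (credentials are placeholders):

```python
from esprit import Esprit

# Placeholders - replace with real credentials before running.
STUDENT_ID = "ID"
PASSWORD = "PASSWORD"

esprit = Esprit(debug=False)

if esprit.login(STUDENT_ID, PASSWORD):
    print("Student:", esprit.get_student_name())
    print("Class:  ", esprit.get_student_class())

    grades = esprit.get_grades()
    if grades is not None:
        print("Semester average:", esprit.calculate_average(grades))

    esprit.logout()
else:
    print("Login failed.")
```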
esprit/grade.py ADDED
@@ -0,0 +1,102 @@
+ from bs4 import BeautifulSoup
+ import pandas as pd
+ import numpy as np
+
+
+ class Grade:
+     """
+     A class used to represent a Grade.
+
+     ...
+
+     Attributes
+     ----------
+     url : str
+         a formatted string that represents the URL of the grade page
+     session : requests.Session
+         a Session object from the requests library
+
+     Methods
+     -------
+     get_grades():
+         Returns a list of grades for the student.
+     calculate_average(grades):
+         Calculate the average grade based on the given grades.
+     """
+
+     def __init__(self, session):
+         self.url = "https://esprit-tn.com/ESPOnline/Etudiants/Resultat2021.aspx"
+         self.session = session
+
+     def get_grades(self):
+         """
+         Returns a list of grades for the student.
+
+         Returns
+         -------
+         list
+             a list of grades, each represented as a list of strings. The first list is the headers.
+             Returns None if the page does not contain the expected text.
+         """
+         response = self.session.get(self.url)
+         soup = BeautifulSoup(response.text, 'html.parser')
+
+         # Check if the <h1> tag with the text "Notes Des Modules" exists
+         h1_tag = soup.find('h1', string=' Notes Des Modules ')
+         if h1_tag is None:
+             print("The page does not contain the expected text.")
+             return None
+
+         table = soup.find('table', {'id': 'ContentPlaceHolder1_GridView1'})
+         rows = table.find_all('tr')
+         headers = [cell.text.strip() for cell in rows[0].find_all('th')]
+         grades = [headers] + [[cell.text.strip() for cell in row.find_all('td')]
+                               for row in rows[1:]]  # Skip header row
+         return grades
+
+     def calculate_average(self, grades):
+         """
+         Calculate the average grade based on the given grades.
+
+         Parameters
+         ----------
+         grades (list): A list of lists representing the grades. The first list should contain the column names.
+
+         Returns
+         -------
+         float: The calculated average grade.
+         """
+         # Convert the list of lists to a DataFrame
+         df = pd.DataFrame(grades[1:], columns=grades[0])
+
+         # Replace empty strings with NaN
+         df.replace('', np.nan, inplace=True)
+
+         # Replace comma with dot and convert to float
+         for col in ['COEF', 'NOTE_CC', 'NOTE_TP', 'NOTE_EXAM']:
+             df[col] = df[col].str.replace(',', '.').astype(float)
+
+         # Calculate the average based on available grades
+
+         def calculate_average(row):
+             if pd.isna(row['NOTE_TP']):
+                 if pd.isna(row['NOTE_CC']):
+                     return row['NOTE_EXAM']
+                 else:
+                     return row['NOTE_EXAM'] * 0.6 + row['NOTE_CC'] * 0.4
+             elif pd.isna(row['NOTE_CC']):
+                 return row['NOTE_EXAM'] * 0.8 + row['NOTE_TP'] * 0.2
+             else:
+                 return row['NOTE_EXAM'] * 0.5 + row['NOTE_CC'] * 0.3 + row['NOTE_TP'] * 0.2
+
+         df['MOYENNE'] = df.apply(calculate_average, axis=1)
+
+         # Calculate the total average
+         total_average = (df['MOYENNE'] * df['COEF']).sum() / df['COEF'].sum()
+
+         # Append the total average to the DataFrame (pd.concat avoids the private DataFrame._append)
+         df = pd.concat([df, pd.DataFrame([{'DESIGNATION': 'Moyenne', 'COEF': df['COEF'].sum(),
+                                            'MOYENNE': total_average}])], ignore_index=True)
+
+         print(df)
+         return total_average
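`calculate_average` weights each module by which marks are present: exam only counts fully; exam plus continuous assessment is weighted 0.6/0.4; exam plus practical work 0.8/0.2; all three 0.5/0.3/0.2; the module averages are then combined weighted by COEF. A worked sketch with invented numbers in the same column layout the scraper returns (no HTTP call is made):

```python
from esprit.grade import Grade

# Hypothetical rows, purely to illustrate the weighting rules above.
grades = [
    ['DESIGNATION', 'COEF', 'NOTE_CC', 'NOTE_TP', 'NOTE_EXAM'],
    ['Math',        '2',    '12,00',   '',        '10,00'],   # 0.6*10 + 0.4*12 = 10.8
    ['Physique',    '1',    '',        '14,00',   '11,00'],   # 0.8*11 + 0.2*14 = 11.6
    ['Projet',      '1,5',  '13,00',   '15,00',   '12,00'],   # 0.5*12 + 0.3*13 + 0.2*15 = 12.9
]

grade = Grade(session=None)  # calculate_average never touches the session
average = grade.calculate_average(grades)

# Weighted by COEF: (2*10.8 + 1*11.6 + 1.5*12.9) / 4.5 ≈ 11.68
print(round(average, 2))
```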
esprit/time_schedule.py ADDED
@@ -0,0 +1,185 @@
+ from bs4 import BeautifulSoup
+ from datetime import datetime
+ import re
+ import os
+ from PyPDF2 import PdfReader, PdfWriter
+
+
+ class TimeSchedule:
+     """
+     A class used to represent a Time Schedule.
+
+     ...
+
+     Attributes
+     ----------
+     url : str
+         a formatted string that represents the URL of the time schedule page
+     session : requests.Session
+         a Session object from the requests library
+
+     Methods
+     -------
+     get_table_schedules():
+         Returns a list of time schedules for the student.
+     get_last_week_schedule():
+         Returns the most recent weekly schedule.
+     download_files(schedule, download_path):
+         Downloads the files associated with a given schedule.
+     get_class_week_schedule(file_path, class_name, result_path):
+         Extracts the weekly schedule for a specific class from a given file.
+     """
+
+     def __init__(self, session):
+         self.url = "https://esprit-tn.com/ESPOnline/Etudiants/Emplois.aspx"
+         self.session = session
+
+     def get_table_schedules(self):
+         """
+         Returns a list of time schedules for the student.
+
+         Returns
+         -------
+         list
+             a list of time schedules, each represented as a list of strings.
+             Returns None if the page does not contain the expected text.
+         """
+         response = self.session.get(self.url)
+         soup = BeautifulSoup(response.text, 'html.parser')
+
+         # Check if the <strong> tag with the text "Emploi du temps" exists
+         strong_tag = soup.find('strong', string='Emploi du temps')
+         if strong_tag is None:
+             print("The page does not contain the expected text.")
+             return None
+
+         table = soup.find('table', {'id': 'ContentPlaceHolder1_GridView1'})
+         rows = table.find_all('tr')
+         time_schedules = []
+         for row in rows[1:]:
+             row_data = []
+             for cell in row.find_all('td'):
+                 cell_text = cell.text.strip()
+                 cell_link = cell.find('a')
+                 if cell_link is not None:
+                     cell_text += ' ' + cell_link.get('href')
+                 row_data.append(cell_text)
+             time_schedules.append(row_data)
+         return time_schedules
+
+     def get_last_week_schedule(self):
+         """
+         Returns the most recent weekly schedule.
+
+         Returns
+         -------
+         list
+             the most recent weekly schedule, represented as a list of strings.
+             Returns None if no schedules are found.
+         """
+         time_schedules = self.get_table_schedules()
+         if time_schedules is None:
+             return None
+
+         # Filter schedules that contain "Semaine" and extract dates
+         week_schedules = [
+             schedule for schedule in time_schedules if "Semaine" in schedule[0]]
+         date_format = "%d-%m-%Y"
+         dates_and_schedules = []
+
+         for schedule in week_schedules:
+             # Extract date from file name
+             match = re.search(r"\d{2}-\d{2}-\d{4}", schedule[0])
+             if match:
+                 date_str = match.group()
+                 date = datetime.strptime(date_str, date_format)
+                 dates_and_schedules.append((date, schedule))
+
+         # Sort by date and return the latest schedule
+         dates_and_schedules.sort(key=lambda x: x[0])
+         return dates_and_schedules[-1][1] if dates_and_schedules else None
+
+     def download_files(self, schedule, download_path):
+         """
+         Downloads the files associated with a given schedule.
+
+         Parameters
+         ----------
+         schedule : list
+             the schedule to download files for, represented as a list of strings
+         download_path : str
+             the path where the file should be downloaded
+
+         Returns
+         -------
+         str
+             the path to the downloaded file
+         """
+         response = self.session.get(self.url)
+         soup = BeautifulSoup(response.text, 'html.parser')
+
+         # Extract __VIEWSTATE and __EVENTVALIDATION values
+         viewstate = soup.find('input', {'id': '__VIEWSTATE'})['value']
+         eventvalidation = soup.find(
+             'input', {'id': '__EVENTVALIDATION'})['value']
+
+         # Extract eventTarget from the JavaScript function call
+         event_target = schedule[1].split("'")[1]
+
+         # Send a POST request to mimic the postback
+         post_data = {
+             '__EVENTTARGET': event_target,
+             '__EVENTARGUMENT': '',
+             '__VIEWSTATE': viewstate,
+             '__EVENTVALIDATION': eventvalidation,
+         }
+         file_response = self.session.post(self.url, data=post_data)
+
+         # Save the file
+         file_path = os.path.join(download_path, schedule[0])
+         with open(file_path, 'wb') as f:
+             f.write(file_response.content)
+
+         return file_path
+
+     def get_class_week_schedule(self, file_path, class_name, result_path):
+         """
+         Extracts the weekly schedule for a specific class from a given file.
+
+         Parameters
+         ----------
+         file_path : str
+             the path to the file to extract the schedule from
+         class_name : str
+             the name of the class to extract the schedule for
+         result_path : str
+             the path where the result PDF will be saved
+
+         Returns
+         -------
+         str
+             the path to the extracted schedule, or None if the class is not found in the file
+         """
+         # Open the existing PDF
+         with open(file_path, "rb") as file:
+             reader = PdfReader(file)
+
+             # Assume class_name is found on a specific page
+             for page_number in range(len(reader.pages)):
+                 page = reader.pages[page_number]
+                 content = page.extract_text()
+
+                 # If class_name is found in content
+                 if class_name in content:
+                     writer = PdfWriter()
+                     writer.add_page(page)
+
+                     # Save the page as a new PDF
+                     new_file_path = os.path.join(
+                         result_path, f"{class_name}.pdf")
+                     with open(new_file_path, "wb") as output_pdf:
+                         writer.write(output_pdf)
+
+                     return new_file_path
+
+         return None
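The three methods chain naturally: pick the latest "Semaine" entry, post back to download the full weekly PDF, then keep only the page for one class. A sketch of that chain (the class name "4SE1" and the "." paths are placeholders, and the session is again assumed to be authenticated):

```python
import requests

from esprit.time_schedule import TimeSchedule

# Assumption: this session holds valid login cookies (see Auth.login above).
session = requests.Session()
schedule = TimeSchedule(session)

latest = schedule.get_last_week_schedule()
if latest is not None:
    # Download the full weekly PDF, then extract the single page for one class.
    pdf_path = schedule.download_files(latest, download_path=".")
    class_pdf = schedule.get_class_week_schedule(pdf_path, "4SE1", result_path=".")
    print("Class schedule saved to:", class_pdf)
else:
    print("No weekly schedule found.")
```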
esprit/utils.py ADDED
@@ -0,0 +1,93 @@
+ from bs4 import BeautifulSoup
+
+
+ class Utils:
+     """
+     A utility class for interacting with the ESPRIT website.
+     """
+
+     def __init__(self, session):
+         # Try both URL formats (case might matter)
+         self.url = "https://esprit-tn.com/ESPOnline/Etudiants/Accueil.aspx"
+         self.url_alt = "https://esprit-tn.com/esponline/Etudiants/Accueil.aspx"
+         self.session = session
+
+     def get_student_name(self):
+         """
+         Get the name of the student from the ESPRIT website.
+
+         Returns:
+             The name of the student, or None if the name could not be found.
+         """
+         # Try primary URL first
+         response = self.session.get(self.url, allow_redirects=True)
+
+         # Check if we were redirected to login page
+         if 'default.aspx' in response.url or 'login' in response.url.lower():
+             # Try alternative URL
+             response = self.session.get(self.url_alt, allow_redirects=True)
+             if 'default.aspx' in response.url or 'login' in response.url.lower():
+                 print("Session expired or invalid - redirected to login page.")
+                 return None
+
+         if response.status_code != 200:
+             print("Failed to load student page.")
+             return None
+
+         soup = BeautifulSoup(response.text, 'html.parser')
+
+         # Find the span with id='Label2' and class='h4 text-info'
+         span = soup.find('span', {'id': 'Label2', 'class': 'h4 text-info'})
+         if span is not None and span.text.strip():
+             return span.text.strip()
+
+         # Try alternative: ContentPlaceHolder1_Label2
+         span = soup.find('span', {'id': 'ContentPlaceHolder1_Label2'})
+         if span is not None and span.text.strip():
+             return span.text.strip()
+
+         # Try alternative: just find by id='Label2'
+         span = soup.find('span', {'id': 'Label2'})
+         if span is not None and span.text.strip():
+             return span.text.strip()
+
+         print("Student name not found on page.")
+         return None
+
+     def get_student_class(self):
+         """
+         Get the class of the student from the ESPRIT website.
+
+         Returns:
+             The class of the student, or None if the class could not be found.
+         """
+         # Try primary URL first
+         response = self.session.get(self.url, allow_redirects=True)
+
+         # Check if we were redirected to login page
+         if 'default.aspx' in response.url or 'login' in response.url.lower():
+             # Try alternative URL
+             response = self.session.get(self.url_alt, allow_redirects=True)
+             if 'default.aspx' in response.url or 'login' in response.url.lower():
+                 print("Session expired or invalid - redirected to login page.")
+                 return None
+
+         if response.status_code != 200:
+             print("Failed to load student page.")
+             return None
+
+         soup = BeautifulSoup(response.text, 'html.parser')
+
+         # Find the span with id='Label3'
+         span = soup.find('span', {'id': 'Label3'})
+         if span is not None and span.text.strip():
+             return span.text.strip()
+
+         # Try alternative: ContentPlaceHolder1_Label3
+         span = soup.find('span', {'id': 'ContentPlaceHolder1_Label3'})
+         if span is not None and span.text.strip():
+             return span.text.strip()
+
+         print("Student class not found on page.")
+         return None
+
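`Utils` reads the name and class labels from the home page using the session produced by `Auth`. A brief sketch (credentials are placeholders):

```python
from esprit.auth import Auth
from esprit.utils import Utils

auth = Auth()
if auth.login("ID", "PASSWORD") is not None:  # placeholder credentials
    utils = Utils(auth.session)
    print("Name: ", utils.get_student_name())
    print("Class:", utils.get_student_class())
    auth.logout()
```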
esprit_py-0.5.0.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Aymen Hmani
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
esprit_py-0.5.0.dist-info/METADATA ADDED
@@ -0,0 +1,214 @@
+ Metadata-Version: 2.1
+ Name: esprit-py
+ Version: 0.5.0
+ Summary: A fast, lightweight Python library for interacting with data from esprit-tn.com
+ Home-page: https://github.com/TheLime1/esprit.py
+ Author: Lime1 (Aymen Hmani)
+ Keywords: python,api
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: Unix
+ Classifier: Operating System :: MacOS :: MacOS X
+ Classifier: Operating System :: Microsoft :: Windows
+ Classifier: License :: OSI Approved :: MIT License
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: requests
+ Requires-Dist: beautifulsoup4
+ Requires-Dist: PyPDF2
+ Requires-Dist: numpy
+ Requires-Dist: pandas
+
+ # <img src="https://esprit.tn/favicon.ico" width="28px" /> esprit-py
+
+ [![PyPI version](https://badge.fury.io/py/esprit-py.svg)](https://pypi.org/project/esprit-py/)
+
+ > [!NOTE]
+ > Please note that this library is not an official API provided by Esprit and is intended for educational and personal use only.
+
+ ## Features
+
+ - **Fast & Lightweight**: 5-10x faster than previous versions (no Selenium/browser required)
+ - Get your exact timetable PDF, *not a 300-page PDF*
+ - Get your grades
+ - Get your absences
+ - Get your credits
+ - Calculate your total semester average
+ - **New**: Logout functionality for proper session management
+
+ ## Installation
+
+ ```bash
+ pip install --upgrade esprit-py
+ ```
+
+ ## Examples
+
+ Get your total average:
+
+ ```python
+ from esprit import Esprit
+
+ # Replace with your actual ID and password
+ id = 'ID'
+ password = 'PASSWORD'
+
+ grades = None
+
+ # Keep trying to get grades until it succeeds, because the ESPRIT servers are unreliable
+ while grades is None:
+     try:
+         # Create an Esprit object
+         esprit = Esprit()
+
+         # Attempt to log in
+         esprit.login(id, password)
+
+         # Get grades
+         grades = esprit.get_grades()
+     except Exception as e:
+         print(f"An error occurred: {e}. Retrying...")
+
+ if grades is not None:
+     for grade in grades:
+         print(grade)
+ else:
+     print("Failed to get grades.")
+
+ esprit.calculate_average(grades)
+
+ # Logout when done
+ esprit.logout()
+ ```
+
+ Get a list of all your absences:
+
+ ```python
+ from esprit import Esprit
+
+ # Create an Esprit object
+ esprit = Esprit()
+
+ # Replace with your actual ID and password
+ id = 'ID'
+ password = 'PASSWORD'
+
+ # Attempt to log in
+ if esprit.login(id, password):
+     print("Login successful.")
+ else:
+     print("Login failed.")
+
+ # Get absences
+ absences = esprit.get_absences()
+ if absences is not None:
+     for absence in absences:
+         print(absence)
+ else:
+     print("Failed to get absences.")
+
+ # Logout when done
+ esprit.logout()
+ ```
+
+ More examples can be found in the [examples folder](examples)
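The Features list above advertises the per-class timetable PDF, but the README examples stop at grades and absences. A hedged sketch of that workflow through the `Esprit` facade (credentials, class name, and output paths are placeholders):

```python
from esprit import Esprit

esprit = Esprit()
if esprit.login('ID', 'PASSWORD'):  # replace with real credentials
    latest = esprit.get_last_week_schedule()
    if latest is not None:
        pdf = esprit.download_files(latest, download_path='.')
        # Keep only the page for your class ('4SE1' is a placeholder class name)
        my_page = esprit.get_class_week_schedule(pdf, '4SE1', result_path='.')
        print('Saved:', my_page)
    esprit.logout()
```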
esprit_py-0.5.0.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+ esprit/__init__.py,sha256=PnahGRQ5xmA26XMFaj9ImLty9OEmYwnBqLanvGXZwwU,28
+ esprit/absence.py,sha256=1CE7FCnWUr9omNXkgFH4Q3Vq5FkonjAEOmAOarXoCqU,1649
+ esprit/auth.py,sha256=9tLQadQYu0Y5yfQcOLvlzSOYhSURd75OLe1BpBgQ86w,20882
+ esprit/credit.py,sha256=mcrBWGiTh-aYuSFti2LKZtuxj5MXOI8a0mxEvXjFXRU,3216
+ esprit/esprit.py,sha256=OyHR4rTWv_wxHnvIYoCZ9yTwxZKuOsTHoxF4Km_-Ulo,2289
+ esprit/grade.py,sha256=Wt5rjcqHAaG2gQ5DaqaMUMkP7ImMhZhQqoJTxTZkXhk,3504
+ esprit/time_schedule.py,sha256=APash3wNe09kdwJDl3B1Du1WH71h6j3TAirVGoWndCc,6473
+ esprit/utils.py,sha256=QeBB8Nkh5w_Es5NQNAY_2Zps9fCAsbs88FQmPbtlWKQ,3591
+ esprit_py-0.5.0.dist-info/LICENSE,sha256=DOcn7qpE6TsUEcakIsDDKm757jx5YlQ8fXDiED21P_w,1089
+ esprit_py-0.5.0.dist-info/METADATA,sha256=Yy5AYedoA_7FD1p7QGpoZ_D9KNghNH6O6gPCSERSoNc,3054
+ esprit_py-0.5.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+ esprit_py-0.5.0.dist-info/top_level.txt,sha256=aS9besFTZ4EYTsoBJVf3GaMjQtJLgLaK7WqAxSvGVdQ,7
+ esprit_py-0.5.0.dist-info/RECORD,,
esprit_py-0.5.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.40.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
esprit_py-0.5.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ esprit