esprit-py 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of esprit-py might be problematic. Click here for more details.

esprit/__init__.py ADDED
File without changes
@@ -0,0 +1,25 @@
1
+ import requests
2
+ from bs4 import BeautifulSoup
3
+
4
+
5
class Absence:
    """Scrapes the student absence table from the ESPRIT intranet."""

    def __init__(self, session):
        # `session` must be an authenticated requests.Session (see Auth.login);
        # the page is only served to logged-in students.
        self.url = "https://esprit-tn.com/ESPOnline/Etudiants/absenceetud.aspx"
        self.session = session

    def get_absences(self):
        """Return the absence table as a list of rows, header row first.

        Returns:
            list[list[str]] | None: rows of cell text, or None when the page
            does not look like the absence page (e.g. session expired) or the
            grid is missing.
        """
        response = self.session.get(self.url)
        soup = BeautifulSoup(response.text, 'html.parser')

        # Sanity marker: the page is only valid when <strong>Absence</strong>
        # exists. `string=` replaces the deprecated `text=` find() argument.
        strong_tag = soup.find('strong', string='Absence')
        if strong_tag is None:
            print("The page does not contain the expected text.")
            return None

        table = soup.find('table', {'id': 'ContentPlaceHolder1_GridView2'})
        if table is None:
            # Guard: marker present but grid absent — the original would have
            # raised AttributeError on `None.find_all`.
            print("The page does not contain the expected text.")
            return None

        rows = table.find_all('tr')
        headers = [cell.text.strip() for cell in rows[0].find_all('th')]
        return [headers] + [[cell.text.strip() for cell in row.find_all('td')]
                            for row in rows[1:]]  # skip header row
esprit/auth.py ADDED
@@ -0,0 +1,49 @@
1
+ import requests
2
+
3
+
4
class Auth:
    """Two-step ASP.NET WebForms login for esprit-tn.com.

    The site's login form posts the student id first ("Suivant"), then the
    password ("Connexion"); both posts must share one session so the server
    can correlate them via cookies.
    """

    def __init__(self, session=None):
        # Login page URL (ASP.NET WebForms endpoint).
        self.login_url = "https://esprit-tn.com/esponline/online/default.aspx"
        # Reuse the caller's session (shared cookies) or start a fresh one.
        self.session = session if session else requests.session()

    def login(self, id, password):
        """Log in with the student *id* and *password*.

        Returns the authenticated session on success; prints a message and
        implicitly returns None on failure.

        NOTE(review): `__VIEWSTATE` / `__EVENTVALIDATION` below are hard-coded
        snapshots of server-rendered form state. They will break whenever the
        site regenerates them — consider fetching the page and scraping fresh
        tokens before each post. TODO confirm against the live site.
        """
        # Step 1: submit the student id ("Suivant" = next).
        id_payload = {
            '__EVENTTARGET': '',
            '__EVENTARGUMENT': '',
            '__VIEWSTATE': '/wEPDwUJNDE1NjEwODA3D2QWAmYPZBYCAgMPZBYCAgUPDxYCHgRUZXh0BQkyMDIzLzIwMjRkZBgBBR5fX0NvbnRyb2xzUmVxdWlyZVBvc3RCYWNrS2V5X18WAQUSY3RsMDAkSW1hZ2VCdXR0b24x4A+yHAaVbbd+c/7zRnwYiufwfrp/gfS8JKpvS+xXvpE=',
            '__VIEWSTATEGENERATOR': '717FCBFE',
            '__EVENTVALIDATION': '/wEdAA3BKAYcHauA5ahZijRjSsR/D4zZrxX92uOlyIx1SyGTQokHj7KsGQZ9KI/q0cgR79eMO7fmjkJSfq6Zbgk2kTWn5BPdHG87XtyblNclsuAS8LvwPnslbtZbTzH+LM3KrmKoScikkrtCyMBYLZBZxv2YCNTGu6fpAlK5HiRhQ3QX7uQuDNsn18Vb/yPhT9ZPmVoNeSKFy2zxLVV4+zExdQxF5O2yeRHTM5Q6txDv+t953Rsahgpohlzzax1rmqU36I8bifdujSibODz2lHN+RHz6gNEqtVw0ulNZz52C7FdPSyEa0/J8qJqqEgP2sogExFA=',
            'ctl00$ContentPlaceHolder1$TextBox1': '',
            'ctl00$ContentPlaceHolder1$TextBox5': '',
            'ctl00$ContentPlaceHolder1$TextBox6': '',
            'ctl00$ContentPlaceHolder1$TextBox3': id,
            'ctl00$ContentPlaceHolder1$Button3': 'Suivant',
            'ctl00$ContentPlaceHolder1$TextBox4': '',
            'ctl00$ContentPlaceHolder1$pass_parent': '',
        }

        # Response is only needed for its session cookies, not its body.
        response_id = self.session.post(self.login_url, data=id_payload)

        # Step 2: submit the password ("Connexion" = sign in).
        password_payload = {
            '__EVENTTARGET': '',
            '__EVENTARGUMENT': '',
            '__VIEWSTATE': '/wEPDwUJNDE1NjEwODA3D2QWAmYPZBYCAgMPZBYEAgUPDxYCHgRUZXh0BQkyMDIzLzIwMjRkZAIJD2QWAgIQD2QWEAIBDw8WAh4HVmlzaWJsZWhkZAIDDw8WBB8ABQoyMjFKTVQ1MzI2HwFoZGQCBw8PFgIfAWdkZAIJDw8WAh8BZ2RkAgsPDxYCHgdFbmFibGVkZ2RkAg0PDxYCHwFnZGQCDw8PFgIfAWdkZAIRDw8WAh8BaGRkGAEFHl9fQ29udHJvbHNSZXF1aXJlUG9zdEJhY2tLZXlfXxYBBRJjdGwwMCRJbWFnZUJ1dHRvbjF0JCLryb54s4inGLRx9VnEDk2ACOOB+Q8HNhb+Z6hPjQ==',
            '__VIEWSTATEGENERATOR': '717FCBFE',
            '__EVENTVALIDATION': '/wEdAA6E6Tm89lU7S/3iqJUOatsjD4zZrxX92uOlyIx1SyGTQokHj7KsGQZ9KI/q0cgR79eMO7fmjkJSfq6Zbgk2kTWn5BPdHG87XtyblNclsuAS8LvwPnslbtZbTzH+LM3KrmKoScikkrtCyMBYLZBZxv2Y4YHt2yH9TCYlNrTCCQccHuaXknurQIHyJEMAivskpdkfOLtcwEziInaQqEgDH0GiDXkihcts8S1VePsxMXUMReTtsnkR0zOUOrcQ7/rfed0bGoYKaIZc82sda5qlN+iPG4n3bo0omzg89pRzfkR8+mvbAUFWGOWJTqU2Q6L6lue8OojTbFO8vtwsRzaPKiZW',
            'ctl00$ContentPlaceHolder1$TextBox1': '',
            'ctl00$ContentPlaceHolder1$TextBox5': '',
            'ctl00$ContentPlaceHolder1$TextBox6': '',
            'ctl00$ContentPlaceHolder1$TextBox7': password,
            'ctl00$ContentPlaceHolder1$ButtonEtudiant': 'Connexion',
            'ctl00$ContentPlaceHolder1$TextBox4': '',
            'ctl00$ContentPlaceHolder1$pass_parent': '',
        }

        response_password = self.session.post(
            self.login_url, data=password_payload)

        # The student dashboard greets with this French sentence
        # ("You can consult in this space:") — used as the success marker.
        if 'Vous pouvez consulter dans cet espace :' in response_password.text:
            print("Login successful!")
            return self.session
        else:
            print("Login failed.")
esprit/credit.py ADDED
@@ -0,0 +1,26 @@
1
+ import requests
2
+ from bs4 import BeautifulSoup
3
+
4
+
5
class Credit:
    """Scrapes the student credit history table from the ESPRIT intranet."""

    def __init__(self, session):
        # `session` must be an authenticated requests.Session (see Auth.login).
        self.url = "https://esprit-tn.com/ESPOnline/Etudiants/Historique_Cr%C3%A9dit.aspx"
        self.session = session

    def get_credits(self):
        """Return the credit-history table as a list of rows, header row first.

        Returns:
            list[list[str]] | None: rows of cell text, or None when the page
            does not look like the credit-history page or the grid is missing.
        """
        response = self.session.get(self.url)
        soup = BeautifulSoup(response.text, 'html.parser')

        # Sanity marker: the exact (space-padded) page title must be present.
        # `string=` replaces the deprecated `text=` find() argument.
        span_tag = soup.find(
            'span', {'class': 'style5'}, string=' Historique des Crédits ')
        if span_tag is None:
            print("The page does not contain the expected text.")
            return None

        table = soup.find('table', {'id': 'ContentPlaceHolder1_GridView1'})
        if table is None:
            # Guard: marker present but grid absent — the original would have
            # raised AttributeError on `None.find_all`.
            print("The page does not contain the expected text.")
            return None

        rows = table.find_all('tr')
        headers = [cell.text.strip() for cell in rows[0].find_all('th')]
        return [headers] + [[cell.text.strip() for cell in row.find_all('td')]
                            for row in rows[1:]]  # skip header row
esprit/esprit.py ADDED
@@ -0,0 +1,41 @@
1
+ import requests
2
+ from auth import Auth
3
+ from grade_scrape import Grade
4
+ from absence_scrape import Absence
5
+ from time_schedule_scrape import TimeSchedule
6
+ from credit import Credit
7
+
8
+
9
class Esprit:
    """Facade over authentication and the individual scrapers.

    All components share a single requests.Session so that the cookies
    obtained by ``login`` authenticate every subsequent scrape.
    """

    def __init__(self):
        self.session = requests.Session()
        self.auth = Auth(self.session)
        # Fix: the original assigned `grade_scrape` twice; once is enough.
        self.grade_scrape = Grade(self.session)
        self.absence_scrape = Absence(self.session)
        self.time_schedule_scrape = TimeSchedule(self.session)
        self.credit = Credit(self.session)

    def login(self, username, password):
        """Authenticate; on success the shared session carries the cookies."""
        return self.auth.login(username, password)

    def get_grades(self):
        """Return the grades table (see Grade.get_grades)."""
        return self.grade_scrape.get_grades()

    def get_absences(self):
        """Return the absences table (see Absence.get_absences)."""
        return self.absence_scrape.get_absences()

    def get_table_schedules(self):
        """Return all timetable rows (see TimeSchedule.get_table_schedules)."""
        return self.time_schedule_scrape.get_table_schedules()

    def get_last_week_schedule(self):
        """Return the most recent weekly schedule row, or None."""
        return self.time_schedule_scrape.get_last_week_schedule()

    def download_files(self, schedule):
        """Download the file behind *schedule*; returns the local path."""
        return self.time_schedule_scrape.download_files(schedule)

    def get_class_week_schedule(self, file_path, class_name):
        """Extract the PDF page for *class_name* from *file_path*."""
        return self.time_schedule_scrape.get_class_week_schedule(file_path, class_name)

    def get_credits(self):
        """Return the credit-history table (see Credit.get_credits)."""
        return self.credit.get_credits()
esprit/grade_scrape.py ADDED
@@ -0,0 +1,25 @@
1
+ import requests
2
+ from bs4 import BeautifulSoup
3
+
4
+
5
class Grade:
    """Scrapes the student grades table from the ESPRIT intranet."""

    def __init__(self, session):
        # `session` must be an authenticated requests.Session (see Auth.login).
        self.url = "https://esprit-tn.com/ESPOnline/Etudiants/Resultat2021.aspx"
        self.session = session

    def get_grades(self):
        """Return the grades table as a list of rows, header row first.

        Returns:
            list[list[str]] | None: rows of cell text, or None when the page
            does not look like the grades page or the grid is missing.
        """
        response = self.session.get(self.url)
        soup = BeautifulSoup(response.text, 'html.parser')

        # Sanity marker: the exact (space-padded) heading must be present.
        # `string=` replaces the deprecated `text=` find() argument.
        h1_tag = soup.find('h1', string=' Notes Des Modules ')
        if h1_tag is None:
            print("The page does not contain the expected text.")
            return None

        table = soup.find('table', {'id': 'ContentPlaceHolder1_GridView1'})
        if table is None:
            # Guard: marker present but grid absent — the original would have
            # raised AttributeError on `None.find_all`.
            print("The page does not contain the expected text.")
            return None

        rows = table.find_all('tr')
        headers = [cell.text.strip() for cell in rows[0].find_all('th')]
        return [headers] + [[cell.text.strip() for cell in row.find_all('td')]
                            for row in rows[1:]]  # skip header row
@@ -0,0 +1,111 @@
1
+ import requests
2
+ from bs4 import BeautifulSoup
3
+ from datetime import datetime
4
+ import re
5
+ import os
6
+ from PyPDF2 import PdfReader, PdfWriter
7
+
8
+
9
class TimeSchedule:
    """Scrapes the timetable grid and downloads weekly schedule files."""

    def __init__(self, session):
        # `session` must be an authenticated requests.Session (see Auth.login).
        self.url = "https://esprit-tn.com/ESPOnline/Etudiants/Emplois.aspx"
        self.session = session

    def get_table_schedules(self):
        """Return the timetable grid as a list of rows (header row skipped).

        Each cell keeps its text; when a cell contains a link, the href is
        appended after a space (used later by download_files). Returns None
        when the page marker or the grid is missing.
        """
        response = self.session.get(self.url)
        soup = BeautifulSoup(response.text, 'html.parser')

        # Sanity marker for the timetable page ("Emploi du temps").
        # `string=` replaces the deprecated `text=` find() argument.
        strong_tag = soup.find('strong', string='Emploi du temps')
        if strong_tag is None:
            print("The page does not contain the expected text.")
            return None

        table = soup.find('table', {'id': 'ContentPlaceHolder1_GridView1'})
        if table is None:
            # Guard: marker present but grid absent — the original would have
            # raised AttributeError on `None.find_all`.
            print("The page does not contain the expected text.")
            return None

        time_schedules = []
        for row in table.find_all('tr')[1:]:  # skip header row
            row_data = []
            for cell in row.find_all('td'):
                cell_text = cell.text.strip()
                cell_link = cell.find('a')
                if cell_link is not None:
                    cell_text += ' ' + cell_link.get('href')
                row_data.append(cell_text)
            time_schedules.append(row_data)
        return time_schedules

    def get_last_week_schedule(self):
        """Return the weekly schedule row with the most recent date.

        Rows whose first cell contains "Semaine" and a dd-mm-YYYY date are
        considered; returns None when no such row exists (or the grid could
        not be scraped).
        """
        time_schedules = self.get_table_schedules()
        if time_schedules is None:
            return None

        # Only weekly rows ("Semaine" = week) carry a dated file name.
        week_schedules = [
            schedule for schedule in time_schedules if "Semaine" in schedule[0]]
        date_format = "%d-%m-%Y"
        dates_and_schedules = []

        for schedule in week_schedules:
            # The file name embeds the week's date as dd-mm-YYYY.
            match = re.search(r"\d{2}-\d{2}-\d{4}", schedule[0])
            if match:
                date = datetime.strptime(match.group(), date_format)
                dates_and_schedules.append((date, schedule))

        # Most recent date wins.
        dates_and_schedules.sort(key=lambda x: x[0])
        return dates_and_schedules[-1][1] if dates_and_schedules else None

    def download_files(self, schedule):
        """Download the file behind *schedule* by replaying the ASP.NET
        postback; returns the local file path (saved in the CWD).
        """
        response = self.session.get(self.url)
        soup = BeautifulSoup(response.text, 'html.parser')

        # Fresh __VIEWSTATE / __EVENTVALIDATION tokens are required for the
        # postback to be accepted.
        viewstate = soup.find('input', {'id': '__VIEWSTATE'})['value']
        eventvalidation = soup.find(
            'input', {'id': '__EVENTVALIDATION'})['value']

        # schedule[1] holds a __doPostBack('target', ...) href; the event
        # target is the first single-quoted token. NOTE(review): fragile if
        # the site changes its markup — verify against a live row.
        event_target = schedule[1].split("'")[1]

        post_data = {
            '__EVENTTARGET': event_target,
            '__EVENTARGUMENT': '',
            '__VIEWSTATE': viewstate,
            '__EVENTVALIDATION': eventvalidation,
        }
        file_response = self.session.post(self.url, data=post_data)

        # Save under the row's display name in the current directory.
        file_path = os.path.join(os.getcwd(), schedule[0])
        with open(file_path, 'wb') as f:
            f.write(file_response.content)

        return file_path

    def get_class_week_schedule(self, file_path, class_name):
        """Extract the first PDF page mentioning *class_name*.

        Writes that page to "<class_name>.pdf" in the CWD and returns the
        new path, or returns None when no page matches.
        """
        # All page access stays inside the `with` block: PdfReader reads the
        # underlying file lazily, so pages must not be touched after close.
        with open(file_path, "rb") as file:
            reader = PdfReader(file)
            for page in reader.pages:
                if class_name in page.extract_text():
                    writer = PdfWriter()
                    writer.add_page(page)
                    new_file_path = f"{class_name}.pdf"
                    with open(new_file_path, "wb") as output_pdf:
                        writer.write(output_pdf)
                    return new_file_path
        return None
@@ -0,0 +1,23 @@
1
+ Metadata-Version: 2.1
2
+ Name: esprit-py
3
+ Version: 0.1.0
4
+ Summary: A Python library for interacting with data from esprit-tn.com
5
+ Author: Lime1 (Aymen Hmani)
6
+ Author-email: <everpadd4@gmail.com>
7
+ Keywords: python,api
8
+ Classifier: Development Status :: 1 - Planning
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Operating System :: Unix
12
+ Classifier: Operating System :: MacOS :: MacOS X
13
+ Classifier: Operating System :: Microsoft :: Windows
14
+ Description-Content-Type: text/markdown
15
+ Requires-Dist: requests
16
+ Requires-Dist: beautifulsoup4
17
+ Requires-Dist: PyPDF2
18
+
19
+
20
+ # esprit-api
21
+
22
+ Missing features:
23
+ - [ ] Download any timetable from the ESPRIT website, not only the most recent one
@@ -0,0 +1,11 @@
1
+ esprit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ esprit/absence_scrape.py,sha256=y7v9JS4ZB3k97KBuM5e2VlesBS8M-siqq0KnlVf1QkA,977
3
+ esprit/auth.py,sha256=He2c7XF8VlzKjQ4CsuTtZfarNsmjPwJszoNfVV5Cgm4,3134
4
+ esprit/credit.py,sha256=isQV2YJQyTtEumfok3sATpiFzsxYM11s6--UNsFkNZQ,1067
5
+ esprit/esprit.py,sha256=UuLK3NRGYBYkjQTIPkQjGJx8UWb5m7hRo3Xk-LKNgVo,1368
6
+ esprit/grade_scrape.py,sha256=k5LjhX2xeoutWpt0HoVM-uwAbRoVE0BSH49uyaxKAhg,974
7
+ esprit/time_schedule_scrape.py,sha256=fCo3HD2xLCPKmibVVEqK16m4oDiWKtHku4MyZZBeSk8,4124
8
+ esprit_py-0.1.0.dist-info/METADATA,sha256=F-TACsJU92OvIln0Vp5gOImT8rHx9Yd1Y5c0-dyNTOg,736
9
+ esprit_py-0.1.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
10
+ esprit_py-0.1.0.dist-info/top_level.txt,sha256=aS9besFTZ4EYTsoBJVf3GaMjQtJLgLaK7WqAxSvGVdQ,7
11
+ esprit_py-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.40.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1 @@
1
+ esprit