quastt-show 0.1.2__py3-none-any.whl → 0.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quastt_show/__init__.py +4 -1
- quastt_show/game.py +2 -2
- quastt_show/logger.py +241 -0
- quastt_show/monitoring.py +333 -0
- quastt_show/nothing.py +0 -0
- quastt_show/resources.py +319 -0
- quastt_show-0.3.2.dist-info/METADATA +15 -0
- quastt_show-0.3.2.dist-info/RECORD +11 -0
- quastt_show-0.1.2.dist-info/METADATA +0 -11
- quastt_show-0.1.2.dist-info/RECORD +0 -7
- {quastt_show-0.1.2.dist-info → quastt_show-0.3.2.dist-info}/WHEEL +0 -0
- {quastt_show-0.1.2.dist-info → quastt_show-0.3.2.dist-info}/entry_points.txt +0 -0
- {quastt_show-0.1.2.dist-info → quastt_show-0.3.2.dist-info}/top_level.txt +0 -0
quastt_show/__init__.py
CHANGED
quastt_show/game.py
CHANGED
@@ -78,7 +78,7 @@ class RoundedButton(tk.Canvas):
 # Main Game class
 # ------------------------------

-class
+class TriviaShow:
     def __init__(self, difficulty="medium"):
         self.difficulty = difficulty
         self.app = TriviaApp(difficulty)
@@ -384,5 +384,5 @@ class StatsFrame(tk.Frame):
 # ------------------------------

 if __name__ == "__main__":
-    game =
+    game = TriviaShow(difficulty="medium")
     game.run()
quastt_show/logger.py
ADDED
@@ -0,0 +1,241 @@
import time
import traceback
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock


class SimpleQLogger:
    """
    High-performance logger & profiler for quastt_show.
    - Profiles functions (decorator) with precise execution timing (perf_counter)
    - Logs messages (INFO, DEBUG, ERROR) with timestamps
    - Supports running several profiled functions in parallel (n_jobs)
    - Full reporting of errors, timings, and messages
    """

    LEVELS = {"DEBUG": 10, "INFO": 20, "ERROR": 30}

    def __init__(
        self,
        logfile="simpleqlogger.log",
        n_jobs=1,
        log_level="INFO",
        log_to_console=True,
        max_log_memory=1000,
    ):
        self.logfile = logfile
        self.n_jobs = n_jobs if n_jobs != 0 else 1
        self.log_level = log_level.upper() if log_level.upper() in self.LEVELS else "INFO"
        self.log_to_console = log_to_console
        self.max_log_memory = max_log_memory  # max number of messages kept in memory

        self._lock = Lock()
        self._logs = []
        self._errors = []
        self._profiles = {}  # {func_name: [exec_times]}

    # ---------------- Timestamp ----------------

    def _timestamp(self):
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

    # ---------------- Logging ----------------

    def _should_log(self, level):
        return self.LEVELS[level] >= self.LEVELS[self.log_level]

    def _write_to_file(self, entry):
        try:
            with open(self.logfile, "a", encoding="utf-8") as f:
                f.write(entry + "\n")
        except Exception as e:
            print(f"[SimpleQLogger ERROR] Cannot write to file: {e}")

    def _add_log(self, entry, level):
        with self._lock:
            if len(self._logs) >= self.max_log_memory:
                self._logs.pop(0)
            self._logs.append((level, entry))
            self._write_to_file(entry)
            if self.log_to_console and level != "DEBUG":
                print(entry)

    def debug(self, message):
        if self._should_log("DEBUG"):
            self._add_log(f"[{self._timestamp()}] DEBUG: {message}", "DEBUG")

    def info(self, message):
        if self._should_log("INFO"):
            self._add_log(f"[{self._timestamp()}] INFO: {message}", "INFO")

    def error(self, message, exc: Exception = None):
        ts = self._timestamp()
        base_msg = f"[{ts}] ERROR: {message}"
        if exc:
            tb = traceback.format_exc()
            entry = f"{base_msg}\nException:\n{tb}"
        else:
            entry = base_msg
        with self._lock:
            self._errors.append(entry)
        self._add_log(entry, "ERROR")

    # ---------------- Function profiling ----------------

    def profile(self, func=None):
        """
        Decorator for profiling execution time.
        Can be used with or without parentheses.
        """

        def decorator(f):
            def wrapper(*args, **kwargs):
                start = time.perf_counter()
                try:
                    result = f(*args, **kwargs)
                except Exception as e:
                    self.error(f"Exception in '{f.__name__}'", e)
                    raise
                elapsed = time.perf_counter() - start
                with self._lock:
                    if f.__name__ not in self._profiles:
                        self._profiles[f.__name__] = []
                    self._profiles[f.__name__].append(elapsed)
                self.debug(f"Func '{f.__name__}' exec time: {elapsed:.6f}s")
                return result

            return wrapper

        if func:
            return decorator(func)
        return decorator

    # -------------- Parallel profiling (ThreadPool) --------------

    def profile_parallel(self, funcs_with_args):
        """
        Runs functions in parallel and profiles each execution.
        funcs_with_args: list of tuples (func, args, kwargs)
        Returns the list of results.
        """
        n_workers = min(self.n_jobs, len(funcs_with_args))
        results = [None] * len(funcs_with_args)
        with ThreadPoolExecutor(max_workers=n_workers) as executor:
            futures = {
                executor.submit(self._profile_wrapper, f, *a, **kw): idx
                for idx, (f, a, kw) in enumerate(funcs_with_args)
            }
            for future in as_completed(futures):
                idx = futures[future]
                try:
                    results[idx] = future.result()
                except Exception as e:
                    self.error(f"Error in parallel func idx={idx}", e)
                    results[idx] = None
        return results

    def _profile_wrapper(self, func, *args, **kwargs):
        """
        Internal wrapper for profiling parallel executions.
        """
        start = time.perf_counter()
        try:
            result = func(*args, **kwargs)
        except Exception as e:
            self.error(f"Exception in parallel func '{func.__name__}'", e)
            raise
        elapsed = time.perf_counter() - start
        with self._lock:
            if func.__name__ not in self._profiles:
                self._profiles[func.__name__] = []
            self._profiles[func.__name__].append(elapsed)
        self.debug(f"[Parallel] Func '{func.__name__}' exec time: {elapsed:.6f}s")
        return result

    # ---------------- Reports ----------------

    def get_profile_report(self):
        lines = ["=== Profile Report ==="]
        with self._lock:
            if not self._profiles:
                lines.append("No profiling data.")
            else:
                for f, times in self._profiles.items():
                    count = len(times)
                    total = sum(times)
                    avg = total / count if count > 0 else 0
                    mn = min(times) if times else 0
                    mx = max(times) if times else 0
                    lines.append(
                        f"{f}: calls={count}, total={total:.6f}s, avg={avg:.6f}s, min={mn:.6f}s, max={mx:.6f}s"
                    )
        return "\n".join(lines)

    def get_error_report(self):
        lines = ["=== Error Report ==="]
        with self._lock:
            if not self._errors:
                lines.append("No errors logged.")
            else:
                lines.extend(self._errors)
        return "\n".join(lines)

    def get_log_report(self, level_filter=None):
        """
        level_filter: None (all), "INFO", "DEBUG", "ERROR"
        """
        lines = ["=== Log Report ==="]
        with self._lock:
            filtered = (
                [entry for lvl, entry in self._logs if lvl == level_filter]
                if level_filter
                else [entry for _, entry in self._logs]
            )
            if not filtered:
                lines.append("No logs found.")
            else:
                lines.extend(filtered)
        return "\n".join(lines)

    # ---------------- Reset ----------------

    def clear(self):
        with self._lock:
            self._logs.clear()
            self._errors.clear()
            self._profiles.clear()
        self.info("Logger cleared all data.")


# --------------------- Usage example ---------------------

if __name__ == "__main__":
    logger = SimpleQLogger(n_jobs=4, log_level="DEBUG", log_to_console=True)

    @logger.profile
    def slow_square(x):
        time.sleep(0.1)
        return x * x

    @logger.profile
    def might_fail(x):
        time.sleep(0.05)
        if x == 3:
            raise ValueError("Intentional error!")
        return x + 10

    # Run the functions normally
    for i in range(5):
        try:
            print(f"slow_square({i}) = {slow_square(i)}")
        except Exception as e:
            print(f"Caught error: {e}")

    # Run functions in parallel
    funcs = [(might_fail, (i,), {}) for i in range(5)]
    results = logger.profile_parallel(funcs)
    print("Parallel results:", results)

    print("\n" + logger.get_profile_report())
    print("\n" + logger.get_error_report())
    print("\n" + logger.get_log_report())
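For orientation, here is a minimal usage sketch of the new logger module. It is written for this review, not taken from the package, and assumes quastt_show 0.3.2 is installed and importable as quastt_show.logger (per the RECORD listing further below).

import time
from quastt_show.logger import SimpleQLogger

logger = SimpleQLogger(logfile="app.log", log_level="DEBUG", n_jobs=2)

@logger.profile              # bare decorator form; @logger.profile() also works
def square(n):
    time.sleep(0.01)         # stand-in for real work
    return n * n

print([square(i) for i in range(3)])

# profile_parallel() takes (func, args, kwargs) tuples. Note that profile()
# does not apply functools.wraps, so passing an already-decorated function
# here would record its timings under the name 'wrapper'.
def cube(n):
    return n ** 3

print(logger.profile_parallel([(cube, (i,), {}) for i in range(4)]))  # [0, 1, 8, 27]
print(logger.get_profile_report())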
quastt_show/monitoring.py
ADDED
@@ -0,0 +1,333 @@
import threading
import time
import os
import sys
import traceback
from datetime import datetime

# Optional GPU check - we'll try to import pynvml, if fails skip GPU monitoring
try:
    import ctypes
    import platform
    if platform.system() == 'Windows':
        _lib_name = "nvml.dll"
    elif platform.system() == 'Linux':
        _lib_name = "libnvidia-ml.so.1"
    else:
        _lib_name = None
    if _lib_name:
        from ctypes import byref, c_uint, c_ulonglong, c_char_p, c_int
        _nvml = ctypes.cdll.LoadLibrary(_lib_name)

        # NVML constants & functions:
        NVML_SUCCESS = 0

        class nvmlUtilization_t(ctypes.Structure):
            _fields_ = [("gpu", c_uint), ("memory", c_uint)]

        def nvml_init():
            return _nvml.nvmlInit()

        def nvml_shutdown():
            return _nvml.nvmlShutdown()

        def nvml_device_get_handle_by_index(index, handle_ptr):
            return _nvml.nvmlDeviceGetHandleByIndex(index, handle_ptr)

        def nvml_device_get_utilization_rates(handle, util_ptr):
            return _nvml.nvmlDeviceGetUtilizationRates(handle, util_ptr)

        def nvml_device_get_memory_info(handle, mem_info_ptr):
            return _nvml.nvmlDeviceGetMemoryInfo(handle, mem_info_ptr)

        class nvmlMemory_t(ctypes.Structure):
            _fields_ = [("total", c_ulonglong), ("free", c_ulonglong), ("used", c_ulonglong)]
    else:
        _nvml = None

except Exception:
    _nvml = None


def get_cpu_usage_percent():
    """Return CPU usage percent over 0.1 seconds"""
    try:
        if sys.platform == "win32":
            import psutil
            return psutil.cpu_percent(interval=0.1)
        else:
            # Linux/Mac - approximate by reading /proc/stat or use os.times
            import subprocess
            output = subprocess.check_output("top -bn2 | grep 'Cpu(s)'", shell=True).decode()
            lines = output.strip().split("\n")
            if len(lines) < 2:
                return None
            # Parse 2nd sample line
            line = lines[1]
            # format: Cpu(s): 3.5%us, 1.1%sy, 0.0%ni, 94.9%id, 0.5%wa, 0.0%hi, 0.0%si, 0.0%st
            parts = line.split(',')
            for part in parts:
                if 'id' in part:
                    idle = float(part.strip().split('%')[0])
                    usage = 100.0 - idle
                    return round(usage, 2)
            return None
    except Exception:
        return None


def get_ram_usage_mb():
    """Return RAM used in MB"""
    try:
        if sys.platform == "win32":
            import psutil
            mem = psutil.virtual_memory()
            used_mb = (mem.total - mem.available) / (1024 * 1024)
            return round(used_mb, 2)
        else:
            with open('/proc/meminfo', 'r') as f:
                meminfo = f.read()
            lines = meminfo.split('\n')
            mem_total_kb = None
            mem_free_kb = None
            buffers_kb = None
            cached_kb = None
            for line in lines:
                if line.startswith("MemTotal:"):
                    mem_total_kb = int(line.split()[1])
                elif line.startswith("MemFree:"):
                    mem_free_kb = int(line.split()[1])
                elif line.startswith("Buffers:"):
                    buffers_kb = int(line.split()[1])
                elif line.startswith("Cached:"):
                    cached_kb = int(line.split()[1])
            if None in (mem_total_kb, mem_free_kb, buffers_kb, cached_kb):
                return None
            used_kb = mem_total_kb - (mem_free_kb + buffers_kb + cached_kb)
            used_mb = used_kb / 1024
            return round(used_mb, 2)
    except Exception:
        return None


class ResourceMonitor:
    """
    ResourceMonitor: Monitors CPU, RAM and optionally GPU usage with configurable thresholds,
    periodic checks, logging, and customizable alerts.

    Parameters:
    ----------
    cpu_threshold : float (0-100)
        CPU usage percentage above which alert triggers (default 90.0)
    ram_threshold_mb : float
        RAM used in MB above which alert triggers (default 4000.0 MB)
    gpu_threshold : float (0-100)
        GPU usage percentage above which alert triggers (default 90.0)
    monitor_interval_sec : float
        How often to check resource usage in seconds (default 5.0)
    log_to_console : bool
        If True, print logs to console (default True)
    log_to_file : bool
        If True, save logs to file (default False)
    log_file_path : str
        File path for logs (default "resource_monitor.log")
    max_log_lines : int
        Maximum lines to keep in log file before rotation (default 10000)
    alert_callback : callable
        Function called on alert with signature func(resource:str, usage:float)
    stop_on_alert : bool
        If True, stops monitoring on first alert (default False)
    n_jobs : int
        Number of threads to use for parallel monitoring (default 1)

    Usage:
    ------
    monitor = ResourceMonitor(cpu_threshold=80, ram_threshold_mb=3000,
                              monitor_interval_sec=2, log_to_console=True,
                              stop_on_alert=True)
    monitor.start()
    # ... do work ...
    monitor.stop()
    """

    def __init__(self,
                 cpu_threshold=90.0,
                 ram_threshold_mb=4000.0,
                 gpu_threshold=90.0,
                 monitor_interval_sec=5.0,
                 log_to_console=True,
                 log_to_file=False,
                 log_file_path="resource_monitor.log",
                 max_log_lines=10000,
                 alert_callback=None,
                 stop_on_alert=False,
                 n_jobs=1):
        self.cpu_threshold = cpu_threshold
        self.ram_threshold_mb = ram_threshold_mb
        self.gpu_threshold = gpu_threshold
        self.monitor_interval_sec = monitor_interval_sec
        self.log_to_console = log_to_console
        self.log_to_file = log_to_file
        self.log_file_path = log_file_path
        self.max_log_lines = max_log_lines
        self.alert_callback = alert_callback
        self.stop_on_alert = stop_on_alert
        self.n_jobs = max(1, n_jobs)

        self._stop_event = threading.Event()
        self._thread = None
        self._log_lines = []

        # GPU related
        self.gpu_available = False
        self._gpu_handle = None
        self._init_gpu()

    def _init_gpu(self):
        if _nvml:
            try:
                ret = nvml_init()
                if ret == NVML_SUCCESS:
                    # get handle for GPU 0 (first GPU)
                    handle = ctypes.c_void_p()
                    if nvml_device_get_handle_by_index(0, byref(handle)) == NVML_SUCCESS:
                        self.gpu_available = True
                        self._gpu_handle = handle
                    else:
                        self.gpu_available = False
                else:
                    self.gpu_available = False
            except Exception:
                self.gpu_available = False
        else:
            self.gpu_available = False

    def _log(self, msg):
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        full_msg = f"[{timestamp}] {msg}"
        self._log_lines.append(full_msg)
        if self.log_to_console:
            print(full_msg)
        if self.log_to_file:
            self._write_log_file(full_msg)

        # Rotate log lines if too long
        if len(self._log_lines) > self.max_log_lines:
            self._log_lines = self._log_lines[-self.max_log_lines:]

    def _write_log_file(self, line):
        try:
            with open(self.log_file_path, "a", encoding="utf-8") as f:
                f.write(line + "\n")
        except Exception:
            pass

    def _check_alerts(self, cpu_usage, ram_usage, gpu_usage):
        alert_triggered = False
        if cpu_usage is not None and cpu_usage > self.cpu_threshold:
            self._log(f"ALERT: CPU usage high: {cpu_usage}% > threshold {self.cpu_threshold}%")
            if self.alert_callback:
                self.alert_callback("CPU", cpu_usage)
            alert_triggered = True

        if ram_usage is not None and ram_usage > self.ram_threshold_mb:
            self._log(f"ALERT: RAM usage high: {ram_usage}MB > threshold {self.ram_threshold_mb}MB")
            if self.alert_callback:
                self.alert_callback("RAM", ram_usage)
            alert_triggered = True

        if self.gpu_available and gpu_usage is not None and gpu_usage > self.gpu_threshold:
            self._log(f"ALERT: GPU usage high: {gpu_usage}% > threshold {self.gpu_threshold}%")
            if self.alert_callback:
                self.alert_callback("GPU", gpu_usage)
            alert_triggered = True

        if alert_triggered and self.stop_on_alert:
            self._log("Stopping monitor due to alert.")
            self.stop()

    def _get_gpu_usage(self):
        if not self.gpu_available or not self._gpu_handle:
            return None
        try:
            util = nvmlUtilization_t()
            if nvml_device_get_utilization_rates(self._gpu_handle, byref(util)) == NVML_SUCCESS:
                return util.gpu
        except Exception:
            return None
        return None

    def _snapshot(self):
        cpu = get_cpu_usage_percent()
        ram = get_ram_usage_mb()
        gpu = self._get_gpu_usage()
        return cpu, ram, gpu

    def _monitor_loop(self):
        self._log("ResourceMonitor started.")
        while not self._stop_event.is_set():
            cpu, ram, gpu = self._snapshot()
            self._log(f"Snapshot - CPU: {cpu}%, RAM: {ram}MB, GPU: {gpu}%")
            self._check_alerts(cpu, ram, gpu)
            time.sleep(self.monitor_interval_sec)
        self._log("ResourceMonitor stopped.")

    def start(self):
        """Start monitoring resources in a background thread."""
        if self._thread and self._thread.is_alive():
            self._log("Monitor already running.")
            return
        self._stop_event.clear()
        if self.n_jobs == 1:
            self._thread = threading.Thread(target=self._monitor_loop, daemon=True)
            self._thread.start()
        else:
            # For multiple threads monitoring (e.g. for different resources)
            self._log(f"Starting {self.n_jobs} monitor threads.")
            self._threads = []
            for i in range(self.n_jobs):
                t = threading.Thread(target=self._monitor_loop, daemon=True)
                self._threads.append(t)
                t.start()

    def stop(self):
        """Stop monitoring."""
        self._stop_event.set()
        if self._thread:
            self._thread.join()
        if hasattr(self, "_threads"):
            for t in self._threads:
                t.join()

    def get_snapshot(self):
        """
        Return current usage snapshot as a dict:
        {'cpu': float, 'ram': float, 'gpu': float or None}
        """
        cpu, ram, gpu = self._snapshot()
        return {"cpu": cpu, "ram": ram, "gpu": gpu}

    def get_report(self):
        """Return a summary report string with last logged lines."""
        return "\n".join(self._log_lines[-20:])


if __name__ == "__main__":
    # Demo usage
    def alert_fn(resource, usage):
        print(f"*** ALERT callback: {resource} usage high: {usage}")

    monitor = ResourceMonitor(cpu_threshold=10,
                              ram_threshold_mb=500,
                              monitor_interval_sec=2,
                              alert_callback=alert_fn,
                              stop_on_alert=False,
                              log_to_console=True,
                              n_jobs=1)

    monitor.start()
    time.sleep(10)
    print("Snapshot:", monitor.get_snapshot())
    monitor.stop()
    print("Final Report:")
    print(monitor.get_report())
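A short sketch of wiring ResourceMonitor into an application, again written for this review rather than taken from the package; it assumes quastt_show 0.3.2 is installed (psutil is only needed on Windows, per the helpers above).

import time
from quastt_show.monitoring import ResourceMonitor

alerts = []

def on_alert(resource, usage):
    # Collect alerts instead of printing; signature func(resource, usage)
    # per the alert_callback documentation in the class docstring.
    alerts.append((resource, usage))

monitor = ResourceMonitor(cpu_threshold=85.0,
                          ram_threshold_mb=2048.0,
                          monitor_interval_sec=1.0,
                          log_to_console=False,
                          log_to_file=True,
                          alert_callback=on_alert)
monitor.start()                 # daemon thread runs _monitor_loop
time.sleep(5)                   # ... application work here ...
print(monitor.get_snapshot())   # e.g. {'cpu': 12.3, 'ram': 1875.4, 'gpu': None without NVML}
monitor.stop()
print("alerts:", alerts)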
quastt_show/nothing.py
ADDED
File without changes
quastt_show/resources.py
ADDED
@@ -0,0 +1,319 @@
import time
import traceback
import numpy as np

class SimpleNeuralNet:
    """
    A very simple feed-forward neural network for demonstration.
    Input: vectorized error features
    Output: vector representing categories/suggestions
    """
    def __init__(self, input_size, hidden_sizes, output_size, lr=0.01):
        self.lr = lr
        self.weights = []
        self.biases = []
        layer_sizes = [input_size] + hidden_sizes + [output_size]
        for i in range(len(layer_sizes) - 1):
            w = np.random.randn(layer_sizes[i], layer_sizes[i+1]) * 0.1
            b = np.zeros(layer_sizes[i+1])
            self.weights.append(w)
            self.biases.append(b)

    def relu(self, x):
        return np.maximum(0, x)

    def softmax(self, x):
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum(axis=-1, keepdims=True)

    def forward(self, x):
        self.zs = []
        self.activations = [x]
        a = x
        for i in range(len(self.weights) - 1):
            z = a @ self.weights[i] + self.biases[i]
            a = self.relu(z)
            self.zs.append(z)
            self.activations.append(a)
        # Output layer
        z = a @ self.weights[-1] + self.biases[-1]
        self.zs.append(z)
        a = self.softmax(z)
        self.activations.append(a)
        return a

    def predict(self, x):
        return self.forward(x)

    def train(self, x, y_true):
        """
        Simple one-step training (cross-entropy loss)
        x: input vector (numpy)
        y_true: one-hot vector for target
        """
        output = self.forward(x)
        loss = -np.sum(y_true * np.log(output + 1e-8))

        # Backpropagation
        delta = output - y_true  # derivative of loss wrt softmax input
        for i in reversed(range(len(self.weights))):
            a_prev = self.activations[i]
            dw = np.outer(a_prev, delta)
            db = delta

            self.weights[i] -= self.lr * dw
            self.biases[i] -= self.lr * db

            if i > 0:
                dz = delta @ self.weights[i].T
                delta = dz * (self.zs[i-1] > 0)  # relu derivative

        return loss


class LFMTracker:
    """
    Learning-From-Mistakes Tracker.
    Tracks and learns from errors/exceptions in user code,
    and provides detailed explanations and suggestions.
    """

    def __init__(self,
                 max_history=100,
                 error_vector_size=50,
                 hidden_sizes=[64, 32],
                 suggestion_categories=5,
                 learning_rate=0.01,
                 verbose=True):
        """
        Parameters:
        - max_history: max number of tracked errors to store
        - error_vector_size: size of input vector
        - hidden_sizes: list of hidden layer sizes
        - suggestion_categories: number of suggestion-type outputs
        - learning_rate: training rate
        - verbose: print tracking info
        """
        self.max_history = max_history
        self.error_vector_size = error_vector_size
        self.verbose = verbose
        self.history = []
        self.nn = SimpleNeuralNet(input_size=error_vector_size,
                                  hidden_sizes=hidden_sizes,
                                  output_size=suggestion_categories,
                                  lr=learning_rate)

    def _vectorize_error(self, error: Exception) -> np.ndarray:
        """
        Vectorizes the error into a fixed-size numerical vector.
        Uses traceback and error type information.
        """
        tb_str = ''.join(traceback.format_exception_only(type(error), error))
        tb_hash = hash(tb_str) % (10 ** 8)
        tb_vec = np.zeros(self.error_vector_size)

        # Simple vectorization: encode hash bits into vector
        for i in range(min(self.error_vector_size, 32)):
            tb_vec[i] = (tb_hash >> i) & 1

        # Add error type info (one hot for common types)
        error_types = ['ValueError', 'TypeError', 'IndexError', 'KeyError', 'ZeroDivisionError']
        for i, etype in enumerate(error_types):
            if etype == type(error).__name__:
                if i + 32 < self.error_vector_size:
                    tb_vec[i + 32] = 1

        return tb_vec

    def track_error(self, error: Exception, custom_msg: str = None):
        """
        Track an error occurrence and update model with feedback.
        """
        vec = self._vectorize_error(error)

        # Predict suggestions (softmax output)
        suggestions = self.nn.predict(vec)

        if self.verbose:
            print(f"[LFMTracker] Tracked error: {type(error).__name__}: {error}")
            print(f"[LFMTracker] suggestions (probabilities): {suggestions}")

        # Store history with timestamp and message
        if len(self.history) >= self.max_history:
            self.history.pop(0)
        self.history.append({'error': error, 'vector': vec, 'time': time.time(), 'msg': custom_msg})

        return suggestions

    def give_feedback(self, error: Exception, correct_category: int):
        """
        Train based on user feedback for the error category.
        correct_category: index of the correct suggestion category (0-based)
        """
        vec = self._vectorize_error(error)
        y_true = np.zeros(self.nn.biases[-1].shape)
        y_true[correct_category] = 1
        loss = self.nn.train(vec, y_true)
        if self.verbose:
            print(f"[LFMTracker] trained with feedback. Loss: {loss:.5f}")

    def report(self):
        """
        Prints a summary report of tracked errors.
        """
        print(f"\n[LFMTracker] Total errors tracked: {len(self.history)}")
        counts = {}
        for entry in self.history:
            etype = type(entry['error']).__name__
            counts[etype] = counts.get(etype, 0) + 1
        print("[LFMTracker] Error type counts:")
        for etype, count in counts.items():
            print(f"  {etype}: {count}")


class AutoRefactoringPrompter:
    """
    AutoRefactoringPrompter suggests refactorings for code smells and complexity,
    and can apply automated fixes (conceptual, not real code parsing).
    Uses a simple heuristic to rank suggestions.
    """

    def __init__(self,
                 max_function_length=50,
                 max_variable_name_length=20,
                 min_duplication_length=5,
                 max_duplicate_occurrences=3,
                 suggestion_categories=4,
                 hidden_sizes=[32, 16],
                 learning_rate=0.005,
                 verbose=True):
        """
        Parameters:
        - max_function_length: max allowed lines in a function before suggestion
        - max_variable_name_length: max allowed variable name length
        - min_duplication_length: min lines duplicated to suggest extraction
        - max_duplicate_occurrences: max duplicated occurrences to flag
        - suggestion_categories: number of refactor suggestions
        - hidden_sizes: hidden layer sizes for the AI model
        - learning_rate: training rate
        - verbose: print info
        """
        self.max_function_length = max_function_length
        self.max_variable_name_length = max_variable_name_length
        self.min_duplication_length = min_duplication_length
        self.max_duplicate_occurrences = max_duplicate_occurrences
        self.verbose = verbose
        self.nn = SimpleNeuralNet(input_size=5,
                                  hidden_sizes=hidden_sizes,
                                  output_size=suggestion_categories,
                                  lr=learning_rate)

    def analyze_code(self, code_str: str):
        """
        Analyzes a code string to detect smells:
        - long functions
        - long variable names
        - duplicated lines (simple count)
        Returns a dict with analysis data.
        """
        lines = code_str.split('\n')
        functions = self._extract_functions(lines)
        long_funcs = [f for f in functions if len(f['lines']) > self.max_function_length]

        long_var_names = []
        for line in lines:
            words = line.strip().split()
            for w in words:
                if w.isidentifier() and len(w) > self.max_variable_name_length:
                    long_var_names.append(w)

        duplicates = self._detect_duplicates(lines)

        analysis = {
            'long_functions': len(long_funcs),
            'long_var_names': len(long_var_names),
            'duplicates': duplicates['count'],
            'duplicate_lines': duplicates['lines']
        }
        if self.verbose:
            print(f"[AutoRefactoring] Long funcs: {analysis['long_functions']}, Long vars: {analysis['long_var_names']}, Duplicates: {analysis['duplicates']}")
        return analysis

    def _extract_functions(self, lines):
        """
        Extracts function code blocks (very basic heuristic).
        Returns a list of dicts {name, lines}.
        """
        functions = []
        current_func = None
        func_lines = []
        for line in lines:
            if line.strip().startswith("def "):
                if current_func is not None:
                    functions.append({'name': current_func, 'lines': func_lines})
                current_func = line.strip()
                func_lines = []
            elif current_func:
                func_lines.append(line)
        if current_func is not None:
            functions.append({'name': current_func, 'lines': func_lines})
        return functions

    def _detect_duplicates(self, lines):
        """
        Detects duplicated lines and their counts (naive).
        Returns a dict with count and lines.
        """
        counts = {}
        for line in lines:
            line = line.strip()
            if len(line) > 0:
                counts[line] = counts.get(line, 0) + 1
        duplicates = {line: cnt for line, cnt in counts.items() if cnt > 1}
        return {'count': len(duplicates), 'lines': duplicates}

    def suggest_refactors(self, code_str: str):
        """
        Uses the model to suggest the best refactor action based on code analysis.
        Categories:
        0 - Extract Method
        1 - Rename Variables
        2 - Remove Duplicates
        3 - General Cleanup
        Returns category index and explanation.
        """
        analysis = self.analyze_code(code_str)
        feature_vector = np.array([
            analysis['long_functions'],
            analysis['long_var_names'],
            analysis['duplicates'],
            len(code_str),
            len(code_str.split('\n'))
        ], dtype=float)

        # Normalize features for AI input roughly
        feature_vector = feature_vector / (np.max(feature_vector) + 1e-5)

        prediction = self.nn.predict(feature_vector)
        suggestion_idx = int(np.argmax(prediction))

        suggestions_map = {
            0: "Extract long functions into smaller methods.",
            1: "Rename long or unclear variable names to concise ones.",
            2: "Remove duplicated code blocks by abstraction.",
            3: "Perform general code cleanup and formatting."
        }
        if self.verbose:
            print(f"[AutoRefactoring] suggestion: {suggestions_map[suggestion_idx]} (confidence {prediction[suggestion_idx]:.2f})")

        return suggestion_idx, suggestions_map[suggestion_idx]

    def train_feedback(self, feature_vector: np.ndarray, correct_suggestion: int):
        """
        Train with a feedback vector and correct category.
        """
        y_true = np.zeros(self.nn.biases[-1].shape)
        y_true[correct_suggestion] = 1
        loss = self.nn.train(feature_vector, y_true)
        if self.verbose:
            print(f"[AutoRefactoring] trained with feedback. Loss: {loss:.5f}")
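A brief sketch exercising the two new classes, written for this review; it assumes quastt_show 0.3.2 plus numpy (which resources.py imports but which the METADATA below does not declare as a dependency).

from quastt_show.resources import LFMTracker, AutoRefactoringPrompter

tracker = LFMTracker(suggestion_categories=5, verbose=False)
try:
    1 / 0
except ZeroDivisionError as e:
    tracker.track_error(e, custom_msg="demo division")   # returns softmax probabilities
    tracker.give_feedback(e, correct_category=4)         # one of the 5 output slots
tracker.report()

prompter = AutoRefactoringPrompter(max_function_length=10, verbose=False)
code = "def f():\n" + "    x = 1\n" * 12                 # deliberately over the limit
idx, advice = prompter.suggest_refactors(code)
print(idx, advice)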
quastt_show-0.3.2.dist-info/METADATA
ADDED
@@ -0,0 +1,15 @@
Metadata-Version: 2.4
Name: quastt_show
Version: 0.3.2
Summary: Useful tools for monitoring,tracking,learning from mistakes.Just check our docs: https://quastt.com/
Author: RVA
Requires-Python: >=3.7
Requires-Dist: requests
Requires-Dist: traceback
Requires-Dist: time
Requires-Dist: threading
Requires-Dist: concurrent
Dynamic: author
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary
quastt_show-0.3.2.dist-info/RECORD
ADDED
@@ -0,0 +1,11 @@
quastt_show/__init__.py,sha256=Bnh74XiP7l-MyZ4o4A0wwrmKVUpC5ChxAiUKWxyqDTQ,168
quastt_show/game.py,sha256=sfwC0O1Cc7Eg-A4_ykxvdgYj8je-Rl_hP41CNIfn4hE,13701
quastt_show/logger.py,sha256=lKqU6v36eUyY0GaUFRD2Cu-A24KX2MZAZORacyZOqZo,8368
quastt_show/monitoring.py,sha256=xBbZG5_pNLuotoATzFAkYS6bZP0IabRZIPXZyIRsEIE,12254
quastt_show/nothing.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
quastt_show/resources.py,sha256=t-FWPGcmV5wKC_j5Nt2CxrHrrYs6q564666F-DzrKFQ,12076
quastt_show-0.3.2.dist-info/METADATA,sha256=h1JJERUE6rLo6p689Xj5Qu12Sr8cafh8VefaHvYW3-0,416
quastt_show-0.3.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
quastt_show-0.3.2.dist-info/entry_points.txt,sha256=qLcSh8ANs-GHsS2TCzOciVdEyy-25IyVCujHZmG2S-I,54
quastt_show-0.3.2.dist-info/top_level.txt,sha256=BtMM1888oCrruk37yJGnahqbhyirtsL6d9ES0pK081g,12
quastt_show-0.3.2.dist-info/RECORD,,
|
@@ -1,7 +0,0 @@
|
|
1
|
-
quastt_show/__init__.py,sha256=eMgN52YNlIhpHGDEdg7q2HV8d-bZBvoYQ4VY9Tk1BVY,24
|
2
|
-
quastt_show/game.py,sha256=jKPgruUttd0YTTqRNPeNmSNTqNadYZDL8vjCcmYTalU,13689
|
3
|
-
quastt_show-0.1.2.dist-info/METADATA,sha256=b_y5BTgojWawI6Kj4adP_LxVKJYXyUURPo6oCL-1LAU,244
|
4
|
-
quastt_show-0.1.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
5
|
-
quastt_show-0.1.2.dist-info/entry_points.txt,sha256=qLcSh8ANs-GHsS2TCzOciVdEyy-25IyVCujHZmG2S-I,54
|
6
|
-
quastt_show-0.1.2.dist-info/top_level.txt,sha256=BtMM1888oCrruk37yJGnahqbhyirtsL6d9ES0pK081g,12
|
7
|
-
quastt_show-0.1.2.dist-info/RECORD,,
|
{quastt_show-0.1.2.dist-info → quastt_show-0.3.2.dist-info}/WHEEL
File without changes

{quastt_show-0.1.2.dist-info → quastt_show-0.3.2.dist-info}/entry_points.txt
File without changes

{quastt_show-0.1.2.dist-info → quastt_show-0.3.2.dist-info}/top_level.txt
File without changes