ecoml 0.1.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ecoml-0.1.3/PKG-INFO +18 -0
- ecoml-0.1.3/ecoml/__init__.py +26 -0
- ecoml-0.1.3/ecoml/dashboard.py +57 -0
- ecoml-0.1.3/ecoml/enable.py +29 -0
- ecoml-0.1.3/ecoml/gemini_helper.py +56 -0
- ecoml-0.1.3/ecoml/hardware_monitor.py +78 -0
- ecoml-0.1.3/ecoml/metrics.py +103 -0
- ecoml-0.1.3/ecoml/predictor.py +105 -0
- ecoml-0.1.3/ecoml/recommender.py +95 -0
- ecoml-0.1.3/ecoml/tracker.py +101 -0
- ecoml-0.1.3/ecoml/ui.py +247 -0
- ecoml-0.1.3/ecoml/utils.py +32 -0
- ecoml-0.1.3/ecoml.egg-info/PKG-INFO +18 -0
- ecoml-0.1.3/ecoml.egg-info/SOURCES.txt +29 -0
- ecoml-0.1.3/ecoml.egg-info/dependency_links.txt +1 -0
- ecoml-0.1.3/ecoml.egg-info/requires.txt +9 -0
- ecoml-0.1.3/ecoml.egg-info/top_level.txt +1 -0
- ecoml-0.1.3/pyproject.toml +31 -0
- ecoml-0.1.3/setup.cfg +13 -0
ecoml-0.1.3/PKG-INFO
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: ecoml
|
|
3
|
+
Version: 0.1.3
|
|
4
|
+
Summary: Real-time GPU, CPU, and CO₂ tracking with pill UI for Jupyter notebooks.
|
|
5
|
+
Author-email: Your Name <you@example.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/yourname/EcoML
|
|
8
|
+
Project-URL: Source, https://github.com/yourname/EcoML
|
|
9
|
+
Requires-Python: >=3.9
|
|
10
|
+
Description-Content-Type: text/markdown
|
|
11
|
+
Requires-Dist: psutil
|
|
12
|
+
Requires-Dist: pynvml
|
|
13
|
+
Requires-Dist: GPUtil
|
|
14
|
+
Requires-Dist: python-dotenv
|
|
15
|
+
Requires-Dist: pandas
|
|
16
|
+
Requires-Dist: ipython
|
|
17
|
+
Provides-Extra: gemini
|
|
18
|
+
Requires-Dist: google-generativeai; extra == "gemini"
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
from .tracker import EcoTracker
|
|
2
|
+
from .ui import CellHook
|
|
3
|
+
from .recommender import RecommendationEngine
|
|
4
|
+
from .gemini_helper import GeminiAdvisor
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
def enable_tracking():
    """Activate EcoML monitoring for the current Jupyter notebook.

    Creates ``~/.ecoml/emissions_log.csv`` (and its parent directory) if
    needed, wires an EcoTracker and a RecommendationEngine into a CellHook,
    and registers the hook so metrics are captured around every cell run.
    """
    csv_file = Path.home() / ".ecoml" / "emissions_log.csv"
    csv_file.parent.mkdir(parents=True, exist_ok=True)

    eco_tracker = EcoTracker(log_path=str(csv_file))
    engine = RecommendationEngine(eco_tracker)
    hook = CellHook(eco_tracker, engine)
    hook.register()

    print("🌱 EcoML tracking enabled – monitoring CPU/GPU/CO₂")


# Public API of the ecoml package.
__all__ = [
    "EcoTracker",
    "CellHook",
    "RecommendationEngine",
    "GeminiAdvisor",
    "enable_tracking",
]
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Simple dashboard helpers.
|
|
3
|
+
|
|
4
|
+
You can use these inside a notebook to quickly inspect the CSV log.
|
|
5
|
+
For Power BI you will mostly just load data/emissions_log.csv directly.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
import pandas as pd
|
|
11
|
+
import matplotlib.pyplot as plt
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def load_log(log_path: str) -> pd.DataFrame:
    """Load the EcoML emissions CSV, parsing `timestamp` to datetime when present."""
    frame = pd.read_csv(log_path)
    has_timestamp = "timestamp" in frame.columns
    if has_timestamp:
        frame["timestamp"] = pd.to_datetime(frame["timestamp"])
    return frame
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def show_summary(df: pd.DataFrame) -> None:
    """Print run count, total runtime, and CO₂ aggregates for a log DataFrame."""
    runs = len(df)
    runtime_total = df["runtime_sec"].sum()
    co2_total = df["co2_g"].sum()
    co2_mean = df["co2_g"].mean()

    lines = [
        "==== EcoML Summary ====",
        f"Runs: {runs}",
        f"Total runtime: {runtime_total:.2f} sec",
        f"Total CO₂: {co2_total:.3f} g",
        f"Average CO₂ per cell: {co2_mean:.4f} g",
    ]
    for line in lines:
        print(line)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def plot_emissions_over_time(df: pd.DataFrame) -> None:
    """Line-plot per-cell CO₂ (g) over time; prints a notice when timestamps are missing."""
    if "timestamp" not in df.columns:
        print("No timestamp column – cannot plot over time.")
        return

    ordered = df.sort_values("timestamp")

    plt.figure()
    plt.plot(ordered["timestamp"], ordered["co2_g"])
    axes = plt.gca()
    axes.set_xlabel("Time")
    axes.set_ylabel("CO₂ per cell (g)")
    axes.set_title("EcoML: CO₂ Emissions Over Time")
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.show()
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def plot_runtime_vs_emissions(df: pd.DataFrame) -> None:
    """Scatter-plot each logged cell's runtime (s) against its CO₂ output (g)."""
    plt.figure()
    plt.scatter(df["runtime_sec"], df["co2_g"])
    axes = plt.gca()
    axes.set_xlabel("Runtime (sec)")
    axes.set_ylabel("CO₂ (g)")
    axes.set_title("EcoML: Runtime vs CO₂")
    plt.tight_layout()
    plt.show()
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
from .tracker import EcoTracker
|
|
2
|
+
from .ui import CellHook
|
|
3
|
+
from .recommender import RecommendationEngine
|
|
4
|
+
from .gemini_helper import GeminiAdvisor
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def enable_tracking(
    notebook_name: str = "Notebook",
    log_path: str | None = None,
) -> None:
    """Enable EcoML tracking in the running Jupyter notebook.

    Parameters
    ----------
    notebook_name:
        Label stored with every logged cell record.
    log_path:
        Destination CSV for metrics; falls back to "data/emissions_log.csv"
        when not provided.

    Usage
    -----
    from ecoml import enable_tracking
    enable_tracking()
    """
    destination = log_path or "data/emissions_log.csv"

    eco = EcoTracker(
        notebook_name=notebook_name,
        log_path=destination,
    )

    advisor = GeminiAdvisor()
    engine = RecommendationEngine(tracker=eco, gemini=advisor)

    hook = CellHook(eco, engine)
    hook.register()
    print("🔥 EcoML tracking enabled – pills will appear after each cell.")
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from dotenv import load_dotenv
|
|
4
|
+
|
|
5
|
+
# Optional dependency: the Gemini client is only present when the user
# installed the "gemini" extra (pip install ecoml[gemini]).
try:
    import google.generativeai as genai
except ImportError:  # was a bare except; only a missing package should disable Gemini
    genai = None
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class GeminiAdvisor:
|
|
12
|
+
|
|
13
|
+
def __init__(self):
|
|
14
|
+
root = Path(__file__).resolve().parents[1]
|
|
15
|
+
load_dotenv(root / ".env")
|
|
16
|
+
|
|
17
|
+
key = os.getenv("GEMINI_API_KEY")
|
|
18
|
+
if not key:
|
|
19
|
+
print("⚠ Gemini disabled – no API key found")
|
|
20
|
+
self.enabled = False
|
|
21
|
+
return
|
|
22
|
+
|
|
23
|
+
if not genai:
|
|
24
|
+
print("⚠ Gemini client missing (pip install google-generativeai)")
|
|
25
|
+
self.enabled = False
|
|
26
|
+
return
|
|
27
|
+
|
|
28
|
+
genai.configure(api_key=key)
|
|
29
|
+
self.model = genai.GenerativeModel("gemini-2.5-flash")
|
|
30
|
+
self.enabled = True
|
|
31
|
+
print("🤖 Gemini Enabled ✓")
|
|
32
|
+
|
|
33
|
+
# ------------------------------------------------------
|
|
34
|
+
def ask(self, prompt: str) -> str | None:
|
|
35
|
+
"""One-line optimization advice"""
|
|
36
|
+
try:
|
|
37
|
+
out = self.model.generate_content(prompt).text.strip()
|
|
38
|
+
return out
|
|
39
|
+
except:
|
|
40
|
+
return None
|
|
41
|
+
|
|
42
|
+
# ------------------------------------------------------
|
|
43
|
+
def generate_fix(self, err: Exception, code: str | None):
|
|
44
|
+
"""Generate 1-line fix suggestion"""
|
|
45
|
+
prompt = f"""
|
|
46
|
+
You are a senior Python debugger.
|
|
47
|
+
|
|
48
|
+
Error:
|
|
49
|
+
{err}
|
|
50
|
+
|
|
51
|
+
Code that triggered it:
|
|
52
|
+
{code}
|
|
53
|
+
|
|
54
|
+
Give **ONE LINE FIX ONLY**.
|
|
55
|
+
"""
|
|
56
|
+
return self.ask(prompt)
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
# Try to load pythonnet CLR
|
|
6
|
+
try:
|
|
7
|
+
import clr
|
|
8
|
+
CLR_AVAILABLE = True
|
|
9
|
+
except ImportError:
|
|
10
|
+
CLR_AVAILABLE = False
|
|
11
|
+
|
|
12
|
+
# Path to the DLL inside ecoml/libs/
|
|
13
|
+
DLL_PATH = Path(__file__).resolve().parent / "libs" / "LibreHardwareMonitorLib.dll"
|
|
14
|
+
|
|
15
|
+
def init_hardware():
    """Set up LibreHardwareMonitor via pythonnet.

    Returns an opened Computer() instance with CPU sensors enabled, or
    None when pythonnet, the bundled DLL, or the CLR load step fails.
    """
    if not CLR_AVAILABLE:
        print("pythonnet not installed — LibreHardwareMonitor disabled.")
        return None

    if not DLL_PATH.exists():
        print(f"⚠ DLL missing at {DLL_PATH}")
        return None

    try:
        # Make the DLL folder importable, then register the assembly with the CLR.
        sys.path.append(str(DLL_PATH.parent))
        clr.AddReference(str(DLL_PATH))
    except Exception as exc:
        print("⚠ Failed to load LibreHardwareMonitor DLL:", exc)
        return None

    try:
        from LibreHardwareMonitor.Hardware import Computer

        monitor = Computer()
        monitor.CPUEnabled = True
        monitor.Open()
    except Exception as exc:
        print("⚠ LibreHardwareMonitor init error:", exc)
        return None

    return monitor
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
# Initialize only once
|
|
52
|
+
_computer = init_hardware()
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def get_cpu_temp_lhm():
    """Read the first CPU temperature sensor via LibreHardwareMonitor.

    Returns:
        float: temperature in °C, or None when the monitor was never
        initialised, no CPU temperature sensor exists, or reading fails.
    """
    if _computer is None:
        return None

    try:
        from LibreHardwareMonitor.Hardware import SensorType

        for hardware in _computer.Hardware:
            hardware.Update()

            # Only CPU devices are relevant; match on the device name.
            if "cpu" in hardware.Name.lower():
                for sensor in hardware.Sensors:
                    if sensor.SensorType == SensorType.Temperature:
                        return float(sensor.Value)

    except Exception:  # narrowed from a bare except; CLR failures degrade to None
        return None

    return None
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
import psutil, GPUtil, subprocess
|
|
2
|
+
|
|
3
|
+
# Needed for temperature fallback
|
|
4
|
+
def _read_gpu_temp_wmi():
|
|
5
|
+
"""
|
|
6
|
+
Windows WMI GPU temperature reader.
|
|
7
|
+
Works on MX series GPUs (MX330).
|
|
8
|
+
"""
|
|
9
|
+
try:
|
|
10
|
+
import wmi
|
|
11
|
+
w = wmi.WMI(namespace="root\\OpenHardwareMonitor")
|
|
12
|
+
for sensor in w.Sensor():
|
|
13
|
+
if sensor.SensorType == "Temperature" and "gpu" in sensor.Name.lower():
|
|
14
|
+
return float(sensor.Value)
|
|
15
|
+
except:
|
|
16
|
+
return None
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def get_metrics() -> dict:
    """Collect a best-effort snapshot of system metrics.

    Every key is always present (zeros / "CPU Only" fallbacks), so the UI
    layer never has to None-check even when psutil or GPUtil fail.
    """

    m = {}

    # ==========================================================
    # CPU UTIL
    # ==========================================================
    try:
        m["cpu_util_avg"] = float(psutil.cpu_percent(interval=None))
    except Exception:  # narrowed from bare except
        m["cpu_util_avg"] = 0.0

    # ==========================================================
    # CPU TEMP ("coretemp" is Linux-only; 0.0 elsewhere)
    # ==========================================================
    try:
        temps = psutil.sensors_temperatures()
        if temps and "coretemp" in temps:
            m["cpu_temp_c"] = temps["coretemp"][0].current
        else:
            m["cpu_temp_c"] = 0.0
    except Exception:  # sensors_temperatures may not exist on this platform
        m["cpu_temp_c"] = 0.0

    # ==========================================================
    # GPU (PRIMARY: GPUtil)
    # ==========================================================
    try:
        gpus = GPUtil.getGPUs()
        if not gpus:
            raise Exception("NO GPU")

        gpu = gpus[0]
        m.update({
            "gpu_util_avg": gpu.load * 100,
            "gpu_temp_c": gpu.temperature,
            "gpu_mem_used_mb": gpu.memoryUsed,
            "gpu_mem_free_mb": gpu.memoryFree,
            "gpu_mem_total_mb": gpu.memoryTotal,
            "gpu_power_w": 0.0,  # GPUtil cannot read power
            "hardware_type": "GPU",
            "hardware_name": gpu.name
        })

    except Exception:  # narrowed from bare except
        pass  # CPU-only fallback below fills the GPU keys

    # ==========================================================
    # GPU TEMP FALLBACK (WMI) — used when GPUtil gave nothing useful
    # ==========================================================
    if m.get("gpu_temp_c", 0) in [None, 0.0]:
        t = _read_gpu_temp_wmi()
        if t:
            m["gpu_temp_c"] = t

    # ==========================================================
    # CPU-ONLY FALLBACK — guarantees all GPU keys exist
    # ==========================================================
    if "hardware_type" not in m:
        m.update({
            "gpu_util_avg": 0.0,
            "gpu_temp_c": 0.0,
            "gpu_power_w": 0.0,
            "gpu_mem_used_mb": 0.0,
            "gpu_mem_free_mb": 0.0,
            "gpu_mem_total_mb": 0.0,
            "hardware_type": "CPU",
            "hardware_name": "CPU Only"
        })

    # ==========================================================
    # CPU POWER ESTIMATE — rough model: ~0.35 W per % util, 2 W floor
    # ==========================================================
    try:
        m["cpu_power_w"] = max(2.0, m["cpu_util_avg"] * 0.35)
    except Exception:
        m["cpu_power_w"] = 3.0

    return m
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from datetime import datetime
|
|
3
|
+
|
|
4
|
+
import joblib
|
|
5
|
+
import pandas as pd
|
|
6
|
+
import torch
|
|
7
|
+
|
|
8
|
+
# =========================================================
|
|
9
|
+
# PATHS
|
|
10
|
+
# =========================================================
|
|
11
|
+
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
|
|
12
|
+
|
|
13
|
+
DATA_PATH = os.path.join(os.path.dirname(BASE_DIR), "data", "emissions_log.csv")
|
|
14
|
+
|
|
15
|
+
# ✅ Models live in ecoml/model/
|
|
16
|
+
SCALER_PATH = os.path.join(BASE_DIR, "model", "scaler.pkl")
|
|
17
|
+
RECOMMENDER_PATH = os.path.join(BASE_DIR, "model", "hardware_recommender_model.pkl")
|
|
18
|
+
|
|
19
|
+
LOG_PATH = os.path.join(os.path.dirname(BASE_DIR), "data", "recommendations_log.csv")
|
|
20
|
+
|
|
21
|
+
# =========================================================
|
|
22
|
+
# ✨ MANUAL CLASS LABEL MAP
|
|
23
|
+
# =========================================================
|
|
24
|
+
LABEL_MAP = {
    # Maps the recommender model's integer class output to a
    # human-readable hardware recommendation string.
    0: "CPU Efficient — No GPU Needed",
    1: "GPU Efficient — Continue GPU Usage",
    2: "Upgrade Recommended — Mid-range GPU",
    3: "High-End GPU Required for heavy workloads",
}

# =========================================================
# LOAD MODELS
# Fail fast at import time: the predictor is useless without the
# fitted scaler + classifier shipped in ecoml/model/.
# =========================================================
if not os.path.exists(SCALER_PATH) or not os.path.exists(RECOMMENDER_PATH):
    raise FileNotFoundError(
        f"Scaler or recommender model not found in {os.path.join(BASE_DIR, 'model')}"
    )

scaler = joblib.load(SCALER_PATH)  # feature scaler fitted at training time
recommender = joblib.load(RECOMMENDER_PATH)  # hardware recommendation classifier

# =========================================================
# GPU CHECK
# NOTE(review): import-time side effect — prints a banner on every import
# of this module, not only when run as a script.
# =========================================================
GPU_AVAILABLE = torch.cuda.is_available()

print("\n=============================")
print("🟢 GPU Available" if GPU_AVAILABLE else "⚠ No GPU detected → CPU only")
print("=============================\n")
|
|
50
|
+
|
|
51
|
+
# =========================================================
|
|
52
|
+
# GET CLEAN LIVE INPUT
|
|
53
|
+
# =========================================================
|
|
54
|
+
def get_live_data():
    """Return (scaled_features, raw_row) for the newest emissions-log entry.

    The last CSV row is reduced to its numeric columns, aligned to the
    scaler's training-time feature order (missing features filled with 0),
    then transformed with the fitted scaler.
    """
    log = pd.read_csv(DATA_PATH)

    newest = log.tail(1).copy()

    # Drop non-numeric columns (timestamps, notes, hardware names).
    newest = newest.select_dtypes(include=["number"]).copy()

    # Align to the exact feature set/order the scaler was fitted on.
    feature_order = scaler.feature_names_in_
    newest = newest.reindex(columns=feature_order, fill_value=0)

    return scaler.transform(newest), newest
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
# =========================================================
|
|
71
|
+
# LOGGING
|
|
72
|
+
# =========================================================
|
|
73
|
+
def save_log(rec: str):
    """Append a timestamped recommendation row to the recommendations CSV."""
    row = pd.DataFrame([{
        "timestamp": datetime.now().isoformat(),
        "recommendation": rec,
    }])
    # Append mode creates the file when missing; write the header only then.
    row.to_csv(LOG_PATH, mode="a", header=not os.path.exists(LOG_PATH), index=False)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
# =========================================================
|
|
87
|
+
# PREDICT
|
|
88
|
+
# =========================================================
|
|
89
|
+
def run_prediction():
    """Predict a hardware recommendation for the latest logged cell and persist it."""
    features, raw_row = get_live_data()

    class_id = int(recommender.predict(features)[0])
    advice = LABEL_MAP.get(class_id, f"Unknown Class {class_id}")

    print("🔍 INPUT:", raw_row.to_dict("records")[0])
    print("⚡ RECOMMENDATION:", advice, "\n")

    save_log(advice)
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
# =========================================================
|
|
102
|
+
# AUTO EXECUTE WHEN RUN AS SCRIPT
|
|
103
|
+
# =========================================================
|
|
104
|
+
if __name__ == "__main__":
|
|
105
|
+
run_prediction()
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
from typing import Dict, Any
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class RecommendationEngine:
    """Rule-based hardware advisor with optional Gemini enrichment."""

    def __init__(self, tracker=None, gemini=None):
        self.tracker = tracker
        self.gemini = gemini

    # ==========================================================
    def recommend(self, m: Dict[str, Any], code=None, error=None):
        """Build a recommendation record from one cell's metric dictionary.

        Returns a dict with keys:
            recommended_hardware, recommended_confidence, recommended_reasons
        """

        notes = []
        verdict = "Balanced – Continue Current Hardware"
        certainty = None

        cpu_load = float(m.get("cpu_util_avg", 0))
        gpu_load = float(m.get("gpu_util_avg", 0))
        cpu_heat = float(m.get("cpu_temp_c", 0))

        # RULE 1: busy CPU + idle GPU → the workload belongs on the GPU.
        if cpu_load > 60 and gpu_load < 40:
            verdict = "CPU Bottleneck – Enable GPU"
            notes.append("⚠ High CPU load while GPU is idle → move workload to GPU")

        # RULE 2: GPU near saturation.
        if gpu_load > 85:
            notes.append("⚠ GPU heavily utilized")

        # RULE 3: thermal warning.
        if cpu_heat > 65:
            notes.append("🔥 CPU overheating detected")

        # RULE 4: the cell raised an exception.
        if error:
            notes.append(f"❌ Runtime error: {str(error)[:80]}")

        # Gemini is consulted only when something actually looks wrong
        # (stricter thresholds than the rules above, or a runtime error).
        trouble = (
            cpu_load > 70 or
            gpu_load > 75 or
            cpu_heat > 75 or
            error is not None
        )

        if self.gemini and self.gemini.enabled and trouble:

            # One-line optimization hint.
            hint = self.gemini.ask(
                f"CPU {cpu_load}% | GPU {gpu_load}% | Temp {cpu_heat}°C → Give 1-line optimization tip"
            )
            if hint:
                notes.append("🤖 " + hint)

            # On failures, additionally request a one-line fix.
            if error and code:
                patch = self.gemini.generate_fix(error, code)
                if patch:
                    notes.append("💡 Fix → " + patch)

        # Nothing flagged → the system is running efficiently.
        if not notes:
            return {
                "recommended_hardware": "Optimal",
                "recommended_confidence": 1.0,
                "recommended_reasons": "No changes required – system efficient"
            }

        return {
            "recommended_hardware": verdict,
            "recommended_confidence": certainty,
            "recommended_reasons": " | ".join(notes)
        }
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import time
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
from .metrics import get_metrics
|
|
4
|
+
from . import utils
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class EcoTracker:
    """
    Tracks runtime, energy, and CO₂ emissions for each notebook cell and
    passes the resulting metric record to the recommender and UI.
    """

    def __init__(self, log_path, notebook_name="Notebook", emission_factor=0.475):
        # emission_factor: grams of CO₂ emitted per Wh of energy consumed
        # (0.475 is in the range of commonly cited grid averages —
        # NOTE(review): confirm intended region/unit).
        self.log_path = log_path
        self.notebook_name = notebook_name
        self.emission_factor = emission_factor

        # Per-session state; refreshed by start_cell() before every run.
        self._cell_id = 0
        self._start_time = None
        self._start_metrics = None

    # ================================================================
    def start_cell(self):
        """Called BEFORE code executes: stamp the time and snapshot metrics."""
        self._cell_id += 1
        self._start_time = time.time()
        self._start_metrics = get_metrics()

    # ================================================================
    def end_cell(self, recommender=None, error=None, code=None) -> Dict[str, Any]:
        """
        Called AFTER code executes.

        Safe by design:
        - returns {} (instead of raising) when start_cell never ran,
        - logs the metric record plus recommender insights to CSV,
        - the returned dict is what the UI renders.
        """

        # -----------------------------------------------------------------
        # SAFETY CHECK — if start_cell NEVER ran, do not crash
        # -----------------------------------------------------------------
        if self._start_time is None or self._start_metrics is None:
            return {}  # UI handles an empty dict safely

        # -----------------------------------------------------------------
        # BASE METRICS: elapsed time plus a fresh end-of-cell snapshot
        # -----------------------------------------------------------------
        runtime = time.time() - self._start_time
        end = get_metrics()

        # Utilisation averaged between the pre- and post-cell snapshots.
        cpu_avg = (self._start_metrics["cpu_util_avg"] + end["cpu_util_avg"]) / 2
        gpu_avg = (self._start_metrics["gpu_util_avg"] + end["gpu_util_avg"]) / 2

        # Energy (Wh) from end-of-cell power draw, then CO₂ (g) via the factor.
        energy_wh = (end["cpu_power_w"] + end["gpu_power_w"]) * (runtime / 3600)
        co2 = energy_wh * self.emission_factor

        # -----------------------------------------------------------------
        # FINAL RECORD (BASE FIELDS)
        # NOTE(review): field order presumably defines the CSV column order
        # on first write — verify against utils.append_log_row.
        # -----------------------------------------------------------------
        rec = dict(
            timestamp=utils.now(),
            notebook=self.notebook_name,
            cell_id=self._cell_id,
            runtime_sec=runtime,

            cpu_util_avg=cpu_avg,
            gpu_util_avg=gpu_avg,
            cpu_power_w=end["cpu_power_w"],
            gpu_power_w=end["gpu_power_w"],

            energy_kwh=energy_wh / 1000,
            co2_g=co2,

            hardware_type=end["hardware_type"],
            hardware_name=end["hardware_name"],

            cpu_temp_c=end["cpu_temp_c"],
            gpu_temp_c=end["gpu_temp_c"],

            gpu_mem_used_mb=end["gpu_mem_used_mb"],
            gpu_mem_free_mb=end["gpu_mem_free_mb"],
            gpu_mem_total_mb=end["gpu_mem_total_mb"],

            notes="",
        )

        # -----------------------------------------------------------------
        # RECOMMENDER (Gemini + rule based) — merges its keys into the record
        # -----------------------------------------------------------------
        if recommender:
            out = recommender.recommend(rec, code=code, error=error)
            rec.update(out)

        # -----------------------------------------------------------------
        # WRITE TO CSV (best-effort; must not crash the notebook runtime)
        # -----------------------------------------------------------------
        utils.append_log_row(self.log_path, rec)

        return rec
|
ecoml-0.1.3/ecoml/ui.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import Any, Optional
|
|
3
|
+
from IPython.display import HTML, display
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
# =====================================================================
|
|
7
|
+
# CSS + JAVASCRIPT (GLOBAL, EXECUTED EVERY RENDER)
|
|
8
|
+
# =====================================================================
|
|
9
|
+
STYLE = """
|
|
10
|
+
<style>
|
|
11
|
+
.ecoml-pill {
|
|
12
|
+
display:inline-flex; align-items:center; gap:10px;
|
|
13
|
+
background:#1a1a1a; border:1px solid #2f2f2f;
|
|
14
|
+
padding:7px 16px; border-radius:24px;
|
|
15
|
+
font-family:system-ui,-apple-system,BlinkMacSystemFont,"Segoe UI";
|
|
16
|
+
color:#e9ffe9; font-size:14px; font-weight:600;
|
|
17
|
+
cursor:pointer; transition:.15s;
|
|
18
|
+
}
|
|
19
|
+
.ecoml-pill:active { transform:scale(.97); opacity:.8; }
|
|
20
|
+
|
|
21
|
+
.ecoml-icon {
|
|
22
|
+
width:22px;height:22px;border-radius:50%;
|
|
23
|
+
display:flex;align-items:center;justify-content:center;
|
|
24
|
+
font-size:15px;background:#2ecc71;
|
|
25
|
+
}
|
|
26
|
+
.ecoml-warm .ecoml-icon { background:#f1c40f!important; }
|
|
27
|
+
.ecoml-hot .ecoml-icon { background:#e74c3c!important; }
|
|
28
|
+
|
|
29
|
+
.ecoml-box {
|
|
30
|
+
border-radius:14px;
|
|
31
|
+
background:#101010;
|
|
32
|
+
border:1px solid #2f2f2f;
|
|
33
|
+
padding:14px 18px;
|
|
34
|
+
color:#eaeaea;
|
|
35
|
+
display:none;
|
|
36
|
+
font-size:14px;
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
.ecoml-table {
|
|
40
|
+
width:100%; border-collapse:collapse;
|
|
41
|
+
font-size:13px; margin-top:6px;
|
|
42
|
+
}
|
|
43
|
+
.ecoml-table td {
|
|
44
|
+
padding:5px 4px;
|
|
45
|
+
border-bottom:1px solid #2a2a2a;
|
|
46
|
+
text-align:left !important;
|
|
47
|
+
}
|
|
48
|
+
.ecoml-table tr:last-child td { border-bottom:none; }
|
|
49
|
+
.ecoml-table td:first-child { opacity:.65; font-weight:500;}
|
|
50
|
+
|
|
51
|
+
.eco-header { font-weight:700; margin-bottom:4px; color:#d5ffd5; }
|
|
52
|
+
.eco-suggest {
|
|
53
|
+
font-size:12px; margin:3px 0; padding-left:7px;
|
|
54
|
+
border-left:3px solid #3effaa; opacity:.9;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
.ecoml-score-wrap { margin-top:10px;font-size:13px; }
|
|
58
|
+
.ecoml-score-bar {
|
|
59
|
+
width:150px; height:6px;
|
|
60
|
+
background:#333; border-radius:10px;
|
|
61
|
+
overflow:hidden; margin-top:4px;
|
|
62
|
+
}
|
|
63
|
+
.ecoml-score-fill {
|
|
64
|
+
height:100%; width:0%;
|
|
65
|
+
background:linear-gradient(90deg,#12ff6e,#35ffb0);
|
|
66
|
+
transition:width .35s;
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
.eco-compare-row { margin-top:10px; display:flex; justify-content:flex-end; }
|
|
70
|
+
.eco-compare-btn {
|
|
71
|
+
padding:6px 12px; border-radius:999px;
|
|
72
|
+
background:#181818; border:1px solid #333;
|
|
73
|
+
font-size:13px; cursor:pointer; color:#fff;
|
|
74
|
+
}
|
|
75
|
+
.eco-compare-btn:hover { background:#222; }
|
|
76
|
+
</style>
|
|
77
|
+
|
|
78
|
+
<script>
|
|
79
|
+
if (!window.ecoToggle){
|
|
80
|
+
window.ecoToggle = function(id, fill, score){
|
|
81
|
+
let box = document.getElementById(id);
|
|
82
|
+
let bar = document.getElementById(fill);
|
|
83
|
+
if (!box) return;
|
|
84
|
+
if (box.style.display === "block"){
|
|
85
|
+
box.style.display="none";
|
|
86
|
+
bar.style.width="0%";
|
|
87
|
+
} else {
|
|
88
|
+
box.style.display="block";
|
|
89
|
+
setTimeout(() => bar.style.width = score + "%", 60);
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
</script>
|
|
94
|
+
"""
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
# =====================================================================
|
|
98
|
+
def _fmt(x, d=3):
|
|
99
|
+
try: return f"{float(x):.{d}f}"
|
|
100
|
+
except: return "-"
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
# =====================================================================
|
|
104
|
+
# CELL HOOK
|
|
105
|
+
# =====================================================================
|
|
106
|
+
class CellHook:
|
|
107
|
+
def __init__(self, tracker, recommender: Optional[Any] = None):
    # tracker: EcoTracker supplying start_cell()/end_cell() measurements.
    # recommender: optional RecommendationEngine forwarded to end_cell().
    self.tracker = tracker
    self.recommender = recommender
    # NOTE(review): never assigned anywhere visible in this file — possibly vestigial.
    self.shell = None
|
|
111
|
+
|
|
112
|
+
# -----------------------------------------------------------------
|
|
113
|
+
def register(self):
    """Attach pre/post cell callbacks to the running IPython shell, if any."""
    from IPython import get_ipython

    shell = get_ipython()
    if not shell:
        print("⚠ Not inside IPython – UI disabled")
        return

    shell.events.register("pre_run_cell", self._pre)
    shell.events.register("post_run_cell", self._post)
    print("EcoML UI CellHook Registered ✓")
|
|
122
|
+
|
|
123
|
+
# -----------------------------------------------------------------
|
|
124
|
+
def _pre(self, *_):
    """pre_run_cell callback: start the tracker's timer/metrics snapshot."""
    try:
        self.tracker.start_cell()
    except Exception:  # narrowed from a bare except; tracking must never break a cell
        pass
|
|
127
|
+
|
|
128
|
+
# -----------------------------------------------------------------
|
|
129
|
+
def _post(self, result):
|
|
130
|
+
code = getattr(result.info, "raw_cell", None)
|
|
131
|
+
error = result.error_in_exec if result.error_in_exec else None
|
|
132
|
+
|
|
133
|
+
try:
|
|
134
|
+
rec = self.tracker.end_cell(
|
|
135
|
+
recommender=self.recommender,
|
|
136
|
+
error=error,
|
|
137
|
+
code=code
|
|
138
|
+
)
|
|
139
|
+
except Exception as e:
|
|
140
|
+
display(HTML(f"<div style='color:#ff4444'>EcoML Error: {e}</div>"))
|
|
141
|
+
return
|
|
142
|
+
|
|
143
|
+
if not rec:
|
|
144
|
+
display(HTML(""))
|
|
145
|
+
return
|
|
146
|
+
|
|
147
|
+
# SAFE FIELDS
|
|
148
|
+
runtime = float(rec.get("runtime_sec", 0))
|
|
149
|
+
cpu = float(rec.get("cpu_util_avg", 0))
|
|
150
|
+
gpu = float(rec.get("gpu_util_avg", 0))
|
|
151
|
+
temp = float(rec.get("gpu_temp_c") or rec.get("cpu_temp_c") or 0)
|
|
152
|
+
co2 = float(rec.get("co2_g", 0))
|
|
153
|
+
energy = float(rec.get("energy_kwh", 0))
|
|
154
|
+
|
|
155
|
+
rec_hw = rec.get("recommended_hardware", "Unknown")
|
|
156
|
+
reasons = rec.get("recommended_reasons", "")
|
|
157
|
+
|
|
158
|
+
if isinstance(reasons, list):
|
|
159
|
+
reasons = " | ".join(reasons)
|
|
160
|
+
|
|
161
|
+
cid = rec.get("cell_id", 0)
|
|
162
|
+
box1 = f"eco-box-co2-{cid}"
|
|
163
|
+
box2 = f"eco-box-rec-{cid}"
|
|
164
|
+
fill1 = f"eco-fill-co2-{cid}"
|
|
165
|
+
fill2 = f"eco-fill-rec-{cid}"
|
|
166
|
+
|
|
167
|
+
score = max(0, min(100, int(100 - runtime*3 - gpu/2 - max(0, temp-60))))
|
|
168
|
+
cls = "ecoml-hot" if temp>90 else ("ecoml-warm" if temp>80 else "")
|
|
169
|
+
|
|
170
|
+
# ---------------- INSIGHTS BLOCK ----------------
|
|
171
|
+
insights = ""
|
|
172
|
+
if reasons:
|
|
173
|
+
insights += "<div class='eco-header'>💡 Insights</div>"
|
|
174
|
+
for line in reasons.split(" | "):
|
|
175
|
+
insights += f"<div class='eco-suggest'>{line}</div>"
|
|
176
|
+
temp_cpu = float(rec.get("cpu_temp_c", 0))
|
|
177
|
+
temp_gpu = float(rec.get("gpu_temp_c", 0))
|
|
178
|
+
|
|
179
|
+
# ---------------- TABLE 1 ----------------
|
|
180
|
+
table1 = f"""
|
|
181
|
+
<table class='ecoml-table'>
|
|
182
|
+
<tr><td>⏱ Runtime</td><td>{_fmt(runtime)} s</td></tr>
|
|
183
|
+
<tr><td>⚡ Energy</td><td>{_fmt(energy,6)} kWh</td></tr>
|
|
184
|
+
<tr><td>🖥 CPU</td><td>{_fmt(cpu,1)}%</td></tr>
|
|
185
|
+
<tr><td>🎮 GPU</td><td>{_fmt(gpu,1)}%</td></tr>
|
|
186
|
+
<tr><td>🌡️ Temp</td><td>{_fmt(temp_gpu,1)} °C</td></tr>
|
|
187
|
+
|
|
188
|
+
</table>
|
|
189
|
+
"""
|
|
190
|
+
|
|
191
|
+
# ---------------- TABLE 2 ----------------
|
|
192
|
+
table2 = f"""
|
|
193
|
+
<table class='ecoml-table'>
|
|
194
|
+
<tr><td>🔧 Recommended</td><td><b>{rec_hw}</b></td></tr>
|
|
195
|
+
<tr><td>♻ CO₂ (g)</td><td>{_fmt(co2,6)}</td></tr>
|
|
196
|
+
<tr><td>⏱ Runtime</td><td>{_fmt(runtime)} s</td></tr>
|
|
197
|
+
</table>
|
|
198
|
+
"""
|
|
199
|
+
|
|
200
|
+
# =====================================================================
|
|
201
|
+
# 🔥 SIDE-BY-SIDE BOX LAYOUT
|
|
202
|
+
# =====================================================================
|
|
203
|
+
html = f"""
|
|
204
|
+
{STYLE}
|
|
205
|
+
|
|
206
|
+
<div style="display:flex;gap:14px;align-items:center;margin-top:6px;">
|
|
207
|
+
<div class="ecoml-pill {cls}" onclick="ecoToggle('{box1}','{fill1}',{score})">
|
|
208
|
+
<div class="ecoml-icon">🌿</div><b>{_fmt(co2,3)} g CO₂</b>
|
|
209
|
+
</div>
|
|
210
|
+
|
|
211
|
+
<div class="ecoml-pill {cls}" onclick="ecoToggle('{box2}','{fill2}',{score})">
|
|
212
|
+
<div class="ecoml-icon">⚙️</div><b>{rec_hw}</b>
|
|
213
|
+
</div>
|
|
214
|
+
|
|
215
|
+
<span style="opacity:.6;font-size:12px;">(Click pill)</span>
|
|
216
|
+
</div>
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
<!-- ⭐ TWO BOXES SIDE-BY-SIDE ⭐ -->
|
|
220
|
+
<div style="display:flex; gap:18px; margin-top:10px; align-items:flex-start;">
|
|
221
|
+
|
|
222
|
+
<!-- LEFT BOX -->
|
|
223
|
+
<div class="ecoml-box" id="{box1}" style="width:50%;">
|
|
224
|
+
{insights}
|
|
225
|
+
{table1}
|
|
226
|
+
<div class="ecoml-score-wrap">
|
|
227
|
+
Eco Score <b>{score}/100</b>
|
|
228
|
+
<div class="ecoml-score-bar"><div class="ecoml-score-fill" id="{fill1}"></div></div>
|
|
229
|
+
</div>
|
|
230
|
+
</div>
|
|
231
|
+
|
|
232
|
+
<!-- RIGHT BOX -->
|
|
233
|
+
<div class="ecoml-box" id="{box2}" style="width:50%;">
|
|
234
|
+
{table2}
|
|
235
|
+
<div class="ecoml-score-wrap">
|
|
236
|
+
Eco Score <b>{score}/100</b>
|
|
237
|
+
<div class="ecoml-score-bar"><div class="ecoml-score-fill" id="{fill2}"></div></div>
|
|
238
|
+
</div>
|
|
239
|
+
<div class="eco-compare-row">
|
|
240
|
+
<button class="eco-compare-btn">📊 Compare</button>
|
|
241
|
+
</div>
|
|
242
|
+
</div>
|
|
243
|
+
|
|
244
|
+
</div>
|
|
245
|
+
"""
|
|
246
|
+
|
|
247
|
+
display(HTML(html))
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import os, csv, datetime as dt
|
|
2
|
+
|
|
3
|
+
# Column order for the EcoML CSV log. append_log_row() writes values in
# exactly this order (row.get(h, "") per header), so append new columns
# at the end rather than reordering existing ones.
LOG_HEADERS = [
    "timestamp","notebook","cell_id","runtime_sec",
    "cpu_util_avg","gpu_util_avg","cpu_power_w","gpu_power_w",
    "energy_kwh","co2_g","hardware_type","hardware_name",
    "cpu_temp_c","gpu_temp_c","gpu_mem_used_mb","gpu_mem_free_mb",
    "gpu_mem_total_mb","notes","recommended_hardware","recommended_confidence","recommended_reasons"
]
|
|
10
|
+
|
|
11
|
+
# ==================================================
|
|
12
|
+
def now():
    """Return the current local time as an ISO-8601 string, truncated to seconds."""
    stamp = dt.datetime.now()
    return stamp.isoformat(timespec="seconds")
|
|
14
|
+
|
|
15
|
+
# ==================================================
|
|
16
|
+
def ensure_log(path):
    """Create the log file's parent directory and, if the file is missing,
    write it with the header row from LOG_HEADERS."""
    parent = os.path.dirname(path) or "."
    os.makedirs(parent, exist_ok=True)
    if os.path.exists(path):
        return
    with open(path, "w", newline="", encoding="utf-8") as fh:
        csv.writer(fh).writerow(LOG_HEADERS)
|
|
21
|
+
|
|
22
|
+
# ==================================================
|
|
23
|
+
def append_log_row(path: str, row: dict):
    """Append *row* to the CSV log at *path*, creating the file if needed.

    Values are emitted in LOG_HEADERS order; missing keys become empty
    strings. I/O failures are reported but never raised, so logging can
    never break the tracked notebook cell.
    """
    ensure_log(path)

    ordered = [row.get(h, "") for h in LOG_HEADERS]

    try:
        with open(path, "a", newline="", encoding="utf-8") as f:
            csv.writer(f).writerow(ordered)
    except OSError as e:
        # Narrowed from a bare `except:` (which also hid KeyboardInterrupt
        # and programming errors) and now reports why the write failed.
        print(f"⚠ Failed to write log row – continuing. ({e})")
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: ecoml
|
|
3
|
+
Version: 0.1.3
|
|
4
|
+
Summary: Real-time GPU, CPU, and CO₂ tracking with pill UI for Jupyter notebooks.
|
|
5
|
+
Author-email: Your Name <you@example.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/yourname/EcoML
|
|
8
|
+
Project-URL: Source, https://github.com/yourname/EcoML
|
|
9
|
+
Requires-Python: >=3.9
|
|
10
|
+
Description-Content-Type: text/markdown
|
|
11
|
+
Requires-Dist: psutil
|
|
12
|
+
Requires-Dist: pynvml
|
|
13
|
+
Requires-Dist: GPUtil
|
|
14
|
+
Requires-Dist: python-dotenv
|
|
15
|
+
Requires-Dist: pandas
|
|
16
|
+
Requires-Dist: ipython
|
|
17
|
+
Provides-Extra: gemini
|
|
18
|
+
Requires-Dist: google-generativeai; extra == "gemini"
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
pyproject.toml
|
|
2
|
+
setup.cfg
|
|
3
|
+
./ecoml/__init__.py
|
|
4
|
+
./ecoml/dashboard.py
|
|
5
|
+
./ecoml/enable.py
|
|
6
|
+
./ecoml/gemini_helper.py
|
|
7
|
+
./ecoml/hardware_monitor.py
|
|
8
|
+
./ecoml/metrics.py
|
|
9
|
+
./ecoml/predictor.py
|
|
10
|
+
./ecoml/recommender.py
|
|
11
|
+
./ecoml/tracker.py
|
|
12
|
+
./ecoml/ui.py
|
|
13
|
+
./ecoml/utils.py
|
|
14
|
+
ecoml/__init__.py
|
|
15
|
+
ecoml/dashboard.py
|
|
16
|
+
ecoml/enable.py
|
|
17
|
+
ecoml/gemini_helper.py
|
|
18
|
+
ecoml/hardware_monitor.py
|
|
19
|
+
ecoml/metrics.py
|
|
20
|
+
ecoml/predictor.py
|
|
21
|
+
ecoml/recommender.py
|
|
22
|
+
ecoml/tracker.py
|
|
23
|
+
ecoml/ui.py
|
|
24
|
+
ecoml/utils.py
|
|
25
|
+
ecoml.egg-info/PKG-INFO
|
|
26
|
+
ecoml.egg-info/SOURCES.txt
|
|
27
|
+
ecoml.egg-info/dependency_links.txt
|
|
28
|
+
ecoml.egg-info/requires.txt
|
|
29
|
+
ecoml.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
ecoml
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "ecoml"
|
|
7
|
+
version = "0.1.3"
|
|
8
|
+
description = "Real-time GPU, CPU, and CO₂ tracking with pill UI for Jupyter notebooks."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.9"
|
|
11
|
+
license = { text = "MIT" }
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "Your Name", email = "you@example.com" }
|
|
14
|
+
]
|
|
15
|
+
|
|
16
|
+
dependencies = [
|
|
17
|
+
"psutil",
|
|
18
|
+
"pynvml",
|
|
19
|
+
"GPUtil",
|
|
20
|
+
"python-dotenv",
|
|
21
|
+
"pandas",
|
|
22
|
+
"ipython",
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
# Optional extra for Gemini
|
|
26
|
+
[project.optional-dependencies]
|
|
27
|
+
gemini = ["google-generativeai"]
|
|
28
|
+
|
|
29
|
+
[project.urls]
|
|
30
|
+
Homepage = "https://github.com/yourname/EcoML"
|
|
31
|
+
Source = "https://github.com/yourname/EcoML"
|