greentensor 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 GreenTensor
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,143 @@
1
+ Metadata-Version: 2.4
2
+ Name: greentensor
3
+ Version: 0.1.0
4
+ Summary: Energy-efficient middleware for PyTorch ML workloads — tracks carbon, optimizes GPU, reports savings.
5
+ Author: GreenTensor
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://github.com/YOUR_USERNAME/greentensor
8
+ Project-URL: Issues, https://github.com/YOUR_USERNAME/greentensor/issues
9
+ Keywords: green ai,carbon,energy,pytorch,sustainability,mlops
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: Intended Audience :: Science/Research
15
+ Requires-Python: >=3.9
16
+ Description-Content-Type: text/markdown
17
+ License-File: LICENSE
18
+ Requires-Dist: torch
19
+ Requires-Dist: numpy
20
+ Requires-Dist: codecarbon
21
+ Requires-Dist: psutil
22
+ Requires-Dist: streamlit
23
+ Dynamic: license-file
24
+
25
+ # GreenTensor
26
+
27
+ **Energy-efficient middleware for PyTorch ML workloads.**
28
+
29
+ GreenTensor wraps your training loop with one line of code and gives you:
30
+ - Real carbon emissions and energy tracking (via CodeCarbon)
31
+ - Automatic GPU optimizations (cuDNN benchmark, mixed precision)
32
+ - Idle GPU detection to reduce wasted energy
33
+ - A detailed savings report comparing against your baseline
34
+
35
+ ---
36
+
37
+ ## Install
38
+
39
+ ```bash
40
+ pip install greentensor
41
+ ```
42
+
43
+ ---
44
+
45
+ ## Quickstart
46
+
47
+ ```python
48
+ from greentensor import GreenTensor
49
+
50
+ with GreenTensor() as gt:
51
+ with gt.mixed_precision():
52
+ train() # your existing training code, unchanged
53
+ ```
54
+
55
+ Output:
56
+ ```
57
+ +======================================+
58
+ | GreenTensor Report |
59
+ +======================================+
60
+ Runtime : 12.34 s
61
+ Energy Used : 0.000412 kWh
62
+ CO2 Emissions : 0.000096 kg
63
+ ======================================
64
+ ```
65
+
66
+ ---
67
+
68
+ ## Compare against a baseline
69
+
70
+ ```python
71
+ import pickle
72
+ from greentensor import GreenTensor
73
+ from greentensor.core.tracker import Tracker
74
+ from greentensor.report.metrics import RunMetrics
75
+ import time
76
+
77
+ # 1. Run baseline (no optimizations)
78
+ tracker = Tracker()
79
+ tracker.start()
80
+ t0 = time.perf_counter()
81
+ train()
82
+ duration = time.perf_counter() - t0
83
+ emissions_kg, energy_kwh = tracker.stop()
84
+ baseline = RunMetrics(duration_s=duration, energy_kwh=energy_kwh, emissions_kg=emissions_kg)
85
+
86
+ # 2. Run optimized — report shows real savings
87
+ with GreenTensor(baseline=baseline) as gt:
88
+ with gt.mixed_precision():
89
+ train()
90
+ ```
91
+
92
+ ---
93
+
94
+ ## Batch size optimizer
95
+
96
+ ```python
97
+ from greentensor import optimize_batch_size
98
+
99
+ batch_size = optimize_batch_size(32) # auto-scales based on available GPU memory
100
+ ```
101
+
102
+ ---
103
+
104
+ ## Dashboard
105
+
106
+ ```bash
107
+ streamlit run dashboard/app.py
108
+ ```
109
+
110
+ Enter baseline and optimized metrics manually, or upload `.pkl` files saved from your runs.
111
+
112
+ ---
113
+
114
+ ## Configuration
115
+
116
+ ```python
117
+ from greentensor import GreenTensor, Config
118
+
119
+ config = Config(
120
+ enable_cudnn_benchmark=True,
121
+ enable_mixed_precision=True,
122
+ idle_threshold_pct=10.0, # GPU util % below which idle is detected
123
+ max_batch_size=512,
124
+ )
125
+
126
+ with GreenTensor(config=config) as gt:
127
+ train()
128
+ ```
129
+
130
+ ---
131
+
132
+ ## Run tests
133
+
134
+ ```bash
135
+ pip install pytest
136
+ pytest tests/ -v
137
+ ```
138
+
139
+ ---
140
+
141
+ ## License
142
+
143
+ MIT
@@ -0,0 +1,119 @@
1
+ # GreenTensor
2
+
3
+ **Energy-efficient middleware for PyTorch ML workloads.**
4
+
5
+ GreenTensor wraps your training loop with one line of code and gives you:
6
+ - Real carbon emissions and energy tracking (via CodeCarbon)
7
+ - Automatic GPU optimizations (cuDNN benchmark, mixed precision)
8
+ - Idle GPU detection to reduce wasted energy
9
+ - A detailed savings report comparing against your baseline
10
+
11
+ ---
12
+
13
+ ## Install
14
+
15
+ ```bash
16
+ pip install greentensor
17
+ ```
18
+
19
+ ---
20
+
21
+ ## Quickstart
22
+
23
+ ```python
24
+ from greentensor import GreenTensor
25
+
26
+ with GreenTensor() as gt:
27
+ with gt.mixed_precision():
28
+ train() # your existing training code, unchanged
29
+ ```
30
+
31
+ Output:
32
+ ```
33
+ +======================================+
34
+ | GreenTensor Report |
35
+ +======================================+
36
+ Runtime : 12.34 s
37
+ Energy Used : 0.000412 kWh
38
+ CO2 Emissions : 0.000096 kg
39
+ ======================================
40
+ ```
41
+
42
+ ---
43
+
44
+ ## Compare against a baseline
45
+
46
+ ```python
47
+ import pickle
48
+ from greentensor import GreenTensor
49
+ from greentensor.core.tracker import Tracker
50
+ from greentensor.report.metrics import RunMetrics
51
+ import time
52
+
53
+ # 1. Run baseline (no optimizations)
54
+ tracker = Tracker()
55
+ tracker.start()
56
+ t0 = time.perf_counter()
57
+ train()
58
+ duration = time.perf_counter() - t0
59
+ emissions_kg, energy_kwh = tracker.stop()
60
+ baseline = RunMetrics(duration_s=duration, energy_kwh=energy_kwh, emissions_kg=emissions_kg)
61
+
62
+ # 2. Run optimized — report shows real savings
63
+ with GreenTensor(baseline=baseline) as gt:
64
+ with gt.mixed_precision():
65
+ train()
66
+ ```
67
+
68
+ ---
69
+
70
+ ## Batch size optimizer
71
+
72
+ ```python
73
+ from greentensor import optimize_batch_size
74
+
75
+ batch_size = optimize_batch_size(32) # auto-scales based on available GPU memory
76
+ ```
77
+
78
+ ---
79
+
80
+ ## Dashboard
81
+
82
+ ```bash
83
+ streamlit run dashboard/app.py
84
+ ```
85
+
86
+ Enter baseline and optimized metrics manually, or upload `.pkl` files saved from your runs.
87
+
88
+ ---
89
+
90
+ ## Configuration
91
+
92
+ ```python
93
+ from greentensor import GreenTensor, Config
94
+
95
+ config = Config(
96
+ enable_cudnn_benchmark=True,
97
+ enable_mixed_precision=True,
98
+ idle_threshold_pct=10.0, # GPU util % below which idle is detected
99
+ max_batch_size=512,
100
+ )
101
+
102
+ with GreenTensor(config=config) as gt:
103
+ train()
104
+ ```
105
+
106
+ ---
107
+
108
+ ## Run tests
109
+
110
+ ```bash
111
+ pip install pytest
112
+ pytest tests/ -v
113
+ ```
114
+
115
+ ---
116
+
117
+ ## License
118
+
119
+ MIT
@@ -0,0 +1,9 @@
1
"""
GreenTensor — energy-efficient ML workload middleware.

Re-exports the public API: the GreenTensor context manager, run metrics
and savings helpers, the batch-size optimizer, and the Config dataclass.
Everything listed in __all__ is importable directly from `greentensor`.
"""
from greentensor.core.context import GreenTensor
from greentensor.report.metrics import RunMetrics, calculate_savings
from greentensor.optimizers.batch_optimizer import optimize_batch_size
from greentensor.utils.config import Config

__all__ = ["GreenTensor", "RunMetrics", "calculate_savings", "optimize_batch_size", "Config"]
@@ -0,0 +1,64 @@
1
+ import time
2
+ import functools
3
+ from greentensor.core.tracker import Tracker
4
+ from greentensor.optimizers.gpu_optimizer import GPUOptimizer
5
+ from greentensor.optimizers.idle_optimizer import IdleOptimizer
6
+ from greentensor.report.report import generate_report
7
+ from greentensor.report.metrics import RunMetrics
8
+ from greentensor.utils.config import Config
9
+ from greentensor.utils.logger import logger
10
+
11
+
12
class GreenTensor:
    """
    Context manager that wraps a workload with emissions tracking and
    reversible GPU optimizations, printing a savings report on exit.

    Usage:
        with GreenTensor() as gt:
            with gt.mixed_precision():
                train()
    """

    def __init__(self, config=None, baseline=None, verbose=True):
        """
        Args:
            config: optional Config; a default Config() is used when omitted.
            baseline: optional RunMetrics from a prior (unoptimized) run; when
                given, the exit report includes savings versus this baseline.
            verbose: when True, the report is printed on exit.
        """
        self.config = config or Config()
        self.baseline = baseline
        self.verbose = verbose
        self.tracker = Tracker(self.config)
        self.gpu_optimizer = GPUOptimizer(self.config)
        self.idle_optimizer = IdleOptimizer(self.config)
        self.metrics = None  # RunMetrics for the session, populated by __exit__

    def __enter__(self):
        # Order matters: turn optimizations on first, then start metering,
        # so setup cost is not billed to the tracked interval.
        logger.info("GreenTensor session starting...")
        self.gpu_optimizer.apply()
        self.idle_optimizer.apply()
        self.tracker.start()
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Runs even when the body raised: metrics are still collected and
        # (if verbose) the report printed before the exception propagates.
        duration = time.perf_counter() - self._start
        emissions_kg, energy_kwh = self.tracker.stop()
        self.idle_optimizer.revert()
        self.gpu_optimizer.revert()

        self.metrics = RunMetrics(
            duration_s=duration,
            energy_kwh=energy_kwh,
            emissions_kg=emissions_kg,
            idle_seconds=self.idle_optimizer.idle_seconds,
        )

        if self.verbose:
            report = generate_report(
                duration=duration,
                emissions_kg=emissions_kg,
                energy_kwh=energy_kwh,
                idle_seconds=self.idle_optimizer.idle_seconds,
                baseline=self.baseline,
            )
            print(report)

        # False -> never suppress exceptions raised inside the with-block.
        return False

    def mixed_precision(self):
        """Return the GPU optimizer's autocast (or no-op) context manager."""
        return self.gpu_optimizer.mixed_precision()

    @staticmethod
    def profile(func):
        """Decorator: run *func* inside a default GreenTensor session."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with GreenTensor():
                return func(*args, **kwargs)
        return wrapper
@@ -0,0 +1,60 @@
1
+ import subprocess
2
+ import platform
3
+ import time
4
+ import functools
5
+ from greentensor.utils.logger import logger
6
+
7
class Profiler:
    """Lightweight GPU profiling helpers built on the nvidia-smi CLI."""

    @staticmethod
    def get_gpu_metrics() -> dict:
        """
        Query live GPU utilization (%) and power draw (W) via nvidia-smi.

        nvidia-smi emits one CSV row per GPU; only the first device is read.
        Fields that are not numeric (e.g. "[N/A]" for power on some boards)
        are reported as 0.0 instead of discarding the whole sample.
        Returns zeros gracefully when no GPU / nvidia-smi is unavailable.
        """
        try:
            result = subprocess.run(
                [
                    "nvidia-smi",
                    "--query-gpu=utilization.gpu,power.draw",
                    "--format=csv,noheader,nounits",
                ],
                capture_output=True,
                text=True,
                timeout=5,
            )
            rows = result.stdout.strip().splitlines()
            if rows:
                # Fix: the original split the whole stdout on ", ", so with
                # multiple GPUs the embedded newline corrupted the floats and
                # every reading collapsed to zeros via the except clause.
                fields = [f.strip() for f in rows[0].split(",")]
                if len(fields) >= 2:
                    return {
                        "util_%": Profiler._to_float(fields[0]),
                        "power_W": Profiler._to_float(fields[1]),
                    }
        except FileNotFoundError:
            pass  # nvidia-smi not installed
        except Exception as e:
            logger.debug(f"nvidia-smi query failed: {e}")
        return {"util_%": 0.0, "power_W": 0.0}

    @staticmethod
    def _to_float(token: str) -> float:
        """Parse one nvidia-smi field; non-numeric values ('[N/A]') become 0.0."""
        try:
            return float(token)
        except ValueError:
            return 0.0

    @staticmethod
    def track_gpu(func):
        """Decorator that captures GPU metrics before and after a function call.

        The wrapped callable returns a tuple ``(result, profile)`` where
        ``profile`` holds duration, the two GPU snapshots, and the mean of the
        two power readings.
        """
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = Profiler.get_gpu_metrics()
            t0 = time.perf_counter()
            result = func(*args, **kwargs)
            elapsed = time.perf_counter() - t0
            end = Profiler.get_gpu_metrics()
            profile = {
                "duration_s": elapsed,
                "gpu_start": start,
                "gpu_end": end,
                "avg_power_W": (start["power_W"] + end["power_W"]) / 2,
            }
            return result, profile
        return wrapper

    @staticmethod
    def estimate_energy_kwh(power_w: float, duration_s: float) -> float:
        """Convert average power (W) and duration (s) to energy in kWh."""
        # 1 kWh = 1000 W * 3600 s = 3_600_000 W*s
        return (power_w * duration_s) / 3_600_000
@@ -0,0 +1,102 @@
1
+ import time
2
+ from greentensor.utils.config import Config
3
+ from greentensor.utils.logger import logger
4
+
5
class Tracker:
    """
    Tracks energy and carbon emissions for a workload.
    Uses CodeCarbon when available; falls back to nvidia-smi power sampling.
    """

    def __init__(self, config: Config = None):
        self.config = config or Config()
        self._codecarbon = None  # CodeCarbon EmissionsTracker when it started OK
        self._start_time = None  # perf_counter timestamp set by start()
        self._power_samples: list[float] = []  # W readings from the sampler thread
        self._sampling = False  # flag the sampler thread polls to know when to stop
        self._sample_thread = None  # daemon thread (fallback path only)

    # ------------------------------------------------------------------ #
    # Public API                                                         #
    # ------------------------------------------------------------------ #

    def start(self):
        """Begin tracking: prefer CodeCarbon, else spawn the power sampler."""
        self._start_time = time.perf_counter()
        if self._try_start_codecarbon():
            logger.info("Tracking emissions via CodeCarbon.")
        else:
            logger.info("CodeCarbon unavailable — falling back to nvidia-smi power sampling.")
            self._start_power_sampling()

    def stop(self) -> tuple[float, float]:
        """
        Returns (emissions_kg_co2, energy_kwh).
        """
        # Duration is measured here so the sampling fallback can integrate
        # average power over the full tracked interval.
        duration = time.perf_counter() - self._start_time

        if self._codecarbon:
            return self._stop_codecarbon()

        return self._stop_power_sampling(duration)

    # ------------------------------------------------------------------ #
    # CodeCarbon path                                                    #
    # ------------------------------------------------------------------ #

    def _try_start_codecarbon(self) -> bool:
        """Attempt to start a CodeCarbon EmissionsTracker; True on success."""
        try:
            from codecarbon import EmissionsTracker
            self._codecarbon = EmissionsTracker(
                log_level="error",
                save_to_file=False,
                save_to_api=False,
            )
            self._codecarbon.start()
            return True
        except Exception as e:
            # Broad catch is deliberate: import errors, permission issues and
            # hardware-probe failures must all fall through to the sampler.
            logger.debug(f"CodeCarbon init failed: {e}")
            self._codecarbon = None
            return False

    def _stop_codecarbon(self) -> tuple[float, float]:
        """Stop CodeCarbon and extract (emissions_kg, energy_kwh)."""
        emissions_kg = self._codecarbon.stop() or 0.0
        # CodeCarbon exposes energy via its internal tracker
        try:
            # NOTE(review): _total_energy is a private CodeCarbon attribute and
            # may move between versions — the except clause covers that case.
            energy_kwh = self._codecarbon._total_energy.kWh
        except Exception:
            # fallback: derive from emissions using config intensity
            energy_kwh = emissions_kg / self.config.carbon_intensity_kg_per_kwh
        return emissions_kg, energy_kwh

    # ------------------------------------------------------------------ #
    # Power-sampling fallback                                            #
    # ------------------------------------------------------------------ #

    def _start_power_sampling(self):
        """Spawn a daemon thread sampling GPU power every 0.5 s."""
        import threading
        self._sampling = True
        self._power_samples = []

        def _sample():
            from greentensor.core.profiler import Profiler
            while self._sampling:
                m = Profiler.get_gpu_metrics()
                self._power_samples.append(m["power_W"])
                time.sleep(0.5)

        self._sample_thread = threading.Thread(target=_sample, daemon=True)
        self._sample_thread.start()

    def _stop_power_sampling(self, duration_s: float) -> tuple[float, float]:
        """Average the sampled power, integrate over duration, convert to kWh,
        then to kg CO2 via the configured carbon intensity."""
        self._sampling = False
        if self._sample_thread:
            self._sample_thread.join(timeout=2)

        if self._power_samples:
            avg_power_w = sum(self._power_samples) / len(self._power_samples)
        else:
            # No samples (e.g. run shorter than one 0.5 s interval, or no GPU).
            avg_power_w = 0.0

        # 1 kWh = 3_600_000 W*s
        energy_kwh = (avg_power_w * duration_s) / 3_600_000
        emissions_kg = energy_kwh * self.config.carbon_intensity_kg_per_kwh
        return emissions_kg, energy_kwh
@@ -0,0 +1,19 @@
1
+ from abc import ABC, abstractmethod
2
+
3
class BaseOptimizer(ABC):
    """Abstract contract shared by every optimizer.

    Subclasses implement apply() and revert(); the base class layers
    context-manager support on top, so any optimizer can be driven by a
    ``with`` statement (apply on entry, revert on exit).
    """

    @abstractmethod
    def apply(self):
        """Apply the optimization."""

    @abstractmethod
    def revert(self):
        """Revert the optimization to its original state."""

    def __enter__(self):
        self.apply()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.revert()
@@ -0,0 +1,45 @@
1
+ from .base import BaseOptimizer
2
+ from greentensor.utils.config import Config
3
+ from greentensor.utils.logger import logger
4
+
5
class BatchOptimizer(BaseOptimizer):
    """Suggests an optimal batch size based on GPU memory availability."""

    def __init__(self, current_batch: int, config: Config = None):
        self.config = config or Config()
        self.original_batch = current_batch  # caller's batch size, kept for revert()
        self.optimal_batch = current_batch   # latest suggestion (== original until apply())

    def apply(self):
        """Compute, store and return the recommended batch size."""
        self.optimal_batch = self._compute_optimal(self.original_batch)
        if self.optimal_batch != self.original_batch:
            logger.info(f"Batch size optimized: {self.original_batch} → {self.optimal_batch}")
        return self.optimal_batch

    def revert(self):
        """Discard the suggestion and fall back to the caller's batch size."""
        self.optimal_batch = self.original_batch

    def _compute_optimal(self, batch: int) -> int:
        """Heuristic: scale the batch with free GPU memory, capped by config."""
        import torch

        if not torch.cuda.is_available():
            # CPU: just double if below min threshold
            if batch < self.config.min_batch_size:
                return self.config.min_batch_size
            if batch < 64:
                return min(batch * 2, self.config.max_batch_size)
            return batch

        free_bytes, _total_bytes = torch.cuda.mem_get_info()
        free_gb = free_bytes / (1024 ** 3)
        # scale batch size proportionally to free memory, capped by config
        if free_gb > 4:
            return min(batch * 4, self.config.max_batch_size)
        if free_gb > 2:
            return min(batch * 2, self.config.max_batch_size)
        return max(batch, self.config.min_batch_size)
41
+
42
+
43
def optimize_batch_size(current_batch: int, config: Config = None) -> int:
    """Convenience function — returns the recommended batch size."""
    optimizer = BatchOptimizer(current_batch, config)
    return optimizer.apply()
@@ -0,0 +1,38 @@
1
+ import torch
2
+ from .base import BaseOptimizer
3
+ from greentensor.utils.logger import logger
4
+ from greentensor.utils.config import Config
5
+
6
class GPUOptimizer(BaseOptimizer):
    """Reversible GPU-level optimizations: cuDNN benchmark mode plus a
    mixed-precision autocast context.

    apply() snapshots the global cuDNN flags before changing them so that
    revert() restores exactly what the user had.
    """

    def __init__(self, config: Config = None):
        self.config = config or Config()
        # Snapshots taken by apply(); None means "nothing was changed".
        self._prev_cudnn_benchmark = None
        self._prev_cudnn_enabled = None

    def apply(self):
        """Enable cuDNN benchmark mode when configured and a GPU is present."""
        if not torch.cuda.is_available():
            logger.warning("No CUDA GPU detected — GPU optimizations skipped.")
            return

        if self.config.enable_cudnn_benchmark:
            self._prev_cudnn_benchmark = torch.backends.cudnn.benchmark
            self._prev_cudnn_enabled = torch.backends.cudnn.enabled
            torch.backends.cudnn.benchmark = True
            torch.backends.cudnn.enabled = True
            logger.info("cuDNN benchmark mode enabled.")

    def revert(self):
        """Restore the cuDNN flags captured by apply().

        The snapshot check alone is sufficient: when apply() skipped (no GPU
        or benchmark disabled) the snapshot is still None and nothing happens,
        so the extra cuda-availability guard from the original was redundant.
        """
        if self._prev_cudnn_benchmark is not None:
            torch.backends.cudnn.benchmark = self._prev_cudnn_benchmark
            torch.backends.cudnn.enabled = self._prev_cudnn_enabled
            logger.info("cuDNN settings restored.")

    def mixed_precision(self):
        """Return an autocast context for mixed-precision training.

        On a CUDA machine with mixed precision enabled this is torch.autocast;
        otherwise a no-op context so calling code runs unchanged on CPU.
        """
        if torch.cuda.is_available() and self.config.enable_mixed_precision:
            # Fix: torch.cuda.amp.autocast() is deprecated; torch.autocast is
            # the public, device-agnostic API (PyTorch >= 1.10).
            return torch.autocast(device_type="cuda")
        # no-op context on CPU
        import contextlib
        return contextlib.nullcontext()
@@ -0,0 +1,42 @@
1
+ import time
2
+ import threading
3
+ from .base import BaseOptimizer
4
+ from greentensor.utils.config import Config
5
+ from greentensor.utils.logger import logger
6
+
7
class IdleOptimizer(BaseOptimizer):
    """
    Monitors GPU utilization in a background thread.
    When the GPU is idle (below threshold), it throttles the process
    to reduce wasted energy during idle periods.
    """

    def __init__(self, config: Config = None):
        self.config = config or Config()
        self._stop_event = threading.Event()  # signals the monitor thread to exit
        self._thread = None  # daemon thread running _monitor()
        self.idle_seconds = 0.0  # accumulated idle time; see note in _monitor()
        self._lock = threading.Lock()  # guards idle_seconds updates

    def apply(self):
        """Start the background monitor thread."""
        self._stop_event.clear()
        self._thread = threading.Thread(target=self._monitor, daemon=True)
        self._thread.start()
        logger.info("Idle optimizer started.")

    def revert(self):
        """Stop the monitor thread and log the accumulated idle time."""
        self._stop_event.set()
        if self._thread:
            # Bounded join: the monitor may be mid-sleep (up to idle_sleep_s),
            # and being a daemon it cannot block interpreter shutdown anyway.
            self._thread.join(timeout=2)
        logger.info(f"Idle optimizer stopped. Total idle time: {self.idle_seconds:.2f}s")

    def _monitor(self):
        # idle_seconds is an approximation: each time utilization is below
        # the threshold we sleep idle_sleep_s and count that whole interval
        # as idle — it measures throttle time, not exact idle time.
        # NOTE(review): Profiler.get_gpu_metrics reports 0% when nvidia-smi is
        # unavailable, so on a GPU-less host every poll counts as "idle".
        from greentensor.core.profiler import Profiler
        while not self._stop_event.is_set():
            metrics = Profiler.get_gpu_metrics()
            if metrics["util_%"] < self.config.idle_threshold_pct:
                time.sleep(self.config.idle_sleep_s)
                with self._lock:
                    self.idle_seconds += self.config.idle_sleep_s
            else:
                time.sleep(0.1)
@@ -0,0 +1,22 @@
1
+ from dataclasses import dataclass
2
+
3
@dataclass
class RunMetrics:
    """Measurements captured for a single tracked run."""

    duration_s: float  # wall-clock runtime, seconds
    energy_kwh: float  # energy consumed, kWh
    emissions_kg: float  # CO2 emitted, kg
    idle_seconds: float = 0.0  # accumulated GPU-idle time, seconds


def calculate_savings(baseline: RunMetrics, optimized: RunMetrics) -> dict:
    """Compare two real RunMetrics objects and return absolute + relative savings.

    Returns a dict with keys:
        energy_saved_kwh, emissions_saved_kg, time_saved_s — absolute deltas
        energy_reduction_pct, emissions_reduction_pct — relative deltas
    Percentages are 0.0 when the corresponding baseline value is zero,
    avoiding division by zero. Values may be negative if the "optimized"
    run was actually worse.
    """
    energy_saved = baseline.energy_kwh - optimized.energy_kwh
    emissions_saved = baseline.emissions_kg - optimized.emissions_kg
    energy_reduction_pct = (energy_saved / baseline.energy_kwh * 100) if baseline.energy_kwh else 0.0
    # Backward-compatible addition: relative emissions savings, mirroring the
    # relative energy savings that were already reported.
    emissions_reduction_pct = (emissions_saved / baseline.emissions_kg * 100) if baseline.emissions_kg else 0.0
    time_saved = baseline.duration_s - optimized.duration_s

    return {
        "energy_saved_kwh": energy_saved,
        "emissions_saved_kg": emissions_saved,
        "energy_reduction_pct": energy_reduction_pct,
        "emissions_reduction_pct": emissions_reduction_pct,
        "time_saved_s": time_saved,
    }
@@ -0,0 +1,39 @@
1
+ # -*- coding: utf-8 -*-
2
+ from greentensor.report.metrics import RunMetrics
3
+
4
def generate_report(duration, emissions_kg, energy_kwh,
                    idle_seconds=0.0, baseline=None):
    """
    Generate a human-readable GreenTensor report.

    Args:
        duration: wall-clock runtime in seconds.
        emissions_kg: CO2 emitted during the run, in kg.
        energy_kwh: energy consumed during the run, in kWh.
        idle_seconds: accumulated idle time; the line is shown only when > 0.
        baseline: optional RunMetrics-like object (needs energy_kwh,
            emissions_kg and duration_s) — when given, savings are computed
            against real data.

    Returns:
        The formatted report as one newline-joined string.
    """
    out = [
        "",
        " +======================================+",
        " |          GreenTensor Report          |",
        " +======================================+",
        f" Runtime        : {duration:.2f} s",
        f" Energy Used    : {energy_kwh:.6f} kWh",
        f" CO2 Emissions  : {emissions_kg:.6f} kg",
    ]

    if idle_seconds > 0:
        out.append(f" Idle Time      : {idle_seconds:.2f} s")

    if baseline:
        saved_kwh = baseline.energy_kwh - energy_kwh
        saved_kg = baseline.emissions_kg - emissions_kg
        # Guard against a zero-energy baseline (avoids ZeroDivisionError).
        pct = (saved_kwh / baseline.energy_kwh * 100) if baseline.energy_kwh else 0.0
        saved_s = baseline.duration_s - duration

        out.append("")
        out.append(" -- Savings vs Baseline --")
        out.append(f" Baseline Energy : {baseline.energy_kwh:.6f} kWh")
        out.append(f" Energy Saved    : {saved_kwh:.6f} kWh ({pct:.1f}% reduction)")
        out.append(f" Emissions Saved : {saved_kg:.6f} kg CO2")
        out.append(f" Time Saved      : {saved_s:.2f} s")

    out.append(" ======================================\n")
    return "\n".join(out)
@@ -0,0 +1,18 @@
1
+ from dataclasses import dataclass, field
2
+
3
@dataclass
class Config:
    """Tunable settings for GreenTensor optimizers, idle detection and reporting."""

    # Batch optimization — bounds for BatchOptimizer suggestions
    min_batch_size: int = 32
    max_batch_size: int = 512

    # GPU — toggles consumed by GPUOptimizer
    enable_cudnn_benchmark: bool = True
    enable_mixed_precision: bool = True

    # Idle detection — consumed by IdleOptimizer
    idle_threshold_pct: float = 10.0  # GPU util % below which we consider it idle
    idle_sleep_s: float = 0.5  # seconds to sleep when idle detected

    # Reporting
    # NOTE(review): 0.000233 kg/kWh equals 0.233 g/kWh — orders of magnitude
    # below any real grid's carbon intensity. The original comment ("world avg
    # ... per kWh = 0.233 kg") suggests 0.233 kg/kWh was intended, a likely
    # 1000x unit slip. Value is kept as-is because the test suite pins it —
    # confirm with the maintainers before changing.
    carbon_intensity_kg_per_kwh: float = 0.000233
@@ -0,0 +1,13 @@
1
+ import logging
2
+ import sys
3
+
4
def get_logger(name: str = "greentensor") -> logging.Logger:
    """Return the named logger, attaching a stdout handler exactly once.

    The handler/level setup is skipped when the logger already has handlers,
    so repeated calls (and re-imports) never duplicate output.
    """
    log = logging.getLogger(name)
    if log.handlers:
        return log
    stream = logging.StreamHandler(sys.stdout)
    fmt = logging.Formatter("[%(asctime)s] %(levelname)s %(name)s: %(message)s", "%H:%M:%S")
    stream.setFormatter(fmt)
    log.addHandler(stream)
    log.setLevel(logging.INFO)
    return log

# Shared package-wide logger instance.
logger = get_logger()
@@ -0,0 +1,143 @@
1
+ Metadata-Version: 2.4
2
+ Name: greentensor
3
+ Version: 0.1.0
4
+ Summary: Energy-efficient middleware for PyTorch ML workloads — tracks carbon, optimizes GPU, reports savings.
5
+ Author: GreenTensor
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://github.com/YOUR_USERNAME/greentensor
8
+ Project-URL: Issues, https://github.com/YOUR_USERNAME/greentensor/issues
9
+ Keywords: green ai,carbon,energy,pytorch,sustainability,mlops
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: Intended Audience :: Science/Research
15
+ Requires-Python: >=3.9
16
+ Description-Content-Type: text/markdown
17
+ License-File: LICENSE
18
+ Requires-Dist: torch
19
+ Requires-Dist: numpy
20
+ Requires-Dist: codecarbon
21
+ Requires-Dist: psutil
22
+ Requires-Dist: streamlit
23
+ Dynamic: license-file
24
+
25
+ # GreenTensor
26
+
27
+ **Energy-efficient middleware for PyTorch ML workloads.**
28
+
29
+ GreenTensor wraps your training loop with one line of code and gives you:
30
+ - Real carbon emissions and energy tracking (via CodeCarbon)
31
+ - Automatic GPU optimizations (cuDNN benchmark, mixed precision)
32
+ - Idle GPU detection to reduce wasted energy
33
+ - A detailed savings report comparing against your baseline
34
+
35
+ ---
36
+
37
+ ## Install
38
+
39
+ ```bash
40
+ pip install greentensor
41
+ ```
42
+
43
+ ---
44
+
45
+ ## Quickstart
46
+
47
+ ```python
48
+ from greentensor import GreenTensor
49
+
50
+ with GreenTensor() as gt:
51
+ with gt.mixed_precision():
52
+ train() # your existing training code, unchanged
53
+ ```
54
+
55
+ Output:
56
+ ```
57
+ +======================================+
58
+ | GreenTensor Report |
59
+ +======================================+
60
+ Runtime : 12.34 s
61
+ Energy Used : 0.000412 kWh
62
+ CO2 Emissions : 0.000096 kg
63
+ ======================================
64
+ ```
65
+
66
+ ---
67
+
68
+ ## Compare against a baseline
69
+
70
+ ```python
71
+ import pickle
72
+ from greentensor import GreenTensor
73
+ from greentensor.core.tracker import Tracker
74
+ from greentensor.report.metrics import RunMetrics
75
+ import time
76
+
77
+ # 1. Run baseline (no optimizations)
78
+ tracker = Tracker()
79
+ tracker.start()
80
+ t0 = time.perf_counter()
81
+ train()
82
+ duration = time.perf_counter() - t0
83
+ emissions_kg, energy_kwh = tracker.stop()
84
+ baseline = RunMetrics(duration_s=duration, energy_kwh=energy_kwh, emissions_kg=emissions_kg)
85
+
86
+ # 2. Run optimized — report shows real savings
87
+ with GreenTensor(baseline=baseline) as gt:
88
+ with gt.mixed_precision():
89
+ train()
90
+ ```
91
+
92
+ ---
93
+
94
+ ## Batch size optimizer
95
+
96
+ ```python
97
+ from greentensor import optimize_batch_size
98
+
99
+ batch_size = optimize_batch_size(32) # auto-scales based on available GPU memory
100
+ ```
101
+
102
+ ---
103
+
104
+ ## Dashboard
105
+
106
+ ```bash
107
+ streamlit run dashboard/app.py
108
+ ```
109
+
110
+ Enter baseline and optimized metrics manually, or upload `.pkl` files saved from your runs.
111
+
112
+ ---
113
+
114
+ ## Configuration
115
+
116
+ ```python
117
+ from greentensor import GreenTensor, Config
118
+
119
+ config = Config(
120
+ enable_cudnn_benchmark=True,
121
+ enable_mixed_precision=True,
122
+ idle_threshold_pct=10.0, # GPU util % below which idle is detected
123
+ max_batch_size=512,
124
+ )
125
+
126
+ with GreenTensor(config=config) as gt:
127
+ train()
128
+ ```
129
+
130
+ ---
131
+
132
+ ## Run tests
133
+
134
+ ```bash
135
+ pip install pytest
136
+ pytest tests/ -v
137
+ ```
138
+
139
+ ---
140
+
141
+ ## License
142
+
143
+ MIT
@@ -0,0 +1,26 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ greentensor/__init__.py
5
+ greentensor.egg-info/PKG-INFO
6
+ greentensor.egg-info/SOURCES.txt
7
+ greentensor.egg-info/dependency_links.txt
8
+ greentensor.egg-info/requires.txt
9
+ greentensor.egg-info/top_level.txt
10
+ greentensor/core/context.py
11
+ greentensor/core/profiler.py
12
+ greentensor/core/tracker.py
13
+ greentensor/optimizers/base.py
14
+ greentensor/optimizers/batch_optimizer.py
15
+ greentensor/optimizers/gpu_optimizer.py
16
+ greentensor/optimizers/idle_optimizer.py
17
+ greentensor/report/metrics.py
18
+ greentensor/report/report.py
19
+ greentensor/utils/config.py
20
+ greentensor/utils/logger.py
21
+ tests/test_batch_optimizer.py
22
+ tests/test_config.py
23
+ tests/test_context.py
24
+ tests/test_metrics.py
25
+ tests/test_profiler.py
26
+ tests/test_report.py
@@ -0,0 +1,5 @@
1
+ torch
2
+ numpy
3
+ codecarbon
4
+ psutil
5
+ streamlit
@@ -0,0 +1 @@
1
+ greentensor
@@ -0,0 +1,30 @@
1
+ [build-system]
2
+ requires = ["setuptools>=42", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "greentensor"
7
+ version = "0.1.0"
8
+ description = "Energy-efficient middleware for PyTorch ML workloads — tracks carbon, optimizes GPU, reports savings."
9
+ readme = "README.md"
10
+ license = "MIT"
11
+ authors = [{ name = "GreenTensor" }]
12
+ keywords = ["green ai", "carbon", "energy", "pytorch", "sustainability", "mlops"]
13
+ classifiers = [
14
+ "Programming Language :: Python :: 3",
15
+ "Operating System :: OS Independent",
16
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
17
+ "Intended Audience :: Developers",
18
+ "Intended Audience :: Science/Research",
19
+ ]
20
+ requires-python = ">=3.9"
21
+ dependencies = ["torch", "numpy", "codecarbon", "psutil", "streamlit"]
22
+
23
+ [project.urls]
24
+ Homepage = "https://github.com/YOUR_USERNAME/greentensor"
25
+ Issues = "https://github.com/YOUR_USERNAME/greentensor/issues"
26
+
27
+ [tool.setuptools.packages.find]
28
+ where = ["."]
29
+ include = ["greentensor*"]
30
+ exclude = ["tests*"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,30 @@
1
+ from unittest.mock import patch
2
+ from greentensor.optimizers.batch_optimizer import BatchOptimizer, optimize_batch_size
3
+ from greentensor.utils.config import Config
4
+
5
def test_optimize_batch_cpu():
    # On CPU path: batch < min_batch_size → bumped to min
    config = Config(min_batch_size=32, max_batch_size=512)
    with patch("torch.cuda.is_available", return_value=False):
        result = optimize_batch_size(16, config)
    assert result == 32

def test_optimize_batch_cpu_doubles():
    # CPU path: a batch in [min_batch_size, 64) is doubled (capped by max).
    config = Config(min_batch_size=32, max_batch_size=512)
    with patch("torch.cuda.is_available", return_value=False):
        result = optimize_batch_size(32, config)
    assert result == 64

def test_optimize_batch_respects_max():
    # CPU path: a batch of 64 is not < 64, so it is returned unchanged —
    # which also means the result never exceeds max_batch_size here.
    config = Config(min_batch_size=32, max_batch_size=64)
    with patch("torch.cuda.is_available", return_value=False):
        result = optimize_batch_size(64, config)
    assert result == 64

def test_batch_optimizer_revert():
    # apply() doubles 32 → 64; revert() must restore the original suggestion.
    config = Config(min_batch_size=32, max_batch_size=512)
    with patch("torch.cuda.is_available", return_value=False):
        opt = BatchOptimizer(32, config)
        opt.apply()
        opt.revert()
    assert opt.optimal_batch == 32
@@ -0,0 +1,15 @@
1
+ from greentensor.utils.config import Config
2
+
3
def test_defaults():
    # Pins the documented Config defaults.
    c = Config()
    assert c.min_batch_size == 32
    assert c.max_batch_size == 512
    assert c.enable_cudnn_benchmark is True
    assert c.enable_mixed_precision is True
    assert c.idle_threshold_pct == 10.0
    # NOTE(review): this pins 0.000233 kg/kWh (= 0.233 g/kWh), which looks
    # ~1000x too small for a grid carbon intensity — see the note in Config.
    assert c.carbon_intensity_kg_per_kwh == 0.000233

def test_custom():
    # Overridden fields take effect; other defaults are untouched.
    c = Config(min_batch_size=16, max_batch_size=256)
    assert c.min_batch_size == 16
    assert c.max_batch_size == 256
@@ -0,0 +1,52 @@
1
+ import time
2
+ from unittest.mock import patch, MagicMock
3
+ from greentensor.core.context import GreenTensor
4
+ from greentensor.report.metrics import RunMetrics
5
+
6
+ def _mock_tracker():
7
+ t = MagicMock()
8
+ t.start.return_value = None
9
+ t.stop.return_value = (0.00001, 0.00005) # (emissions_kg, energy_kwh)
10
+ return t
11
+
12
def test_context_manager_runs():
    """Entering/exiting GreenTensor populates metrics from the (mocked) tracker."""
    with patch("greentensor.core.context.Tracker", return_value=_mock_tracker()):
        with patch("greentensor.core.context.GPUOptimizer"):
            with patch("greentensor.core.context.IdleOptimizer") as idle_cls:
                idle_cls.return_value.idle_seconds = 0.0
                with GreenTensor(verbose=False) as gt:
                    time.sleep(0.01)

                # Metrics come from the mocked tracker's stop() tuple.
                assert gt.metrics is not None
                assert gt.metrics.duration_s > 0
                assert gt.metrics.energy_kwh == 0.00005
                assert gt.metrics.emissions_kg == 0.00001
25
+
26
def test_context_manager_does_not_suppress_exceptions():
    """An exception raised inside the context must propagate to the caller."""
    with patch("greentensor.core.context.Tracker", return_value=_mock_tracker()):
        with patch("greentensor.core.context.GPUOptimizer"):
            with patch("greentensor.core.context.IdleOptimizer") as idle_cls:
                idle_cls.return_value.idle_seconds = 0.0
                propagated = False
                try:
                    with GreenTensor(verbose=False):
                        raise ValueError("test error")
                except ValueError as err:
                    propagated = True
                    assert str(err) == "test error"
                assert propagated, "Exception should not have been suppressed"
39
+
40
def test_profile_decorator():
    """GreenTensor.profile wraps a function and passes its return value through."""
    with patch("greentensor.core.context.Tracker", return_value=_mock_tracker()):
        with patch("greentensor.core.context.GPUOptimizer"):
            with patch("greentensor.core.context.IdleOptimizer") as idle_cls:
                idle_cls.return_value.idle_seconds = 0.0

                @GreenTensor.profile
                def sample():
                    return 99

                assert sample() == 99
@@ -0,0 +1,21 @@
1
+ from greentensor.report.metrics import RunMetrics, calculate_savings
2
+
3
def test_calculate_savings_basic():
    """calculate_savings reports absolute and percentage deltas vs. a baseline run."""
    before = RunMetrics(duration_s=10.0, energy_kwh=0.002, emissions_kg=0.001)
    after = RunMetrics(duration_s=7.0, energy_kwh=0.0012, emissions_kg=0.0006)

    savings = calculate_savings(before, after)

    tol = 1e-9
    assert abs(savings["energy_saved_kwh"] - 0.0008) < tol
    assert abs(savings["emissions_saved_kg"] - 0.0004) < tol
    assert abs(savings["energy_reduction_pct"] - 40.0) < 1e-6
    assert abs(savings["time_saved_s"] - 3.0) < tol
12
+
13
def test_calculate_savings_zero_baseline():
    """An all-zero baseline must not trigger a division by zero."""
    before = RunMetrics(duration_s=0.0, energy_kwh=0.0, emissions_kg=0.0)
    after = RunMetrics(duration_s=1.0, energy_kwh=0.001, emissions_kg=0.0)
    savings = calculate_savings(before, after)
    assert savings["energy_reduction_pct"] == 0.0  # no div-by-zero
18
+
19
def test_runmetrics_defaults():
    """idle_seconds defaults to zero when not supplied."""
    metrics = RunMetrics(duration_s=5.0, energy_kwh=0.001, emissions_kg=0.0002)
    assert metrics.idle_seconds == 0.0
@@ -0,0 +1,32 @@
1
+ from unittest.mock import patch
2
+ from greentensor.core.profiler import Profiler
3
+
4
def test_get_gpu_metrics_no_nvidia_smi():
    """A missing nvidia-smi binary yields zeroed metrics rather than an exception."""
    with patch("subprocess.run", side_effect=FileNotFoundError):
        metrics = Profiler.get_gpu_metrics()
    assert metrics == {"util_%": 0.0, "power_W": 0.0}
9
+
10
def test_get_gpu_metrics_parses_output():
    """nvidia-smi CSV output ("util, power") is parsed into floats."""
    fake_result = type("R", (), {"stdout": "45, 120.5\n", "returncode": 0})()
    with patch("subprocess.run", return_value=fake_result):
        metrics = Profiler.get_gpu_metrics()
    assert metrics["util_%"] == 45.0
    assert metrics["power_W"] == 120.5
16
+
17
def test_estimate_energy_kwh():
    """100 W sustained for one hour (3600 s) is 0.1 kWh."""
    kwh = Profiler.estimate_energy_kwh(100.0, 3600.0)
    assert abs(kwh - 0.1) < 1e-9
21
+
22
def test_track_gpu_decorator():
    """Profiler.track_gpu returns (result, profile) with timing/power keys."""

    @Profiler.track_gpu
    def sample():
        return 42

    zeroed = {"util_%": 0.0, "power_W": 0.0}
    with patch.object(Profiler, "get_gpu_metrics", return_value=zeroed):
        value, profile = sample()

    assert value == 42
    assert "duration_s" in profile
    assert "avg_power_W" in profile
@@ -0,0 +1,19 @@
1
+ from greentensor.report.report import generate_report
2
+ from greentensor.report.metrics import RunMetrics
3
+
4
def test_report_no_baseline():
    """Without a baseline the report shows raw figures and no savings section."""
    report = generate_report(5.0, 0.0003, 0.0013)
    for fragment in ("5.00 s", "0.001300 kWh", "0.000300 kg"):
        assert fragment in report
    assert "Savings" not in report
10
+
11
def test_report_with_baseline():
    """Supplying a baseline adds a savings-comparison section to the report."""
    before = RunMetrics(duration_s=10.0, energy_kwh=0.002, emissions_kg=0.001)
    report = generate_report(7.0, 0.0006, 0.0013, baseline=before)
    assert "Savings vs Baseline" in report
    assert "reduction" in report
16
+
17
def test_report_with_idle():
    """The idle_seconds figure is rendered in the report."""
    report = generate_report(5.0, 0.0003, 0.0013, idle_seconds=2.5)
    assert "2.50 s" in report