greenmining 1.2.4.tar.gz → 1.2.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. {greenmining-1.2.4 → greenmining-1.2.6}/CHANGELOG.md +17 -0
  2. {greenmining-1.2.4/greenmining.egg-info → greenmining-1.2.6}/PKG-INFO +1 -1
  3. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/__init__.py +7 -1
  4. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/analyzers/__init__.py +1 -1
  5. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/analyzers/code_diff_analyzer.py +6 -6
  6. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/analyzers/metrics_power_correlator.py +17 -17
  7. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/analyzers/statistical_analyzer.py +5 -5
  8. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/analyzers/temporal_analyzer.py +16 -17
  9. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/controllers/repository_controller.py +1 -3
  10. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/__init__.py +3 -3
  11. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/base.py +15 -16
  12. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/carbon_reporter.py +10 -10
  13. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/codecarbon_meter.py +4 -6
  14. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/cpu_meter.py +6 -7
  15. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/rapl.py +6 -8
  16. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/models/aggregated_stats.py +2 -3
  17. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/models/commit.py +2 -2
  18. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/models/repository.py +5 -6
  19. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/services/github_graphql_fetcher.py +8 -8
  20. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/services/local_repo_analyzer.py +79 -43
  21. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/services/reports.py +22 -22
  22. {greenmining-1.2.4 → greenmining-1.2.6/greenmining.egg-info}/PKG-INFO +1 -1
  23. {greenmining-1.2.4 → greenmining-1.2.6}/pyproject.toml +1 -1
  24. {greenmining-1.2.4 → greenmining-1.2.6}/LICENSE +0 -0
  25. {greenmining-1.2.4 → greenmining-1.2.6}/MANIFEST.in +0 -0
  26. {greenmining-1.2.4 → greenmining-1.2.6}/README.md +0 -0
  27. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/__main__.py +0 -0
  28. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/controllers/__init__.py +0 -0
  29. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/gsf_patterns.py +0 -0
  30. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/models/__init__.py +0 -0
  31. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/models/analysis_result.py +0 -0
  32. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/services/__init__.py +2 -2
  33. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/services/commit_extractor.py +0 -0
  34. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/services/data_aggregator.py +0 -0
  35. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/services/data_analyzer.py +0 -0
  36. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining/utils.py +0 -0
  37. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining.egg-info/SOURCES.txt +0 -0
  38. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining.egg-info/dependency_links.txt +0 -0
  39. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining.egg-info/requires.txt +0 -0
  40. {greenmining-1.2.4 → greenmining-1.2.6}/greenmining.egg-info/top_level.txt +0 -0
  41. {greenmining-1.2.4 → greenmining-1.2.6}/setup.cfg +0 -0
  42. {greenmining-1.2.4 → greenmining-1.2.6}/setup.py +0 -0

{greenmining-1.2.4 → greenmining-1.2.6}/CHANGELOG.md
@@ -1,5 +1,21 @@
  # Changelog
 
+ ## [1.2.5] - 2026-02-02
+
+ ### Added
+ - `shallow_clone` parameter (default: True) to significantly reduce repository download size
+ - `clone_depth` parameter for custom clone depth (auto-calculated as `max(50, max_commits * 3)` if None)
+ - Manual git shallow cloning before PyDriller analysis for dramatic performance improvement
+
+ ### Changed
+ - Repository cloning now uses `git clone --depth=N` by default, reducing download times by ~90%
+ - Clone depth automatically calculated based on max_commits to ensure sufficient history
+
+ ### Performance
+ - Reduced clone size from 528 MB to ~50 MB for typical 10-repo analysis (90% reduction)
+ - Example: ant-design repo reduced from 184 MB (full) to 14 MB (depth=50) - 92% smaller
+ - Analysis time for small experiments reduced from 6+ minutes to under 1 minute
+
  ## [1.2.4] - 2026-02-01
 
  ### Added
@@ -116,6 +132,7 @@
  - Green awareness analysis
  - Docker containerization
 
+ [1.2.5]: https://github.com/adam-bouafia/greenmining/compare/v1.2.4...v1.2.5
  [1.2.4]: https://github.com/adam-bouafia/greenmining/compare/v1.2.3...v1.2.4
  [1.2.3]: https://github.com/adam-bouafia/greenmining/compare/v1.2.1...v1.2.3
  [1.2.1]: https://github.com/adam-bouafia/greenmining/compare/v1.2.0...v1.2.1
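
The shallow-clone behavior described in the 1.2.5 notes can be sketched roughly as follows. This is an illustrative reconstruction from the changelog, not the package's actual code; the helper name `shallow_clone_repo` is hypothetical.

```python
import subprocess

def shallow_clone_repo(url: str, dest: str, max_commits: int, clone_depth: int | None = None) -> None:
    # Hypothetical sketch: per the changelog, depth defaults to
    # max(50, max_commits * 3) so the truncated history still covers
    # max_commits commits even after merge commits are skipped.
    depth = clone_depth if clone_depth is not None else max(50, max_commits * 3)
    subprocess.run(["git", "clone", f"--depth={depth}", url, dest], check=True)
```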

{greenmining-1.2.4/greenmining.egg-info → greenmining-1.2.6}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: greenmining
- Version: 1.2.4
+ Version: 1.2.6
  Summary: An empirical Python library for Mining Software Repositories (MSR) in Green IT research
  Author-email: Adam Bouafia <a.bouafia@student.vu.nl>
  License: MIT

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/__init__.py
@@ -8,7 +8,7 @@ from greenmining.gsf_patterns import (
  is_green_aware,
  )
 
- __version__ = "1.2.4"
+ __version__ = "1.2.6"
 
 
  def fetch_repositories(
@@ -75,6 +75,8 @@ def analyze_repositories(
  cleanup_after: bool = True,
  skip_merges: bool = True,
  commit_order: str = "newest_first",
+ shallow_clone: bool = True,
+ clone_depth: int = None,
  ):
  # Analyze multiple repositories from URLs.
  # Args:
@@ -93,6 +95,8 @@ def analyze_repositories(
  # cleanup_after: Remove cloned repos after analysis (default True)
  # skip_merges: Skip merge commits (default True)
  # commit_order: "newest_first" (default) or "oldest_first"
+ # shallow_clone: Use shallow cloning to reduce download size (default True)
+ # clone_depth: Git clone depth (auto-calculated from max_commits if None)
  from greenmining.services.local_repo_analyzer import LocalRepoAnalyzer
 
  kwargs = {}
@@ -116,6 +120,8 @@
  cleanup_after=cleanup_after,
  skip_merges=skip_merges,
  commit_order=commit_order,
+ shallow_clone=shallow_clone,
+ clone_depth=clone_depth,
  **kwargs,
  )
 
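Based on the signature above, a minimal call using the new parameters might look like this; the positional repository-list argument and the repo URL are assumptions for illustration.

```python
from greenmining import analyze_repositories

# shallow_clone defaults to True; clone_depth=None triggers the automatic
# depth calculation described in the docstring above.
results = analyze_repositories(
    ["https://github.com/ant-design/ant-design"],  # repo-list argument assumed
    max_commits=50,
    shallow_clone=True,
    clone_depth=None,
)
```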

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/analyzers/__init__.py
@@ -1,9 +1,9 @@
  # Analyzers for GreenMining framework.
 
  from .code_diff_analyzer import CodeDiffAnalyzer
+ from .metrics_power_correlator import CorrelationResult, MetricsPowerCorrelator
  from .statistical_analyzer import StatisticalAnalyzer
  from .temporal_analyzer import TemporalAnalyzer
- from .metrics_power_correlator import MetricsPowerCorrelator, CorrelationResult
 
  __all__ = [
  "CodeDiffAnalyzer",

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/analyzers/code_diff_analyzer.py
@@ -1,7 +1,7 @@
  # Code diff analyzer for detecting green software patterns in code changes.
 
  import re
- from typing import Any, Dict, List
+ from typing import Any
 
  from pydriller import Commit, ModifiedFile
 
@@ -207,7 +207,7 @@ class CodeDiffAnalyzer:
  },
  }
 
- def analyze_commit_diff(self, commit: Commit) -> Dict[str, Any]:
+ def analyze_commit_diff(self, commit: Commit) -> dict[str, Any]:
  # Analyze code changes in a commit to detect green patterns.
  patterns_detected = []
  evidence = {}
@@ -244,12 +244,12 @@ class CodeDiffAnalyzer:
  "metrics": metrics,
  }
 
- def _detect_patterns_in_line(self, code_line: str) -> List[str]:
+ def _detect_patterns_in_line(self, code_line: str) -> list[str]:
  # Detect patterns in a single line of code.
  detected = []
 
  for pattern_name, signatures in self.PATTERN_SIGNATURES.items():
- for signature_type, patterns in signatures.items():
+ for _signature_type, patterns in signatures.items():
  for pattern_regex in patterns:
  if re.search(pattern_regex, code_line, re.IGNORECASE):
  detected.append(pattern_name)
@@ -257,7 +257,7 @@ class CodeDiffAnalyzer:
 
  return detected
 
- def _calculate_metrics(self, commit: Commit) -> Dict[str, int]:
+ def _calculate_metrics(self, commit: Commit) -> dict[str, int]:
  # Calculate code change metrics.
  lines_added = sum(f.added_lines for f in commit.modified_files)
  lines_removed = sum(f.deleted_lines for f in commit.modified_files)
@@ -276,7 +276,7 @@ class CodeDiffAnalyzer:
  }
 
  def _calculate_diff_confidence(
- self, patterns: List[str], evidence: Dict[str, List[str]], metrics: Dict[str, int]
+ self, patterns: list[str], evidence: dict[str, list[str]], metrics: dict[str, int]
  ) -> str:
  # Calculate confidence level for diff-based detection.
  if not patterns:
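
For orientation, the analyzer above plugs into a PyDriller traversal roughly like this; the result key `patterns_detected` is inferred from the variable names in the hunk above and may differ in the released code.

```python
from pydriller import Repository
from greenmining.analyzers import CodeDiffAnalyzer

analyzer = CodeDiffAnalyzer()
for commit in Repository("https://github.com/ant-design/ant-design").traverse_commits():
    result = analyzer.analyze_commit_diff(commit)  # returns a dict[str, Any]
    if result.get("patterns_detected"):  # key name inferred, not confirmed
        print(commit.hash, result["patterns_detected"])
```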

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/analyzers/metrics_power_correlator.py
@@ -3,8 +3,8 @@
 
  from __future__ import annotations
 
- from dataclasses import dataclass, field
- from typing import Any, Dict, List, Optional
+ from dataclasses import dataclass
+ from typing import Any
 
  import numpy as np
  from scipy import stats
@@ -22,7 +22,7 @@ class CorrelationResult:
  significant: bool = False
  strength: str = "none"
 
- def to_dict(self) -> Dict[str, Any]:
+ def to_dict(self) -> dict[str, Any]:
  return {
  "metric_name": self.metric_name,
  "pearson_r": round(self.pearson_r, 4),
@@ -44,17 +44,17 @@ class MetricsPowerCorrelator:
  # Args:
  # significance_level: P-value threshold for significance
  self.significance_level = significance_level
- self._metrics_data: Dict[str, List[float]] = {}
- self._power_data: List[float] = []
+ self._metrics_data: dict[str, list[float]] = {}
+ self._power_data: list[float] = []
  self._fitted = False
- self._results: Dict[str, CorrelationResult] = {}
- self._feature_importance: Dict[str, float] = {}
+ self._results: dict[str, CorrelationResult] = {}
+ self._feature_importance: dict[str, float] = {}
 
  def fit(
  self,
- metrics: List[str],
- metrics_values: Dict[str, List[float]],
- power_measurements: List[float],
+ metrics: list[str],
+ metrics_values: dict[str, list[float]],
+ power_measurements: list[float],
  ) -> None:
  # Fit the correlator with metrics and power data.
  # Args:
@@ -86,7 +86,7 @@ class MetricsPowerCorrelator:
  self._fitted = True
 
  def _compute_correlation(
- self, metric_name: str, metric_values: List[float], power_values: List[float]
+ self, metric_name: str, metric_values: list[float], power_values: list[float]
  ) -> CorrelationResult:
  # Compute Pearson and Spearman correlations for a single metric.
  x = np.array(metric_values, dtype=float)
@@ -127,29 +127,29 @@
  )
 
  @property
- def pearson(self) -> Dict[str, float]:
+ def pearson(self) -> dict[str, float]:
  # Get Pearson correlations for all metrics.
  return {name: r.pearson_r for name, r in self._results.items()}
 
  @property
- def spearman(self) -> Dict[str, float]:
+ def spearman(self) -> dict[str, float]:
  # Get Spearman correlations for all metrics.
  return {name: r.spearman_r for name, r in self._results.items()}
 
  @property
- def feature_importance(self) -> Dict[str, float]:
+ def feature_importance(self) -> dict[str, float]:
  # Get normalized feature importance scores.
  return self._feature_importance
 
- def get_results(self) -> Dict[str, CorrelationResult]:
+ def get_results(self) -> dict[str, CorrelationResult]:
  # Get all correlation results.
  return self._results
 
- def get_significant_correlations(self) -> Dict[str, CorrelationResult]:
+ def get_significant_correlations(self) -> dict[str, CorrelationResult]:
  # Get only statistically significant correlations.
  return {name: r for name, r in self._results.items() if r.significant}
 
- def summary(self) -> Dict[str, Any]:
+ def summary(self) -> dict[str, Any]:
  # Generate summary of correlation analysis.
  return {
  "total_metrics": len(self._results),
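
A minimal usage sketch of the correlator, following the `fit()` signature and accessors shown above; the metric names and numbers are invented.

```python
from greenmining.analyzers import MetricsPowerCorrelator

correlator = MetricsPowerCorrelator(significance_level=0.05)
correlator.fit(
    metrics=["lines_added", "files_changed"],
    metrics_values={
        "lines_added": [10.0, 42.0, 7.0, 90.0],
        "files_changed": [1.0, 5.0, 2.0, 9.0],
    },
    power_measurements=[3.1, 8.4, 2.7, 15.2],  # e.g. joules per run
)
print(correlator.pearson)                         # Pearson r per metric
print(correlator.get_significant_correlations())  # filtered by p-value
```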

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/analyzers/statistical_analyzer.py
@@ -2,7 +2,7 @@
 
  from __future__ import annotations
 
- from typing import Any, Dict, List
+ from typing import Any
 
  import numpy as np
  import pandas as pd
@@ -12,7 +12,7 @@ from scipy import stats
  class StatisticalAnalyzer:
  # Advanced statistical analyses for green software patterns.
 
- def analyze_pattern_correlations(self, commit_data: pd.DataFrame) -> Dict[str, Any]:
+ def analyze_pattern_correlations(self, commit_data: pd.DataFrame) -> dict[str, Any]:
  # Analyze correlations between patterns.
  # Create pattern co-occurrence matrix
  pattern_columns = [col for col in commit_data.columns if col.startswith("pattern_")]
@@ -47,7 +47,7 @@ class StatisticalAnalyzer:
  "interpretation": self._interpret_correlations(significant_pairs),
  }
 
- def temporal_trend_analysis(self, commits_df: pd.DataFrame) -> Dict[str, Any]:
+ def temporal_trend_analysis(self, commits_df: pd.DataFrame) -> dict[str, Any]:
  # Analyze temporal trends in green awareness.
  # Prepare time series data
  commits_df["date"] = pd.to_datetime(commits_df["date"], utc=True, errors="coerce")
@@ -101,7 +101,7 @@
  "monthly_data": monthly.to_dict(),
  }
 
- def effect_size_analysis(self, group1: List[float], group2: List[float]) -> Dict[str, Any]:
+ def effect_size_analysis(self, group1: list[float], group2: list[float]) -> dict[str, Any]:
  # Calculate effect size between two groups.
  # Cohen's d (effect size)
  mean1, mean2 = np.mean(group1), np.mean(group2)
@@ -135,7 +135,7 @@
  "significant": bool(p_value < 0.05),
  }
 
- def _interpret_correlations(self, significant_pairs: List[Dict[str, Any]]) -> str:
+ def _interpret_correlations(self, significant_pairs: list[dict[str, Any]]) -> str:
  # Generate interpretation of correlation results.
  if not significant_pairs:
  return "No significant correlations found between patterns."
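
The effect-size helper above takes two plain samples; a toy call might look like this (numbers invented).

```python
from greenmining.analyzers import StatisticalAnalyzer

analyzer = StatisticalAnalyzer()
# Compare, say, energy readings for green-aware vs. other commits.
result = analyzer.effect_size_analysis(
    group1=[12.0, 15.5, 11.2, 13.8],
    group2=[20.1, 22.4, 19.8, 21.5],
)
print(result["significant"])  # True when p < 0.05, per the hunk above
```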

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/analyzers/temporal_analyzer.py
@@ -2,11 +2,10 @@
 
  from __future__ import annotations
 
- from datetime import datetime, timedelta
- from typing import Dict, List, Optional, Tuple
- from dataclasses import dataclass
- from collections import defaultdict
  import statistics
+ from collections import defaultdict
+ from dataclasses import dataclass
+ from datetime import datetime, timedelta
 
 
  @dataclass
@@ -20,7 +19,7 @@ class TemporalMetrics:
  green_commit_count: int
  green_awareness_rate: float
  unique_patterns: int
- dominant_pattern: Optional[str]
+ dominant_pattern: str | None
  velocity: float # commits per day
 
 
@@ -44,8 +43,8 @@ class TemporalAnalyzer:
  self.granularity = granularity
 
  def group_commits_by_period(
- self, commits: List[Dict], date_field: str = "date"
- ) -> Dict[str, List[Dict]]:
+ self, commits: list[dict], date_field: str = "date"
+ ) -> dict[str, list[dict]]:
  # Group commits into time periods.
  periods = defaultdict(list)
 
@@ -86,7 +85,7 @@ class TemporalAnalyzer:
  else:
  return date.strftime("%Y-%m")
 
- def _parse_period_key(self, period_key: str) -> Tuple[datetime, datetime]:
+ def _parse_period_key(self, period_key: str) -> tuple[datetime, datetime]:
  # Parse period key back to start and end dates.
  if "W" in period_key:
  # Week format: 2024-W15
@@ -138,7 +137,7 @@ class TemporalAnalyzer:
  return start, end
 
  def calculate_period_metrics(
- self, period_key: str, commits: List[Dict], analysis_results: List[Dict]
+ self, period_key: str, commits: list[dict], analysis_results: list[dict]
  ) -> TemporalMetrics:
  # Calculate metrics for a time period.
  start_date, end_date = self._parse_period_key(period_key)
@@ -185,7 +184,7 @@ class TemporalAnalyzer:
  velocity=round(velocity, 2),
  )
 
- def analyze_trends(self, commits: List[Dict], analysis_results: List[Dict]) -> Dict:
+ def analyze_trends(self, commits: list[dict], analysis_results: list[dict]) -> dict:
  # Comprehensive temporal trend analysis.
  # Group by periods
  grouped = self.group_commits_by_period(commits)
@@ -227,7 +226,7 @@ class TemporalAnalyzer:
  },
  }
 
- def _calculate_trend(self, periods: List[TemporalMetrics]) -> Optional[TrendAnalysis]:
+ def _calculate_trend(self, periods: list[TemporalMetrics]) -> TrendAnalysis | None:
  # Calculate linear trend using least squares regression.
  if len(periods) < 2:
  return None
@@ -275,7 +274,7 @@ class TemporalAnalyzer:
  change_percentage=round(change, 2),
  )
 
- def _calculate_adoption_curve(self, periods: List[TemporalMetrics]) -> List[Tuple[str, float]]:
+ def _calculate_adoption_curve(self, periods: list[TemporalMetrics]) -> list[tuple[str, float]]:
  # Calculate cumulative adoption over time.
  cumulative_green = 0
  cumulative_total = 0
@@ -291,7 +290,7 @@ class TemporalAnalyzer:
 
  return curve
 
- def _calculate_velocity_trend(self, periods: List[TemporalMetrics]) -> Dict:
+ def _calculate_velocity_trend(self, periods: list[TemporalMetrics]) -> dict:
  # Analyze velocity changes over time.
  if not periods:
  return {}
@@ -307,8 +306,8 @@ class TemporalAnalyzer:
  }
 
  def _analyze_pattern_evolution(
- self, periods: List[TemporalMetrics], analysis_results: List[Dict]
- ) -> Dict:
+ self, periods: list[TemporalMetrics], analysis_results: list[dict]
+ ) -> dict:
  # Track when different patterns emerged and dominated.
  pattern_timeline = defaultdict(lambda: {"first_seen": None, "occurrences_by_period": {}})
 
@@ -349,7 +348,7 @@ class TemporalAnalyzer:
  for pattern, data in pattern_timeline.items()
  }
 
- def _metrics_to_dict(self, metrics: TemporalMetrics) -> Dict:
+ def _metrics_to_dict(self, metrics: TemporalMetrics) -> dict:
  # Convert TemporalMetrics to dictionary.
  return {
  "period": metrics.period,
@@ -363,7 +362,7 @@ class TemporalAnalyzer:
  "velocity": metrics.velocity,
  }
 
- def _trend_to_dict(self, trend: Optional[TrendAnalysis]) -> Dict:
+ def _trend_to_dict(self, trend: TrendAnalysis | None) -> dict:
  # Convert TrendAnalysis to dictionary.
  if not trend:
  return {}
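
Putting the signatures above together, a call might look like the following; the granularity value and the commit/result dictionary shapes are assumptions, since only the signatures are visible in this diff.

```python
from datetime import datetime, timezone

from greenmining.analyzers import TemporalAnalyzer

analyzer = TemporalAnalyzer(granularity="month")  # value assumed from the %Y-%m format above
commits = [{"date": datetime(2024, 3, 1, tzinfo=timezone.utc), "hash": "abc123"}]  # shape assumed
analysis_results = [{"hash": "abc123", "patterns": ["caching"]}]                   # shape assumed
trends = analyzer.analyze_trends(commits, analysis_results)
print(trends.keys())
```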

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/controllers/repository_controller.py
@@ -41,9 +41,7 @@ class RepositoryController:
  f" Created: {created_after or 'any'} to {created_before or 'any'}", "cyan"
  )
  if pushed_after or pushed_before:
- colored_print(
- f" Pushed: {pushed_after or 'any'} to {pushed_before or 'any'}", "cyan"
- )
+ colored_print(f" Pushed: {pushed_after or 'any'} to {pushed_before or 'any'}", "cyan")
 
  try:
  repositories = self.graphql_fetcher.search_repositories(

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/__init__.py
@@ -1,10 +1,10 @@
  # Energy measurement module for GreenMining.
 
- from .base import EnergyMeter, EnergyMetrics, EnergyBackend, CommitEnergyProfile, get_energy_meter
- from .rapl import RAPLEnergyMeter
+ from .base import CommitEnergyProfile, EnergyBackend, EnergyMeter, EnergyMetrics, get_energy_meter
+ from .carbon_reporter import CarbonReport, CarbonReporter
  from .codecarbon_meter import CodeCarbonMeter
  from .cpu_meter import CPUEnergyMeter
- from .carbon_reporter import CarbonReporter, CarbonReport
+ from .rapl import RAPLEnergyMeter
 
  __all__ = [
  "EnergyMeter",

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/base.py
@@ -3,11 +3,10 @@
  from __future__ import annotations
 
  from abc import ABC, abstractmethod
- from dataclasses import dataclass, field
+ from dataclasses import dataclass
  from datetime import datetime
  from enum import Enum
- from typing import Any, Dict, List, Optional, Callable
- import time
+ from typing import Any, Callable
 
 
  class EnergyBackend(Enum):
@@ -31,16 +30,16 @@ class EnergyMetrics:
  # Component-specific energy (if available)
  cpu_energy_joules: float = 0.0 # CPU-specific energy
  dram_energy_joules: float = 0.0 # Memory energy
- gpu_energy_joules: Optional[float] = None # GPU energy if available
+ gpu_energy_joules: float | None = None # GPU energy if available
 
  # Carbon footprint (if carbon tracking enabled)
- carbon_grams: Optional[float] = None # CO2 equivalent in grams
- carbon_intensity: Optional[float] = None # gCO2/kWh of grid
+ carbon_grams: float | None = None # CO2 equivalent in grams
+ carbon_intensity: float | None = None # gCO2/kWh of grid
 
  # Metadata
  backend: str = ""
- start_time: Optional[datetime] = None
- end_time: Optional[datetime] = None
+ start_time: datetime | None = None
+ end_time: datetime | None = None
 
  @property
  def energy_joules(self) -> float:
@@ -50,7 +49,7 @@ class EnergyMetrics:
  def average_power_watts(self) -> float:
  return self.watts_avg
 
- def to_dict(self) -> Dict[str, Any]:
+ def to_dict(self) -> dict[str, Any]:
  # Convert to dictionary.
  return {
  "joules": self.joules,
@@ -73,13 +72,13 @@ class CommitEnergyProfile:
  # Energy profile for a specific commit.
 
  commit_hash: str
- energy_before: Optional[EnergyMetrics] = None # Parent commit energy
- energy_after: Optional[EnergyMetrics] = None # This commit energy
+ energy_before: EnergyMetrics | None = None # Parent commit energy
+ energy_after: EnergyMetrics | None = None # This commit energy
  energy_delta: float = 0.0 # Change in joules
  energy_regression: bool = False # True if energy increased
  regression_percentage: float = 0.0 # % change
 
- def to_dict(self) -> Dict[str, Any]:
+ def to_dict(self) -> dict[str, Any]:
  # Convert to dictionary.
  return {
  "commit_hash": self.commit_hash,
@@ -98,8 +97,8 @@ class EnergyMeter(ABC):
  # Initialize the energy meter.
  self.backend = backend
  self._is_measuring = False
- self._start_time: Optional[float] = None
- self._measurements: List[float] = []
+ self._start_time: float | None = None
+ self._measurements: list[float] = []
 
  @abstractmethod
  def is_available(self) -> bool:
@@ -125,7 +124,7 @@ class EnergyMeter(ABC):
  metrics = self.stop()
  return result, metrics
 
- def measure_command(self, command: str, timeout: Optional[int] = None) -> EnergyMetrics:
+ def measure_command(self, command: str, timeout: int | None = None) -> EnergyMetrics:
  # Measure energy consumption of a shell command.
  import subprocess
 
@@ -156,9 +155,9 @@ class EnergyMeter(ABC):
  def get_energy_meter(backend: str = "rapl") -> EnergyMeter:
  # Factory function to get an energy meter instance.
  # Supported backends: rapl, codecarbon, cpu_meter, auto
- from .rapl import RAPLEnergyMeter
  from .codecarbon_meter import CodeCarbonMeter
  from .cpu_meter import CPUEnergyMeter
+ from .rapl import RAPLEnergyMeter
 
  backend_lower = backend.lower()
 
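A sketch of the factory above in use; `start()` is assumed to exist as the counterpart of the `stop()` call visible in the hunks, and the fallback order for `"auto"` is not shown in this diff.

```python
import time

from greenmining.energy import get_energy_meter

meter = get_energy_meter("auto")  # "auto" is listed as a supported backend above
if meter.is_available():
    meter.start()        # assumed counterpart of stop()
    time.sleep(1.0)      # stand-in for the workload being measured
    metrics = meter.stop()
    print(metrics.to_dict()["joules"])
```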

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/carbon_reporter.py
@@ -4,7 +4,7 @@
  from __future__ import annotations
 
  from dataclasses import dataclass, field
- from typing import Any, Dict, List, Optional
+ from typing import Any
 
  from .base import EnergyMetrics
 
@@ -88,9 +88,9 @@ class CarbonReport:
  tree_months: float = 0.0 # Equivalent tree-months to offset
  smartphone_charges: float = 0.0 # Equivalent smartphone charges
  km_driven: float = 0.0 # Equivalent km driven in average car
- analysis_results: List[Dict[str, Any]] = field(default_factory=list)
+ analysis_results: list[dict[str, Any]] = field(default_factory=list)
 
- def to_dict(self) -> Dict[str, Any]:
+ def to_dict(self) -> dict[str, Any]:
  return {
  "total_energy_kwh": round(self.total_energy_kwh, 6),
  "total_emissions_kg": round(self.total_emissions_kg, 6),
@@ -143,8 +143,8 @@ class CarbonReporter:
  def __init__(
  self,
  country_iso: str = "USA",
- cloud_provider: Optional[str] = None,
- region: Optional[str] = None,
+ cloud_provider: str | None = None,
+ region: str | None = None,
  ):
  # Initialize carbon reporter.
  # Args:
@@ -169,9 +169,9 @@ class CarbonReporter:
 
  def generate_report(
  self,
- energy_metrics: Optional[EnergyMetrics] = None,
- analysis_results: Optional[List[Dict[str, Any]]] = None,
- total_joules: Optional[float] = None,
+ energy_metrics: EnergyMetrics | None = None,
+ analysis_results: list[dict[str, Any]] | None = None,
+ total_joules: float | None = None,
  ) -> CarbonReport:
  # Generate a carbon footprint report.
  # Args:
@@ -232,11 +232,11 @@ class CarbonReporter:
  return self.carbon_intensity
 
  @staticmethod
- def get_supported_countries() -> List[str]:
+ def get_supported_countries() -> list[str]:
  # Get list of supported country ISO codes.
  return list(CARBON_INTENSITY_BY_COUNTRY.keys())
 
  @staticmethod
- def get_supported_cloud_regions(provider: str) -> List[str]:
+ def get_supported_cloud_regions(provider: str) -> list[str]:
  # Get list of supported cloud regions for a provider.
  return list(CLOUD_REGION_INTENSITY.get(provider.lower(), {}).keys())
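
Given the `generate_report` signature above, a direct joules-to-carbon conversion might be invoked like this; the joule figure is invented, and `country_iso` should be one of `get_supported_countries()`.

```python
from greenmining.energy import CarbonReporter

reporter = CarbonReporter(country_iso="USA")  # default shown in the hunk above
report = reporter.generate_report(total_joules=1500.0)
print(report.to_dict()["total_emissions_kg"])
```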

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/codecarbon_meter.py
@@ -4,9 +4,8 @@ from __future__ import annotations
 
  import time
  from datetime import datetime
- from typing import Optional
 
- from .base import EnergyMeter, EnergyMetrics, EnergyBackend
+ from .base import EnergyBackend, EnergyMeter, EnergyMetrics
 
 
  class CodeCarbonMeter(EnergyMeter):
@@ -15,7 +14,7 @@ class CodeCarbonMeter(EnergyMeter):
  def __init__(
  self,
  project_name: str = "greenmining",
- output_dir: Optional[str] = None,
+ output_dir: str | None = None,
  save_to_file: bool = False,
  ):
  # Initialize CodeCarbon energy meter.
@@ -24,13 +23,13 @@ class CodeCarbonMeter(EnergyMeter):
  self.output_dir = output_dir
  self.save_to_file = save_to_file
  self._tracker = None
- self._start_time: Optional[float] = None
+ self._start_time: float | None = None
  self._codecarbon_available = self._check_codecarbon()
 
  def _check_codecarbon(self) -> bool:
  # Check if CodeCarbon is installed.
  try:
- from codecarbon import EmissionsTracker
+ from codecarbon import EmissionsTracker # noqa: F401
 
  return True
  except ImportError:
@@ -123,4 +122,3 @@ class CodeCarbonMeter(EnergyMeter):
  start_time=datetime.fromtimestamp(self._start_time),
  end_time=datetime.fromtimestamp(end_time),
  )
-

{greenmining-1.2.4 → greenmining-1.2.6}/greenmining/energy/cpu_meter.py
@@ -3,12 +3,11 @@
 
  from __future__ import annotations
 
- import time
  import platform
+ import time
  from datetime import datetime
- from typing import List, Optional
 
- from .base import EnergyMeter, EnergyMetrics, EnergyBackend
+ from .base import EnergyBackend, EnergyMeter, EnergyMetrics
 
 
  class CPUEnergyMeter(EnergyMeter):
@@ -23,7 +22,7 @@ class CPUEnergyMeter(EnergyMeter):
  "Windows": 65.0,
  }
 
- def __init__(self, tdp_watts: Optional[float] = None, sample_interval: float = 0.5):
+ def __init__(self, tdp_watts: float | None = None, sample_interval: float = 0.5):
  # Initialize CPU energy meter.
  # Args:
  # tdp_watts: CPU Thermal Design Power in watts (auto-detected if None)
@@ -31,15 +30,15 @@ class CPUEnergyMeter(EnergyMeter):
  super().__init__(EnergyBackend.CPU_METER)
  self.tdp_watts = tdp_watts or self._detect_tdp()
  self.sample_interval = sample_interval
- self._start_time: Optional[float] = None
- self._samples: List[float] = []
+ self._start_time: float | None = None
+ self._samples: list[float] = []
  self._platform = platform.system()
  self._psutil_available = self._check_psutil()
 
  def _check_psutil(self) -> bool:
  # Check if psutil is available.
  try:
- import psutil
+ import psutil # noqa: F401
 
  return True
  except ImportError: