greenmining 1.1.7-py3-none-any.whl → 1.1.8-py3-none-any.whl
- greenmining/__init__.py +1 -1
- greenmining/analyzers/metrics_power_correlator.py +1 -1
- greenmining/analyzers/power_regression.py +0 -1
- greenmining/analyzers/qualitative_analyzer.py +1 -1
- greenmining/analyzers/statistical_analyzer.py +0 -32
- greenmining/config.py +21 -130
- greenmining/controllers/repository_controller.py +0 -2
- greenmining/energy/codecarbon_meter.py +0 -21
- greenmining/energy/cpu_meter.py +1 -1
- greenmining/gsf_patterns.py +41 -0
- greenmining/models/aggregated_stats.py +1 -1
- greenmining/models/commit.py +0 -1
- greenmining/models/repository.py +1 -1
- greenmining/services/commit_extractor.py +2 -41
- greenmining/services/data_aggregator.py +1 -6
- greenmining/services/data_analyzer.py +1 -57
- greenmining/services/local_repo_analyzer.py +1 -2
- greenmining/services/reports.py +1 -6
- greenmining/utils.py +0 -87
- {greenmining-1.1.7.dist-info → greenmining-1.1.8.dist-info}/METADATA +3 -3
- greenmining-1.1.8.dist-info/RECORD +40 -0
- greenmining/__version__.py +0 -3
- greenmining/services/github_fetcher.py +0 -2
- greenmining-1.1.7.dist-info/RECORD +0 -42
- {greenmining-1.1.7.dist-info → greenmining-1.1.8.dist-info}/WHEEL +0 -0
- {greenmining-1.1.7.dist-info → greenmining-1.1.8.dist-info}/licenses/LICENSE +0 -0
- {greenmining-1.1.7.dist-info → greenmining-1.1.8.dist-info}/top_level.txt +0 -0
greenmining/__init__.py
CHANGED
greenmining/analyzers/statistical_analyzer.py
CHANGED

@@ -135,38 +135,6 @@ class StatisticalAnalyzer:
             "significant": bool(p_value < 0.05),
         }
 
-    def pattern_adoption_rate_analysis(self, commits_df: pd.DataFrame) -> Dict[str, Any]:
-        # Analyze pattern adoption rates over repository lifetime.
-        results = {}
-
-        for pattern in commits_df["pattern"].unique():
-            pattern_commits = commits_df[commits_df["pattern"] == pattern].sort_values("date")
-
-            if len(pattern_commits) == 0:
-                continue
-
-            # Time to first adoption
-            first_adoption = pattern_commits.iloc[0]["date"]
-            repo_start = commits_df["date"].min()
-            ttfa_days = (first_adoption - repo_start).days
-
-            # Adoption frequency over time
-            monthly_adoption = pattern_commits.set_index("date").resample("ME").size()
-
-            # Pattern stickiness (months with at least one adoption)
-            total_months = len(commits_df.set_index("date").resample("ME").size())
-            active_months = len(monthly_adoption[monthly_adoption > 0])
-            stickiness = active_months / total_months if total_months > 0 else 0
-
-            results[pattern] = {
-                "ttfa_days": ttfa_days,
-                "total_adoptions": len(pattern_commits),
-                "stickiness": stickiness,
-                "monthly_adoption_rate": monthly_adoption.mean(),
-            }
-
-        return results
-
     def _interpret_correlations(self, significant_pairs: List[Dict[str, Any]]) -> str:
         # Generate interpretation of correlation results.
         if not significant_pairs:
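
The removed method computed three per-pattern adoption signals: time to first adoption (TTFA), a monthly adoption series, and stickiness (the share of months with at least one adoption). For readers tracking what 1.1.8 drops, here is the same computation restated as a minimal standalone sketch; the sample DataFrame is illustrative, and only the "date"/"pattern" columns and the resample("ME") logic come from the removed code (the "ME" month-end alias needs pandas >= 2.2).

import pandas as pd

# Illustrative input: one row per pattern-tagged commit, matching the columns
# the removed method expected ("date" as datetime64, "pattern" as a label).
commits_df = pd.DataFrame({
    "date": pd.to_datetime(["2024-01-05", "2024-01-20", "2024-03-02", "2024-04-11"]),
    "pattern": ["cache_static_data", "cache_static_data", "cache_static_data", "compress_data"],
})

repo_start = commits_df["date"].min()
total_months = len(commits_df.set_index("date").resample("ME").size())

for pattern, group in commits_df.groupby("pattern"):
    group = group.sort_values("date")
    ttfa_days = (group.iloc[0]["date"] - repo_start).days     # time to first adoption
    monthly = group.set_index("date").resample("ME").size()   # adoptions per month-end bucket
    stickiness = (monthly > 0).sum() / total_months if total_months else 0
    print(pattern, ttfa_days, round(stickiness, 2))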
greenmining/config.py
CHANGED

@@ -1,6 +1,6 @@
 import os
 from pathlib import Path
-from typing import Any, Dict, List
+from typing import Any, Dict, List
 
 from dotenv import load_dotenv
 
@@ -25,14 +25,13 @@ class Config:
 
     def __init__(self, env_file: str = ".env", yaml_file: str = "greenmining.yaml"):
        # Initialize configuration from environment and YAML file.
-        # Load environment variables
         env_path = Path(env_file)
         if env_path.exists():
             load_dotenv(env_path)
         else:
-            load_dotenv()
+            load_dotenv()
 
-        # Load YAML config
+        # Load YAML config
         yaml_path = Path(yaml_file)
         self._yaml_config = _load_yaml_config(yaml_path)
 
@@ -41,78 +40,38 @@ class Config:
         if not self.GITHUB_TOKEN or self.GITHUB_TOKEN == "your_github_pat_here":
             raise ValueError("GITHUB_TOKEN not set. Please set it in .env file or environment.")
 
-        #
-        self.ANALYSIS_TYPE = "keyword_heuristic"
-
-        # Search and Processing Configuration (YAML: sources.search.keywords)
+        # Search Configuration (YAML: sources.search.*)
         yaml_search = self._yaml_config.get("sources", {}).get("search", {})
-        self.GITHUB_SEARCH_KEYWORDS = yaml_search.get(
-            "keywords", ["microservices", "microservice-architecture", "cloud-native"]
-        )
 
-
-        self.SUPPORTED_LANGUAGES = yaml_search.get(
+        self.SUPPORTED_LANGUAGES: List[str] = yaml_search.get(
             "languages",
             [
-                "Java",
                 "Python",
-                "Go",
                 "JavaScript",
                 "TypeScript",
+                "Java",
+                "C++",
                 "C#",
+                "Go",
                 "Rust",
+                "PHP",
+                "Ruby",
+                "Swift",
+                "Kotlin",
+                "Scala",
+                "R",
+                "MATLAB",
+                "Dart",
+                "Lua",
+                "Perl",
+                "Haskell",
+                "Elixir",
             ],
         )
 
-        # Repository
-        yaml_extraction = self._yaml_config.get("extraction", {})
+        # Repository Limits
         self.MIN_STARS = yaml_search.get("min_stars", int(os.getenv("MIN_STARS", "100")))
         self.MAX_REPOS = int(os.getenv("MAX_REPOS", "100"))
-        self.COMMITS_PER_REPO = yaml_extraction.get(
-            "max_commits", int(os.getenv("COMMITS_PER_REPO", "50"))
-        )
-        self.DAYS_BACK = yaml_extraction.get("days_back", int(os.getenv("DAYS_BACK", "730")))
-        self.SKIP_MERGES = yaml_extraction.get("skip_merges", True)
-
-        # Analysis Configuration (YAML: analysis.*)
-        yaml_analysis = self._yaml_config.get("analysis", {})
-        self.ENABLE_NLP_ANALYSIS = os.getenv("ENABLE_NLP_ANALYSIS", "false").lower() == "true"
-        self.ENABLE_TEMPORAL_ANALYSIS = (
-            os.getenv("ENABLE_TEMPORAL_ANALYSIS", "false").lower() == "true"
-        )
-        self.TEMPORAL_GRANULARITY = os.getenv("TEMPORAL_GRANULARITY", "quarter")
-        self.ENABLE_ML_FEATURES = os.getenv("ENABLE_ML_FEATURES", "false").lower() == "true"
-        self.VALIDATION_SAMPLE_SIZE = int(os.getenv("VALIDATION_SAMPLE_SIZE", "30"))
-
-        # PyDriller options (YAML: analysis.process_metrics, etc.)
-        self.PROCESS_METRICS_ENABLED = yaml_analysis.get(
-            "process_metrics", os.getenv("PROCESS_METRICS_ENABLED", "true").lower() == "true"
-        )
-        self.STRUCTURAL_METRICS_ENABLED = yaml_analysis.get(
-            "structural_metrics", os.getenv("STRUCTURAL_METRICS_ENABLED", "true").lower() == "true"
-        )
-        self.DMM_ENABLED = yaml_analysis.get(
-            "delta_maintainability", os.getenv("DMM_ENABLED", "true").lower() == "true"
-        )
-
-        # Temporal Filtering
-        self.CREATED_AFTER = os.getenv("CREATED_AFTER")
-        self.CREATED_BEFORE = os.getenv("CREATED_BEFORE")
-        self.PUSHED_AFTER = os.getenv("PUSHED_AFTER")
-        self.PUSHED_BEFORE = os.getenv("PUSHED_BEFORE")
-        self.COMMIT_DATE_FROM = os.getenv("COMMIT_DATE_FROM")
-        self.COMMIT_DATE_TO = os.getenv("COMMIT_DATE_TO")
-        self.MIN_COMMITS = int(os.getenv("MIN_COMMITS", "0"))
-        self.ACTIVITY_WINDOW_DAYS = int(os.getenv("ACTIVITY_WINDOW_DAYS", "730"))
-
-        # Analysis Configuration
-        self.BATCH_SIZE = int(os.getenv("BATCH_SIZE", "10"))
-
-        # Processing Configuration
-        self.TIMEOUT_SECONDS = int(os.getenv("TIMEOUT_SECONDS", "30"))
-        self.MAX_RETRIES = int(os.getenv("MAX_RETRIES", "3"))
-        self.RETRY_DELAY = 2
-        self.EXPONENTIAL_BACKOFF = True
 
         # Output Configuration (YAML: output.directory)
         yaml_output = self._yaml_config.get("output", {})
@@ -121,80 +80,12 @@ class Config:
 
         # File Paths
         self.REPOS_FILE = self.OUTPUT_DIR / "repositories.json"
-        self.COMMITS_FILE = self.OUTPUT_DIR / "commits.json"
-        self.ANALYSIS_FILE = self.OUTPUT_DIR / "analysis_results.json"
-        self.AGGREGATED_FILE = self.OUTPUT_DIR / "aggregated_statistics.json"
-        self.CSV_FILE = self.OUTPUT_DIR / "green_analysis_results.csv"
-        self.REPORT_FILE = self.OUTPUT_DIR / "green_microservices_analysis.md"
-        self.CHECKPOINT_FILE = self.OUTPUT_DIR / "checkpoint.json"
-
-        # Direct Repository URL Support (YAML: sources.urls)
-        yaml_urls = self._yaml_config.get("sources", {}).get("urls", [])
-        env_urls = self._parse_repository_urls(os.getenv("REPOSITORY_URLS", ""))
-        self.REPOSITORY_URLS: List[str] = yaml_urls if yaml_urls else env_urls
-
-        # Clone path (YAML: extraction.clone_path)
-        self.CLONE_PATH = Path(
-            yaml_extraction.get("clone_path", os.getenv("CLONE_PATH", "/tmp/greenmining_repos"))
-        )
-        self.CLEANUP_AFTER_ANALYSIS = os.getenv("CLEANUP_AFTER_ANALYSIS", "true").lower() == "true"
-
-        # Energy Measurement (YAML: energy.*)
-        yaml_energy = self._yaml_config.get("energy", {})
-        self.ENERGY_ENABLED = yaml_energy.get(
-            "enabled", os.getenv("ENERGY_ENABLED", "false").lower() == "true"
-        )
-        self.ENERGY_BACKEND = yaml_energy.get("backend", os.getenv("ENERGY_BACKEND", "rapl"))
-        self.CARBON_TRACKING = yaml_energy.get(
-            "carbon_tracking", os.getenv("CARBON_TRACKING", "false").lower() == "true"
-        )
-        self.COUNTRY_ISO = yaml_energy.get("country_iso", os.getenv("COUNTRY_ISO", "USA"))
-
-        # Power profiling (YAML: energy.power_profiling.*)
-        yaml_power = yaml_energy.get("power_profiling", {})
-        self.POWER_PROFILING_ENABLED = yaml_power.get("enabled", False)
-        self.POWER_TEST_COMMAND = yaml_power.get("test_command", None)
-        self.POWER_REGRESSION_THRESHOLD = yaml_power.get("regression_threshold", 5.0)
-
-        # Logging
-        self.VERBOSE = os.getenv("VERBOSE", "false").lower() == "true"
-        self.LOG_FILE = self.OUTPUT_DIR / "mining.log"
-
-    def _parse_repository_urls(self, urls_str: str) -> List[str]:
-        # Parse comma-separated repository URLs from environment variable.
-        if not urls_str:
-            return []
-        return [url.strip() for url in urls_str.split(",") if url.strip()]
-
-    def validate(self) -> bool:
-        # Validate that all required configuration is present.
-        required_attrs = ["GITHUB_TOKEN", "MAX_REPOS", "COMMITS_PER_REPO"]
-
-        for attr in required_attrs:
-            if not getattr(self, attr, None):
-                raise ValueError(f"Missing required configuration: {attr}")
-
-        return True
 
     def __repr__(self) -> str:
         # String representation of configuration (hiding sensitive data).
         return (
             f"Config("
             f"MAX_REPOS={self.MAX_REPOS}, "
-            f"COMMITS_PER_REPO={self.COMMITS_PER_REPO}, "
-            f"BATCH_SIZE={self.BATCH_SIZE}, "
             f"OUTPUT_DIR={self.OUTPUT_DIR}"
             f")"
         )
-
-
-# Global config instance
-_config_instance = None
-
-
-def get_config(env_file: str = ".env") -> Config:
-    # Get or create global configuration instance.
-    global _config_instance
-    if _config_instance is None:
-        _config_instance = Config(env_file)
-    return _config_instance

greenmining/energy/codecarbon_meter.py
CHANGED

@@ -124,24 +124,3 @@ class CodeCarbonMeter(EnergyMeter):
             end_time=datetime.fromtimestamp(end_time),
         )
 
-    def get_carbon_intensity(self) -> Optional[float]:
-        # Get current carbon intensity for the configured region.
-        if not self._codecarbon_available:
-            return None
-
-        try:
-            from codecarbon import EmissionsTracker
-
-            # Create temporary tracker to get carbon intensity
-            tracker = EmissionsTracker(
-                project_name="carbon_check",
-                country_iso_code=self.country_iso_code,
-                save_to_file=False,
-                log_level="error",
-            )
-            tracker.start()
-            tracker.stop()
-
-            return getattr(tracker, "_carbon_intensity", None)
-        except Exception:
-            return None
greenmining/energy/cpu_meter.py
CHANGED
greenmining/gsf_patterns.py
CHANGED

@@ -254,6 +254,35 @@ GSF_PATTERNS = {
         "description": "Choose hardware optimized for energy efficiency",
         "sci_impact": "Direct reduction in energy consumption",
     },
+    "match_preconfigured_server": {
+        "name": "Match Utilization Requirements with Pre-configured Servers",
+        "category": "cloud",
+        "keywords": [
+            "pre-configured server",
+            "energy proportionality",
+            "server utilization",
+            "oversized server",
+            "underutilized server",
+            "server consolidation",
+        ],
+        "description": "Select pre-configured servers that match utilization needs; one highly utilized server is more energy-efficient than two underutilized ones",
+        "sci_impact": "Higher utilization improves energy proportionality; fewer servers reduces embodied carbon",
+    },
+    "optimize_customer_device_impact": {
+        "name": "Optimize Impact on Customer Devices and Equipment",
+        "category": "cloud",
+        "keywords": [
+            "customer device",
+            "backward compatible",
+            "backwards compatible",
+            "older hardware",
+            "device lifetime",
+            "older browser",
+            "end-of-life hardware",
+        ],
+        "description": "Design software to extend customer hardware lifetimes through backward compatibility with older devices, browsers, and operating systems",
+        "sci_impact": "Extending device lifetimes reduces embodied carbon; optimizing for older hardware may also reduce energy intensity",
+    },
     # ==================== WEB PATTERNS (15+) ====================
     "avoid_chaining_requests": {
         "name": "Avoid Chaining Critical Requests",
@@ -1555,6 +1584,18 @@ GREEN_KEYWORDS = [
     "workload",
     "overhead",
     "footprint",
+    # Server utilization & customer device patterns
+    "pre-configured server",
+    "energy proportionality",
+    "server consolidation",
+    "underutilized server",
+    "oversized server",
+    "backward compatible",
+    "backwards compatible",
+    "customer device",
+    "device lifetime",
+    "older browser",
+    "end-of-life hardware",
 ]
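
Both new catalog entries follow the established GSF_PATTERNS shape: a human-readable name, a category, a keyword list used for commit-message matching, and description/sci_impact strings. A sketch of how such keyword lists can flag a commit message; the dict below copies a subset of the keywords added in 1.1.8, but scan_message() is a hypothetical helper, not the library's API:

# Keyword subsets copied from the two entries added in 1.1.8.
PATTERN_KEYWORDS = {
    "match_preconfigured_server": [
        "pre-configured server", "energy proportionality",
        "underutilized server", "server consolidation",
    ],
    "optimize_customer_device_impact": [
        "customer device", "backward compatible",
        "older hardware", "device lifetime",
    ],
}

def scan_message(message: str) -> list[str]:
    # Case-insensitive substring match, at most one hit per pattern.
    text = message.lower()
    return [pattern for pattern, keywords in PATTERN_KEYWORDS.items()
            if any(kw in text for kw in keywords)]

print(scan_message("Server consolidation: fold two underutilized servers into one"))
# ['match_preconfigured_server']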
greenmining/models/commit.py
CHANGED
greenmining/models/repository.py
CHANGED

greenmining/services/commit_extractor.py
CHANGED

@@ -2,21 +2,17 @@
 
 from __future__ import annotations
 
-import json
 from datetime import datetime, timedelta
 from pathlib import Path
-from typing import Any
+from typing import Any
 
 from github import Github
 from tqdm import tqdm
 
-from greenmining.config import get_config
 from greenmining.models.repository import Repository
 from greenmining.utils import (
     colored_print,
     format_timestamp,
-    load_json_file,
-    print_banner,
     retry_on_exception,
     save_json_file,
 )
@@ -110,8 +106,7 @@ class CommitExtractor:
         try:
             # Get repository from GitHub API
             if not self.github:
-
-                self.github = Github(config.GITHUB_TOKEN)
+                raise ValueError("github_token is required for commit extraction")
 
             gh_repo = self.github.get_repo(repo_name)
 
@@ -143,40 +138,6 @@ class CommitExtractor:
 
         return commits
 
-    def _extract_commit_metadata(self, commit, repo_name: str) -> dict[str, Any]:
-        # Extract metadata from commit object.
-        # Get modified files
-        files_changed = []
-        lines_added = 0
-        lines_deleted = 0
-
-        try:
-            for modified_file in commit.modified_files:
-                files_changed.append(modified_file.filename)
-                lines_added += modified_file.added_lines
-                lines_deleted += modified_file.deleted_lines
-        except Exception:
-            pass
-
-        return {
-            "commit_id": commit.hash,
-            "repo_name": repo_name,
-            "date": commit.committer_date.isoformat(),
-            "author": commit.author.name,
-            "author_email": commit.author.email,
-            "message": commit.msg.strip(),
-            "files_changed": files_changed[:20],  # Limit to 20 files
-            "lines_added": lines_added,
-            "lines_deleted": lines_deleted,
-            "insertions": lines_added,
-            "deletions": lines_deleted,
-            "is_merge": commit.merge,
-            "branches": (
-                list(commit.branches) if hasattr(commit, "branches") and commit.branches else []
-            ),
-            "in_main_branch": commit.in_main_branch if hasattr(commit, "in_main_branch") else True,
-        }
-
     def _extract_commit_metadata_from_github(self, commit, repo_name: str) -> dict[str, Any]:
         # Extract metadata from GitHub API commit object.
         # Get modified files and stats
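
Behaviorally, the extractor no longer falls back to building a client from the global config (which the removed get_config import used to supply); a missing client is now a hard error. A sketch of the new contract; MiniExtractor and its constructor are hypothetical, and only the Github import and the ValueError message come from the diff:

from __future__ import annotations

from github import Github  # PyGithub, as imported by commit_extractor.py

class MiniExtractor:
    # Hypothetical stand-in: the diff only shows that self.github must already
    # be set before extraction, otherwise a ValueError is raised.
    def __init__(self, github_token: str | None = None):
        self.github = Github(github_token) if github_token else None

    def fetch_repo(self, repo_name: str):
        if not self.github:
            # 1.1.8 fails fast instead of constructing Github(config.GITHUB_TOKEN)
            raise ValueError("github_token is required for commit extraction")
        return self.github.get_repo(repo_name)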

greenmining/services/data_aggregator.py
CHANGED

@@ -2,26 +2,21 @@
 
 from __future__ import annotations
 
-import json
 from collections import defaultdict
 from pathlib import Path
-from typing import Any
+from typing import Any
 
 import pandas as pd
 
 from greenmining.analyzers import (
     StatisticalAnalyzer,
     TemporalAnalyzer,
-    QualitativeAnalyzer,
 )
-from greenmining.config import get_config
 from greenmining.models.repository import Repository
 from greenmining.utils import (
     colored_print,
     format_number,
     format_percentage,
-    load_json_file,
-    print_banner,
     save_csv_file,
     save_json_file,
 )
greenmining/services/data_analyzer.py
CHANGED

@@ -2,18 +2,15 @@
 
 from __future__ import annotations
 
-import json
-import re
 from collections import Counter
 from pathlib import Path
-from typing import Any
+from typing import Any
 
 from tqdm import tqdm
 
 from greenmining.analyzers import (
     CodeDiffAnalyzer,
 )
-from greenmining.config import get_config
 from greenmining.gsf_patterns import (
     GREEN_KEYWORDS,
     GSF_PATTERNS,
@@ -22,11 +19,7 @@ from greenmining.gsf_patterns import (
 )
 from greenmining.utils import (
     colored_print,
-    create_checkpoint,
     format_timestamp,
-    load_checkpoint,
-    load_json_file,
-    print_banner,
     save_json_file,
 )
 
@@ -156,55 +149,6 @@ class DataAnalyzer:
 
         return result
 
-    def _check_green_awareness(self, message: str, files: list[str]) -> tuple[bool, Optional[str]]:
-        # Check if commit explicitly mentions green/energy concerns.
-        # Check message for green keywords
-        for keyword in self.GREEN_KEYWORDS:
-            if keyword in message:
-                # Extract context around keyword
-                pattern = rf".{{0,30}}{re.escape(keyword)}.{{0,30}}"
-                match = re.search(pattern, message, re.IGNORECASE)
-                if match:
-                    evidence = match.group(0).strip()
-                    return True, f"Keyword '{keyword}': {evidence}"
-
-        # Check file names for patterns
-        cache_files = [f for f in files if "cache" in f or "redis" in f]
-        if cache_files:
-            return True, f"Modified cache-related file: {cache_files[0]}"
-
-        perf_files = [f for f in files if "performance" in f or "optimization" in f]
-        if perf_files:
-            return True, f"Modified performance file: {perf_files[0]}"
-
-        return False, None
-
-    def _detect_known_pattern(self, message: str, files: list[str]) -> tuple[Optional[str], str]:
-        # Detect known green software pattern.
-        matches = []
-
-        # Check each pattern
-        for pattern_name, keywords in self.GREEN_PATTERNS.items():
-            for keyword in keywords:
-                if keyword in message:
-                    # Calculate confidence based on specificity
-                    confidence = "HIGH" if len(keyword) > 10 else "MEDIUM"
-                    matches.append((pattern_name, confidence, len(keyword)))
-
-        # Check file names for pattern hints
-        all_files = " ".join(files)
-        for pattern_name, keywords in self.GREEN_PATTERNS.items():
-            for keyword in keywords:
-                if keyword in all_files:
-                    matches.append((pattern_name, "MEDIUM", len(keyword)))
-
-        if not matches:
-            return "NONE DETECTED", "NONE"
-
-        # Return most specific match (longest keyword)
-        matches.sort(key=lambda x: x[2], reverse=True)
-        return matches[0][0], matches[0][1]
-
     def save_results(self, results: list[dict[str, Any]], output_file: Path):
         # Save analysis results to JSON file.
         # Calculate summary statistics
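
The removed _detect_known_pattern ranked keyword hits by keyword length, on the theory that longer keywords are more specific and therefore higher-confidence. For reference, here is the core of that heuristic restated as a standalone, runnable function; GREEN_PATTERNS below is a tiny illustrative subset, not the library's table:

from typing import Optional

GREEN_PATTERNS = {  # tiny illustrative subset
    "caching": ["cache", "memoization"],
    "request_batching": ["batch requests", "request batching"],
}

def detect_known_pattern(message: str) -> tuple[Optional[str], str]:
    matches = []
    for pattern_name, keywords in GREEN_PATTERNS.items():
        for keyword in keywords:
            if keyword in message:
                # Longer keywords are treated as more specific, hence higher confidence
                confidence = "HIGH" if len(keyword) > 10 else "MEDIUM"
                matches.append((pattern_name, confidence, len(keyword)))
    if not matches:
        return "NONE DETECTED", "NONE"
    matches.sort(key=lambda x: x[2], reverse=True)  # most specific match wins
    return matches[0][0], matches[0][1]

print(detect_known_pattern("add request batching to the sync client"))
# ('request_batching', 'HIGH')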
greenmining/services/local_repo_analyzer.py
CHANGED

@@ -5,13 +5,12 @@ from __future__ import annotations
 import os
 import re
 import shutil
-import subprocess
 import tempfile
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from dataclasses import dataclass, field
 from datetime import datetime, timedelta
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional
 
 from pydriller import Repository
 from pydriller.metrics.process.change_set import ChangeSet
greenmining/services/reports.py
CHANGED

@@ -1,20 +1,15 @@
 # Report generation for green mining analysis.
-"""Report generation module for GreenMining analysis results."""
 
 from __future__ import annotations
 
-import json
 from datetime import datetime
 from pathlib import Path
-from typing import Any
+from typing import Any
 
-from greenmining.config import get_config
 from greenmining.utils import (
     colored_print,
     format_number,
     format_percentage,
-    load_json_file,
-    print_banner,
 )
greenmining/utils.py
CHANGED

@@ -38,41 +38,12 @@ def save_json_file(data: dict[str, Any], path: Path, indent: int = 2) -> None:
         json.dump(data, f, indent=indent, ensure_ascii=False)
 
 
-def load_csv_file(path: Path) -> pd.DataFrame:
-    # Load CSV file as pandas DataFrame.
-    if not path.exists():
-        raise FileNotFoundError(f"File not found: {path}")
-
-    return pd.read_csv(path)
-
-
 def save_csv_file(df: pd.DataFrame, path: Path) -> None:
     # Save DataFrame to CSV file.
     path.parent.mkdir(parents=True, exist_ok=True)
     df.to_csv(path, index=False, encoding="utf-8")
 
 
-def estimate_tokens(text: str) -> int:
-    # Estimate number of tokens in text.
-    return len(text) // 4
-
-
-def estimate_cost(tokens: int, model: str = "claude-sonnet-4-20250514") -> float:
-    # Estimate API cost based on token usage.
-    # Claude Sonnet 4 pricing (as of Dec 2024)
-    # Input: $3 per million tokens
-    # Output: $15 per million tokens
-    # Average estimate: assume 50% input, 50% output
-
-    if "sonnet" in model.lower():
-        input_cost = 3.0 / 1_000_000  # per token
-        output_cost = 15.0 / 1_000_000  # per token
-        avg_cost = (input_cost + output_cost) / 2
-        return tokens * avg_cost
-
-    return 0.0
-
-
 def retry_on_exception(
     max_retries: int = 3,
     delay: float = 2.0,
@@ -124,14 +95,6 @@ def colored_print(text: str, color: str = "white") -> None:
     print(f"{color_code}{text}{Style.RESET_ALL}")
 
 
-def handle_github_rate_limit(response) -> None:
-    # Handle GitHub API rate limiting.
-    if hasattr(response, "status") and response.status == 403:
-        colored_print("GitHub API rate limit exceeded!", "red")
-        colored_print("Please wait or use an authenticated token.", "yellow")
-        raise Exception("GitHub API rate limit exceeded")
-
-
 def format_number(num: int) -> str:
     # Format large numbers with thousand separators.
     return f"{num:,}"
@@ -140,53 +103,3 @@ def format_number(num: int) -> str:
 def format_percentage(value: float, decimals: int = 1) -> str:
     # Format percentage value.
     return f"{value:.{decimals}f}%"
-
-
-def format_duration(seconds: float) -> str:
-    # Format duration in human-readable format.
-    if seconds < 60:
-        return f"{int(seconds)}s"
-    elif seconds < 3600:
-        minutes = int(seconds / 60)
-        secs = int(seconds % 60)
-        return f"{minutes}m {secs}s"
-    else:
-        hours = int(seconds / 3600)
-        minutes = int((seconds % 3600) / 60)
-        return f"{hours}h {minutes}m"
-
-
-def truncate_text(text: str, max_length: int = 100) -> str:
-    # Truncate text to maximum length.
-    if len(text) <= max_length:
-        return text
-    return text[: max_length - 3] + "..."
-
-
-def create_checkpoint(checkpoint_file: Path, data: dict[str, Any]) -> None:
-    # Create checkpoint file for resuming operations.
-    save_json_file(data, checkpoint_file)
-    colored_print(f"Checkpoint saved: {checkpoint_file}", "green")
-
-
-def load_checkpoint(checkpoint_file: Path) -> Optional[dict[str, Any]]:
-    # Load checkpoint data if exists.
-    if checkpoint_file.exists():
-        try:
-            return load_json_file(checkpoint_file)
-        except Exception as e:
-            colored_print(f"Failed to load checkpoint: {e}", "yellow")
-    return None
-
-
-def print_banner(title: str) -> None:
-    # Print formatted banner.
-    colored_print("\n" + "=" * 60, "cyan")
-    colored_print(f" {title}", "cyan")
-    colored_print("=" * 60 + "\n", "cyan")
-
-
-def print_section(title: str) -> None:
-    # Print section header.
-    colored_print(f"\n  {title}", "blue")
-    colored_print("-" * 60, "blue")
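
After this cleanup, utils keeps the JSON/CSV savers, retry_on_exception, colored_print, and the two formatters; checkpointing, banners, duration formatting, and the token/cost estimators are gone. A short sketch using only the surviving helpers; the values and output path are illustrative:

from pathlib import Path
from greenmining.utils import format_number, format_percentage, save_json_file

stats = {"commits": 12500, "green_share": 4.2}  # illustrative numbers
print(f"{format_number(stats['commits'])} commits, "
      f"{format_percentage(stats['green_share'])} flagged green")
# 12,500 commits, 4.2% flagged green

save_json_file(stats, Path("summary.json"))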

{greenmining-1.1.7.dist-info → greenmining-1.1.8.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: greenmining
-Version: 1.1.7
+Version: 1.1.8
 Summary: An empirical Python library for Mining Software Repositories (MSR) in Green IT research
 Author-email: Adam Bouafia <a.bouafia@student.vu.nl>
 License: MIT
@@ -68,8 +68,8 @@
 
 `greenmining` is a research-grade Python library designed for **empirical Mining Software Repositories (MSR)** studies in **Green IT**. It enables researchers and practitioners to:
 
-- **Mine repositories at scale** - Fetch and analyze GitHub repositories via GraphQL API with configurable filters
-
+- **Mine repositories at scale** - Search, fetch, and analyze GitHub repositories via GraphQL API with configurable filters
+
 - **Classify green commits** - Detect 122 sustainability patterns from the Green Software Foundation (GSF) catalog
 - **Analyze any repository by URL** - Direct Git-based analysis with support for private repositories
 - **Measure energy consumption** - RAPL, CodeCarbon, and CPU Energy Meter backends for power profiling

greenmining-1.1.8.dist-info/RECORD
ADDED

@@ -0,0 +1,40 @@
+greenmining/__init__.py,sha256=LRyVPcKKGGK9l2r2nn5EAEsPFTiFomity3Hc4CE6hGM,3390
+greenmining/__main__.py,sha256=NYOVS7D4w2XDLn6SyXHXPKE5GrNGOeoWSTb_KazgK5c,590
+greenmining/config.py,sha256=MQ5aPaa_Y9MZke774dmibz2-XSqRVsQiiNaLDr8f7S0,2771
+greenmining/gsf_patterns.py,sha256=UvNJPY3HlAx1SicwUqci40TlLg8lCL0tszSOH4haxQs,55921
+greenmining/utils.py,sha256=-dnLUw9taCzvQ2dk6uc66GAohOFiXJFKs9TLSEPk5kM,2893
+greenmining/analyzers/__init__.py,sha256=rTgpDfFE6za4QAHW59ncnS6TW02omn-TZMnYNVUIZp0,753
+greenmining/analyzers/code_diff_analyzer.py,sha256=1dk68R3O0RZG8gx1cm9B_UlZ1Uwyb_Q3oScRbCVx4tM,10950
+greenmining/analyzers/metrics_power_correlator.py,sha256=MgKXAIYjNihzzyilCd88_AMjZP9sdC6NkCAVbrvvOus,5957
+greenmining/analyzers/power_regression.py,sha256=j_SL8BHQi89zkjjKPPcjsPrvfDAeGpLeZujQiNw_RKI,7375
+greenmining/analyzers/qualitative_analyzer.py,sha256=5LiqP2It3q6_RLiLGkyGRZaRxg00dcyTPvlN5l-wq_k,15379
+greenmining/analyzers/statistical_analyzer.py,sha256=PA0w0sytRmMO6N1a2iH7VdA6Icg4DcyBLFXOGq7PepY,5942
+greenmining/analyzers/temporal_analyzer.py,sha256=JfTcAoI20oCFMehGrSRnDqhJTXI-RUbdCTMwDOTW9-g,14259
+greenmining/analyzers/version_power_analyzer.py,sha256=2P6zOqBg-ButtIhF-4cutiwD2Q1geMY49VFUghHXXoI,8119
+greenmining/controllers/__init__.py,sha256=UiAT6zBvC1z_9cJWfzq1cLA0I4r9b2vURHipj8oDczI,180
+greenmining/controllers/repository_controller.py,sha256=hQb9kRuGNeA5cNHKqX-CZTzYvPpgzWCX6w8NzllxuDc,3857
+greenmining/energy/__init__.py,sha256=GoCYh7hitWBoPMtan1HF1yezCHi7o4sa_YUJgGkeJc8,558
+greenmining/energy/base.py,sha256=3hIPgc4B0Nz9V7DTh2Xd6trDRtmozUBBpa5UWRuWzcw,5918
+greenmining/energy/carbon_reporter.py,sha256=bKIFlLhHfYzI4DBu_ff4GW1Psz4oSCAF4NmzQb-EShA,8298
+greenmining/energy/codecarbon_meter.py,sha256=8obsfiJi0V3R_2BMHjTQCZSN52YPvFn5d9q_MKOZVb4,4214
+greenmining/energy/cpu_meter.py,sha256=GmUZsOIzWnAWcuSW4RndDdgszDHzqnBjAIeLBgelZ0w,5001
+greenmining/energy/rapl.py,sha256=b63M1mS7uF9Uo0vFi0z7Qwdo56U1TqxIYQXINhYp9Jo,5292
+greenmining/models/__init__.py,sha256=2hkB0quhMePvvA1AkYfj5uiF_HyGtXVxn0BU-5m_oSg,302
+greenmining/models/aggregated_stats.py,sha256=CZxjwXswvtmYPwpcbodLUsZpsbsNKBDIqvU9DpFO_t0,1004
+greenmining/models/analysis_result.py,sha256=YICTCEcrJxZ1R8Xaio3AZOjCGwMzC_62BMAL0J_XY1w,1509
+greenmining/models/commit.py,sha256=LCwDcRu4-BeCJQdk590oQNZZZM9t8W9FlaHlo9DCVmc,2415
+greenmining/models/repository.py,sha256=MUeCOtVMOsU4Oa_BBoB163Ij5BKytTKwbzoGORJx4rU,2850
+greenmining/presenters/__init__.py,sha256=d1CMtqtUAHYHYNzigPyjtGOUtnH1drtUwf7-bFQq2B8,138
+greenmining/presenters/console_presenter.py,sha256=qagn2c2aOym0WNKV8n175MQ-BTheLjrXzW8c1OafzAQ,4904
+greenmining/services/__init__.py,sha256=ZEMOVut0KRdume_vz58beSNps3YgeoGBXmUjEqNgIhc,690
+greenmining/services/commit_extractor.py,sha256=qBM9QpGzPZRmGMFufJ6gP8eWIuufTowLX8mQxqZwyEU,6996
+greenmining/services/data_aggregator.py,sha256=BU_HUb-8c0n0sa_7VZRB8jIVnaVhRLf-E6KA4ASh-08,19427
+greenmining/services/data_analyzer.py,sha256=0XqW-slrnt7RotrHDweOqKtoN8XIA7y6p7s2Jau6cMg,7431
+greenmining/services/github_graphql_fetcher.py,sha256=ZklXdEAc60KeFL83zRYMwW_-2OwMKpfPY7Wrifl0D50,11539
+greenmining/services/local_repo_analyzer.py,sha256=PYHj-zz0cePWbQq9HtGvd2OcZUYM8rRGe8eKIAp1_fI,24874
+greenmining/services/reports.py,sha256=nhJuYiA5tPD_9AjtgSLEnrpW3x15sZXrwIxpxQEBbh0,23219
+greenmining-1.1.8.dist-info/licenses/LICENSE,sha256=M7ma3JHGeiIZIs3ea0HTcFl_wLFPX2NZElUliYs4bCA,1083
+greenmining-1.1.8.dist-info/METADATA,sha256=6gb4TO8nLcxqhekL3_uFYnBsIGEZKM_51dq_L24BiEA,30175
+greenmining-1.1.8.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+greenmining-1.1.8.dist-info/top_level.txt,sha256=nreXgXxZIWI-42yQknQ0HXtUrFnzZ8N1ra4Mdy2KcsI,12
+greenmining-1.1.8.dist-info/RECORD,,
greenmining/__version__.py
DELETED

greenmining-1.1.7.dist-info/RECORD
DELETED

@@ -1,42 +0,0 @@
-greenmining/__init__.py,sha256=S7eN_NkK3aC68nLMNiVgcPdcVwW-PvbyfY_3ZpHsuQw,3390
-greenmining/__main__.py,sha256=NYOVS7D4w2XDLn6SyXHXPKE5GrNGOeoWSTb_KazgK5c,590
-greenmining/__version__.py,sha256=xZc02a8bS3vUJlzh8k9RoxemB1irQmq_SpVVj6Cg5M0,62
-greenmining/config.py,sha256=M4a7AwM1ErCmOY0n5Vmyoo9HPblSkTZ-HD3k2YHzs4A,8340
-greenmining/gsf_patterns.py,sha256=hnd9GuWB8GEflrusEib5hjvl8CD5TSbGcBtb0gfxFp4,54193
-greenmining/utils.py,sha256=dSFwQzQwbS8rYZSgwLIxM_geLqbldwqVOIXMqEg08Qs,5609
-greenmining/analyzers/__init__.py,sha256=rTgpDfFE6za4QAHW59ncnS6TW02omn-TZMnYNVUIZp0,753
-greenmining/analyzers/code_diff_analyzer.py,sha256=1dk68R3O0RZG8gx1cm9B_UlZ1Uwyb_Q3oScRbCVx4tM,10950
-greenmining/analyzers/metrics_power_correlator.py,sha256=qMKr4hSTzT0Un3vsGZNkPCp9TxyzdFwrhjw5M1IKOgk,5964
-greenmining/analyzers/power_regression.py,sha256=5pxs7IoTtGcwwX5KzGeM5hOm2I9Axr-0X4N_4007iMw,7387
-greenmining/analyzers/qualitative_analyzer.py,sha256=RcjOMLj_DPH869ey9J0uI7JK_krCefMhNkPLOJUDFF8,15391
-greenmining/analyzers/statistical_analyzer.py,sha256=rqLsRGuOHhxEMQAFx5dmWMHDgdb6ktL7CY3dAebQvpA,7262
-greenmining/analyzers/temporal_analyzer.py,sha256=JfTcAoI20oCFMehGrSRnDqhJTXI-RUbdCTMwDOTW9-g,14259
-greenmining/analyzers/version_power_analyzer.py,sha256=2P6zOqBg-ButtIhF-4cutiwD2Q1geMY49VFUghHXXoI,8119
-greenmining/controllers/__init__.py,sha256=UiAT6zBvC1z_9cJWfzq1cLA0I4r9b2vURHipj8oDczI,180
-greenmining/controllers/repository_controller.py,sha256=DM9BabUAwZJARGngCk_4wEYPw2adn8iESCiFQ7Um4LQ,3880
-greenmining/energy/__init__.py,sha256=GoCYh7hitWBoPMtan1HF1yezCHi7o4sa_YUJgGkeJc8,558
-greenmining/energy/base.py,sha256=3hIPgc4B0Nz9V7DTh2Xd6trDRtmozUBBpa5UWRuWzcw,5918
-greenmining/energy/carbon_reporter.py,sha256=bKIFlLhHfYzI4DBu_ff4GW1Psz4oSCAF4NmzQb-EShA,8298
-greenmining/energy/codecarbon_meter.py,sha256=HyQptyEaS1ZMu_qdxg0Tyuly1PCmmbbNwwYX8qYsTs4,4927
-greenmining/energy/cpu_meter.py,sha256=mhEG3Y7fjz3wV5lojcYeFXvCXXgmelGQaBfN2q7yTNc,5007
-greenmining/energy/rapl.py,sha256=b63M1mS7uF9Uo0vFi0z7Qwdo56U1TqxIYQXINhYp9Jo,5292
-greenmining/models/__init__.py,sha256=2hkB0quhMePvvA1AkYfj5uiF_HyGtXVxn0BU-5m_oSg,302
-greenmining/models/aggregated_stats.py,sha256=N-ZGcQO7IJ33Joa8luMVjtHhKYzNe48VW8hFqs9a5Jc,1016
-greenmining/models/analysis_result.py,sha256=YICTCEcrJxZ1R8Xaio3AZOjCGwMzC_62BMAL0J_XY1w,1509
-greenmining/models/commit.py,sha256=mnRDWSiIyGtJeGXI8sav9hukWUyVFpoNe6GixRlZjY4,2439
-greenmining/models/repository.py,sha256=SKjS01onOptpMioumtAPZxKpKheHAeVXnXyvatl7CfM,2856
-greenmining/presenters/__init__.py,sha256=d1CMtqtUAHYHYNzigPyjtGOUtnH1drtUwf7-bFQq2B8,138
-greenmining/presenters/console_presenter.py,sha256=qagn2c2aOym0WNKV8n175MQ-BTheLjrXzW8c1OafzAQ,4904
-greenmining/services/__init__.py,sha256=ZEMOVut0KRdume_vz58beSNps3YgeoGBXmUjEqNgIhc,690
-greenmining/services/commit_extractor.py,sha256=Fz2WTWjIZ_vQhSfkJKnWpJnBpI2nm0KacA4qYAvCpSE,8451
-greenmining/services/data_aggregator.py,sha256=TsFT0oGOnnHk0QGZ1tT6ZhKGc5X1H1D1u7-7OpiPo7Y,19566
-greenmining/services/data_analyzer.py,sha256=f0nlJkPAclHHCzzTyQW5bjhYrgE0XXiR1x7_o3fJaDs,9732
-greenmining/services/github_fetcher.py,sha256=sdkS-LhHmX7mgMdlClCwEUVnZrItc0Pt6FVtlWk5iLU,106
-greenmining/services/github_graphql_fetcher.py,sha256=ZklXdEAc60KeFL83zRYMwW_-2OwMKpfPY7Wrifl0D50,11539
-greenmining/services/local_repo_analyzer.py,sha256=pnH_Hf3GjCFovurEl2uZgOgD3qjqcsP2K1QnKHddkSY,24903
-greenmining/services/reports.py,sha256=Vrw_pBNmVw2mTAf1dpcAqjBe6gXv-O4w_XweoVTt7L8,23392
-greenmining-1.1.7.dist-info/licenses/LICENSE,sha256=M7ma3JHGeiIZIs3ea0HTcFl_wLFPX2NZElUliYs4bCA,1083
-greenmining-1.1.7.dist-info/METADATA,sha256=LiRreZ7kVcsr76DZHlkyTcBD7u58IokOvJe4lfDu3vw,30280
-greenmining-1.1.7.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-greenmining-1.1.7.dist-info/top_level.txt,sha256=nreXgXxZIWI-42yQknQ0HXtUrFnzZ8N1ra4Mdy2KcsI,12
-greenmining-1.1.7.dist-info/RECORD,,

{greenmining-1.1.7.dist-info → greenmining-1.1.8.dist-info}/WHEEL
File without changes

{greenmining-1.1.7.dist-info → greenmining-1.1.8.dist-info}/licenses/LICENSE
File without changes

{greenmining-1.1.7.dist-info → greenmining-1.1.8.dist-info}/top_level.txt
File without changes