sciml 0.0.10__py3-none-any.whl → 0.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sciml/__init__.py +1 -1
- sciml/ccc.py +35 -35
- sciml/metrics.py +122 -122
- sciml/models.py +796 -276
- sciml/pipelines.py +225 -225
- sciml/regress2.py +216 -216
- {sciml-0.0.10.dist-info → sciml-0.0.11.dist-info}/LICENSE +21 -21
- {sciml-0.0.10.dist-info → sciml-0.0.11.dist-info}/METADATA +13 -13
- sciml-0.0.11.dist-info/RECORD +11 -0
- {sciml-0.0.10.dist-info → sciml-0.0.11.dist-info}/WHEEL +1 -1
- sciml-0.0.10.dist-info/RECORD +0 -11
- {sciml-0.0.10.dist-info → sciml-0.0.11.dist-info}/top_level.txt +0 -0
sciml/models.py
CHANGED
@@ -1,276 +1,796 @@
 import numpy as np
 import copy
 import itertools
 import warnings
 from xgboost import XGBRegressor
 from sklearn.metrics import mean_squared_error
 from sklearn.model_selection import train_test_split

 class SmartForest:
     """
     SmartForest: A deep, intelligent decision forest model for complex sequential and tabular data.

     SmartForest blends ideas from deep forests (cascade forest structures), LSTM-style forget gates,
     and ensemble learning using XGBoost. It is especially suited for time series or structured tabular data
     where layer-wise feature expansion and memory-inspired filtering can enhance performance.

     Key Features:
     -------------
     - Deep cascade of XGBoost regressors
     - Optional Multi-Grained Scanning (MGS) for local feature extraction
     - Forget-gate-inspired mechanism to regulate information flow across layers
     - Early stopping to prevent overfitting
     - Full retention of best-performing model (lowest validation RMSE)

     Parameters:
     -----------
     n_estimators_per_layer : int
         Number of XGBoost regressors per layer.

     max_layers : int
         Maximum number of layers (depth) in the model.

     early_stopping_rounds : int
         Number of layers with no improvement before early stopping is triggered.

     param_grid : dict
         Grid of XGBoost hyperparameters to search over.

     use_gpu : bool
         If True, use GPU-accelerated training (CUDA required).

     gpu_id : int
         ID of GPU to use (if use_gpu=True).

     window_sizes : list of int
         Enables Multi-Grained Scanning if non-empty, with specified sliding window sizes.

     forget_factor : float in [0, 1]
         Simulates LSTM-style forget gate; higher values forget more past information.

     verbose : int
         Verbosity level (0 = silent, 1 = progress updates).

     Methods:
     --------
     fit(X, y, X_val=None, y_val=None):
         Train the SmartForest model layer by layer, using optional validation for early stopping.

     predict(X):
         Make predictions on new data using the trained cascade structure.

     get_best_model():
         Returns a copy of the best model and the corresponding RMSE from validation.

     Example:
     --------
     >>> model = SmartForest(n_estimators_per_layer=5, max_layers=10, window_sizes=[2, 3], forget_factor=0.2)
     >>> model.fit(X_train, y_train, X_val, y_val)
     >>> y_pred = model.predict(X_val)
     >>> best_model, best_rmse = model.get_best_model()
     """
     def __init__(self, n_estimators_per_layer = 5, max_layers = 10, early_stopping_rounds = 3, param_grid = None,
                  use_gpu = False, gpu_id = 0, window_sizes = [], forget_factor = 0, verbose = 1):
         self.n_estimators_per_layer = n_estimators_per_layer
         self.max_layers = max_layers
         self.early_stopping_rounds = early_stopping_rounds
         self.param_grid = param_grid or {
             "objective": ["reg:squarederror"],
             "random_state": [42],
             'seed': [0],
             'n_estimators': [100],
             'max_depth': [6],
             'min_child_weight': [4],
             'subsample': [0.8],
             'colsample_bytree': [0.8],
             'gamma': [0],
             'reg_alpha': [0],
             'reg_lambda': [1],
             'learning_rate': [0.05],
         }
         self.use_gpu = use_gpu
         self.gpu_id = gpu_id
         self.window_sizes = window_sizes
         self.forget_factor = forget_factor
         self.layers = []
         self.best_model = None
         self.best_rmse = float("inf")
         self.verbose = verbose

     def _get_param_combinations(self):
         keys, values = zip(*self.param_grid.items())
         return [dict(zip(keys, v)) for v in itertools.product(*values)]

     def _multi_grained_scanning(self, X, y):
         new_features = []
         for window_size in self.window_sizes:
             if X.shape[1] < window_size:
                 continue
             for start in range(X.shape[1] - window_size + 1):
                 window = X[:, start:start + window_size]
                 if y is None:
                     new_features.append(window)
                     continue

                 param_combos = self._get_param_combinations()
                 for params in param_combos:
                     if self.use_gpu:
                         params['tree_method'] = 'hist'
                         params['device'] = 'cuda'
                     model = XGBRegressor(**params)
                     model.fit(window, y)
                     preds = model.predict(window).reshape(-1, 1)
                     new_features.append(preds)
         return np.hstack(new_features) if new_features else X

     def _apply_forget_gate(self, X, layer_index):
         forget_weights = np.random.rand(X.shape[1]) * self.forget_factor
         return X * (1 - forget_weights)

     def _fit_layer(self, X, y, X_val=None, y_val=None, layer_index=0):
         layer = []
         layer_outputs = []
         param_combos = self._get_param_combinations()
         X = self._apply_forget_gate(X, layer_index)

         for i in range(self.n_estimators_per_layer):
             best_rmse = float('inf')
             best_model = None

             for params in param_combos:
                 if self.use_gpu:
                     params['tree_method'] = 'hist'
                     params['device'] = 'cuda'

                 params = params.copy()  # Prevent modification from affecting the next loop iteration
                 params['random_state'] = i  # Use a different random seed for each model to enhance diversity

                 model = XGBRegressor(**params)
                 model.fit(X, y)

                 if X_val is not None:
                     preds_val = model.predict(X_val)
                     rmse = np.sqrt(mean_squared_error(y_val, preds_val))
                     if rmse < best_rmse:
                         best_rmse = rmse
                         best_model = model
                 else:
                     best_model = model

             preds = best_model.predict(X).reshape(-1, 1)
             layer.append(best_model)
             layer_outputs.append(preds)

         output = np.hstack(layer_outputs)
         return layer, output

     def fit(self, X, y, X_val=None, y_val=None):
         X_current = self._multi_grained_scanning(X, y)
         X_val_current = self._multi_grained_scanning(X_val, y_val) if X_val is not None else None
         no_improve_rounds = 0

         for layer_index in range(self.max_layers):
             if self.verbose: print(f"Training Layer {layer_index + 1}")
             layer, output = self._fit_layer(X_current, y, X_val_current, y_val, layer_index)
             self.layers.append(layer)
             X_current = np.hstack([X_current, output])

             if X_val is not None:
                 val_outputs = []
                 for reg in layer:
                     n_features = reg.n_features_in_
                     preds = reg.predict(X_val_current[:, :n_features]).reshape(-1, 1)
                     val_outputs.append(preds)
                 val_output = np.hstack(val_outputs)
                 X_val_current = np.hstack([X_val_current, val_output])

                 y_pred = self.predict(X_val)
                 rmse = np.sqrt(mean_squared_error(y_val, y_pred))
                 if self.verbose: print(f"Validation RMSE: {rmse:.4f}")

                 if rmse < self.best_rmse:
                     self.best_rmse = rmse
                     self.best_model = copy.deepcopy(self.layers)
                     no_improve_rounds = 0
                     if self.verbose: print(f"✅ New best RMSE: {self.best_rmse:.4f}")
                 else:
                     no_improve_rounds += 1
                     if no_improve_rounds >= self.early_stopping_rounds:
                         if self.verbose: print("Early stopping triggered.")
                         break

     def predict(self, X):
         X_current = self._multi_grained_scanning(X, None)
         X_current = self._apply_forget_gate(X_current, layer_index=-1)

         for layer in self.layers:
             layer_outputs = []
             for reg in layer:
                 n_features = reg.n_features_in_
                 preds = reg.predict(X_current[:, :n_features]).reshape(-1, 1)
                 layer_outputs.append(preds)
             output = np.hstack(layer_outputs)
             X_current = np.hstack([X_current, output])

         final_outputs = []
         for reg in self.layers[-1]:
             n_features = reg.n_features_in_
             final_outputs.append(reg.predict(X_current[:, :n_features]).reshape(-1, 1))
         return np.mean(np.hstack(final_outputs), axis=1)

     def get_best_model(self):
         return self.best_model, self.best_rmse

 """
 # ============================== Test Example ==============================
 import warnings
 import numpy as np
 from sklearn.datasets import load_diabetes
 from sklearn.datasets import fetch_california_housing
 from sklearn.model_selection import train_test_split
 from sklearn.metrics import mean_squared_error

 # X, y = load_diabetes(return_X_y=True)  # Using diabetes dataset
 X, y = fetch_california_housing(return_X_y=True)  # Using house price dataset
 X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3, random_state=42)

 # Hyperparameter grid
 param_grid = {
     "objective": ["reg:squarederror"],
     "random_state": [42],
     'seed': [0],
     'n_estimators': [100],
     'max_depth': [6],
     'min_child_weight': [4],
     'subsample': [0.8],
     'colsample_bytree': [0.8],
     'gamma': [0],
     'reg_alpha': [0],
     'reg_lambda': [1],
     'learning_rate': [0.05],
 }

 # Create the model (set window_sizes to e.g. [2, 3] to enable Multi-Grained Scanning)
 regr = SmartForest(
     n_estimators_per_layer = 5,
     max_layers = 10,
     early_stopping_rounds = 5,
     param_grid = param_grid,
     use_gpu = False,
     gpu_id = 0,
     window_sizes = [],   # Enables MGS if e.g. [2, 3]; an empty list disables it.
     forget_factor = 0.,  # Set forget factor to simulate forget gate behavior
     verbose = 1
 )

 regr.fit(X_train, y_train, X_val, y_val)

 # Predict on validation set and evaluate
 y_pred = regr.predict(X_val)
 rmse = np.sqrt(mean_squared_error(y_val, y_pred))
 print("\nFinal RMSE:", rmse)

 # Output best model and RMSE
 best_model, best_rmse = regr.get_best_model()
 print("\nBest validation RMSE:", best_rmse)
 """
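Each regressor slot in `_fit_layer` searches every combination that `_get_param_combinations` expands from `param_grid` with `itertools.product` (the default grid has one value per key, so it expands to a single combination). A standalone sketch of that expansion, with a hypothetical two-key grid:

import itertools

# Hypothetical two-key grid; each key maps to a list of candidate values.
param_grid = {"max_depth": [4, 6], "learning_rate": [0.05, 0.1]}

keys, values = zip(*param_grid.items())
combos = [dict(zip(keys, v)) for v in itertools.product(*values)]
# combos == [{'max_depth': 4, 'learning_rate': 0.05},
#            {'max_depth': 4, 'learning_rate': 0.1},
#            {'max_depth': 6, 'learning_rate': 0.05},
#            {'max_depth': 6, 'learning_rate': 0.1}]

The same expansion drives the per-window models in `_multi_grained_scanning`, so grid size multiplies training cost in both places.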
+
+# ============================================================================================================================================================
+
+import numpy as np
+import copy
+import itertools
+from scipy import ndimage
+from xgboost import XGBRegressor
+from sklearn.metrics import mean_squared_error
+from sklearn.model_selection import train_test_split
+
+class SmartForest4D:
+    """
+    SmartForest4D is an ensemble learning model designed to handle complex 4D input data
+    (samples, time, spatial, features). It integrates ideas from gradient-boosted decision trees
+    (XGBoost) with LSTM-style forget gates and spatial max pooling.
+
+    The model builds layers of regressors, each layer taking the previous output as part of its
+    input (deep forest style). A forget gate mechanism is applied along the time dimension to
+    emphasize recent temporal information. Spatial max pooling is used to reduce dimensionality
+    across spatial units before flattening and feeding into the regressors.
+
+    Parameters:
+    -----------
+    n_estimators_per_layer : int
+        Number of XGBoost regressors per layer.
+
+    max_layers : int
+        Maximum number of layers in the deep forest.
+
+    early_stopping_rounds : int
+        Number of rounds without improvement on the validation set before early stopping.
+
+    param_grid : dict
+        Dictionary of hyperparameter lists to search over for XGBoost.
+
+    use_gpu : bool
+        Whether to use GPU for training XGBoost models.
+
+    gpu_id : int
+        GPU device ID to use if use_gpu is True.
+
+    kernel : np.ndarray
+        Convolutional kernel for spatial processing. Some common choices:
+
+        # ===============================
+        # 0. Identity (do nothing)
+        # ===============================
+        identity_kernel = np.array([
+            [0, 0, 0],
+            [0, 1, 0],
+            [0, 0, 0]
+        ])
+
+        # ===============================
+        # 1. Sobel Edge Detection Kernels
+        # ===============================
+        sobel_x = np.array([
+            [-1, 0, 1],
+            [-2, 0, 2],
+            [-1, 0, 1]
+        ])
+
+        sobel_y = np.array([
+            [-1, -2, -1],
+            [ 0,  0,  0],
+            [ 1,  2,  1]
+        ])
+
+        # ===============================
+        # 2. Gaussian Blur Kernel (3x3)
+        # ===============================
+        gaussian_kernel = (1/16) * np.array([
+            [1, 2, 1],
+            [2, 4, 2],
+            [1, 2, 1]
+        ])
+
+        # ===============================
+        # 3. Morphological Structuring Element (3x3 cross)
+        # Used in binary dilation/erosion
+        # ===============================
+        morph_kernel = np.array([
+            [0, 1, 0],
+            [1, 1, 1],
+            [0, 1, 0]
+        ])
+
+        # ===============================
+        # 4. Sharpening Kernel
+        # Enhances edges and contrast
+        # ===============================
+        sharpen_kernel = np.array([
+            [ 0, -1,  0],
+            [-1,  5, -1],
+            [ 0, -1,  0]
+        ])
+
+        # ===============================
+        # 5. Embossing Kernel
+        # Creates a 3D-like shadowed effect
+        # ===============================
+        emboss_kernel = np.array([
+            [-2, -1, 0],
+            [-1,  1, 1],
+            [ 0,  1, 2]
+        ])
+
+    spatial_h : int
+        The height of the 2D grid for the flattened spatial dimension.
+
+    spatial_w : int
+        The width of the 2D grid for the flattened spatial dimension.
+
+    forget_factor : float
+        Exponential decay rate applied along the time axis. Higher values mean stronger forgetting.
+
+    verbose : int
+        Verbosity level for training output.
+
+    Attributes:
+    -----------
+    layers : list
+        List of trained layers, each containing a list of regressors.
+
+    best_model : list
+        The set of layers corresponding to the best validation RMSE seen during training.
+
+    best_rmse : float
+        The lowest RMSE achieved on the validation set.
+
+    Methods:
+    --------
+    fit(X, y, X_val=None, y_val=None):
+        Train the SmartForest4D model on the given 4D input data.
+
+    predict(X):
+        Predict targets for new 4D input data using the trained model.
+
+    get_best_model():
+        Return the best set of layers and corresponding RMSE.
+
+    Notes:
+    ------
+    - The product of spatial_h and spatial_w must equal the size of the flattened spatial
+      dimension (spatial_h * spatial_w = n_spatial).
+
+    Example:
+    --------
+    >>> model = SmartForest4D(n_estimators_per_layer=5, max_layers=10, early_stopping_rounds=3,
+    ...                       spatial_h=2, spatial_w=2, forget_factor=0.3, verbose=1)
+    >>> model.fit(X_train, y_train, X_val, y_val)
+    >>> y_pred = model.predict(X_val)
+    >>> best_model, best_rmse = model.get_best_model()
+    """
+    def __init__(self, n_estimators_per_layer=5, max_layers=10, early_stopping_rounds=3, param_grid=None,
+                 use_gpu=False, gpu_id=0, kernel=np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]), spatial_h=None, spatial_w=None,
+                 forget_factor=0.0, verbose=1):
+        self.n_estimators_per_layer = n_estimators_per_layer
+        self.max_layers = max_layers
+        self.early_stopping_rounds = early_stopping_rounds
+        self.param_grid = param_grid or {
+            "objective": ["reg:squarederror"],
+            "random_state": [42],
+            'n_estimators': [100],
+            'max_depth': [6],
+            'min_child_weight': [4],
+            'subsample': [0.8],
+            'colsample_bytree': [0.8],
+            'gamma': [0],
+            'reg_alpha': [0],
+            'reg_lambda': [1],
+            'learning_rate': [0.05],
+        }
+        self.use_gpu = use_gpu
+        self.gpu_id = gpu_id
+        self.kernel = kernel
+        self.spatial_h = spatial_h
+        self.spatial_w = spatial_w
+        self.forget_factor = forget_factor
+        self.layers = []
+        self.best_model = None
+        self.best_rmse = float("inf")
+        self.verbose = verbose
+        if (self.spatial_h is None) or (self.spatial_w is None):
+            raise ValueError("Please specify spatial_h and spatial_w")
+
+    def _get_param_combinations(self):
+        keys, values = zip(*self.param_grid.items())
+        return [dict(zip(keys, v)) for v in itertools.product(*values)]
+
+    def _prepare_input(self, X, y=None, apply_forget=False, layer_index=0):
+        # Ensure 4D: (samples, time, spatial, features)
+        if X.ndim == 2:
+            X = X[:, np.newaxis, np.newaxis, :]
+        elif X.ndim == 3:
+            X = X[:, :, np.newaxis, :]
+        elif X.ndim == 4:
+            pass
+        else:
+            raise ValueError("Input must be 2D, 3D, or 4D.")
+
+        n_samples, n_time, n_spatial, n_features = X.shape
+
+        if apply_forget and self.forget_factor > 0:
+            decay = np.exp(-self.forget_factor * np.arange(n_time))[::-1]
+            decay = decay / decay.sum()
+            decay = decay.reshape(1, n_time, 1, 1)
+            X = X * decay
+
+        # Apply convolutional kernels:
+        if n_spatial != 1:
+            if self.spatial_h * self.spatial_w != n_spatial:
+                raise ValueError("spatial_h * spatial_w != n_spatial")
+            X_out = np.zeros_like(X)
+            for sample in range(X.shape[0]):
+                for t in range(X.shape[1]):
+                    for f in range(X.shape[3]):
+                        spatial_2d = X[sample, t, :, f].reshape(self.spatial_h, self.spatial_w)
+                        # Apply 2D convolution
+                        filtered = ndimage.convolve(spatial_2d, self.kernel, mode='constant', cval=0.0)
+                        # Flatten back to (n_spatial,) and store
+                        X_out[sample, t, :, f] = filtered.reshape(n_spatial)
+            X = X_out
+            del X_out
+        # Max pooling over spatial dim
+        X_pooled = X.max(axis=2)  # (samples, time, features)
+        X_flattened = X_pooled.reshape(n_samples, -1)  # (samples, time * features)
+        return X_flattened
+
+    def _fit_layer(self, X, y, X_val=None, y_val=None, layer_index=0):
+        layer = []
+        layer_outputs = []
+        param_combos = self._get_param_combinations()
+
+        for i in range(self.n_estimators_per_layer):
+            best_rmse = float('inf')
+            best_model = None
+
+            for params in param_combos:
+                if self.use_gpu:
+                    params['tree_method'] = 'hist'
+                    params['device'] = 'cuda'
+
+                params = params.copy()
+                params['random_state'] = i
+
+                model = XGBRegressor(**params)
+                model.fit(X, y)
+
+                if X_val is not None:
+                    preds_val = model.predict(X_val)
+                    rmse = np.sqrt(mean_squared_error(y_val, preds_val))
+                    if rmse < best_rmse:
+                        best_rmse = rmse
+                        best_model = model
+                else:
+                    best_model = model
+
+            preds = best_model.predict(X).reshape(-1, 1)
+            layer.append(best_model)
+            layer_outputs.append(preds)
+
+        output = np.hstack(layer_outputs)
+        return layer, output
+
+    def fit(self, X, y, X_val=None, y_val=None):
+        y = y.ravel()
+        X_current = self._prepare_input(X, apply_forget=True)
+        X_val_current = self._prepare_input(X_val, apply_forget=True) if X_val is not None else None
+
+        no_improve_rounds = 0
+
+        for layer_index in range(self.max_layers):
+            if self.verbose:
+                print(f"Training Layer {layer_index + 1}")
+
+            layer, output = self._fit_layer(X_current, y, X_val_current, y_val, layer_index)
+            self.layers.append(layer)
+            X_current = np.hstack([X_current, output])
+
+            if X_val is not None:
+                val_outputs = []
+                for reg in layer:
+                    n_features = reg.n_features_in_
+                    preds = reg.predict(X_val_current[:, :n_features]).reshape(-1, 1)
+                    val_outputs.append(preds)
+                val_output = np.hstack(val_outputs)
+                X_val_current = np.hstack([X_val_current, val_output])
+
+                y_pred = self.predict(X_val)
+                rmse = np.sqrt(mean_squared_error(y_val, y_pred))
+                if self.verbose:
+                    print(f"Validation RMSE: {rmse:.4f}")
+
+                if rmse < self.best_rmse:
+                    self.best_rmse = rmse
+                    self.best_model = copy.deepcopy(self.layers)
+                    no_improve_rounds = 0
+                    if self.verbose:
+                        print(f"✅ New best RMSE: {self.best_rmse:.4f}")
+                else:
+                    no_improve_rounds += 1
+                    if no_improve_rounds >= self.early_stopping_rounds:
+                        if self.verbose:
+                            print("Early stopping triggered.")
+                        break
+
+    def predict(self, X):
+        X_current = self._prepare_input(X, apply_forget=True)
+
+        for layer in self.layers:
+            layer_outputs = []
+            for reg in layer:
+                n_features = reg.n_features_in_
+                preds = reg.predict(X_current[:, :n_features]).reshape(-1, 1)
+                layer_outputs.append(preds)
+            output = np.hstack(layer_outputs)
+            X_current = np.hstack([X_current, output])

+        final_outputs = []
+        for reg in self.layers[-1]:
+            n_features = reg.n_features_in_
+            final_outputs.append(reg.predict(X_current[:, :n_features]).reshape(-1, 1))
+        return np.mean(np.hstack(final_outputs), axis=1)
+
+    def get_best_model(self):
+        return self.best_model, self.best_rmse
+
+"""
+# ============================== Test Example ==============================
+import numpy as np
+import copy
+import itertools
+from scipy import ndimage
+from xgboost import XGBRegressor
+from sklearn.metrics import mean_squared_error
+from sklearn.model_selection import train_test_split
+
+# Generate synthetic 4D data: (samples, time, spatial, features)
+# time order is like [t (today), t - 1 (yesterday), t - 2, ...]
+n_samples = 200
+n_time = 5
+n_spatial = 4
+n_features = 5
+
+np.random.seed(42)
+X = np.random.rand(n_samples, n_time, n_spatial, n_features)
+y = X[:, :3, :2, :4].mean(axis=(1, 2, 3)) + 0.1 * np.random.randn(n_samples)
+y = y.ravel()
+
+# Split
+X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3, random_state=42)
+
+# Train model
+model = SmartForest4D(
+    n_estimators_per_layer=5,
+    max_layers=20,
+    early_stopping_rounds=5,
+    spatial_h=2,
+    spatial_w=2,
+    forget_factor=0.1,
+    verbose=1
+)
+model.fit(X_train, y_train, X_val, y_val)
+
+# Predict
+y_pred = model.predict(X_val)
+rmse = np.sqrt(mean_squared_error(y_val, y_pred))
+print("\n✅ Final RMSE on validation set:", rmse)
+
+# Output best model and RMSE
+best_model, best_rmse = model.get_best_model()
+print("\nBest validation RMSE:", best_rmse)
+"""
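Before any trees are fit, `_prepare_input` flattens each batch. The standalone sketch below (shapes made up for illustration) reproduces its two main steps: the normalized exponential decay along the time axis (the `[::-1]` reversal places the largest weight on the last index of that axis) and the spatial max pooling that precedes flattening.

import numpy as np

# Made-up shapes: 2 samples, 4 time steps, 6 spatial cells (a 2x3 grid), 3 features.
n_samples, n_time, n_spatial, n_features = 2, 4, 6, 3
forget_factor = 0.3
X = np.random.rand(n_samples, n_time, n_spatial, n_features)

# Exponential decay along time, reversed and normalized, as in _prepare_input.
decay = np.exp(-forget_factor * np.arange(n_time))[::-1]
decay = decay / decay.sum()
X = X * decay.reshape(1, n_time, 1, 1)

# Spatial max pooling, then flatten time * features for the tree regressors.
X_pooled = X.max(axis=2)                   # (2, 4, 3)
X_flat = X_pooled.reshape(n_samples, -1)   # (2, 12)
print(X_flat.shape)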
+
+# ============================================================================================================================================================
+# Function mode
+
+import xarray as xr
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+
+def srcnn(learning_rate=0.001):
+    """
+    Builds and compiles a Super-Resolution Convolutional Neural Network (SRCNN) model
+    that fuses features from both low-resolution and high-resolution images.
+
+    This model uses two parallel input streams:
+    - A low-resolution input which undergoes upscaling through convolutional layers.
+    - A high-resolution input from which texture features are extracted and fused with the low-resolution stream.
+
+    Args:
+        learning_rate (float): Learning rate for the Adam optimizer.
+
+    Returns:
+        keras.Model: A compiled Keras model ready for training.
+    """
+    # Input layers
+    lowres_input = layers.Input(shape=(None, None, 1))   # Low-resolution input
+    highres_input = layers.Input(shape=(None, None, 1))  # High-resolution image
+
+    # Feature extraction from high-resolution image
+    highres_features = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(highres_input)
+    highres_features = layers.Conv2D(128, (3, 3), activation="relu", padding="same")(highres_features)
+
+    # Processing low-resolution input
+    x = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(lowres_input)
+    x = layers.Conv2D(128, (3, 3), activation="relu", padding="same")(x)
+
+    # Fusion of high-resolution image textures
+    fusion = layers.Concatenate()([x, highres_features])
+    fusion = layers.Conv2D(128, (3, 3), activation="relu", padding="same")(fusion)
+    fusion = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(fusion)
+
+    # Output
+    output = layers.Conv2D(1, (3, 3), activation="sigmoid", padding="same")(fusion)
+
+    model = keras.Model(inputs=[lowres_input, highres_input], outputs=output)
+    model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate), loss="mse")
+
+    return model
+
+def print_model(model):
+    return model.summary()
+
+def train(lowres_data, highres_data, epochs=100, batch_size=1, verbose=1, save_path=None):
+    model = srcnn()
+    # Train SRCNN; the high-resolution image doubles as the reconstruction target
+    model.fit([lowres_data, highres_data], highres_data, epochs=epochs, batch_size=batch_size, verbose=verbose)
+    # Save the complete model
+    # Recommended in newer versions of Keras (TensorFlow 2.11+): e.g., 'texture_fusion_model.keras'
+    if save_path:
+        model.save(save_path)
+    return model
+
+def apply(model, lowres_data_app, highres_data):
+    super_resolved = model.predict([lowres_data_app, highres_data]).squeeze()
+    super_resolved = xr.DataArray(
+        super_resolved,
+        dims=("latitude", "longitude"),
+        coords={"latitude": highres_data.latitude, "longitude": highres_data.longitude},
+        name="super_res"
+    )
+    return super_resolved
+
+def load_model(save_path):
+    model = keras.models.load_model(save_path)
+    return model
+
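A minimal end-to-end sketch of the function mode above, with random arrays standing in for real low- and high-resolution imagery (the shapes here are illustrative only; `apply` is skipped because it expects xarray inputs carrying latitude/longitude coordinates):

import numpy as np

# Illustrative stand-ins: one 64x64 single-channel image per stream, scaled to [0, 1].
lowres = np.random.rand(1, 64, 64, 1).astype("float32")
highres = np.random.rand(1, 64, 64, 1).astype("float32")

model = train(lowres, highres, epochs=2, batch_size=1, verbose=0,
              save_path="texture_fusion_model.keras")
restored = load_model("texture_fusion_model.keras")
pred = restored.predict([lowres, highres]).squeeze()  # (64, 64) array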
+# ------------------------------------------------------------------------------------------------------------------------------------------------------------
+# Class mode
+
+import numpy as np
+import xarray as xr
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+from tensorflow.keras.callbacks import EarlyStopping
+
+class TextureFusionSRCNN:
+    def __init__(self, learning_rate=0.001):
+        self.learning_rate = learning_rate
+        self.model = self._build_model()
+
+    def _build_model(self):
+        # Input layers
+        lowres_input = layers.Input(shape=(None, None, 1))   # Low-resolution input
+        highres_input = layers.Input(shape=(None, None, 1))  # High-resolution image
+
+        # Feature extraction from high-resolution image
+        highres_features = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(highres_input)
+        highres_features = layers.Conv2D(128, (3, 3), activation="relu", padding="same")(highres_features)
+
+        # Processing low-resolution input
+        x = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(lowres_input)
+        x = layers.Conv2D(128, (3, 3), activation="relu", padding="same")(x)
+
+        # Fusion of high-resolution image textures
+        fusion = layers.Concatenate()([x, highres_features])
+        fusion = layers.Conv2D(128, (3, 3), activation="relu", padding="same")(fusion)
+        fusion = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(fusion)
+
+        # Output
+        output = layers.Conv2D(1, (3, 3), activation="sigmoid", padding="same")(fusion)
+
+        model = keras.Model(inputs=[lowres_input, highres_input], outputs=output)
+        model.compile(optimizer=keras.optimizers.Adam(learning_rate=self.learning_rate), loss="mse")
+
+        return model
+
+    def summary(self):
+        return self.model.summary()
+
+    def train(self, lowres_data, highres_data, epochs=100, batch_size=1, verbose=1, save_path=None):
+        early_stop = EarlyStopping(
+            monitor='loss',  # You can change to 'val_loss' if you add validation
+            patience=10,     # Number of epochs with no improvement after which training will be stopped
+            restore_best_weights=True
+        )
+
+        self.model.fit(
+            [lowres_data, highres_data], highres_data,
+            epochs=epochs,
+            batch_size=batch_size,
+            verbose=verbose,
+            callbacks=[early_stop]
+        )
+
+        if save_path:
+            self.model.save(save_path)
+
+    def apply(self, lowres_data_app, highres_data):
+        super_resolved = self.model.predict([lowres_data_app, highres_data]).squeeze()
+        return super_resolved
+
+    @staticmethod
+    def load(save_path):
+        model = keras.models.load_model(save_path)
+        instance = TextureFusionSRCNN()
+        instance.model = model
+        return instance
+
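The equivalent round trip in class mode, under the same illustrative data assumptions; `load` rebuilds an instance and swaps in the restored Keras model:

import numpy as np

lowres = np.random.rand(1, 64, 64, 1).astype("float32")
highres = np.random.rand(1, 64, 64, 1).astype("float32")

sr = TextureFusionSRCNN(learning_rate=0.001)
sr.train(lowres, highres, epochs=2, batch_size=1, verbose=0,
         save_path="texture_fusion_model.keras")  # .keras path per the Keras 2.11+ note above

restored = TextureFusionSRCNN.load("texture_fusion_model.keras")
pred = restored.apply(lowres, highres)  # (64, 64) ndarray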