@nahisaho/satori 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. package/LICENCE +0 -0
  2. package/README.md +191 -0
  3. package/bin/satori.js +95 -0
  4. package/package.json +29 -0
  5. package/src/.github/skills/scientific-academic-writing/SKILL.md +361 -0
  6. package/src/.github/skills/scientific-academic-writing/assets/acs_article.md +199 -0
  7. package/src/.github/skills/scientific-academic-writing/assets/elsevier_article.md +244 -0
  8. package/src/.github/skills/scientific-academic-writing/assets/ieee_transactions.md +212 -0
  9. package/src/.github/skills/scientific-academic-writing/assets/imrad_standard.md +181 -0
  10. package/src/.github/skills/scientific-academic-writing/assets/nature_article.md +179 -0
  11. package/src/.github/skills/scientific-academic-writing/assets/qiita_technical_article.md +385 -0
  12. package/src/.github/skills/scientific-academic-writing/assets/science_research_article.md +169 -0
  13. package/src/.github/skills/scientific-bioinformatics/SKILL.md +220 -0
  14. package/src/.github/skills/scientific-biosignal-processing/SKILL.md +357 -0
  15. package/src/.github/skills/scientific-causal-inference/SKILL.md +347 -0
  16. package/src/.github/skills/scientific-cheminformatics/SKILL.md +196 -0
  17. package/src/.github/skills/scientific-data-preprocessing/SKILL.md +413 -0
  18. package/src/.github/skills/scientific-data-simulation/SKILL.md +244 -0
  19. package/src/.github/skills/scientific-doe/SKILL.md +360 -0
  20. package/src/.github/skills/scientific-eda-correlation/SKILL.md +141 -0
  21. package/src/.github/skills/scientific-feature-importance/SKILL.md +208 -0
  22. package/src/.github/skills/scientific-image-analysis/SKILL.md +310 -0
  23. package/src/.github/skills/scientific-materials-characterization/SKILL.md +368 -0
  24. package/src/.github/skills/scientific-meta-analysis/SKILL.md +352 -0
  25. package/src/.github/skills/scientific-metabolomics/SKILL.md +326 -0
  26. package/src/.github/skills/scientific-ml-classification/SKILL.md +265 -0
  27. package/src/.github/skills/scientific-ml-regression/SKILL.md +215 -0
  28. package/src/.github/skills/scientific-multi-omics/SKILL.md +303 -0
  29. package/src/.github/skills/scientific-network-analysis/SKILL.md +257 -0
  30. package/src/.github/skills/scientific-pca-tsne/SKILL.md +235 -0
  31. package/src/.github/skills/scientific-pipeline-scaffold/SKILL.md +331 -0
  32. package/src/.github/skills/scientific-process-optimization/SKILL.md +215 -0
  33. package/src/.github/skills/scientific-publication-figures/SKILL.md +208 -0
  34. package/src/.github/skills/scientific-sequence-analysis/SKILL.md +389 -0
  35. package/src/.github/skills/scientific-spectral-signal/SKILL.md +227 -0
  36. package/src/.github/skills/scientific-statistical-testing/SKILL.md +240 -0
  37. package/src/.github/skills/scientific-survival-clinical/SKILL.md +239 -0
  38. package/src/.github/skills/scientific-time-series/SKILL.md +291 -0
@@ -0,0 +1,265 @@
1
+ ---
2
+ name: scientific-ml-classification
3
+ description: |
4
+ 機械学習分類パイプラインのスキル。複数の分類モデル(Logistic Regression, Random Forest,
5
+ SVM, XGBoost)を StratifiedKFold 交差検証で比較し、ROC 曲線・混同行列で評価する際に使用。
6
+ Scientific Skills Exp-03, 05 で確立したパターン。
7
+ ---
8
+
9
+ # Scientific ML Classification Pipeline
10
+
11
+ バイナリ/マルチクラス分類タスクのための統一 ML パイプライン。
12
+ StratifiedKFold 交差検証で複数モデルを公平に比較し、ROC-AUC と混同行列で評価する。
13
+
14
+ ## When to Use
15
+
16
+ - カテゴリ予測タスク(二値分類・多クラス分類)
17
+ - がん分類、毒性予測、材料分類などの判別問題
18
+ - 複数分類モデルの性能比較が必要なとき
19
+
20
+ ## Quick Start
21
+
22
+ ## 標準パイプライン
23
+
24
+ ### 1. モデル定義
25
+
26
+ ```python
27
+ from sklearn.linear_model import LogisticRegression
28
+ from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
29
+ from sklearn.svm import SVC
30
+ from sklearn.model_selection import StratifiedKFold, cross_val_score
31
+ from sklearn.preprocessing import StandardScaler
32
+ from sklearn.metrics import (accuracy_score, precision_score, recall_score,
33
+ f1_score, roc_auc_score, roc_curve,
34
+ confusion_matrix, classification_report)
35
+
36
# Candidate classifiers for the comparison pipeline.
# Every estimator is seeded (random_state=42) so runs are reproducible;
# SVC gets probability=True so it can contribute to ROC/PR curves.
MODEL_DEFS = {}
MODEL_DEFS["Logistic Regression"] = LogisticRegression(max_iter=1000,
                                                       random_state=42)
MODEL_DEFS["Random Forest"] = RandomForestClassifier(n_estimators=200,
                                                     max_depth=15,
                                                     random_state=42)
MODEL_DEFS["Gradient Boosting"] = GradientBoostingClassifier(n_estimators=200,
                                                             random_state=42)
MODEL_DEFS["SVM"] = SVC(kernel="rbf", probability=True, random_state=42)
44
+ ```
45
+
46
+ ### 2. StratifiedKFold 学習 & 評価
47
+
48
+ ```python
49
+ import numpy as np
50
+ import pandas as pd
51
+
52
def train_evaluate_classifiers(X_train, X_test, y_train, y_test,
                               model_defs=None, n_splits=5):
    """Train and evaluate every classifier with StratifiedKFold CV.

    Features are standardized with a scaler fitted on the training split
    only, so no test-set information leaks into scaling.

    Parameters
    ----------
    X_train, X_test : array-like feature matrices.
    y_train, y_test : array-like class labels.
    model_defs : dict of name -> unfitted estimator (defaults to MODEL_DEFS).
    n_splits : number of stratified CV folds.

    Returns
    -------
    (results_df, trained_models) : metrics table (also written to
    ``results/classification_metrics.csv``) and a dict mapping each model
    name to its fitted estimator, test predictions and probabilities.

    NOTE(review): ROC_AUC uses the positive-class probability column, so
    it is only meaningful for binary targets — confirm before using this
    on multiclass data.
    """
    import copy  # hoisted: the original re-imported inside the loop
    import os

    if model_defs is None:
        model_defs = MODEL_DEFS

    scaler = StandardScaler()
    X_train_sc = scaler.fit_transform(X_train)
    X_test_sc = scaler.transform(X_test)

    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
    results = []
    trained_models = {}

    for name, model in model_defs.items():
        m = copy.deepcopy(model)  # keep the template estimator unfitted
        m.fit(X_train_sc, y_train)
        y_pred = m.predict(X_test_sc)
        y_proba = (m.predict_proba(X_test_sc)[:, 1]
                   if hasattr(m, "predict_proba") else None)

        # CV runs on a fresh copy so the test-set fit above is not reused.
        cv_scores = cross_val_score(copy.deepcopy(model), X_train_sc, y_train,
                                    cv=skf, scoring="accuracy")

        results.append({
            "Model": name,
            "Accuracy": accuracy_score(y_test, y_pred),
            "Precision": precision_score(y_test, y_pred, average="weighted"),
            "Recall": recall_score(y_test, y_pred, average="weighted"),
            "F1": f1_score(y_test, y_pred, average="weighted"),
            "ROC_AUC": roc_auc_score(y_test, y_proba) if y_proba is not None else np.nan,
            "CV_Accuracy_mean": cv_scores.mean(),
            "CV_Accuracy_std": cv_scores.std(),
        })
        trained_models[name] = {"model": m, "y_pred": y_pred, "y_proba": y_proba}

    results_df = pd.DataFrame(results)
    os.makedirs("results", exist_ok=True)  # avoid FileNotFoundError on first run
    results_df.to_csv("results/classification_metrics.csv", index=False)
    return results_df, trained_models
91
+ ```
92
+
93
+ ### 3. ROC 曲線
94
+
95
+ ```python
96
+ import matplotlib.pyplot as plt
97
+
98
def plot_roc_curves(y_test, trained_models, figsize=(8, 8)):
    """Overlay ROC curves for every model that exposes probabilities.

    Models whose ``y_proba`` entry is ``None`` (estimators without
    ``predict_proba``) are skipped. The figure is saved to
    ``figures/roc_curves.png``; the directory is created if missing
    (the original crashed on a fresh working tree).
    """
    import os

    fig, ax = plt.subplots(figsize=figsize)

    for name, info in trained_models.items():
        if info["y_proba"] is not None:
            fpr, tpr, _ = roc_curve(y_test, info["y_proba"])
            auc = roc_auc_score(y_test, info["y_proba"])
            ax.plot(fpr, tpr, linewidth=2, label=f"{name} (AUC={auc:.3f})")

    # Chance-level diagonal for reference.
    ax.plot([0, 1], [0, 1], "k--", linewidth=1)
    ax.set_xlabel("False Positive Rate")
    ax.set_ylabel("True Positive Rate")
    ax.set_title("ROC Curves", fontweight="bold")
    ax.legend()
    plt.tight_layout()
    os.makedirs("figures", exist_ok=True)
    plt.savefig("figures/roc_curves.png", dpi=300, bbox_inches="tight")
    plt.close()
116
+ ```
117
+
118
+ ### 4. 混同行列
119
+
120
+ ```python
121
+ import seaborn as sns
122
+
123
def plot_confusion_matrices(y_test, trained_models, class_names=None,
                            ncols=2, figsize=(14, 12)):
    """Draw one confusion-matrix heatmap per model in a grid.

    Fixes over the original: an empty ``trained_models`` no longer raises
    NameError on the loop index; a 1x1 grid (where ``plt.subplots``
    returns a bare Axes, not an array) is handled; the output directory
    is created before saving. Saved to ``figures/confusion_matrices.png``.
    """
    import os

    n_models = len(trained_models)
    if n_models == 0:
        return  # nothing to plot

    nrows = (n_models + ncols - 1) // ncols
    fig, axes = plt.subplots(nrows, ncols, figsize=figsize)
    axes = np.array(axes).ravel()  # uniform 1-D indexing for any grid shape

    for i, (name, info) in enumerate(trained_models.items()):
        cm = confusion_matrix(y_test, info["y_pred"])
        sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", ax=axes[i],
                    xticklabels=class_names, yticklabels=class_names)
        axes[i].set_title(name, fontweight="bold")
        axes[i].set_xlabel("Predicted")
        axes[i].set_ylabel("Actual")

    # Hide unused cells of the grid.
    for j in range(n_models, len(axes)):
        axes[j].set_visible(False)

    plt.tight_layout()
    os.makedirs("figures", exist_ok=True)
    plt.savefig("figures/confusion_matrices.png", dpi=300, bbox_inches="tight")
    plt.close()
145
+ ```
146
+
147
+ ### 5. Precision-Recall 曲線
148
+
149
+ ```python
150
+ from sklearn.metrics import precision_recall_curve, average_precision_score
151
+
152
def plot_precision_recall_curves(y_test, trained_models, figsize=(8, 8)):
    """Overlay Precision-Recall curves for all probability-capable models.

    PR curves are more informative than ROC under class imbalance.
    Fixes: the no-skill baseline uses ``np.mean`` so plain Python lists
    work as ``y_test`` (the original called ``y_test.mean()``), and the
    output directory is created if missing. Saved to
    ``figures/precision_recall_curves.png``.
    """
    import os

    fig, ax = plt.subplots(figsize=figsize)

    for name, info in trained_models.items():
        if info["y_proba"] is not None:
            precision, recall, _ = precision_recall_curve(y_test, info["y_proba"])
            ap = average_precision_score(y_test, info["y_proba"])
            ax.plot(recall, precision, linewidth=2,
                    label=f"{name} (AP={ap:.3f})")

    # No-skill baseline: the positive-class prevalence.
    baseline = np.mean(y_test)
    ax.axhline(baseline, color="gray", linestyle="--", linewidth=1,
               label=f"Baseline ({baseline:.3f})")

    ax.set_xlabel("Recall")
    ax.set_ylabel("Precision")
    ax.set_title("Precision-Recall Curves", fontweight="bold")
    ax.legend()
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1.05])
    plt.tight_layout()
    os.makedirs("figures", exist_ok=True)
    plt.savefig("figures/precision_recall_curves.png", dpi=300,
                bbox_inches="tight")
    plt.close()
179
+ ```
180
+
181
+ ### 6. Partial Dependence Plot (PDP)
182
+
183
+ ```python
184
+ from sklearn.inspection import PartialDependenceDisplay
185
+
186
def plot_partial_dependence(model, X_train_scaled, feature_names,
                            top_n=6, figsize=(14, 8)):
    """Partial dependence plots for the first ``top_n`` features.

    Visualizes how the fitted model's prediction responds to each feature,
    marginalizing over the rest.

    NOTE(review): features are taken in column order, not by importance
    ranking — pre-sort ``feature_names`` (and the matching columns of
    ``X_train_scaled``) if "top" should mean "most important".

    Saved to ``figures/partial_dependence.png``; the directory is created
    if missing (the original crashed on a fresh working tree).
    """
    import os

    fig, ax = plt.subplots(figsize=figsize)
    features_idx = list(range(min(top_n, len(feature_names))))
    PartialDependenceDisplay.from_estimator(
        model, X_train_scaled, features=features_idx,
        feature_names=feature_names,
        ax=ax, grid_resolution=50
    )
    plt.suptitle("Partial Dependence Plots", fontweight="bold", y=1.02)
    plt.tight_layout()
    os.makedirs("figures", exist_ok=True)
    plt.savefig("figures/partial_dependence.png", dpi=300, bbox_inches="tight")
    plt.close()
201
+ ```
202
+
203
+ ### 7. Volcano Plot(差次発現/差次特徴量)
204
+
205
+ ```python
206
+ from scipy import stats
207
+
208
def volcano_plot(df, group_col, value_cols, group1, group2,
                 fc_threshold=1.0, p_threshold=0.05, figsize=(10, 8)):
    """Volcano plot: per-feature effect size vs. Mann-Whitney U p-value.

    Parameters
    ----------
    df : DataFrame with one row per sample.
    group_col : column holding the group label.
    value_cols : feature columns to test.
    group1, group2 : group labels to compare (effect = group2 - group1).
    fc_threshold, p_threshold : cut-offs for the Up/Down coloring.

    Returns
    -------
    DataFrame with per-feature effect, p-value and -log10(p). The figure
    is saved to ``figures/volcano_plot.png``.

    NOTE(review): the "log2FC" column is a difference of raw means; it is
    a true log2 fold change only if the values are already log2-scaled —
    confirm upstream preprocessing.
    """
    import os

    results = []
    g1 = df[df[group_col] == group1]
    g2 = df[df[group_col] == group2]

    for col in value_cols:
        # Non-parametric two-sided test; the U statistic itself is unused.
        _, pval = stats.mannwhitneyu(g1[col].dropna(), g2[col].dropna(),
                                     alternative="two-sided")
        fc = g2[col].mean() - g1[col].mean()  # mean difference (see NOTE above)
        results.append({"Feature": col, "log2FC": fc, "pvalue": pval,
                        "neg_log10p": -np.log10(pval + 1e-300)})  # epsilon guards log10(0)

    vdf = pd.DataFrame(results)

    fig, ax = plt.subplots(figsize=figsize)
    # Color classes: significantly up / significantly down / not significant.
    sig_up = (vdf["log2FC"] > fc_threshold) & (vdf["pvalue"] < p_threshold)
    sig_down = (vdf["log2FC"] < -fc_threshold) & (vdf["pvalue"] < p_threshold)
    ns = ~(sig_up | sig_down)

    ax.scatter(vdf.loc[ns, "log2FC"], vdf.loc[ns, "neg_log10p"],
               c="gray", alpha=0.5, s=20, label="NS")
    ax.scatter(vdf.loc[sig_up, "log2FC"], vdf.loc[sig_up, "neg_log10p"],
               c="red", alpha=0.7, s=30, label="Up")
    ax.scatter(vdf.loc[sig_down, "log2FC"], vdf.loc[sig_down, "neg_log10p"],
               c="blue", alpha=0.7, s=30, label="Down")
    ax.axhline(-np.log10(p_threshold), color="gray", linestyle="--")
    ax.axvline(fc_threshold, color="gray", linestyle="--")
    ax.axvline(-fc_threshold, color="gray", linestyle="--")
    ax.set_xlabel("log₂ Fold Change")
    ax.set_ylabel("-log₁₀(p-value)")
    ax.set_title("Volcano Plot", fontweight="bold")
    ax.legend()
    plt.tight_layout()
    os.makedirs("figures", exist_ok=True)
    plt.savefig("figures/volcano_plot.png", dpi=300, bbox_inches="tight")
    plt.close()
    return vdf
247
+ ```
248
+
249
+ ## References
250
+
251
+ ### Output Files
252
+
253
+ | ファイル | 形式 |
254
+ |---|---|
255
+ | `results/classification_metrics.csv` | CSV |
256
+ | `figures/roc_curves.png` | PNG |
257
+ | `figures/precision_recall_curves.png` | PNG |
258
+ | `figures/confusion_matrices.png` | PNG |
259
+ | `figures/partial_dependence.png` | PNG |
260
+ | `figures/volcano_plot.png` | PNG |
261
+
262
+ #### 参照実験
263
+
264
+ - **Exp-03**: がん遺伝子発現の 4 モデル分類 + ROC + 混同行列
265
+ - **Exp-05**: 毒性予測の二値分類 + Volcano Plot + PR 曲線 + PDP
@@ -0,0 +1,215 @@
1
+ ---
2
+ name: scientific-ml-regression
3
+ description: |
4
+ マルチターゲット回帰モデルの学習・評価・比較スキル。複数の回帰モデル(Ridge, Lasso,
5
+ Random Forest, Gradient Boosting, Extra Trees)を KFold 交差検証で比較する際に使用。
6
+ Scientific Skills Exp-12, 13 で確立したパターン。
7
+ ---
8
+
9
+ # Scientific ML Regression Pipeline
10
+
11
+ 複数の回帰モデルを統一フレームワークで学習・評価・比較するパイプライン。
12
+ マルチターゲット回帰(複数の出力変数を同時に予測)に対応。
13
+
14
+ ## When to Use
15
+
16
+ - 連続値の予測タスク
17
+ - 複数モデルの性能比較が必要なとき
18
+ - マルチターゲット回帰(複数出力変数の同時予測)
19
+ - 交差検証による汎化性能の評価
20
+
21
+ ## Quick Start
22
+
23
+ ## 標準パイプライン
24
+
25
+ ### 1. モデル定義
26
+
27
+ ```python
28
+ from sklearn.linear_model import Ridge, Lasso, ElasticNet
29
+ from sklearn.ensemble import (RandomForestRegressor,
30
+ GradientBoostingRegressor,
31
+ ExtraTreesRegressor)
32
+ from sklearn.model_selection import KFold, cross_val_score
33
+ from sklearn.preprocessing import StandardScaler
34
+ from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
35
+ import numpy as np
36
+ import pandas as pd
37
+
38
# Candidate regressors for the comparison pipeline.
# Tree ensembles are seeded (random_state=42) for reproducible runs;
# Lasso gets a high max_iter so coordinate descent converges.
MODEL_DEFS = {}
MODEL_DEFS["Ridge"] = Ridge(alpha=1.0)
MODEL_DEFS["Lasso"] = Lasso(alpha=0.1, max_iter=10000)
MODEL_DEFS["Random Forest"] = RandomForestRegressor(n_estimators=200,
                                                    max_depth=15,
                                                    random_state=42)
MODEL_DEFS["Gradient Boosting"] = GradientBoostingRegressor(n_estimators=200,
                                                            random_state=42)
MODEL_DEFS["Extra Trees"] = ExtraTreesRegressor(n_estimators=200,
                                                max_depth=15,
                                                random_state=42)
48
+ ```
49
+
50
+ ### 2. マルチターゲット学習 & 評価
51
+
52
+ ```python
53
def train_evaluate_models(X_train, X_test, y_train, y_test,
                          target_names, model_defs=None, n_splits=5):
    """Train and evaluate every model for every regression target.

    Features are standardized with a scaler fitted on the training split
    only. For each (target, model) pair the test-set R2/RMSE/MAE and the
    KFold CV RMSE are recorded; the best model per target (highest test
    R2) is retained.

    Parameters
    ----------
    X_train, X_test : feature matrices.
    y_train, y_test : DataFrames with one column per target.
    target_names : iterable of target column names.
    model_defs : dict of name -> unfitted estimator (defaults to MODEL_DEFS).
    n_splits : number of CV folds.

    Returns
    -------
    (results_df, best_models) : metrics table (also written to
    ``results/model_metrics.csv``) and a per-target dict with the fitted
    best model, its name and test R2.
    """
    import copy  # hoisted: the original re-imported inside the inner loop
    import os

    if model_defs is None:
        model_defs = MODEL_DEFS

    scaler = StandardScaler()
    X_train_sc = scaler.fit_transform(X_train)
    X_test_sc = scaler.transform(X_test)

    results = []
    best_models = {}
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)

    for target in target_names:
        y_tr = y_train[target].values
        y_te = y_test[target].values
        best_r2 = -np.inf  # reset per target

        for name, model in model_defs.items():
            m = copy.deepcopy(model)  # keep the template estimator unfitted
            m.fit(X_train_sc, y_tr)
            y_pred = m.predict(X_test_sc)

            r2 = r2_score(y_te, y_pred)
            rmse = np.sqrt(mean_squared_error(y_te, y_pred))
            mae = mean_absolute_error(y_te, y_pred)

            # CV on a fresh copy; sklearn returns negated RMSE scores.
            cv_scores = cross_val_score(
                copy.deepcopy(model), X_train_sc, y_tr,
                cv=kf, scoring="neg_root_mean_squared_error"
            )
            cv_rmse_mean = -cv_scores.mean()
            cv_rmse_std = cv_scores.std()  # std is invariant to the sign flip

            results.append({
                "Target": target,
                "Model": name,
                "R2": r2,
                "RMSE": rmse,
                "MAE": mae,
                "CV_RMSE_mean": cv_rmse_mean,
                "CV_RMSE_std": cv_rmse_std,
            })

            if r2 > best_r2:
                best_r2 = r2
                best_models[target] = {"model": m, "name": name, "r2": r2}

    results_df = pd.DataFrame(results)
    os.makedirs("results", exist_ok=True)  # avoid FileNotFoundError on first run
    results_df.to_csv("results/model_metrics.csv", index=False)
    return results_df, best_models
110
+ ```
111
+
112
+ ### 3. モデル比較可視化(R² バーチャート)
113
+
114
+ ```python
115
+ import matplotlib.pyplot as plt
116
+ import seaborn as sns
117
+
118
def plot_model_comparison_r2(results_df, figsize=(14, 6)):
    """Grouped bar chart of R² per target for every model.

    Fix: the R²=0.8 reference line is now drawn *before* ``ax.legend()``
    is called, so its label actually appears in the legend — the original
    created the legend first and then added the axhline, whose ``label=``
    kwarg was silently ignored. The output directory is also created if
    missing. Saved to ``figures/model_comparison_r2.png``.
    """
    import os

    pivot = results_df.pivot(index="Target", columns="Model", values="R2")
    ax = pivot.plot(kind="bar", figsize=figsize, colormap="Set2", edgecolor="black")
    ax.axhline(y=0.8, color="gray", linestyle="--", alpha=0.5, label="R²=0.8")
    ax.set_ylabel("R² Score")
    ax.set_title("Model Comparison: R² by Target", fontweight="bold")
    ax.set_ylim(0, 1.05)
    ax.legend(title="Model", bbox_to_anchor=(1.05, 1))
    plt.xticks(rotation=45, ha="right")
    plt.tight_layout()
    os.makedirs("figures", exist_ok=True)
    plt.savefig("figures/model_comparison_r2.png", dpi=300, bbox_inches="tight")
    plt.close()
131
+ ```
132
+
133
+ ### 4. Actual vs Predicted プロット
134
+
135
+ ```python
136
def plot_actual_vs_predicted(X_test_sc, y_test, best_models, target_names,
                             ncols=3, figsize=(18, 12)):
    """Scatter of actual vs. predicted values for each target's best model.

    Fixes over the original: empty ``target_names`` no longer raises
    NameError on the loop index; a 1x1 grid (bare Axes) is handled; the
    output directory is created before saving.
    Saved to ``figures/actual_vs_predicted.png``.
    """
    import os

    n_targets = len(target_names)
    if n_targets == 0:
        return  # nothing to plot

    nrows = (n_targets + ncols - 1) // ncols
    fig, axes = plt.subplots(nrows, ncols, figsize=figsize)
    axes = np.array(axes).ravel()  # uniform 1-D indexing for any grid shape

    for i, target in enumerate(target_names):
        m = best_models[target]["model"]
        y_pred = m.predict(X_test_sc)
        y_true = y_test[target].values
        r2 = best_models[target]["r2"]

        axes[i].scatter(y_true, y_pred, alpha=0.5, s=20, edgecolors="k", linewidth=0.3)
        # Identity line across the joint data range marks perfect prediction.
        lims = [min(y_true.min(), y_pred.min()), max(y_true.max(), y_pred.max())]
        axes[i].plot(lims, lims, "r--", linewidth=1.5)
        axes[i].set_xlabel("Actual")
        axes[i].set_ylabel("Predicted")
        axes[i].set_title(f"{target}\n({best_models[target]['name']}, R²={r2:.3f})",
                          fontsize=10, fontweight="bold")

    # Hide unused cells of the grid.
    for j in range(n_targets, len(axes)):
        axes[j].set_visible(False)

    plt.tight_layout()
    os.makedirs("figures", exist_ok=True)
    plt.savefig("figures/actual_vs_predicted.png", dpi=300, bbox_inches="tight")
    plt.close()
163
+ ```
164
+
165
+ ### 5. レーダーチャート比較(Exp-05 パターン)
166
+
167
+ ```python
168
def plot_radar_comparison(results_df, target_name, figsize=(8, 8)):
    """Radar chart comparing all models on a single target.

    The four axes — R2, normalized inverse RMSE, normalized inverse MAE
    and a CV-stability score — are all oriented so that larger is better.
    Saved to ``figures/radar_<target>.png``.
    """
    data = results_df[results_df["Target"] == target_name].copy()
    axis_labels = ["R2", "1-RMSE_norm", "1-MAE_norm", "Stability"]

    # Rescale error metrics to [0, 1] with higher-is-better orientation.
    for metric in ("RMSE", "MAE"):
        worst = data[metric].max()
        data[f"1-{metric}_norm"] = 1 - data[metric] / worst if worst > 0 else 1
    # Stability: penalize large CV spread relative to the mean RMSE.
    data["Stability"] = 1 - data["CV_RMSE_std"] / data["CV_RMSE_mean"]

    n_axes = len(axis_labels)
    angles = list(np.linspace(0, 2 * np.pi, n_axes, endpoint=False))
    angles.append(angles[0])  # repeat the first angle to close the polygon

    fig, ax = plt.subplots(figsize=figsize, subplot_kw=dict(polar=True))

    for _, record in data.iterrows():
        points = [record[label] for label in axis_labels]
        points.append(points[0])  # close the polygon
        ax.plot(angles, points, "o-", linewidth=2, label=record["Model"])
        ax.fill(angles, points, alpha=0.1)

    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(axis_labels)
    ax.set_title(f"Radar: {target_name}", fontweight="bold")
    ax.legend(loc="upper right", bbox_to_anchor=(1.3, 1.1))
    plt.tight_layout()
    plt.savefig(f"figures/radar_{target_name}.png", dpi=300, bbox_inches="tight")
    plt.close()
198
+ ```
199
+
200
+ ## References
201
+
202
+ ### Output Files
203
+
204
+ | ファイル | 形式 |
205
+ |---|---|
206
+ | `results/model_metrics.csv` | CSV |
207
+ | `figures/model_comparison_r2.png` | PNG |
208
+ | `figures/actual_vs_predicted.png` | PNG |
209
+ | `figures/radar_*.png` | PNG |
210
+
211
+ #### 参照実験
212
+
213
+ - **Exp-12**: 6 モデル × 4 ターゲット(エッチング品質特性)
214
+ - **Exp-13**: 5 モデル × 6 ターゲット(PSP 構造・物性)
215
+ - **Exp-05**: レーダーチャートによるモデル比較