sdufseval 1.0.2.tar.gz → 1.0.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sdufseval
-Version: 1.0.2
+Version: 1.0.3
 Summary: Evaluation and Benchmark Tool for Feature Selection
 Project-URL: Homepage, https://github.com/mrajabinasab/FSEVAL
 Project-URL: Bug Tracker, https://github.com/mrajabinasab/FSEVAL/issues
@@ -118,7 +118,9 @@ Initializes the evalutation and benchmark object.
 | :--- | :--- | :--- |
 | **`output_dir`** | results | Folder where CSV result files are saved. |
 | **`cv`** | 5 | Cross-validation folds (supervised only). |
-| **`avg_steps`** | 10 | Number of random restarts / seeds to average over.|
+| **`avg_steps`** | 10 | Number of repetitions for stochastic methods.|
+| **`supervised_iter`** | 5 | Number of classifier's runs with different random seeds.|
+| **`unsupervised_iter`** | 10 | Number of clustering runs with different random seeds.|
 | **`eval_type`** | both | "supervised", "unsupervised", or "both". |
 | **`metrics`** | ["CLSACC", "NMI", "ACC", "AUC"] | Evaluation metrics to calculate. |
 | **`experiments`** | ["10Percent", "100Percent"] | Which feature ratio grids to evaluate. |
@@ -95,7 +95,9 @@ Initializes the evalutation and benchmark object.
 | :--- | :--- | :--- |
 | **`output_dir`** | results | Folder where CSV result files are saved. |
 | **`cv`** | 5 | Cross-validation folds (supervised only). |
-| **`avg_steps`** | 10 | Number of random restarts / seeds to average over.|
+| **`avg_steps`** | 10 | Number of repetitions for stochastic methods.|
+| **`supervised_iter`** | 5 | Number of classifier's runs with different random seeds.|
+| **`unsupervised_iter`** | 10 | Number of clustering runs with different random seeds.|
 | **`eval_type`** | both | "supervised", "unsupervised", or "both". |
 | **`metrics`** | ["CLSACC", "NMI", "ACC", "AUC"] | Evaluation metrics to calculate. |
 | **`experiments`** | ["10Percent", "100Percent"] | Which feature ratio grids to evaluate. |
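Together, these two hunks (the same table appears twice in the sdist, apparently once in the package metadata and once in the README) document the split of the old `avg_steps` into three separate knobs. A minimal construction sketch; the top-level import path is an assumption, since the diff does not show the package's `__init__.py`:

```python
# Sketch only: the import path below is assumed, not shown in this diff.
from sdufseval import FSEVAL

bench = FSEVAL(
    output_dir="results",
    cv=5,
    avg_steps=10,          # repetitions for stochastic feature-selection methods
    supervised_iter=5,     # classifier runs with different random seeds
    unsupervised_iter=10,  # clustering runs with different random seeds
    eval_type="both",
    metrics=["CLSACC", "NMI", "ACC", "AUC"],
    experiments=["10Percent", "100Percent"],
)
```

Note that callers upgrading from 1.0.2 get a behavior change by default: supervised evaluation now repeats `supervised_iter=5` times rather than following `avg_steps=10`.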
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "sdufseval"
-version = "1.0.2"
+version = "1.0.3"
 authors = [
   { name="Muhammad Rajabinasab", email="muhammad.rajabinasab@outlook.com" },
 ]
@@ -12,7 +12,9 @@ class FSEVAL:
     def __init__(self,
                  output_dir="results",
                  cv=5,
-                 avg_steps=10,
+                 avg_steps=10,
+                 supervised_iter=5,
+                 unsupervised_iter=10,
                  eval_type="both",
                  metrics=None,
                  experiments=None):
@@ -22,6 +24,8 @@ class FSEVAL:
         self.output_dir = output_dir
         self.cv = cv
         self.avg_steps = avg_steps
+        self.supervised_iter = supervised_iter
+        self.unsupervised_iter = unsupervised_iter
         self.eval_type = eval_type
 
         # Metric configuration
@@ -68,7 +72,7 @@ class FSEVAL:
            name = m_info['name']
            fs_func = m_info['func']
            # Stochastic methods run 10 times and average
-            repeats = 10 if m_info.get('stochastic', False) else 1
+            repeats = self.avg_steps if m_info.get('stochastic', False) else 1
 
            # Internal storage for current dataset results
            ds_results = {s: {met: [] for met in self.selected_metrics} for s in self.scales}
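This hunk replaces the hard-coded repeat count with the configurable `avg_steps`. A self-contained sketch of the repeat-and-average pattern it implements, with a hypothetical stochastic scorer standing in for the package's `m_info['func']` entries:

```python
import numpy as np

def average_stochastic_scores(fs_func, X, avg_steps, stochastic):
    # Mirrors the hunk above: deterministic selectors run once, stochastic
    # ones run avg_steps times and their per-feature scores are averaged.
    repeats = avg_steps if stochastic else 1
    scores = [fs_func(X) for _ in range(repeats)]
    return np.mean(scores, axis=0)

# Hypothetical stochastic scorer: per-feature variance plus seed-dependent noise.
rng = np.random.default_rng(42)
X = rng.normal(size=(100, 8))

def noisy_scorer(data):
    return data.var(axis=0) + rng.normal(scale=0.05, size=data.shape[1])

print(average_stochastic_scores(noisy_scorer, X, avg_steps=10, stochastic=True))
```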
@@ -91,11 +95,11 @@ class FSEVAL:
            c_acc, nmi, acc, auc = np.nan, np.nan, np.nan, np.nan
 
            if self.eval_type in ["unsupervised", "both"]:
-                c_acc, nmi = unsupervised_eval(X_subset, y, avg_steps=self.avg_steps)
+                c_acc, nmi = unsupervised_eval(X_subset, y, avg_steps=self.unsupervised_iter)
 
            if self.eval_type in ["supervised", "both"]:
                # Passes classifier (None or instance) to eval.py
-                acc, auc = supervised_eval(X_subset, y, classifier=classifier, cv=self.cv, avg_steps=self.avg_steps)
+                acc, auc = supervised_eval(X_subset, y, classifier=classifier, cv=self.cv, avg_steps=self.supervised_iter)
 
            # Map metrics to columns
            mapping = {"CLSACC": c_acc, "NMI": nmi, "ACC": acc, "AUC": auc}
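The final hunk decouples the two evaluation paths: clustering repetitions follow `unsupervised_iter` and classifier repetitions follow `supervised_iter`, leaving `avg_steps` to the feature selectors alone. A condensed sketch of the resulting dispatch, with the `eval.py` helpers stubbed out since their bodies are not part of this diff:

```python
import numpy as np
from types import SimpleNamespace

# Stubs for the eval.py helpers; signatures mirror the calls in the hunk above,
# bodies are placeholders.
def unsupervised_eval(X, y, avg_steps):
    return 0.0, 0.0  # (clustering accuracy, NMI)

def supervised_eval(X, y, classifier, cv, avg_steps):
    return 0.0, 0.0  # (accuracy, AUC)

def evaluate_subset(cfg, X_subset, y, classifier=None):
    c_acc = nmi = acc = auc = np.nan
    if cfg.eval_type in ["unsupervised", "both"]:
        # As of 1.0.3, clustering seeds follow unsupervised_iter...
        c_acc, nmi = unsupervised_eval(X_subset, y, avg_steps=cfg.unsupervised_iter)
    if cfg.eval_type in ["supervised", "both"]:
        # ...and classifier seeds follow supervised_iter, independent of avg_steps.
        acc, auc = supervised_eval(X_subset, y, classifier=classifier,
                                   cv=cfg.cv, avg_steps=cfg.supervised_iter)
    return {"CLSACC": c_acc, "NMI": nmi, "ACC": acc, "AUC": auc}

cfg = SimpleNamespace(eval_type="both", cv=5, supervised_iter=5, unsupervised_iter=10)
print(evaluate_subset(cfg, X_subset=None, y=None))
```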