sdufseval 1.0.1__tar.gz → 1.0.2__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sdufseval
-Version: 1.0.1
+Version: 1.0.2
 Summary: Evaluation and Benchmark Tool for Feature Selection
 Project-URL: Homepage, https://github.com/mrajabinasab/FSEVAL
 Project-URL: Bug Tracker, https://github.com/mrajabinasab/FSEVAL/issues
@@ -62,7 +62,7 @@ import numpy as np
 
 if __name__ == "__main__":
 
-    # The 23 real datasets
+    # The 23 benchmark datasets
     DATASETS_TO_RUN = [
         'ALLAML', 'CLL_SUB_111', 'COIL20', 'Carcinom', 'GLIOMA', 'GLI_85',
         'Isolet', 'ORL', 'Prostate_GE', 'SMK_CAN_187', 'TOX_171', 'Yale',
@@ -73,7 +73,7 @@ if __name__ == "__main__":
     # Initialize FSEVAL
     evaluator = FSEVAL(output_dir="benchmark_results", avg_steps=10)
 
-    # Configuration for methods using the class internal random_baseline
+    # Configuration for methods
     methods_list = [
         {
             'name': 'Random',
@@ -87,8 +87,19 @@ if __name__ == "__main__":
         }
     ]
 
-    # Run Benchmark (Defaults to RF)
+    # --- 1. Run Standard Benchmark ---
+    # Evaluates methods on real-world datasets across different feature scales
     evaluator.run(DATASETS_TO_RUN, methods_list)
+
+    # --- 2. Run Runtime Analysis ---
+    # Performs scalability testing on synthetic data with a time cap.
+    # vary_param='both' triggers both 'features' and 'instances' experiments.
+    print("\n>>> Starting Scalability Analysis...")
+    evaluator.timer(
+        methods=methods_list,
+        vary_param='both',
+        time_limit=3600  # 1 hour limit
+    )
 ```
 
 ## Data Loading
@@ -107,9 +118,9 @@ Initializes the evaluation and benchmark object.
 | :--- | :--- | :--- |
 | **`output_dir`** | results | Folder where CSV result files are saved. |
 | **`cv`** | 5 | Cross-validation folds (supervised only). |
-| **`avg_steps`** | 10 | Number of random restarts / seeds to average over. |
-| **`eval_type`** | both | Number of random restarts / seeds to average over. |
-| **`metrics`** | ["CLSACC", "NMI", "ACC", "AUC"] | "supervised", "unsupervised", or "both". |
+| **`avg_steps`** | 10 | Number of random restarts / seeds to average over. |
+| **`eval_type`** | both | "supervised", "unsupervised", or "both". |
+| **`metrics`** | ["CLSACC", "NMI", "ACC", "AUC"] | Evaluation metrics to calculate. |
 | **`experiments`** | ["10Percent", "100Percent"] | Which feature ratio grids to evaluate. |
 
 ### ⚙️ `run(datasets, methods, classifier=None)`
@@ -122,6 +133,16 @@ Initializes the evaluation and benchmark object.
 | **`methods`** | List[dict] | [{"name": str, "func": callable, "stochastic": bool}, ...] |
 | **`classifier`** | sklearn classifier | Classifier for supervised eval (default: RandomForestClassifier) |
 
+### ⚙️ `timer(methods, vary_param='features', time_limit=3600)`
+
+Runs a runtime analysis on the methods.
+
+| Argument | Type | Description |
+| :--- | :--- | :--- |
+| **`methods`** | List[dict] | [{"name": str, "func": callable, "stochastic": bool}, ...] |
+| **`vary_param`** | str | "features", "instances", or "both". |
+| **`time_limit`** | 3600 | Time limit in seconds; a method's remaining runs are skipped after its first run exceeds this limit. |
+
 # Dashboard
 
 There is a Feature Selection Evaluation Dashboard based on the benchmarks provided by FSEVAL, available on:
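The tables above describe the `methods` entries only by their shape. The sketch below shows what one complete entry might look like; the `variance_score` selector is a hypothetical example, and the assumption that the callable is invoked on the data matrix (as `func(X)` in the new `timer()` implementation further below) is illustrative rather than taken from the package documentation.

```python
import numpy as np

# Hypothetical feature selector: scores features by their variance.
# Illustrative only; the exact callable signature FSEVAL expects is not
# spelled out in this diff. timer() calls it as func(X).
def variance_score(X):
    return np.var(X, axis=0)  # one score per feature, higher = more relevant

methods_list = [
    {
        'name': 'VarianceScore',  # label used in the result files
        'func': variance_score,   # the callable to benchmark
        'stochastic': False       # deterministic, so repeats need not be averaged
    },
]
```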
@@ -39,7 +39,7 @@ import numpy as np
 
 if __name__ == "__main__":
 
-    # The 23 real datasets
+    # The 23 benchmark datasets
    DATASETS_TO_RUN = [
        'ALLAML', 'CLL_SUB_111', 'COIL20', 'Carcinom', 'GLIOMA', 'GLI_85',
        'Isolet', 'ORL', 'Prostate_GE', 'SMK_CAN_187', 'TOX_171', 'Yale',
@@ -50,7 +50,7 @@ if __name__ == "__main__":
     # Initialize FSEVAL
     evaluator = FSEVAL(output_dir="benchmark_results", avg_steps=10)
 
-    # Configuration for methods using the class internal random_baseline
+    # Configuration for methods
     methods_list = [
         {
             'name': 'Random',
@@ -64,8 +64,19 @@ if __name__ == "__main__":
         }
     ]
 
-    # Run Benchmark (Defaults to RF)
+    # --- 1. Run Standard Benchmark ---
+    # Evaluates methods on real-world datasets across different feature scales
     evaluator.run(DATASETS_TO_RUN, methods_list)
+
+    # --- 2. Run Runtime Analysis ---
+    # Performs scalability testing on synthetic data with a time cap.
+    # vary_param='both' triggers both 'features' and 'instances' experiments.
+    print("\n>>> Starting Scalability Analysis...")
+    evaluator.timer(
+        methods=methods_list,
+        vary_param='both',
+        time_limit=3600  # 1 hour limit
+    )
 ```
 
 ## Data Loading
@@ -84,9 +95,9 @@ Initializes the evaluation and benchmark object.
 | :--- | :--- | :--- |
 | **`output_dir`** | results | Folder where CSV result files are saved. |
 | **`cv`** | 5 | Cross-validation folds (supervised only). |
-| **`avg_steps`** | 10 | Number of random restarts / seeds to average over. |
-| **`eval_type`** | both | Number of random restarts / seeds to average over. |
-| **`metrics`** | ["CLSACC", "NMI", "ACC", "AUC"] | "supervised", "unsupervised", or "both". |
+| **`avg_steps`** | 10 | Number of random restarts / seeds to average over. |
+| **`eval_type`** | both | "supervised", "unsupervised", or "both". |
+| **`metrics`** | ["CLSACC", "NMI", "ACC", "AUC"] | Evaluation metrics to calculate. |
 | **`experiments`** | ["10Percent", "100Percent"] | Which feature ratio grids to evaluate. |
 
 ### ⚙️ `run(datasets, methods, classifier=None)`
@@ -99,6 +110,16 @@ Initializes the evaluation and benchmark object.
 | **`methods`** | List[dict] | [{"name": str, "func": callable, "stochastic": bool}, ...] |
 | **`classifier`** | sklearn classifier | Classifier for supervised eval (default: RandomForestClassifier) |
 
+### ⚙️ `timer(methods, vary_param='features', time_limit=3600)`
+
+Runs a runtime analysis on the methods.
+
+| Argument | Type | Description |
+| :--- | :--- | :--- |
+| **`methods`** | List[dict] | [{"name": str, "func": callable, "stochastic": bool}, ...] |
+| **`vary_param`** | str | "features", "instances", or "both". |
+| **`time_limit`** | 3600 | Time limit in seconds; a method's remaining runs are skipped after its first run exceeds this limit. |
+
 # Dashboard
 
 There is a Feature Selection Evaluation Dashboard based on the benchmarks provided by FSEVAL, available on:
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "sdufseval"
-version = "1.0.1"
+version = "1.0.2"
 authors = [
   { name="Muhammad Rajabinasab", email="muhammad.rajabinasab@outlook.com" },
 ]
@@ -1,5 +1,6 @@
 import os
 import math
+import time
 import warnings
 import numpy as np
 import pandas as pd
@@ -107,6 +108,101 @@ class FSEVAL:
                 # Save/Update results for this method/dataset
                 self._save_results(name, ds_results)
 
+
+    def timer(self, methods, vary_param='both', time_limit=3600):
+        """
+        Runs a standalone runtime analysis experiment with a time cap.
+
+        Args:
+            methods: List of dicts {'name': str, 'func': callable}.
+            vary_param: 'features', 'instances', or 'both'.
+            time_limit: Max seconds per method before it is skipped.
+        """
+
+        # Determine which experiments to run
+        experiments = []
+        if vary_param in ['features', 'both']:
+            experiments.append({
+                'name': 'features',
+                'fixed_val': 100,
+                'range': range(1000, 20001, 500),
+                'file': 'time_analysis_features.csv'
+            })
+        if vary_param in ['instances', 'both']:
+            experiments.append({
+                'name': 'instances',
+                'fixed_val': 100,
+                'range': range(1000, 20001, 500),
+                'file': 'time_analysis_instances.csv'
+            })
+
+        for exp in experiments:
+            vary_type = exp['name']
+            val_range = exp['range']
+            filename = os.path.join(self.output_dir, exp['file'])
+
+            # Tracking for this specific experiment
+            timed_out_methods = set()
+            results = {m['name']: [] for m in methods}
+
+            print(f"\n--- Starting Experiment: Varying {vary_type} ---")
+            print(f"Time limit: {time_limit}s | Output: {filename}")
+
+            for val in val_range:
+                # 1. Generate synthetic data based on vary_param
+                if vary_type == 'features':
+                    n_samples, n_features = exp['fixed_val'], val
+                else:
+                    n_samples, n_features = val, exp['fixed_val']
+
+                try:
+                    X = np.random.rand(n_samples, n_features)
+                except MemoryError:
+                    print(f"  FATAL: MemoryError: Failed to allocate {n_samples}x{n_features} data.")
+                    for m in methods:
+                        results[m['name']].append(-1 if m['name'] in timed_out_methods else np.nan)
+                    continue
+
+                # 2. Run each method
+                for m_info in methods:
+                    name = m_info['name']
+                    func = m_info['func']
+
+                    # Check if method has already timed out in this experiment
+                    if name in timed_out_methods:
+                        results[name].append(-1)
+                        continue
+
+                    try:
+                        start_time = time.time()
+
+                        # Execute the method (assuming benchmark format)
+                        func(X)
+
+                        duration = time.time() - start_time
+
+                        if duration > time_limit:
+                            print(f"  - {name:<18}: {duration:.4f}s (TIMEOUT - skipping future runs)")
+                            timed_out_methods.add(name)
+                        else:
+                            print(f"  - {name:<18}: {duration:.4f}s")
+
+                        results[name].append(duration)
+
+                    except Exception as e:
+                        print(f"  - {name:<18}: FAILED ({type(e).__name__})")
+                        results[name].append(np.nan)
+
+            # 3. Save results to CSV
+            try:
+                df_results = pd.DataFrame.from_dict(results, orient='index', columns=list(val_range))
+                df_results.index.name = 'Method'
+                df_results.to_csv(filename)
+                print(f"\n--- Results saved to {filename} ---")
+            except Exception as e:
+                print(f"\n--- FAILED to save results: {e} ---")
+
+
     def _save_results(self, method_name, ds_results):
         """Aggregates repeats and saves to disk after each dataset."""
         for scale, metrics in ds_results.items():
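The CSV layout produced by the new `timer()` method comes from `pandas.DataFrame.from_dict(..., orient='index')`: one row per method and one column per problem size. A minimal sketch of that call with made-up timings (the `MethodB` name and all numbers are illustrative, not benchmark results):

```python
import pandas as pd

# Illustrative timings in seconds; -1 marks runs skipped after a method
# first exceeded the time limit, mirroring the convention used in timer().
results = {
    'Random':  [0.01, 0.02, 0.03],
    'MethodB': [0.50, 1.20, -1],
}
sizes = [1000, 1500, 2000]  # values of the varied parameter (features or instances)

df = pd.DataFrame.from_dict(results, orient='index', columns=sizes)
df.index.name = 'Method'
df.to_csv('time_analysis_features.csv')  # same shape as the files timer() writes
```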