metacountregressor 0.1.78__py3-none-any.whl → 0.1.83__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- metacountregressor/app_main.py +258 -0
- metacountregressor/helperprocess.py +257 -5
- metacountregressor/main.py +269 -61
- metacountregressor/metaheuristics.py +22 -11
- metacountregressor/setup.py +3 -2
- metacountregressor/solution.py +555 -214
- {metacountregressor-0.1.78.dist-info → metacountregressor-0.1.83.dist-info}/METADATA +256 -35
- {metacountregressor-0.1.78.dist-info → metacountregressor-0.1.83.dist-info}/RECORD +11 -10
- {metacountregressor-0.1.78.dist-info → metacountregressor-0.1.83.dist-info}/WHEEL +1 -1
- {metacountregressor-0.1.78.dist-info → metacountregressor-0.1.83.dist-info}/LICENSE.txt +0 -0
- {metacountregressor-0.1.78.dist-info → metacountregressor-0.1.83.dist-info}/top_level.txt +0 -0
metacountregressor/app_main.py

@@ -0,0 +1,258 @@
+import warnings
+import argparse
+import csv
+import faulthandler
+import ast
+from typing import Any
+import cProfile
+import numpy as np
+import pandas as pd
+from pandas import DataFrame
+from pandas.io.parsers import TextFileReader
+import helperprocess
+from metaheuristics import (differential_evolution,
+                            harmony_search,
+                            simulated_annealing)
+from solution import ObjectiveFunction
+
+
+warnings.simplefilter("ignore")
+
+faulthandler.enable()
+
+
+def convert_df_columns_to_binary_and_wide(df):
+    columns = list(df.columns)
+
+    df = pd.get_dummies(df, columns=columns, drop_first=True)
+    return df
+
+
+def process_arguments():
+    '''
+    TRYING TO TURN THE CSV FILES INTO RELEVANT ARGS
+    '''
+    try:
+        data_characteristic = pd.read_csv('problem_data.csv')
+        analyst_d = pd.read_csv('decisions.csv')
+        hyper = pd.read_csv('setup_hyper.csv')
+    except Exception as e:
+        print(e)
+        print('Files Have Not Been Set Up Yet..')
+        print('Run the App')
+        exit()
+
+    new_data = {'data': data_characteristic,
+                'analyst': analyst_d,
+                'hyper': hyper}
+    return new_data
+
+def main(args, **kwargs):
+    '''METACOUNT REGRESSOR TESTING ENVIRONMENT'''
+
+
+
+
+    print('the args is:', args)
+    print('the kwargs is', kwargs)
+
+    # removing junk files if specicified
+    helperprocess.remove_files(args.get('removeFiles', True))
+
+    # do we want to run a test
+
+
+    data_info = process_arguments()
+    data_info['hyper']
+    data_info['analyst']
+    data_info['data']['Y']
+    #data_info['data']['Group'][0]
+    #data_info['data']['Panel'][0]
+    args['decisions'] = data_info['analyst']
+    grouped_c = data_info['data']['Grouped'][0]
+    if isinstance(data_info['data']['Grouped'][0], str):
+        args['group'] = data_info['data']['Grouped'][0]
+        args['ID'] = data_info['data']['Panel'][0]
+    if isinstance(data_info['data']['Panel'][0], str):
+        args['panels'] = data_info['data']['Panel'][0]
+
+    df = pd.read_csv(str(data_info['data']['Problem'][0]))
+    x_df = df.drop(columns=[data_info['data']['Y'][0]])
+    # drop the columns of x_df where column is string exclude the column stype args['group']
+    exclude_column = args['group']
+    columns_to_keep = x_df.dtypes != 'object'
+    columns_to_keep |= (x_df.columns == exclude_column)
+    x_df = x_df.loc[:, columns_to_keep]
+    y_df = df[[data_info['data']['Y'][0]]]
+    y_df.rename(columns={data_info['data']['Y'][0]: "Y"}, inplace=True)
+
+    manual_fit_spec = None  # TODO add in manual fit
+    if args['Keep_Fit'] == str(2) or args['Keep_Fit'] == 2:
+        if manual_fit_spec is None:
+            args['Manual_Fit'] = None
+        else:
+            print('fitting manually')
+            args['Manual_Fit'] = manual_fit_spec
+    if args['problem_number'] == str(8) or args['problem_number'] == 8:
+        print('Maine County Dataset.')
+        args['group'] = 'county'
+        args['panels'] = 'element_ID'
+        args['ID'] = 'element_ID'
+        args['_max_characteristics'] = 55
+    elif args['problem_number'] == str(9) or args['problem_number'] == 9:
+        args['group'] = 'group'
+        args['panels'] = 'ind_id'
+        args['ID'] = 'ind_id'
+
+
+
+    args['complexity_level'] = args.get('complexity_level', 6)
+
+
+    # Initialize AnalystSpecs to None if not manually provided
+    args['AnalystSpecs'] = args.get('AnalystSpecs', None)
+
+    if args['algorithm'] == 'sa':
+        args_hyperparameters = {'alpha': float(args['temp_scale']),
+                                'STEPS_PER_TEMP': int(args['steps']),
+                                'INTL_ACPT': 0.5,
+                                '_crossover_perc': args['crossover'],
+                                'MAX_ITERATIONS': int(args['_max_imp']),
+                                '_num_intl_slns': 25,
+                                'Manual_Fit': args['Manual_Fit'],
+                                'MP': int(args['MP'])}
+        helperprocess.entries_to_remove(('crossover', '_max_imp', '_hms', '_hmcr', '_par'), args)
+        print(args)
+
+        obj_fun = ObjectiveFunction(x_df, y_df, **args)
+
+        results = simulated_annealing(obj_fun, None, **args_hyperparameters)
+
+        helperprocess.results_printer(results, args['algorithm'], int(args['is_multi']))
+
+        if args['dual_complexities']:
+            args['complexity_level'] = args['secondary_complexity']
+            obj_fun = ObjectiveFunction(x_df, y_df, **args)
+            results = simulated_annealing(obj_fun, None, **args_hyperparameters)
+            helperprocess.results_printer(results, args['algorithm'], int(args['is_multi']))
+
+    elif args['algorithm'] == 'hs':
+        args['_mpai'] = 1
+
+        obj_fun = ObjectiveFunction(x_df, y_df, **args)
+        args_hyperparameters = {
+            'Manual_Fit': args['Manual_Fit'],
+            'MP': int(args['MP'])
+        }
+
+        results = harmony_search(obj_fun, None, **args_hyperparameters)
+        helperprocess.results_printer(results, args['algorithm'], int(args['is_multi']))
+
+        if args.get('dual_complexities', 0):
+            args['complexity_level'] = args['secondary_complexity']
+            obj_fun = ObjectiveFunction(x_df, y_df, **args)
+            results = harmony_search(obj_fun, None, **args_hyperparameters)
+            helperprocess.results_printer(results, args['algorithm'], int(args['is_multi']))
+
+
+    elif args['algorithm'] == 'de':
+        # force variables
+        args['must_include'] = args.get('force', [])
+
+        args_hyperparameters = {'_AI': args.get('_AI', 2),
+                                '_crossover_perc': float(args['crossover']),
+                                '_max_iter': int(args['_max_imp']),
+                                '_pop_size': int(args['_hms']), 'instance_number': int(args['line']),
+                                'Manual_Fit': args['Manual_Fit'],
+                                'MP': int(args['MP'])
+                                }
+
+        args_hyperparameters = dict(args_hyperparameters)
+
+        helperprocess.entries_to_remove(('crossover', '_max_imp', '_hms', '_hmcr', '_par'), args)
+        obj_fun = ObjectiveFunction(x_df, y_df, **args)
+
+        results = differential_evolution(obj_fun, None, **args_hyperparameters)
+
+        helperprocess.results_printer(results, args['algorithm'], int(args['is_multi']))
+
+        if args['dual_complexities']:
+            args['complexity_level'] = args['secondary_complexity']
+            obj_fun = ObjectiveFunction(x_df, y_df, **args)
+            results = differential_evolution(obj_fun, None, **args_hyperparameters)
+            helperprocess.results_printer(results, args['algorithm'], int(args['is_multi']))  # TODO FIX This
+
+
+if __name__ == '__main__':
+    """Loading in command line args. """
+    alg_parser = argparse.ArgumentParser(prog='algorithm', epilog='algorithm specific arguments')
+    alg_parser.add_argument('-AI', default=2, help='adjustment index. For the allowable movement of the algorithm')
+    alg_parser.print_help()
+    parser = argparse.ArgumentParser(prog='main',
+                                     epilog=main.__doc__,
+                                     formatter_class=argparse.RawDescriptionHelpFormatter, conflict_handler='resolve')
+
+    parser.add_argument('-line', type=int, default=1,
+                        help='line to read in csv to pass in argument')
+
+    if vars(parser.parse_args())['line'] is not None:
+        reader = csv.DictReader(open('set_data.csv', 'r'))
+        args = list()
+        line_number_obs = 0
+        for dictionary in reader:  # TODO find a way to handle multiple args
+            args = dictionary
+            if line_number_obs == int(vars(parser.parse_args())['line']):
+                break
+            line_number_obs += 1
+        args = dict(args)
+
+        for key, value in args.items():
+            try:
+                # Attempt to parse the string value to a Python literal if value is a string.
+                if isinstance(value, str):
+                    value = ast.literal_eval(value)
+            except (ValueError, SyntaxError):
+                # If there's a parsing error, value remains as the original string.
+                pass
+
+            # Add the argument to the parser with the potentially updated value.
+            parser.add_argument(f'-{key}', default=value)
+
+        for i, action in enumerate(parser._optionals._actions):
+            if "-algorithm" in action.option_strings:
+                parser._optionals._actions[i].help = "optimization algorithm"
+
+        override = True
+        if override:
+            print('todo turn off, in testing phase')
+            parser.add_argument('-problem_number', default='10')
+        print('did it make it')
+        if 'algorithm' not in args:
+            parser.add_argument('-algorithm', type=str, default='hs',
+                                help='optimization algorithm')
+        elif 'Manual_Fit' not in args:
+            parser.add_argument('-Manual_Fit', action='store_false', default=None,
+                                help='To fit a model manually if desired.')
+
+        parser.add_argument('-seperate_out_factors', action='store_false', default=False,
+                            help='Trie of wanting to split data that is potentially categorical as binary'
+                                 ' we want to split the data for processing')
+        parser.add_argument('-supply_csv', type=str, help='enter the name of the csv, please include it as a full directorys')
+
+    else:  # DIDN"T SPECIFY LINES TRY EACH ONE MANNUALY
+        parser.add_argument('-com', type=str, default='MetaCode',
+                            help='line to read csv')
+
+    # Check the args
+    parser.print_help()
+    args = vars(parser.parse_args())
+    print(type(args))
+    # TODO add in chi 2 and df in estimation and compare degrees of freedom this needs to be done in solution
+
+    # Print the args.
+    profiler = cProfile.Profile()
+    profiler.runcall(main, args)
+    profiler.print_stats(sort='time')
+    #TOO MAX_TIME
+
+
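The new `app_main.py` entry point is CSV-driven: the `-line` flag selects a row of `set_data.csv`, every column of that row is registered as an argparse argument, and `main()` then loads `problem_data.csv`, `decisions.csv`, and `setup_hyper.csv` through `process_arguments()`. As a rough sketch of how this could be exercised (the column names mirror keys that `main()` looks up; the values are illustrative guesses, not taken from the diff):

    # Sketch: write a one-row set_data.csv for app_main.py to consume.
    # Keys mirror lookups in main(); values are illustrative, not from the diff.
    import csv

    row = {'algorithm': 'hs', 'problem_number': '10', 'Keep_Fit': '1',
           'is_multi': '1', 'MP': '1', 'dual_complexities': '0'}
    with open('set_data.csv', 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=row.keys())
        writer.writeheader()
        writer.writerow(row)

    # Then, with problem_data.csv, decisions.csv and setup_hyper.csv in place:
    #   python app_main.py -line 1
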
metacountregressor/helperprocess.py

@@ -1,10 +1,28 @@
+from os.path import exists
 import numpy as np
 import pandas as pd
 import csv
 import matplotlib.pyplot as plt
+from scipy import stats as st
+from sklearn.preprocessing import StandardScaler
+
 
 plt.style.use('https://github.com/dhaitz/matplotlib-stylesheets/raw/master/pitayasmoothie-dark.mplstyle')
 
+
+
+
+
+from itertools import product
+
+# Function to create a list of dictionaries from a parameter grid
+def generate_param_combinations(param_grid):
+    keys = param_grid.keys()
+    values = param_grid.values()
+    combinations = [dict(zip(keys, v)) for v in product(*values)]
+    return combinations
+
+
 ##Select the best Features Based on RF
 def select_features(X_train, y_train, n_f=16):
     try:
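The new `generate_param_combinations` helper expands a parameter grid into the cross-product of its values, one dict per combination. For example (grid values here are illustrative; `helperprocess` is assumed importable as a flat module, as `app_main.py` imports it):

    from helperprocess import generate_param_combinations

    param_grid = {'alpha': [0.1, 0.5], 'steps': [10, 20]}
    print(generate_param_combinations(param_grid))
    # [{'alpha': 0.1, 'steps': 10}, {'alpha': 0.1, 'steps': 20},
    #  {'alpha': 0.5, 'steps': 10}, {'alpha': 0.5, 'steps': 20}]
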
@@ -77,6 +95,7 @@ def findCorrelation(corr, cutoff=0.9, exact=None):
     findCorrelation(R1, cutoff=0.6, exact=True) # ['x1', 'x5', 'x4']
     """
 
+
 def _findCorrelation_fast(corr, avg, cutoff):
 
     combsAboveCutoff = corr.where(lambda x: (np.tril(x) == 0) & (x > cutoff)).stack().index
@@ -151,6 +170,220 @@ def remove_files(yes=1):
         os.remove('pop_log.csv')
 
 
+# Function to process the DataFrame
+'''
+Example usuage
+# Configuration dictionary
+config = {
+    'Age': {
+        'type': 'bin',
+        'bins': [0, 18, 35, 50, 100],
+        'labels': ['Child', 'YoungAdult', 'MiddleAged', 'Senior'],
+        'prefix': 'Age_Binned'
+    },
+    'Income': {
+        'type': 'bin',
+        'bins': [0, 2000, 5000, 10000],
+        'labels': ['Low', 'Medium', 'High'],
+        'prefix': 'Income_Binned'
+    },
+    'Gender': {
+        'type': 'one-hot',
+        'prefix': 'Gender'
+    },
+    'Score': {
+        'type': 'none'
+    }
+}
+'''
+def null_handler(vari):
+    if vari in locals():
+        return vari
+    else:
+        print(f'{vari} does not exist, setting None..')
+        return None
+
+
+def set_up_analyst_constraints(data_characteristic, model_terms, variable_decisions_alt = None):
+
+
+    name_data_characteristics = data_characteristic.columns.tolist()
+    # Get non-None values as a list
+    non_none_terms = [value for value in model_terms.values() if value is not None]
+    # how to make name_data_characteristics - non_none_terms
+
+    result = [item for item in name_data_characteristics if item not in non_none_terms]
+    distu = ['normal', 'uniform', 'triangular']
+    tra = ['no', 'sqrt', 'arcsinh']
+    if model_terms.get('group') is None:
+        print('cant have grouped rpm, removing level 4 from every item')
+        MAKE_ALL_4_FALSE = True
+    else:
+        MAKE_ALL_4_FALSE = False
+
+    variable_decisions = {
+        name: {
+            'levels': list(range(6)),
+            'Distributions': distu,
+            'Transformations': tra
+        }
+        for name in result
+    }
+    # Override elements in the original dictionary with the alt dictionary
+    if variable_decisions_alt is not None:
+        for key, alt_value in variable_decisions_alt.items():
+            if key in variable_decisions:
+                # Update the existing entry
+                variable_decisions[key].update(alt_value)
+            else:
+                # Add new entry if it doesn't exist
+                variable_decisions[key] = alt_value
+    # Prepare the data for the DataFrame
+    rows = []
+    for column_name, details in variable_decisions.items():
+        # Create a row dictionary
+        row = {'Column': column_name}
+
+        # Add levels as True/False for Level 0 through Level 5
+        for level in range(6):  # Assuming Level 0 to Level 5
+
+            if level == 4 and MAKE_ALL_4_FALSE:
+                row[f'Level {level}'] = False
+            else:
+                row[f'Level {level}'] = level in details['levels']
+
+        # Add distributions and transformations directly
+
+        # Add distributions and transformations as comma-separated strings
+        row['Distributions'] = str(details['Distributions'])
+        row['Transformations'] = str(details['Transformations'])
+
+        rows.append(row)
+
+    # Create the DataFrame
+    df = pd.DataFrame(rows)
+
+    data_new = data_characteristic.rename(columns={v: k for k, v in model_terms.items() if v in data_characteristic.columns})
+    return df, data_new
+
+# Function to guess Low, Medium, High ranges
+def guess_low_medium_high(column_name, series):
+    # Compute the tertiles (33rd and 66th percentiles)
+    #print('did it make it...')
+    #mode_value = st.mode(series)  # Get the most frequent value
+    #print('good')
+    # series = pd.to_numeric(series, errors='coerce').fillna(mode_value)
+    low_threshold = np.quantile(series, 0.33)
+    high_threshold = np.quantile(series, 0.66)
+
+    # Define the bins and labels
+    bins = [np.min(series) - 1, low_threshold, high_threshold, np.max(series)]
+    # Handle duplicate bins by adjusting labels
+    if len(set(bins)) < len(bins):  # Check for duplicate bin edges
+        if low_threshold == high_threshold:
+            # Collapse to two bins (Low and High)
+            bins = [np.min(series) - 1, low_threshold, np.max(series)]
+            labels = ['Low', 'High']
+        else:
+            # Collapse to three unique bins
+            bins = sorted(set(bins))  # Remove duplicate edges
+            labels = [f'Bin {i + 1}' for i in range(len(bins) - 1)]
+    else:
+        # Standard case: Low, Medium, High
+        labels = ['Low', 'Medium', 'High']
+
+    return {
+        'type': 'bin',
+        'bins': bins,
+        'labels': labels,
+        'prefix': f'{column_name}'
+    }
+
+def transform_dataframe(df, config):
+    output_df = pd.DataFrame()
+
+    for column, settings in config.items():
+        if settings['type'] == 'bin':
+            # Apply binning
+            # Get unique bins (remove duplicates)
+            unique_bins = sorted(set(settings['bins']))
+
+            # Adjust labels if necessary
+            if len(unique_bins) - 1 != len(settings['labels']):
+                print(f"Adjusting labels to match bins: {len(unique_bins) - 1} bins detected.")
+                labels = [f'Bin {i+1}' for i in range(len(unique_bins) - 1)]
+            else:
+                labels = settings['labels']
+
+            # Perform the binning
+            binned_d = pd.cut(
+                df[column],
+                bins=unique_bins,  # Deduplicated bins
+                labels=labels,  # Adjusted or original labels
+                right=False  # Adjust based on whether to include the right edge
+            )
+            # One-hot encode the binned column
+            binned_dummies = pd.get_dummies(binned_d, prefix=settings['prefix'])
+            output_df = pd.concat([output_df, binned_dummies], axis=1)
+
+        elif settings['type'] == 'one-hot':
+            # One-hot encode the column
+            one_hot_dummies = pd.get_dummies(df[column], prefix=settings.get('prefix', column))
+            output_df = pd.concat([output_df, one_hot_dummies], axis=1)
+
+        elif settings['type'] == 'continuous':
+            # Apply function to continuous data
+            data = df[column]
+            if 'bounds' in settings:
+                # Apply bounds filtering
+                lower, upper = settings['bounds']
+                data = data[(data >= lower) & (data <= upper)]
+            if 'apply_func' in settings:
+                # Apply custom function
+                data = data.apply(settings['apply_func'])
+            output_df[column] = data
+
+        elif settings['type'] == 'none':
+            # Leave the column unchanged
+            if column in df.columns:
+
+                output_df = pd.concat([output_df, df[[column]]], axis=1)
+            else:
+                print(f'config variable {column} is not in the data. Ignoring ...')
+    return output_df
+
+# Helper function to guess column type and update `config`
+def guess_column_type(column_name, series):
+
+    if series.empty:
+        raise ValueError(f"The column {column_name} contains no numeric data.")
+
+    if series.dtype == 'object' or series.dtype.name == 'category':
+        # If the column is categorical (e.g., strings), assume one-hot encoding
+        return {'type': 'one-hot', 'prefix': column_name}
+    elif pd.api.types.is_numeric_dtype(series):
+        unique_values = series.nunique()
+
+        if unique_values < 5:
+            return {'type': 'one-hot', 'prefix': column_name}
+
+        elif np.max(series) - np.min(series) > 20:
+            print('made it through here')
+            # If there are few unique values, assume binning with default bins
+            return guess_low_medium_high(column_name, series)
+        else:
+            # # Otherwise, assume continuous data with normalization
+            # Otherwise, fallback to continuous standardization
+            return {
+                'type': 'continuous',
+                'apply_func': (lambda x: (x - series.mean()) / series.std())  # Z-Score Standardization
+            }
+    else:
+        # Default fallback (leave the column unchanged)
+        return {'type': 'none'}
+
+
+
 def as_wide_factor(x_df, yes=1, min_factor=2, max_factor=8, keep_original=0, exclude=[]):
     if not yes:
         return x_df
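Taken together, `guess_column_type`, `guess_low_medium_high`, and `transform_dataframe` form a small auto-preprocessing pipeline: infer a per-column config, then expand the frame into binned, one-hot, or z-scored columns. A minimal sketch under that reading (the toy data is illustrative; `helperprocess` is assumed importable as a flat module):

    import pandas as pd
    from helperprocess import guess_column_type, transform_dataframe

    df = pd.DataFrame({'Age': [12, 25, 44, 70, 33, 51],           # wide numeric range -> tertile-binned
                       'Gender': ['M', 'F', 'F', 'M', 'F', 'M'],  # object dtype -> one-hot
                       'Score': [0.2, 0.5, 0.9, 0.1, 0.7, 0.4]})  # narrow range -> z-scored

    # Infer a config entry for every column, then transform in one pass.
    config = {col: guess_column_type(col, df[col]) for col in df.columns}
    wide = transform_dataframe(df, config)
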
@@ -173,7 +406,7 @@ def PCA_code(X, n_components=5):
 
 
 def interactions(df, keep=None, drop_this_perc=0.6, interact = False):
-
+    full_columns = df.columns
    if interact:
         interactions_list = []
         for i, var_i in enumerate(df.columns):
@@ -199,14 +432,31 @@ def interactions(df, keep=None, drop_this_perc=0.6, interact = False):
         df = pd.concat([df, df_interactions], axis=1, sort=False)
 
     # second
-
+    # Remove `keep` columns from the correlation matrix
+    if keep is not None:
+        missing_columns = [col for col in keep if col not in df.columns]
+
+        if missing_columns:
+            print(f"The following columns are not in the DataFrame and will be ignored: {missing_columns}")
+            keep = [col for col in keep if col not in missing_columns]
+        df_corr = df.drop(columns=keep, errors='ignore', inplace=False)  # Exclude `keep` columns
+    else:
+        df_corr = df
+
+    # Compute the absolute correlation matrix
+    corr_matrix = df_corr.corr().abs()
+
+    # Keep only the upper triangle of the correlation matrix
     upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
 
-    # Find features with correlation greater than
+    # Find features with correlation greater than the threshold
     to_drop = [column for column in upper.columns if any(upper[column] > drop_this_perc)]
+
+    # Ensure `keep` columns are not dropped
     if keep is not None:
-        to_drop = [column for column in to_drop if column not in
-
+        to_drop = [column for column in to_drop if column not in full_columns]
+
+    # Drop the identified features
     df.drop(to_drop, axis=1, inplace=True)
 
     return df
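In the reworked `interactions`, the correlation matrix is computed on a copy with the `keep` columns removed, so protected columns can no longer trigger a drop; and since `full_columns` is captured before any interaction terms are appended, the final filter means that when `keep` is supplied, only newly created interaction columns remain candidates for dropping. A sketch of that behaviour (toy data and threshold are illustrative):

    import numpy as np
    import pandas as pd
    from helperprocess import interactions  # flat-module import, as in app_main.py

    rng = np.random.default_rng(0)
    a = rng.normal(size=100)
    df = pd.DataFrame({'a': a,
                       'b': a + rng.normal(scale=0.01, size=100),  # near-perfectly correlated with 'a'
                       'c': rng.normal(size=100)})

    interactions(df.copy(), drop_this_perc=0.6)              # 'b' is dropped
    interactions(df.copy(), keep=['c'], drop_this_perc=0.6)  # nothing dropped: 'b' is in full_columns
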
@@ -330,3 +580,5 @@ def entries_to_remove(entries, the_dict):
     for key in entries:
         if key in the_dict:
             del the_dict[key]
+
+
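For reference, `entries_to_remove` (whose tail this final hunk pads with blank lines) deletes a tuple of keys from a dict in place, silently skipping keys that are absent; it is how `app_main.py` strips consumed hyperparameters out of `args`. The values below are illustrative:

    args = {'crossover': 0.2, '_max_imp': 1000, 'algorithm': 'hs'}
    entries_to_remove(('crossover', '_max_imp', '_hms'), args)
    # args is now {'algorithm': 'hs'}; the absent '_hms' key is ignored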