dragon-ml-toolbox 20.2.0__py3-none-any.whl → 20.3.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (109)
  1. {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/METADATA +1 -1
  2. dragon_ml_toolbox-20.3.0.dist-info/RECORD +143 -0
  3. ml_tools/ETL_cleaning/__init__.py +5 -1
  4. ml_tools/ETL_cleaning/_basic_clean.py +1 -1
  5. ml_tools/ETL_engineering/__init__.py +5 -1
  6. ml_tools/GUI_tools/__init__.py +5 -1
  7. ml_tools/IO_tools/_IO_loggers.py +12 -4
  8. ml_tools/IO_tools/__init__.py +5 -1
  9. ml_tools/MICE/__init__.py +8 -2
  10. ml_tools/MICE/_dragon_mice.py +1 -1
  11. ml_tools/ML_callbacks/__init__.py +5 -1
  12. ml_tools/ML_chain/__init__.py +5 -1
  13. ml_tools/ML_configuration/__init__.py +7 -1
  14. ml_tools/ML_configuration/_training.py +65 -1
  15. ml_tools/ML_datasetmaster/__init__.py +5 -1
  16. ml_tools/ML_datasetmaster/_base_datasetmaster.py +31 -20
  17. ml_tools/ML_datasetmaster/_datasetmaster.py +26 -9
  18. ml_tools/ML_datasetmaster/_sequence_datasetmaster.py +38 -23
  19. ml_tools/ML_evaluation/__init__.py +5 -1
  20. ml_tools/ML_evaluation_captum/__init__.py +5 -1
  21. ml_tools/ML_finalize_handler/__init__.py +5 -1
  22. ml_tools/ML_inference/__init__.py +5 -1
  23. ml_tools/ML_inference_sequence/__init__.py +5 -1
  24. ml_tools/ML_inference_vision/__init__.py +5 -1
  25. ml_tools/ML_models/__init__.py +21 -6
  26. ml_tools/ML_models/_dragon_autoint.py +302 -0
  27. ml_tools/ML_models/_dragon_gate.py +358 -0
  28. ml_tools/ML_models/_dragon_node.py +268 -0
  29. ml_tools/ML_models/_dragon_tabnet.py +255 -0
  30. ml_tools/ML_models_sequence/__init__.py +5 -1
  31. ml_tools/ML_models_vision/__init__.py +5 -1
  32. ml_tools/ML_optimization/__init__.py +11 -3
  33. ml_tools/ML_optimization/_multi_dragon.py +2 -2
  34. ml_tools/ML_optimization/_single_dragon.py +47 -67
  35. ml_tools/ML_optimization/_single_manual.py +1 -1
  36. ml_tools/ML_scaler/_ML_scaler.py +12 -7
  37. ml_tools/ML_scaler/__init__.py +5 -1
  38. ml_tools/ML_trainer/__init__.py +5 -1
  39. ml_tools/ML_trainer/_base_trainer.py +136 -13
  40. ml_tools/ML_trainer/_dragon_detection_trainer.py +31 -91
  41. ml_tools/ML_trainer/_dragon_sequence_trainer.py +24 -74
  42. ml_tools/ML_trainer/_dragon_trainer.py +24 -85
  43. ml_tools/ML_utilities/__init__.py +5 -1
  44. ml_tools/ML_utilities/_inspection.py +44 -30
  45. ml_tools/ML_vision_transformers/__init__.py +8 -2
  46. ml_tools/PSO_optimization/__init__.py +5 -1
  47. ml_tools/SQL/__init__.py +8 -2
  48. ml_tools/VIF/__init__.py +5 -1
  49. ml_tools/data_exploration/__init__.py +4 -1
  50. ml_tools/data_exploration/_cleaning.py +4 -2
  51. ml_tools/ensemble_evaluation/__init__.py +5 -1
  52. ml_tools/ensemble_inference/__init__.py +5 -1
  53. ml_tools/ensemble_learning/__init__.py +5 -1
  54. ml_tools/excel_handler/__init__.py +5 -1
  55. ml_tools/keys/__init__.py +5 -1
  56. ml_tools/math_utilities/__init__.py +5 -1
  57. ml_tools/optimization_tools/__init__.py +5 -1
  58. ml_tools/path_manager/__init__.py +8 -2
  59. ml_tools/plot_fonts/__init__.py +8 -2
  60. ml_tools/schema/__init__.py +8 -2
  61. ml_tools/schema/_feature_schema.py +3 -3
  62. ml_tools/serde/__init__.py +5 -1
  63. ml_tools/utilities/__init__.py +5 -1
  64. ml_tools/utilities/_utility_save_load.py +38 -20
  65. dragon_ml_toolbox-20.2.0.dist-info/RECORD +0 -179
  66. ml_tools/ETL_cleaning/_imprimir.py +0 -13
  67. ml_tools/ETL_engineering/_imprimir.py +0 -24
  68. ml_tools/GUI_tools/_imprimir.py +0 -12
  69. ml_tools/IO_tools/_imprimir.py +0 -14
  70. ml_tools/MICE/_imprimir.py +0 -11
  71. ml_tools/ML_callbacks/_imprimir.py +0 -12
  72. ml_tools/ML_chain/_imprimir.py +0 -12
  73. ml_tools/ML_configuration/_imprimir.py +0 -47
  74. ml_tools/ML_datasetmaster/_imprimir.py +0 -15
  75. ml_tools/ML_evaluation/_imprimir.py +0 -25
  76. ml_tools/ML_evaluation_captum/_imprimir.py +0 -10
  77. ml_tools/ML_finalize_handler/_imprimir.py +0 -8
  78. ml_tools/ML_inference/_imprimir.py +0 -11
  79. ml_tools/ML_inference_sequence/_imprimir.py +0 -8
  80. ml_tools/ML_inference_vision/_imprimir.py +0 -8
  81. ml_tools/ML_models/_advanced_models.py +0 -1086
  82. ml_tools/ML_models/_imprimir.py +0 -18
  83. ml_tools/ML_models_sequence/_imprimir.py +0 -8
  84. ml_tools/ML_models_vision/_imprimir.py +0 -16
  85. ml_tools/ML_optimization/_imprimir.py +0 -13
  86. ml_tools/ML_scaler/_imprimir.py +0 -8
  87. ml_tools/ML_trainer/_imprimir.py +0 -10
  88. ml_tools/ML_utilities/_imprimir.py +0 -16
  89. ml_tools/ML_vision_transformers/_imprimir.py +0 -14
  90. ml_tools/PSO_optimization/_imprimir.py +0 -10
  91. ml_tools/SQL/_imprimir.py +0 -8
  92. ml_tools/VIF/_imprimir.py +0 -10
  93. ml_tools/data_exploration/_imprimir.py +0 -32
  94. ml_tools/ensemble_evaluation/_imprimir.py +0 -14
  95. ml_tools/ensemble_inference/_imprimir.py +0 -9
  96. ml_tools/ensemble_learning/_imprimir.py +0 -10
  97. ml_tools/excel_handler/_imprimir.py +0 -13
  98. ml_tools/keys/_imprimir.py +0 -11
  99. ml_tools/math_utilities/_imprimir.py +0 -11
  100. ml_tools/optimization_tools/_imprimir.py +0 -13
  101. ml_tools/path_manager/_imprimir.py +0 -15
  102. ml_tools/plot_fonts/_imprimir.py +0 -8
  103. ml_tools/schema/_imprimir.py +0 -10
  104. ml_tools/serde/_imprimir.py +0 -10
  105. ml_tools/utilities/_imprimir.py +0 -18
  106. {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/WHEEL +0 -0
  107. {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/licenses/LICENSE +0 -0
  108. {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
  109. {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/top_level.txt +0 -0
{dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dragon-ml-toolbox
- Version: 20.2.0
+ Version: 20.3.0
  Summary: Complete pipelines and helper tools for data science and machine learning projects.
  Author-email: Karl Luigi Loza Vidaurre <luigiloza@gmail.com>
  License-Expression: MIT
dragon_ml_toolbox-20.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,143 @@
+ dragon_ml_toolbox-20.3.0.dist-info/licenses/LICENSE,sha256=L35WDmmLZNTlJvxF6Vy7Uy4SYNi6rCfWUqlTHpoRMoU,1081
+ dragon_ml_toolbox-20.3.0.dist-info/licenses/LICENSE-THIRD-PARTY.md,sha256=0-HBRMMgKuwtGy6nMJZvIn1fLxhx_ksyyVB2U_iyYZU,2818
+ ml_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ml_tools/constants.py,sha256=3br5Rk9cL2IUo638eJuMOGdbGQaWssaUecYEvSeRBLM,3322
+ ml_tools/ETL_cleaning/__init__.py,sha256=8dsHiguUkI6Ix1759IPdGU3IXcjMz4DyaSCkdYhxxg8,490
+ ml_tools/ETL_cleaning/_basic_clean.py,sha256=2_FhWP-xYgl8s51H3OjYb_sqsW2yX_QZ4kmyrKjbSsc,13892
+ ml_tools/ETL_cleaning/_clean_tools.py,sha256=pizTBK69zHt7HpZc_bcX9KoX2loLDcyQJddf_Kl-Ldo,5129
+ ml_tools/ETL_cleaning/_dragon_cleaner.py,sha256=dge7KQSO4IdeXV4pCCJCb5lhAzR8rmwZPoCscm1A9KY,10272
+ ml_tools/ETL_engineering/__init__.py,sha256=EVIU0skxaH4ZDk8tEkOrxhTMSSA2LI_glhIpzFSxxlg,1007
+ ml_tools/ETL_engineering/_dragon_engineering.py,sha256=D-D6tmhyQ3I9-cXgxLVVbQBRTZoNsWaKPsvcTUaetws,10810
+ ml_tools/ETL_engineering/_transforms.py,sha256=qOxa_vjh3gzS4IiGFqq_0Wnh0ilQO41jRiIp-6Ej4vw,47079
+ ml_tools/GUI_tools/_GUI_tools.py,sha256=vjiBbiU3qCxB4rivBWHNBnq-NhpDZZERslkmi_61WxY,48987
+ ml_tools/GUI_tools/__init__.py,sha256=zjUFxE3AnYPtd_Ptq7UCGQH5bXAM324t03rGQtcYLo4,372
+ ml_tools/IO_tools/_IO_loggers.py,sha256=ytX7toElLb4jzqTk_Lcb3EzeHHWdD82mVXDheKfmML4,9001
+ ml_tools/IO_tools/_IO_save_load.py,sha256=xVeQzrd4r-L9ruFPvO8cV3bvzYHhJ0coOfZMrNWq5rs,4426
+ ml_tools/IO_tools/_IO_utils.py,sha256=quOqBVSi_z0AI7qznCNAcLRB_f4kaI-abANngXUBcYA,4384
+ ml_tools/IO_tools/__init__.py,sha256=Klu0Qf0qNfb7SHG33cs6qqecH5lEnYlXfe_xYdZ1Ry4,474
+ ml_tools/MICE/_MICE_imputation.py,sha256=N1cDwVYfoHvIZz7FLLcW-guZUo8iFKedtkfS7CU6TVE,5318
+ ml_tools/MICE/__init__.py,sha256=-IZv9V06U7BbB3ubu1vgbxtwFy0dV6E-9EDSg6-woio,385
+ ml_tools/MICE/_dragon_mice.py,sha256=k82I3-f4aMuSW7LzTRnuBniEig7A9_vH-Oj7yWum6ss,17817
+ ml_tools/ML_callbacks/__init__.py,sha256=xck_IdLFYCq6Lo2lQqbQd_nOeCDI8nfVyxBaBnQ-wcY,490
+ ml_tools/ML_callbacks/_base.py,sha256=xLVAFOhBHjqnf8a_wKgW1F-tn2u6EqV3IHXsXKTn2NE,3269
+ ml_tools/ML_callbacks/_checkpoint.py,sha256=Ioj9wn8XlsR_S1NnmWbyT9lkO8o2_DcHVMrFtxYJOes,9721
+ ml_tools/ML_callbacks/_early_stop.py,sha256=qzTzxfDCDim0qj7QQ7ykJNIOBWbXtviDptMCczXXy_k,8073
+ ml_tools/ML_callbacks/_scheduler.py,sha256=mn97_VH8Lp37KH3zSgmPemGQV8g-K8GfhRNHTftaNcg,7390
+ ml_tools/ML_chain/__init__.py,sha256=aqSGAJnFYE_ZWbueNneg2z5welBsmGJ0XKi8Ebgw6Eg,554
+ ml_tools/ML_chain/_chaining_tools.py,sha256=BDwTvgJFbJ-wgy3IkP6_SNpNaWpHGXV3PhAM7sYmHeU,13675
+ ml_tools/ML_chain/_dragon_chain.py,sha256=x3fN136C5N9WcXJJW9zkNrBzP8QoBaXpxz7SPF3txjg,5601
+ ml_tools/ML_chain/_update_schema.py,sha256=z1Us7lv6hy6GwSu1mcid50Jmqq3sh91hMQ0LnQjhte8,3806
+ ml_tools/ML_configuration/__init__.py,sha256=ogktFnYxz5jWJkhHS4DVaMldHkt3lT2gw9jx5PQ3d78,2755
+ ml_tools/ML_configuration/_base_model_config.py,sha256=95L3IfobNFMtnNr79zYpDGerC1q1v7M05tWZvTS2cwE,2247
+ ml_tools/ML_configuration/_finalize.py,sha256=l_n13bLu0avMdJ8hNRrH8V_wOBQZM1UGsTydKBkTysM,15047
+ ml_tools/ML_configuration/_metrics.py,sha256=PqBGPO1Y_6ImmYI3TEBJhzipULE854vbvE0AbP5m8zQ,22888
+ ml_tools/ML_configuration/_models.py,sha256=lvuuqvD6DWUzOa3i06NZfrdfOi9bu2e26T_QO6BGMSw,7629
+ ml_tools/ML_configuration/_training.py,sha256=_M_TwouHFNbGrZQtQNAvyG_poSVpmN99cbyUonZsHhk,8969
+ ml_tools/ML_datasetmaster/__init__.py,sha256=UltQzuXnlXVCkD-aeA5TW4IcMVLnQf1_aglawg4WyrI,580
+ ml_tools/ML_datasetmaster/_base_datasetmaster.py,sha256=lmqo9CN09xMu-YKYtKEnC2ZEzkxcZFJ0rS1B7K2-PKY,14691
+ ml_tools/ML_datasetmaster/_datasetmaster.py,sha256=Oy2UE3YJpKTaFwQF5TkQLgLB54-BFw_5b8wIPTxZIKU,19157
+ ml_tools/ML_datasetmaster/_sequence_datasetmaster.py,sha256=cW3fuILZWs-7Yuo4T2fgGfTC4vwho3Gp4ohIKJYS7O0,18452
+ ml_tools/ML_datasetmaster/_vision_datasetmaster.py,sha256=kvSqXYeNBN1JSRfSEEXYeIcsqy9HsJAl_EwFWClqlsw,67025
+ ml_tools/ML_evaluation/__init__.py,sha256=e3c8JNP0tt4Kxc7QSQpGcOgrxf8JAucH4UkJvJxUL2E,1122
+ ml_tools/ML_evaluation/_classification.py,sha256=SmhIxJy81iIFrys36LUoKrH5ey9IqzE-UxR2-tdgseI,28396
+ ml_tools/ML_evaluation/_feature_importance.py,sha256=mTwi3LKom_axu6UFKunELj30APDdhG9GQC2w7I9mYhI,17137
+ ml_tools/ML_evaluation/_loss.py,sha256=1a4O25i3Ya_3naNZNL7ELLUL46BY86g1scA7d7q2UFM,3625
+ ml_tools/ML_evaluation/_regression.py,sha256=hnT2B2_6AnQ7aA7uk-X2lZL9G5JFGCduDXyZbr1gFCA,11037
+ ml_tools/ML_evaluation/_sequence.py,sha256=gUk9Uvmy7MrXkfrriMnfypkgJU5XERHdqekTa2gBaOM,8004
+ ml_tools/ML_evaluation/_vision.py,sha256=abBHQ6Z2GunHNusL3wcLgfI1FVNA6hBUBTq1eOA8FSA,11489
+ ml_tools/ML_evaluation_captum/_ML_evaluation_captum.py,sha256=6g3ymSxJGHXxwIN7WCD2Zi9zxKWEv-Qskd2cCGQQJ5Y,18439
+ ml_tools/ML_evaluation_captum/__init__.py,sha256=DZDoZXexCI49JNl_tTmFfYW4hTUYK5QQLex01wMfhnk,333
+ ml_tools/ML_finalize_handler/_ML_finalize_handler.py,sha256=g-vkHJDTGXZsKOUA-Yfg7EuA1SmaHjzesCPiAyRMg2k,7054
+ ml_tools/ML_finalize_handler/__init__.py,sha256=VQyLbCQUcliAAFiOAsnPhyJ7UVYgbSqAbAnpqeOnRSg,198
+ ml_tools/ML_inference/__init__.py,sha256=MTCbCAruvKMgXdesk9HkiJvUGnw2IH5ps8AIKZTFR7w,462
+ ml_tools/ML_inference/_base_inference.py,sha256=wwmGGBnSCrtVvv30TWBwc3jK8b0Sym43NAPU97a3G6w,7594
+ ml_tools/ML_inference/_chain_inference.py,sha256=1wdKBRqDlW5uhd0bnfpPKftAikaaTYk9brShoX49voU,7708
+ ml_tools/ML_inference/_dragon_inference.py,sha256=OOJhT50bQWdrmX2oggtdBzOMtVNZlmVnOujquRPbq4c,14619
+ ml_tools/ML_inference/_multi_inference.py,sha256=XPVYWTOG8jiMV-iOWYTV0NTZk3fggPYI93kDlrPjtqo,8107
+ ml_tools/ML_inference_sequence/__init__.py,sha256=6kA4SvjnuM9I1rP_SB9BR6NFVQO8BTeCnUaW5-oFDo0,217
+ ml_tools/ML_inference_sequence/_sequence_inference.py,sha256=Fb2H-4qTdsqYxH6YO4KcbQfXfQDUUaUZ36QB_F2gl8M,16223
+ ml_tools/ML_inference_vision/__init__.py,sha256=wd2EtqpQB_JGFraHYYGvsiBnypGuFcJCKWFLxrrydNY,212
+ ml_tools/ML_inference_vision/_vision_inference.py,sha256=qCmKpOk0vDj1XwGeRBg4KUjLBRdiqZEx61GYmLWHj9M,20063
+ ml_tools/ML_models/__init__.py,sha256=esMIN6WE04EkzvU9Uo5cllAWpIZnRfxdTPn-e0aZOak,761
+ ml_tools/ML_models/_base_mlp_attention.py,sha256=bhG6qgKcnFQ6H7fKZcj_NAoR2tJfpuAY9YPKr5LlB0Q,7199
+ ml_tools/ML_models/_base_save_load.py,sha256=h2ymwYAz3sw31Evuv48seULwt53w_rwwm1aPiiFJtl4,5754
+ ml_tools/ML_models/_dragon_autoint.py,sha256=EIXLUG5J6CoHob10AkAxc3zdfedKhdhBtiAEdOrjdKQ,12380
+ ml_tools/ML_models/_dragon_gate.py,sha256=sz_8ZB1Xo0ifjn1MyCwk6_Be0bqeTDjzgKtTgtxHi_I,14812
+ ml_tools/ML_models/_dragon_node.py,sha256=1R4uy3IWFL9UJ-wNErIzQ8uH6lY3-bvygjdqILZdVP0,11046
+ ml_tools/ML_models/_dragon_tabnet.py,sha256=cPQZvmN8GMx4f-5IfH4D7jos4gzG7ZzqewD0GwpegRY,9637
+ ml_tools/ML_models/_dragon_tabular.py,sha256=SlMR2SNpHU897GJGlBQjlKOC6Gi55f1l0pXF_Z9B2E0,10416
+ ml_tools/ML_models/_mlp_attention.py,sha256=kmnXk-hy_zg6Fni4xsy8euYjtBjuOgCVQdZXSNR8aIY,5837
+ ml_tools/ML_models/_models_advanced_helpers.py,sha256=T4eeG7b42GuAeT3KG1pi10ctdveM7pNKYxeUj1HKzCE,39110
+ ml_tools/ML_models_sequence/__init__.py,sha256=lWubpG6dWTweYIRGG26I9YdngPpFCivf98J207ncNV4,191
+ ml_tools/ML_models_sequence/_sequence_models.py,sha256=c6TOyVeePN1XyIee2wcve9mx3g0QKItZli5f-c87YqY,5590
+ ml_tools/ML_models_vision/__init__.py,sha256=L3jcR2An7LrL0sg7jfVYKx6LYSzUev1ZFa0v6p-4OJQ,533
+ ml_tools/ML_models_vision/_base_wrapper.py,sha256=9GcwVw7H19xBO3d-xzyr3-5NL66imfVzw9hYDIXfcSM,10094
+ ml_tools/ML_models_vision/_image_classification.py,sha256=miwMNoTXpmmZSiqeXvDKpx06oaBYpyBRIGVdg646tWw,6897
+ ml_tools/ML_models_vision/_image_segmentation.py,sha256=NRjn91bDD2OJWSJFrrNW9s41qgg5w7pw68Q61-kg-As,4157
+ ml_tools/ML_models_vision/_object_detection.py,sha256=AOGER5bx0REc-FfBtspJmyLJxn3GdwDSPwFGveobR94,5608
+ ml_tools/ML_optimization/__init__.py,sha256=No18Dsw6Q9zPt8B9fpG0bWomuXmwDC7DiokiaPuwmRI,485
+ ml_tools/ML_optimization/_multi_dragon.py,sha256=oUTscfoySSypgX2_7wfkDzjJ60Y93utSW2OZvAGTGE0,37494
+ ml_tools/ML_optimization/_single_dragon.py,sha256=jh5-SK6NKAzbheQhquiYoROozk-RzUv1jiFkIzK_AFg,7288
+ ml_tools/ML_optimization/_single_manual.py,sha256=h-_k9JmRqPkjTra1nu7AyYbSyWkYZ1R3utiNmW06WFs,21809
+ ml_tools/ML_scaler/_ML_scaler.py,sha256=P75X0Sx8N-VxC2Qy8aG7mWaZlkTfjspiZDi1YiMQD1I,8872
+ ml_tools/ML_scaler/__init__.py,sha256=SHDNyLsoOLl2OtkIb3pGg-JRs3E2bYJBgnHwH3vw_Tk,172
+ ml_tools/ML_trainer/__init__.py,sha256=42kueHa7Z0b_yLbywNCgIxlW6WmgLBqkTFwKH7vFLXw,379
+ ml_tools/ML_trainer/_base_trainer.py,sha256=mflBw36SEN3pc8fOVqazrjwYk9n7Ey7dEhWgLfhD_Dw,17699
+ ml_tools/ML_trainer/_dragon_detection_trainer.py,sha256=B5F93PPnp2fYQmj1SYFRnAPVA39JwZUtJRMCdpSQF7k,16235
+ ml_tools/ML_trainer/_dragon_sequence_trainer.py,sha256=Tj4YGgMrCkLnnNUlT_8wcdJFFcFhsdux308QPiqj-tw,23509
+ ml_tools/ML_trainer/_dragon_trainer.py,sha256=bvSen_liut6B7gbg53MxOXKpJUkRaHtXDXW2SXBWPYQ,58553
+ ml_tools/ML_utilities/__init__.py,sha256=71T3RDKDgHVvFrEr0G7tjuwbDVk_4JZGzwZtejC3PuE,739
+ ml_tools/ML_utilities/_artifact_finder.py,sha256=X4xz_rmi0jVan8Sun_6431TcQiNM-GDHm-DHLA1zYms,15816
+ ml_tools/ML_utilities/_inspection.py,sha256=mXTnjGmdDpBfY99xfekyrGbSvrWHBcVndivMbqPD4PI,13186
+ ml_tools/ML_utilities/_train_tools.py,sha256=3pg2JLV2SHmpzD1xjlPBQVsoXeqXDmiwcM8kr2V0jh8,7488
+ ml_tools/ML_vision_transformers/__init__.py,sha256=SaLVnrQXPjhb5x-qY_EH2XAQty5VvkgAdLPOxLJMEno,516
+ ml_tools/ML_vision_transformers/_core_transforms.py,sha256=mxMBmRtg0Jw1s4f8ExAOkzMYE1zYJKosZb14eWUyHlw,9420
+ ml_tools/ML_vision_transformers/_offline_augmentation.py,sha256=f1-GPjt-8jCdJWIlurmD00rhVrhpPpFri-xusoiEwZY,6210
+ ml_tools/PSO_optimization/_PSO.py,sha256=kEeCD1azTzdyqIdHgwjjkx-WJlpuHSICYv5j0mfG030,24828
+ ml_tools/PSO_optimization/__init__.py,sha256=Xz01CuCok67RtGpfrpIXuYxZIyREtK20R6g3NkKWhcs,292
+ ml_tools/SQL/__init__.py,sha256=ghqYXnE_xZOq8_TSfBHlU3MBJBK8RXwC22cp1j0mVH0,167
+ ml_tools/SQL/_dragon_SQL.py,sha256=SxiDoGbt1HODpqvmMz6a2TZyQ0ZpPnjfI71vMfv5DZI,11465
+ ml_tools/VIF/_VIF_factor.py,sha256=0xeMhaReG2vpBhPkOz0qaqnGmMXCz24frBTQdl6cTLk,10380
+ ml_tools/VIF/__init__.py,sha256=YEstWS4xYFHwdo5VV-BBVsB_ux4qcA17NlHxpNB2KV0,262
+ ml_tools/_core/__init__.py,sha256=m-VP0RW0tOTm9N5NI3kFNcpM7WtVgs0RK9pK3ZJRZQQ,141
+ ml_tools/_core/_logger.py,sha256=xzhn_FouMDRVNwXGBGlPC9Ruq6i5uCrmNaS5jesguMU,4972
+ ml_tools/_core/_schema_load_ops.py,sha256=KLs9vBzANz5ESe2wlP-C41N4VlgGil-ywcfvWKSOGss,1551
+ ml_tools/_core/_script_info.py,sha256=LtFGt10gEvCnhIRMKJPi2yXkiGLcdr7lE-oIP2XGHzQ,234
+ ml_tools/data_exploration/__init__.py,sha256=ahCjELrum2aIj_cLK-sdGbJjTvvolf3US_oaB97rOQg,1736
+ ml_tools/data_exploration/_analysis.py,sha256=H6LryV56FFCHWjvQdkhZbtprZy6aP8EqU_hC2Cf9CLE,7832
+ ml_tools/data_exploration/_cleaning.py,sha256=pAZOXgGK35j7O8q6cnyTwYK1GLNnD04A8p2fSyMB1mg,20906
+ ml_tools/data_exploration/_features.py,sha256=wW-M8n2aLIy05DR2z4fI8wjpPjn3mOAnm9aSGYbMKwI,23363
+ ml_tools/data_exploration/_plotting.py,sha256=zH1dPcIoAlOuww23xIoBCsQOAshPPv9OyGposOA2RvI,19883
+ ml_tools/data_exploration/_schema_ops.py,sha256=PoFeHaS9dXI9gfL0SRD-8uSP4owqmbQFbtfA-HxkLnY,7108
+ ml_tools/ensemble_evaluation/__init__.py,sha256=t4Gr8EGEk8RLatyc92-S0BzbQvdvodzoF-qDAH2qjVg,546
+ ml_tools/ensemble_evaluation/_ensemble_evaluation.py,sha256=-sX9cLMaa0FOQDikmVv2lsCYtQ56Kftd3tILnNej0Hg,28346
+ ml_tools/ensemble_inference/__init__.py,sha256=VMX-Kata2V0UmiURIU2jx6mRuZmvTWf-QXzCpHmVGZA,255
+ ml_tools/ensemble_inference/_ensemble_inference.py,sha256=Nu4GZRQuJuw5cDqUH2VEjFF8E2QkW3neVPcphicaPLk,8547
+ ml_tools/ensemble_learning/__init__.py,sha256=azY9ldY2NxX5gWiISGD0PUW9-QS8TeWiJkbtM-_PdvI,316
+ ml_tools/ensemble_learning/_ensemble_learning.py,sha256=MHDZBR20_nStlSSeThFI3bSujz3dTLAcRSXEiJldgzQ,21944
+ ml_tools/excel_handler/__init__.py,sha256=AaWM3n_dqBhJLTs3OEA57ex5YykKXNOwVCyHlVsdnqI,530
+ ml_tools/excel_handler/_excel_handler.py,sha256=TODudmeQgDSdxUKzLfAzizs--VL-g8WxDOfQ4sgxxLs,13965
+ ml_tools/keys/__init__.py,sha256=-0c2pmrhyfROc-oQpEjJGLBMhSagA3CyFijQaaqZRqU,399
+ ml_tools/keys/_keys.py,sha256=DLP0BYibRueM_8Dz9pSbWUpKypcimFL5kmXUl4wSwdU,9292
+ ml_tools/math_utilities/__init__.py,sha256=K7Obkkc4rPKj4EbRZf1BsXHfiCg7FXYv_aN9Yc2Z_Vg,400
+ ml_tools/math_utilities/_math_utilities.py,sha256=BYHIVcM9tuKIhVrkgLLiM5QalJ39zx7dXYy_M9aGgiM,9012
+ ml_tools/optimization_tools/__init__.py,sha256=KD8JXpfGuPndO4AHnjJGu6uV1GRwhOfboD0KZV45kzw,658
+ ml_tools/optimization_tools/_optimization_bounds.py,sha256=vZgFMO5rTM4ijeJ5wFbq0tp4GCPCIfJejH5DkINa3qk,9230
+ ml_tools/optimization_tools/_optimization_plots.py,sha256=GlxWvk5K2l7sgArKah8zchTMvWNuUqzWhUJCq1AsTgI,8986
+ ml_tools/path_manager/__init__.py,sha256=y5xeRxk_gDuuLFCz4_H003UtxnPzOwv76dAgOFcn6zc,535
+ ml_tools/path_manager/_dragonmanager.py,sha256=q9wHTKPmdzywEz6N14ipUoeR3MmW0bzB4RePz-Wn4uA,13111
+ ml_tools/path_manager/_path_tools.py,sha256=LcZE31QlkzZWUR8g1MW_N_mPY2DpKBJLA45VJz7ZYsw,11905
+ ml_tools/plot_fonts/__init__.py,sha256=KIxXRCjQ3SliEoLhEcqs7zDVZbVTn38bmSdL-yR1Q2w,187
+ ml_tools/plot_fonts/_plot_fonts.py,sha256=mfjXNT9P59ymHoTI85Q8CcvfxfK5BIFBWtTZH-hNIC4,2209
+ ml_tools/schema/__init__.py,sha256=K6uiZ9f0GCQ7etw1yl2-dQVLhU7RkL3KHesO3HNX6v4,334
+ ml_tools/schema/_feature_schema.py,sha256=aVY3AJt1j4D2mtusVy2l6lDR2SYzPMyfvG1o9zOn0Kw,8585
+ ml_tools/schema/_gui_schema.py,sha256=IVwN4THAdFrvh2TpV4SFd_zlzMX3eioF-w-qcSVTndE,7245
+ ml_tools/serde/__init__.py,sha256=IDirr8i-qjUHB71hmHO6lGiODhUoOnUcXYrvb_XgrzE,292
+ ml_tools/serde/_serde.py,sha256=8QnYK8ZG21zdNaC0v63iSz2bhgwOKRKAWxTVQvMV0A8,5525
+ ml_tools/utilities/__init__.py,sha256=iQb-S5JesEjGGI8983Vkj-14LCtchFxdWRhaziyvnoY,808
+ ml_tools/utilities/_utility_save_load.py,sha256=EFvFaTaHahDQWdJWZr-j7cHqRbG_Xrpc96228JhV-bs,16773
+ ml_tools/utilities/_utility_tools.py,sha256=bN0J9d1S0W5wNzNntBWqDsJcEAK7-1OgQg3X2fwXns0,6918
+ dragon_ml_toolbox-20.3.0.dist-info/METADATA,sha256=pPP6u48MGLf2mBIMTn6ddq7knczywo8sm9XO3iF9Zzg,7866
+ dragon_ml_toolbox-20.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dragon_ml_toolbox-20.3.0.dist-info/top_level.txt,sha256=wm-oxax3ciyez6VoO4zsFd-gSok2VipYXnbg3TH9PtU,9
+ dragon_ml_toolbox-20.3.0.dist-info/RECORD,,
ml_tools/ETL_cleaning/__init__.py CHANGED
@@ -13,7 +13,7 @@ from ._clean_tools import (
      save_unique_values
  )
  
- from ._imprimir import info
+ from .._core import _imprimir_disponibles
  
  
  __all__ = [
@@ -24,3 +24,7 @@ __all__ = [
      "basic_clean_drop",
      "drop_macro_polars",
  ]
+ 
+ 
+ def info():
+     _imprimir_disponibles(__all__)
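This release drops the per-subpackage `_imprimir.py` helpers and instead imports the shared `_imprimir_disponibles` utility from `ml_tools._core`, keeping a module-level `info()` entry point; the same pattern repeats in the other `__init__.py` hunks below. A minimal usage sketch, assuming the subpackage is imported directly (the exact printed wording is not shown in this diff):

    from ml_tools import ETL_cleaning

    # Expected to print the public names listed in ETL_cleaning.__all__
    ETL_cleaning.info()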
ml_tools/ETL_cleaning/_basic_clean.py CHANGED
@@ -261,7 +261,7 @@ def _generate_null_report(df: pl.DataFrame, save_dir: Path, filename: str):
          (pl.col("null_count") / total_rows * 100).round(2).alias("missing_percent")
      ).sort("missing_percent", descending=True)
  
-     save_dataframe_filename(df=report, save_dir=save_dir, filename=filename)
+     save_dataframe_filename(df=report, save_dir=save_dir, filename=filename, verbose=2)
  
  
  def drop_macro_polars(df: pl.DataFrame,
ml_tools/ETL_engineering/__init__.py CHANGED
@@ -21,7 +21,7 @@ from ._transforms import (
      MolecularFormulaTransformer
  )
  
- from ._imprimir import info
+ from .._core import _imprimir_disponibles
  
  
  __all__ = [
@@ -43,3 +43,7 @@ __all__ = [
      "DateFeatureExtractor",
      "MolecularFormulaTransformer"
  ]
+ 
+ 
+ def info():
+     _imprimir_disponibles(__all__)
ml_tools/GUI_tools/__init__.py CHANGED
@@ -6,7 +6,7 @@ from ._GUI_tools import (
      catch_exceptions,
  )
  
- from ._imprimir import info
+ from .._core import _imprimir_disponibles
  
  
  __all__ = [
@@ -16,3 +16,7 @@ __all__ = [
      "DragonGUIHandler",
      "catch_exceptions",
  ]
+ 
+ 
+ def info():
+     _imprimir_disponibles(__all__)
ml_tools/IO_tools/_IO_loggers.py CHANGED
@@ -29,6 +29,7 @@ def custom_logger(
      log_name: str,
      add_timestamp: bool=True,
      dict_as: Literal['auto', 'json', 'csv'] = 'auto',
+     verbose: int = 3
  ) -> None:
      """
      Logs various data types to corresponding output formats:
@@ -63,6 +64,7 @@ def custom_logger(
      """
      try:
          if not isinstance(data, BaseException) and not data:
+             # Display a warning instead of error to allow program continuation
              _LOGGER.warning("Empty data received. No log file will be saved.")
              return
  
@@ -103,8 +105,9 @@ def custom_logger(
          else:
              _LOGGER.error("Unsupported data type. Must be list, dict, str, or BaseException.")
              raise ValueError()
- 
-         _LOGGER.info(f"Log saved as: '{base_path.name}'")
+ 
+         if verbose >= 2:
+             _LOGGER.info(f"Log saved as: '{base_path.name}'")
  
      except Exception:
          _LOGGER.exception(f"Log not saved.")
@@ -169,7 +172,8 @@ def _log_dict_to_json(data: dict[Any, Any], path: Path) -> None:
  def train_logger(train_config: Union[dict, Any],
                   model_parameters: Union[dict, Any],
                   train_history: Union[dict, None],
-                  save_directory: Union[str, Path]):
+                  save_directory: Union[str, Path],
+                  verbose: int = 3) -> None:
      """
      Logs training data to JSON, adding a timestamp to the filename.
  
@@ -230,6 +234,10 @@ def train_logger(train_config: Union[dict, Any],
                    save_directory=save_directory,
                    log_name="Training_Log",
                    add_timestamp=True,
-                   dict_as='json'
+                   dict_as='json',
+                   verbose=1
      )
+ 
+     if verbose >= 2:
+         _LOGGER.info(f"Training log saved to '{save_directory}/Training_Log.json'")
  
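The net effect of this hunk is a new `verbose` knob on both loggers: the default of 3 keeps the previous behaviour, while lower values suppress the confirmation messages. A hedged usage sketch; the payload and directory values are placeholders, and the earlier `data`/`save_directory` parameters are assumed unchanged from 20.2.0:

    from ml_tools.IO_tools import custom_logger

    custom_logger(
        data={"epochs": 100, "batch_size": 32},   # placeholder payload
        save_directory="logs",                    # placeholder directory
        log_name="Training_Config",
        dict_as="json",
        verbose=1,                                # skips the "Log saved as ..." confirmation
    )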
ml_tools/IO_tools/__init__.py CHANGED
@@ -14,7 +14,7 @@ from ._IO_save_load import (
      load_list_strings,
  )
  
- from ._imprimir import info
+ from .._core import _imprimir_disponibles
  
  
  __all__ = [
@@ -26,3 +26,7 @@ __all__ = [
      "load_list_strings",
      "compare_lists"
  ]
+ 
+ 
+ def info():
+     _imprimir_disponibles(__all__)
ml_tools/MICE/__init__.py CHANGED
@@ -4,9 +4,11 @@ from ._dragon_mice import (
      get_imputed_distributions,
  )
  
- from ._MICE_imputation import run_mice_pipeline
+ from ._MICE_imputation import (
+     run_mice_pipeline,
+ )
  
- from ._imprimir import info
+ from .._core import _imprimir_disponibles
  
  
  __all__ = [
@@ -15,3 +17,7 @@ __all__ = [
      "get_imputed_distributions",
      "run_mice_pipeline",
  ]
+ 
+ 
+ def info():
+     _imprimir_disponibles(__all__)
ml_tools/MICE/_dragon_mice.py CHANGED
@@ -284,7 +284,7 @@ class DragonMICE:
  def _save_imputed_datasets(save_dir: Union[str, Path], imputed_datasets: list, df_targets: pd.DataFrame, imputed_dataset_names: list[str]):
      for imputed_df, subname in zip(imputed_datasets, imputed_dataset_names):
          merged_df = merge_dataframes(imputed_df, df_targets, direction="horizontal", verbose=False)
-         save_dataframe_filename(df=merged_df, save_dir=save_dir, filename=subname)
+         save_dataframe_filename(df=merged_df, save_dir=save_dir, filename=subname, verbose=2)
  
  
  #Convergence diagnostic
ml_tools/ML_callbacks/__init__.py CHANGED
@@ -12,7 +12,7 @@ from ._scheduler import (
      DragonPlateauScheduler,
  )
  
- from ._imprimir import info
+ from .._core import _imprimir_disponibles
  
  
  __all__ = [
@@ -22,3 +22,7 @@ __all__ = [
      "DragonScheduler",
      "DragonPlateauScheduler",
  ]
+ 
+ 
+ def info():
+     _imprimir_disponibles(__all__)
ml_tools/ML_chain/__init__.py CHANGED
@@ -12,7 +12,7 @@ from ._update_schema import (
      derive_next_step_schema
  )
  
- from ._imprimir import info
+ from .._core import _imprimir_disponibles
  
  
  __all__ = [
@@ -22,3 +22,7 @@ __all__ = [
      "augment_dataset_with_predictions_multi",
      "prepare_chaining_dataset",
  ]
+ 
+ 
+ def info():
+     _imprimir_disponibles(__all__)
ml_tools/ML_configuration/__init__.py CHANGED
@@ -41,9 +41,10 @@ from ._models import (
  from ._training import (
      DragonTrainingConfig,
      DragonParetoConfig,
+     DragonOptimizerConfig
  )
  
- from ._imprimir import info
+ from .._core import _imprimir_disponibles
  
  
  __all__ = [
@@ -87,4 +88,9 @@ __all__ = [
      # --- Training Config ---
      "DragonTrainingConfig",
      "DragonParetoConfig",
+     "DragonOptimizerConfig",
  ]
+ 
+ 
+ def info():
+     _imprimir_disponibles(__all__)
ml_tools/ML_configuration/_training.py CHANGED
@@ -14,7 +14,8 @@ _LOGGER = get_logger("ML Configuration")
  __all__ = [
      # --- Training Config ---
      "DragonTrainingConfig",
-     "DragonParetoConfig"
+     "DragonParetoConfig",
+     "DragonOptimizerConfig",
  ]
  
  
@@ -137,3 +138,66 @@ class DragonParetoConfig(_BaseModelParams):
          self.plot_font_size = plot_font_size
          self.discretize_start_at_zero = discretize_start_at_zero
  
+ 
+ class DragonOptimizerConfig(_BaseModelParams):
+     """
+     Configuration object for the Single-Objective DragonOptimizer.
+     """
+     def __init__(self,
+                  target_name: str,
+                  task: Literal["min", "max"],
+                  continuous_bounds_map: Union[dict[str, tuple[float, float]], str, Path],
+                  save_directory: Union[str, Path],
+                  save_format: Literal['csv', 'sqlite', 'both'] = 'csv',
+                  algorithm: Literal["SNES", "CEM", "Genetic"] = "Genetic",
+                  population_size: int = 500,
+                  generations: int = 1000,
+                  repetitions: int = 1,
+                  discretize_start_at_zero: bool = True,
+                  **searcher_kwargs: Any):
+         """
+         Args:
+             target_name (str): The name of the target variable to optimize.
+             task (str): The optimization goal, either "min" or "max".
+             continuous_bounds_map (Dict | str | Path): Dictionary {feature_name: (min, max)} or path to "optimization_bounds.json".
+             save_directory (str | Path): Directory to save results.
+             save_format (str): Format for saving results ('csv', 'sqlite', 'both').
+             algorithm (str): Search algorithm ("SNES", "CEM", "Genetic").
+             population_size (int): Population size for CEM and GeneticAlgorithm.
+             generations (int): Number of generations per repetition.
+             repetitions (int): Number of independent optimization runs.
+             discretize_start_at_zero (bool): True if discrete encoding starts at 0.
+             **searcher_kwargs: Additional arguments for the specific search algorithm
+                 (e.g., stdev_init for SNES).
+         """
+         # Validate paths
+         self.save_directory = make_fullpath(save_directory, make=True, enforce="directory")
+ 
+         if isinstance(continuous_bounds_map, (str, Path)):
+             self.continuous_bounds_map = make_fullpath(continuous_bounds_map, make=False, enforce="directory")
+         else:
+             self.continuous_bounds_map = continuous_bounds_map
+ 
+         # Core params
+         self.target_name = target_name
+         self.task = task
+         self.save_format = save_format
+         self.algorithm = algorithm
+         self.population_size = population_size
+         self.generations = generations
+         self.repetitions = repetitions
+         self.discretize_start_at_zero = discretize_start_at_zero
+ 
+         # Store algorithm specific kwargs
+         self.searcher_kwargs = searcher_kwargs
+ 
+         # Basic Validation
+         if self.task not in ["min", "max"]:
+             _LOGGER.error(f"Invalid task '{self.task}'. Must be 'min' or 'max'.")
+             raise ValueError()
+ 
+         valid_algos = ["SNES", "CEM", "Genetic"]
+         if self.algorithm not in valid_algos:
+             _LOGGER.error(f"Invalid algorithm '{self.algorithm}'. Must be one of {valid_algos}.")
+             raise ValueError()
+ 
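The new `DragonOptimizerConfig` mirrors the existing config objects: it validates the save directory and the task/algorithm choices up front and stores everything else for the optimizer to consume. A hedged construction sketch based only on the signature above; the feature names and bounds are placeholders:

    from ml_tools.ML_configuration import DragonOptimizerConfig

    config = DragonOptimizerConfig(
        target_name="target",                 # placeholder target column
        task="max",
        continuous_bounds_map={
            "feature_a": (0.0, 1.0),          # placeholder bounds
            "feature_b": (10.0, 100.0),
        },
        save_directory="optimization_results",
        algorithm="Genetic",
        generations=1000,
        repetitions=3,
    )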
ml_tools/ML_datasetmaster/__init__.py CHANGED
@@ -13,7 +13,7 @@ from ._vision_datasetmaster import (
      DragonDatasetObjectDetection
  )
  
- from ._imprimir import info
+ from .._core import _imprimir_disponibles
  
  
  __all__ = [
@@ -26,3 +26,7 @@ __all__ = [
      "DragonDatasetSegmentation",
      "DragonDatasetObjectDetection",
  ]
+ 
+ 
+ def info():
+     _imprimir_disponibles(__all__)
ml_tools/ML_datasetmaster/_base_datasetmaster.py CHANGED
@@ -6,7 +6,7 @@ from typing import Union, Optional
  from abc import ABC
  from pathlib import Path
  
- from ..IO_tools import save_list_strings, custom_logger
+ from ..IO_tools import save_list_strings, save_json
  from ..ML_scaler import DragonScaler
  from ..schema import FeatureSchema
  
@@ -126,13 +126,15 @@ class _BaseDatasetMaker(ABC):
                               X_val: pandas.DataFrame,
                               X_test: pandas.DataFrame,
                               label_dtype: torch.dtype,
-                              schema: FeatureSchema) -> tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:
+                              schema: FeatureSchema,
+                              verbose:int = 3) -> tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:
          """Internal helper to fit and apply a DragonScaler for FEATURES using a FeatureSchema."""
          continuous_feature_indices: Optional[list[int]] = None
  
          # Get continuous feature indices *from the schema*
          if schema.continuous_feature_names:
-             _LOGGER.info("Getting continuous feature indices from schema.")
+             if verbose >= 2:
+                 _LOGGER.info("Getting continuous feature indices from schema.")
              try:
                  # Convert columns to a standard list for .index()
                  train_cols_list = X_train.columns.to_list()
@@ -142,7 +144,8 @@ class _BaseDatasetMaker(ABC):
                  _LOGGER.error(f"Feature name from schema not found in training data columns:\n{e}")
                  raise ValueError()
          else:
-             _LOGGER.info("No continuous features listed in schema. Feature scaler will not be fitted.")
+             if verbose >= 2:
+                 _LOGGER.info("No continuous features listed in schema. Feature scaler will not be fitted.")
  
          X_train_values = X_train.to_numpy()
          X_val_values = X_val.to_numpy()
@@ -150,23 +153,29 @@ class _BaseDatasetMaker(ABC):
  
          # continuous_feature_indices is derived
          if self.feature_scaler is None and continuous_feature_indices:
-             _LOGGER.info("Fitting a new DragonScaler on training features.")
+             if verbose >= 3:
+                 _LOGGER.info("Fitting a new DragonScaler on training features.")
              temp_train_ds = _PytorchDataset(X_train_values, y_train, label_dtype)
-             self.feature_scaler = DragonScaler.fit(temp_train_ds, continuous_feature_indices)
+             self.feature_scaler = DragonScaler.fit(temp_train_ds, continuous_feature_indices, verbose=verbose)
  
          if self.feature_scaler and self.feature_scaler.mean_ is not None:
-             _LOGGER.info("Applying scaler transformation to train, validation, and test feature sets.")
+             if verbose >= 3:
+                 _LOGGER.info("Applying scaler transformation to train, validation, and test feature sets.")
              X_train_tensor = self.feature_scaler.transform(torch.tensor(X_train_values, dtype=torch.float32))
              X_val_tensor = self.feature_scaler.transform(torch.tensor(X_val_values, dtype=torch.float32))
              X_test_tensor = self.feature_scaler.transform(torch.tensor(X_test_values, dtype=torch.float32))
              return X_train_tensor.numpy(), X_val_tensor.numpy(), X_test_tensor.numpy()
+ 
+         if verbose >= 2:
+             _LOGGER.info("Feature scaling transformation complete.")
  
          return X_train_values, X_val_values, X_test_values
  
      def _prepare_target_scaler(self,
                                 y_train: Union[pandas.Series, pandas.DataFrame],
                                 y_val: Union[pandas.Series, pandas.DataFrame],
-                                y_test: Union[pandas.Series, pandas.DataFrame]) -> tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:
+                                y_test: Union[pandas.Series, pandas.DataFrame],
+                                verbose: int = 3) -> tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:
          """Internal helper to fit and apply a DragonScaler for TARGETS."""
  
          y_train_arr = y_train.to_numpy() if isinstance(y_train, (pandas.Series, pandas.DataFrame)) else y_train
@@ -180,17 +189,19 @@ class _BaseDatasetMaker(ABC):
          # ------------------------------------------------------------------
  
          if self.target_scaler is None:
-             _LOGGER.info("Fitting a new DragonScaler on training targets.")
+             if verbose >= 2:
+                 _LOGGER.info("Fitting a new DragonScaler on training targets.")
              # Convert to float tensor for calculation
              y_train_tensor = torch.tensor(y_train_arr, dtype=torch.float32)
-             self.target_scaler = DragonScaler.fit_tensor(y_train_tensor)
+             self.target_scaler = DragonScaler.fit_tensor(y_train_tensor, verbose=verbose)
  
          if self.target_scaler and self.target_scaler.mean_ is not None:
-             _LOGGER.info("Applying scaler transformation to train, validation, and test targets.")
-             y_train_tensor = self.target_scaler.transform(torch.tensor(y_train_arr, dtype=torch.float32))
-             y_val_tensor = self.target_scaler.transform(torch.tensor(y_val_arr, dtype=torch.float32))
-             y_test_tensor = self.target_scaler.transform(torch.tensor(y_test_arr, dtype=torch.float32))
-             return y_train_tensor.numpy(), y_val_tensor.numpy(), y_test_tensor.numpy()
+             if verbose >= 3:
+                 _LOGGER.info("Applying scaler transformation to train, validation, and test targets.")
+             y_train_tensor = self.target_scaler.transform(torch.tensor(y_train_arr, dtype=torch.float32))
+             y_val_tensor = self.target_scaler.transform(torch.tensor(y_val_arr, dtype=torch.float32))
+             y_test_tensor = self.target_scaler.transform(torch.tensor(y_test_arr, dtype=torch.float32))
+             return y_train_tensor.numpy(), y_val_tensor.numpy(), y_test_tensor.numpy()
  
          return y_train_arr, y_val_arr, y_test_arr
  
@@ -318,11 +329,11 @@ class _BaseDatasetMaker(ABC):
  
          log_name = f"Class_to_Index_{self.id}" if self.id else "Class_to_Index"
  
-         custom_logger(data=self.class_map,
-                       save_directory=directory,
-                       log_name=log_name,
-                       add_timestamp=False,
-                       dict_as="json")
+         save_json(data=self.class_map,
+                   directory=directory,
+                   filename=log_name,
+                   verbose=False)
+ 
          if verbose:
              _LOGGER.info(f"Class map for '{self.id}' saved as '{log_name}.json'.")
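A pattern worth noting across the dataset-maker and logger hunks in this release: console output is now gated on an integer verbose level rather than always emitted (3 appears to be the chatty default, 2 keeps confirmations, lower values silence them). A generic sketch of that gating, using a hypothetical helper name rather than anything from the package:

    import logging

    _LOGGER = logging.getLogger("verbosity-demo")

    def log_at(verbose: int, threshold: int, message: str) -> None:
        # Same shape as the `if verbose >= N: _LOGGER.info(...)` gates added in 20.3.0.
        if verbose >= threshold:
            _LOGGER.info(message)

    log_at(verbose=3, threshold=2, message="Fitting a new DragonScaler on training targets.")  # emitted
    log_at(verbose=1, threshold=3, message="Applying scaler transformation.")                  # suppressed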