pg-sui: 1.6.14.dev9-py3-none-any.whl → 1.6.16a3-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
@@ -343,19 +343,19 @@ class NLPCAConfig:
  cfg.model.dropout_rate = 0.10
  cfg.model.gamma = 1.5
  # Train
- cfg.train.batch_size = 128
- cfg.train.learning_rate = 1e-3
+ cfg.train.batch_size = 256
+ cfg.train.learning_rate = 2e-3
  cfg.train.early_stop_gen = 5
  cfg.train.min_epochs = 10
- cfg.train.max_epochs = 120
- cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.max_epochs = 150
+ cfg.train.weights_beta = 0.999
+ cfg.train.weights_max_ratio = 5.0
  # Tuning
  cfg.tune.enabled = True
  cfg.tune.fast = True
- cfg.tune.n_trials = 25
- cfg.tune.epochs = 120
- cfg.tune.batch_size = 128
+ cfg.tune.n_trials = 20
+ cfg.tune.epochs = 150
+ cfg.tune.batch_size = 256
  cfg.tune.max_samples = 512
  cfg.tune.max_loci = 0
  cfg.tune.eval_interval = 20
@@ -374,26 +374,26 @@ class NLPCAConfig:
  cfg.model.gamma = 2.0
  # Train
  cfg.train.batch_size = 128
- cfg.train.learning_rate = 8e-4
+ cfg.train.learning_rate = 1e-3
  cfg.train.early_stop_gen = 15
  cfg.train.min_epochs = 50
  cfg.train.max_epochs = 600
  cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.weights_max_ratio = 5.0
  # Tuning
  cfg.tune.enabled = True
- cfg.tune.fast = True
- cfg.tune.n_trials = 75
- cfg.tune.epochs = 300
+ cfg.tune.fast = False
+ cfg.tune.n_trials = 60
+ cfg.tune.epochs = 200
  cfg.tune.batch_size = 128
  cfg.tune.max_samples = 2048
  cfg.tune.max_loci = 0
- cfg.tune.eval_interval = 20
- cfg.tune.infer_epochs = 40
+ cfg.tune.eval_interval = 10
+ cfg.tune.infer_epochs = 50
  cfg.tune.patience = 10
  cfg.tune.proxy_metric_batch = 0
  # Eval
- cfg.evaluate.eval_latent_steps = 30
+ cfg.evaluate.eval_latent_steps = 40

  else: # thorough
  # Model
@@ -404,26 +404,26 @@ class NLPCAConfig:
  cfg.model.gamma = 2.5
  # Train
  cfg.train.batch_size = 64
- cfg.train.learning_rate = 6e-4
- cfg.train.early_stop_gen = 20 # Reduced from 30
+ cfg.train.learning_rate = 5e-4
+ cfg.train.early_stop_gen = 30
  cfg.train.min_epochs = 100
- cfg.train.max_epochs = 800 # Reduced from 1200
+ cfg.train.max_epochs = 2000
  cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.weights_max_ratio = 5.0
  # Tuning
  cfg.tune.enabled = True
- cfg.tune.fast = False
- cfg.tune.n_trials = 150
+ cfg.tune.fast = False # Full search
+ cfg.tune.n_trials = 100
  cfg.tune.epochs = 600
  cfg.tune.batch_size = 64
- cfg.tune.max_samples = 5000 # Capped from 0
+ cfg.tune.max_samples = 0 # No limit
  cfg.tune.max_loci = 0
  cfg.tune.eval_interval = 10
  cfg.tune.infer_epochs = 80
- cfg.tune.patience = 15 # Reduced from 20
+ cfg.tune.patience = 20
  cfg.tune.proxy_metric_batch = 0
  # Eval
- cfg.evaluate.eval_latent_steps = 50
+ cfg.evaluate.eval_latent_steps = 100

  return cfg

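For orientation, the presets above are consumed through the class's preset builder (the RFConfig hunk below shows its docstring: """Build a config from a named preset."""). A minimal usage sketch, assuming a `from_preset` classmethod name and an import path that this diff does not show:

```python
# Hypothetical usage sketch; `from_preset` and the import path are assumed.
from pgsui import NLPCAConfig

cfg = NLPCAConfig.from_preset("fast")

# As of 1.6.16a3, the fast preset trades per-step cost for throughput:
# larger batches with a proportionally higher learning rate, plus a
# shorter, cheaper tuning pass.
assert cfg.train.batch_size == 256      # was 128
assert cfg.train.learning_rate == 2e-3  # was 1e-3
assert cfg.tune.n_trials == 20          # was 25
```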
@@ -496,19 +496,19 @@ class UBPConfig:
  cfg.model.dropout_rate = 0.10
  cfg.model.gamma = 1.5
  # Train
- cfg.train.batch_size = 128
- cfg.train.learning_rate = 1e-3
+ cfg.train.batch_size = 256
+ cfg.train.learning_rate = 2e-3
  cfg.train.early_stop_gen = 5
  cfg.train.min_epochs = 10
- cfg.train.max_epochs = 120
- cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.max_epochs = 150
+ cfg.train.weights_beta = 0.999
+ cfg.train.weights_max_ratio = 5.0
  # Tuning
  cfg.tune.enabled = True
  cfg.tune.fast = True
- cfg.tune.n_trials = 25
- cfg.tune.epochs = 120
- cfg.tune.batch_size = 128
+ cfg.tune.n_trials = 20
+ cfg.tune.epochs = 150
+ cfg.tune.batch_size = 256
  cfg.tune.max_samples = 512
  cfg.tune.max_loci = 0
  cfg.tune.eval_interval = 20
@@ -529,26 +529,26 @@ class UBPConfig:
  cfg.model.gamma = 2.0
  # Train
  cfg.train.batch_size = 128
- cfg.train.learning_rate = 8e-4
+ cfg.train.learning_rate = 1e-3
  cfg.train.early_stop_gen = 15
  cfg.train.min_epochs = 50
  cfg.train.max_epochs = 600
  cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.weights_max_ratio = 5.0
  # Tuning
  cfg.tune.enabled = True
- cfg.tune.fast = True
- cfg.tune.n_trials = 75
- cfg.tune.epochs = 300
+ cfg.tune.fast = False
+ cfg.tune.n_trials = 60
+ cfg.tune.epochs = 200
  cfg.tune.batch_size = 128
  cfg.tune.max_samples = 2048
  cfg.tune.max_loci = 0
- cfg.tune.eval_interval = 20
- cfg.tune.infer_epochs = 40
+ cfg.tune.eval_interval = 10
+ cfg.tune.infer_epochs = 50
  cfg.tune.patience = 10
  cfg.tune.proxy_metric_batch = 0
  # Eval
- cfg.evaluate.eval_latent_steps = 30
+ cfg.evaluate.eval_latent_steps = 40
  cfg.evaluate.eval_latent_lr = 1e-2
  cfg.evaluate.eval_latent_weight_decay = 0.0

@@ -561,26 +561,26 @@ class UBPConfig:
  cfg.model.gamma = 2.5
  # Train
  cfg.train.batch_size = 64
- cfg.train.learning_rate = 6e-4
- cfg.train.early_stop_gen = 20 # Reduced from 30
+ cfg.train.learning_rate = 5e-4
+ cfg.train.early_stop_gen = 30
  cfg.train.min_epochs = 100
- cfg.train.max_epochs = 800 # Reduced from 1200
+ cfg.train.max_epochs = 2000
  cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.weights_max_ratio = 5.0
  # Tuning
  cfg.tune.enabled = True
  cfg.tune.fast = False
- cfg.tune.n_trials = 150
+ cfg.tune.n_trials = 100
  cfg.tune.epochs = 600
  cfg.tune.batch_size = 64
- cfg.tune.max_samples = 5000 # Capped from 0
+ cfg.tune.max_samples = 0
  cfg.tune.max_loci = 0
  cfg.tune.eval_interval = 10
  cfg.tune.infer_epochs = 80
- cfg.tune.patience = 15 # Reduced from 20
+ cfg.tune.patience = 20
  cfg.tune.proxy_metric_batch = 0
  # Eval
- cfg.evaluate.eval_latent_steps = 50
+ cfg.evaluate.eval_latent_steps = 100
  cfg.evaluate.eval_latent_lr = 1e-2
  cfg.evaluate.eval_latent_weight_decay = 0.0

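The recurring `weights_beta` / `weights_max_ratio` changes suggest effective-number class weighting in the style of Cui et al. (weight ∝ (1 − β) / (1 − βⁿ) for a class observed n times), with `weights_max_ratio` capping the spread between the rarest and most common class. That reading is inferred from the parameter names, not confirmed by the diff; under it, raising the cap from 2.0 to 5.0 lets rare genotype classes be up-weighted far more strongly:

```python
import numpy as np

def class_weights(counts, beta=0.9999, max_ratio=5.0):
    """Assumed semantics of weights_beta / weights_max_ratio:
    inverse effective-number weights with a capped spread."""
    counts = np.asarray(counts, dtype=float)
    effective = (1.0 - beta ** counts) / (1.0 - beta)  # effective sample counts
    w = 1.0 / effective                                # rarer classes -> larger weights
    w = w / w.min()                                    # express as ratios >= 1
    w = np.minimum(w, max_ratio)                       # cap rare-class up-weighting
    return w / w.mean()                                # normalize around 1.0

counts = [9000, 900, 100]                    # e.g. genotype classes 0/1/2
print(class_weights(counts, max_ratio=2.0))  # old cap: nearly uniform weights
print(class_weights(counts, max_ratio=5.0))  # new cap: rare class weighted up to 5x
```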
@@ -657,18 +657,18 @@ class AutoencoderConfig:
  cfg.model.layer_scaling_factor = 2.0
  cfg.model.dropout_rate = 0.10
  cfg.model.gamma = 1.5
- cfg.train.batch_size = 128
- cfg.train.learning_rate = 1e-3
+ cfg.train.batch_size = 256
+ cfg.train.learning_rate = 2e-3
  cfg.train.early_stop_gen = 5
  cfg.train.min_epochs = 10
- cfg.train.max_epochs = 120
- cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.max_epochs = 150
+ cfg.train.weights_beta = 0.999
+ cfg.train.weights_max_ratio = 5.0
  cfg.tune.enabled = True
  cfg.tune.fast = True
- cfg.tune.n_trials = 25
- cfg.tune.epochs = 120
- cfg.tune.batch_size = 128
+ cfg.tune.n_trials = 20
+ cfg.tune.epochs = 150
+ cfg.tune.batch_size = 256
  cfg.tune.max_samples = 512
  cfg.tune.max_loci = 0
  cfg.tune.eval_interval = 20
@@ -684,20 +684,20 @@ class AutoencoderConfig:
  cfg.model.dropout_rate = 0.20
  cfg.model.gamma = 2.0
  cfg.train.batch_size = 128
- cfg.train.learning_rate = 8e-4
+ cfg.train.learning_rate = 1e-3
  cfg.train.early_stop_gen = 15
  cfg.train.min_epochs = 50
  cfg.train.max_epochs = 600
  cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.weights_max_ratio = 5.0
  cfg.tune.enabled = True
- cfg.tune.fast = True
- cfg.tune.n_trials = 75
- cfg.tune.epochs = 300
+ cfg.tune.fast = False
+ cfg.tune.n_trials = 60
+ cfg.tune.epochs = 200
  cfg.tune.batch_size = 128
  cfg.tune.max_samples = 2048
  cfg.tune.max_loci = 0
- cfg.tune.eval_interval = 20
+ cfg.tune.eval_interval = 10
  cfg.tune.patience = 10
  cfg.tune.proxy_metric_batch = 0
  if hasattr(cfg.tune, "infer_epochs"):
@@ -710,21 +710,21 @@ class AutoencoderConfig:
  cfg.model.dropout_rate = 0.30
  cfg.model.gamma = 2.5
  cfg.train.batch_size = 64
- cfg.train.learning_rate = 6e-4
- cfg.train.early_stop_gen = 20 # Reduced from 30
+ cfg.train.learning_rate = 5e-4
+ cfg.train.early_stop_gen = 30
  cfg.train.min_epochs = 100
- cfg.train.max_epochs = 800 # Reduced from 1200
+ cfg.train.max_epochs = 2000
  cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.weights_max_ratio = 5.0
  cfg.tune.enabled = True
  cfg.tune.fast = False
- cfg.tune.n_trials = 150
+ cfg.tune.n_trials = 100
  cfg.tune.epochs = 600
  cfg.tune.batch_size = 64
- cfg.tune.max_samples = 5000 # Capped from 0
+ cfg.tune.max_samples = 0
  cfg.tune.max_loci = 0
  cfg.tune.eval_interval = 10
- cfg.tune.patience = 15 # Reduced from 20
+ cfg.tune.patience = 20
  cfg.tune.proxy_metric_batch = 0
  if hasattr(cfg.tune, "infer_epochs"):
      cfg.tune.infer_epochs = 0
@@ -812,30 +812,30 @@ class VAEConfig:
  cfg.sim.sim_strategy = "random"
  cfg.sim.sim_prop = 0.2

- # VAE KL schedules, shortened for speed
- cfg.vae.kl_beta = 1.0
- cfg.vae.kl_warmup = 25
- cfg.vae.kl_ramp = 100
-
  if preset == "fast":
  cfg.model.latent_dim = 4
  cfg.model.num_hidden_layers = 1
  cfg.model.layer_scaling_factor = 2.0
  cfg.model.dropout_rate = 0.10
  cfg.model.gamma = 1.5
- cfg.vae.kl_beta = 0.5 # Lower beta for fast training
- cfg.train.batch_size = 128
- cfg.train.learning_rate = 1e-3
+ # VAE specifics
+ cfg.vae.kl_beta = 0.5
+ cfg.vae.kl_warmup = 10
+ cfg.vae.kl_ramp = 40
+ # Train
+ cfg.train.batch_size = 256
+ cfg.train.learning_rate = 2e-3
  cfg.train.early_stop_gen = 5
  cfg.train.min_epochs = 10
- cfg.train.max_epochs = 120
- cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.max_epochs = 150
+ cfg.train.weights_beta = 0.999
+ cfg.train.weights_max_ratio = 5.0
+ # Tune
  cfg.tune.enabled = True
  cfg.tune.fast = True
- cfg.tune.n_trials = 25
- cfg.tune.epochs = 120
- cfg.tune.batch_size = 128
+ cfg.tune.n_trials = 20
+ cfg.tune.epochs = 150
+ cfg.tune.batch_size = 256
  cfg.tune.max_samples = 512
  cfg.tune.max_loci = 0
  cfg.tune.eval_interval = 20
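The `kl_warmup` / `kl_ramp` names read like a standard KL-annealing schedule: hold the KL weight at zero for `kl_warmup` epochs, then ramp linearly up to `kl_beta` over `kl_ramp` epochs. That interpretation is an assumption from the names. The structural change in this hunk is that the schedule moves from a single class-level default (warmup 25, ramp 100) into each preset, so the fast preset now reaches full KL weight by epoch 50, comfortably inside its `max_epochs = 150`. A sketch under those assumed semantics:

```python
def kl_weight(epoch: int, kl_beta: float = 0.5,
              kl_warmup: int = 10, kl_ramp: int = 40) -> float:
    """Assumed linear KL-annealing schedule for the VAE loss term."""
    if epoch < kl_warmup:
        return 0.0                    # pure reconstruction at first
    t = (epoch - kl_warmup) / max(kl_ramp, 1)
    return kl_beta * min(t, 1.0)      # linear ramp, then hold at kl_beta

print([round(kl_weight(e), 3) for e in (0, 10, 30, 50, 100)])
# [0.0, 0.0, 0.25, 0.5, 0.5]  -> full weight from epoch 50 onward
```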
@@ -850,21 +850,27 @@ class VAEConfig:
  cfg.model.layer_scaling_factor = 3.0
  cfg.model.dropout_rate = 0.20
  cfg.model.gamma = 2.0
+ # VAE specifics
+ cfg.vae.kl_beta = 1.0
+ cfg.vae.kl_warmup = 50
+ cfg.vae.kl_ramp = 150
+ # Train
  cfg.train.batch_size = 128
- cfg.train.learning_rate = 8e-4
+ cfg.train.learning_rate = 1e-3
  cfg.train.early_stop_gen = 15
  cfg.train.min_epochs = 50
  cfg.train.max_epochs = 600
  cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.weights_max_ratio = 5.0
+ # Tune
  cfg.tune.enabled = True
- cfg.tune.fast = True
- cfg.tune.n_trials = 75
- cfg.tune.epochs = 300
+ cfg.tune.fast = False
+ cfg.tune.n_trials = 60
+ cfg.tune.epochs = 200
  cfg.tune.batch_size = 128
  cfg.tune.max_samples = 2048
  cfg.tune.max_loci = 0
- cfg.tune.eval_interval = 20
+ cfg.tune.eval_interval = 10
  cfg.tune.patience = 10
  cfg.tune.proxy_metric_batch = 0
  if hasattr(cfg.tune, "infer_epochs"):
@@ -876,22 +882,28 @@ class VAEConfig:
  cfg.model.layer_scaling_factor = 5.0
  cfg.model.dropout_rate = 0.30
  cfg.model.gamma = 2.5
+ # VAE specifics
+ cfg.vae.kl_beta = 1.0
+ cfg.vae.kl_warmup = 100
+ cfg.vae.kl_ramp = 400
+ # Train
  cfg.train.batch_size = 64
- cfg.train.learning_rate = 6e-4
- cfg.train.early_stop_gen = 20 # Reduced from 30
+ cfg.train.learning_rate = 5e-4
+ cfg.train.early_stop_gen = 30
  cfg.train.min_epochs = 100
- cfg.train.max_epochs = 800 # Reduced from 1200
+ cfg.train.max_epochs = 2000
  cfg.train.weights_beta = 0.9999
- cfg.train.weights_max_ratio = 2.0
+ cfg.train.weights_max_ratio = 5.0
+ # Tune
  cfg.tune.enabled = True
  cfg.tune.fast = False
- cfg.tune.n_trials = 150
+ cfg.tune.n_trials = 100
  cfg.tune.epochs = 600
  cfg.tune.batch_size = 64
- cfg.tune.max_samples = 5000 # Capped from 0
+ cfg.tune.max_samples = 0
  cfg.tune.max_loci = 0
  cfg.tune.eval_interval = 10
- cfg.tune.patience = 15 # Reduced from 20
+ cfg.tune.patience = 20
  cfg.tune.proxy_metric_batch = 0
  if hasattr(cfg.tune, "infer_epochs"):
      cfg.tune.infer_epochs = 0
@@ -1264,13 +1276,13 @@ class RFConfig:
  """Build a config from a named preset."""
  cfg = cls()
  if preset == "fast":
- cfg.model.n_estimators = 100 # Increased from 50
+ cfg.model.n_estimators = 50
  cfg.model.max_depth = None
  cfg.imputer.max_iter = 5
  cfg.io.n_jobs = 1
  cfg.tune.enabled = False
  elif preset == "balanced":
- cfg.model.n_estimators = 200 # Increased from 100
+ cfg.model.n_estimators = 200
  cfg.model.max_depth = None
  cfg.imputer.max_iter = 10
  cfg.io.n_jobs = 1
@@ -1279,7 +1291,7 @@ class RFConfig:
  elif preset == "thorough":
  cfg.model.n_estimators = 500
  cfg.model.max_depth = 50 # Added safety cap
- cfg.imputer.max_iter = 15
+ cfg.imputer.max_iter = 20
  cfg.io.n_jobs = 1
  cfg.tune.enabled = False
  cfg.tune.n_trials = 250
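`imputer.max_iter` presumably bounds the rounds of an iterative, estimator-based imputation loop; scikit-learn's `IterativeImputer` is the canonical implementation of that idea, though whether pg-sui delegates to it is not shown in this diff. Raising the thorough preset from 15 to 20 rounds buys extra convergence at proportional cost. A generic sketch of the pattern:

```python
# Generic iterative-imputation sketch; pg-sui's actual wiring of RFConfig
# into its imputer is not shown in this diff.
import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
X = rng.random((100, 8))
X[rng.random(X.shape) < 0.2] = np.nan  # knock out ~20% of entries

imputer = IterativeImputer(
    estimator=RandomForestRegressor(n_estimators=50, max_depth=None),
    max_iter=20,  # the thorough preset's new value
    random_state=0,
)
X_imputed = imputer.fit_transform(X)  # no NaNs remain
```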
@@ -1357,14 +1369,14 @@ class HGBConfig:
  cfg = cls()
  if preset == "fast":
  cfg.model.n_estimators = 50
- cfg.model.learning_rate = 0.15
+ cfg.model.learning_rate = 0.2
  cfg.model.max_depth = None
  cfg.imputer.max_iter = 5
  cfg.io.n_jobs = 1
  cfg.tune.enabled = False
  cfg.tune.n_trials = 50
  elif preset == "balanced":
- cfg.model.n_estimators = 100
+ cfg.model.n_estimators = 150
  cfg.model.learning_rate = 0.1
  cfg.model.max_depth = None
  cfg.imputer.max_iter = 10
@@ -1373,10 +1385,10 @@ class HGBConfig:
  cfg.tune.n_trials = 100
  elif preset == "thorough":
  cfg.model.n_estimators = 500
- cfg.model.learning_rate = 0.05 # Reduced from 0.08
+ cfg.model.learning_rate = 0.05
  cfg.model.n_iter_no_change = 20 # Increased patience
  cfg.model.max_depth = None
- cfg.imputer.max_iter = 15
+ cfg.imputer.max_iter = 20
  cfg.io.n_jobs = 1
  cfg.tune.enabled = False
  cfg.tune.n_trials = 250
@@ -278,10 +278,13 @@ class BaseNNImputer:
  raise AttributeError(msg)

  # Start with a base set of fixed (non-tuned) parameters.
+ base_num_classes = getattr(self, "output_classes_", None)
+ if base_num_classes is None:
+     base_num_classes = self.num_classes_
  all_params = {
      "n_features": self.num_features_,
      "prefix": self.prefix,
-     "num_classes": self.num_classes_,
+     "num_classes": base_num_classes,
      "verbose": self.verbose,
      "debug": self.debug,
      "device": self.device,