x-evolution 0.1.14__tar.gz → 0.1.16__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: x-evolution
- Version: 0.1.14
+ Version: 0.1.16
  Summary: x-evolution
  Project-URL: Homepage, https://pypi.org/project/x-evolution/
  Project-URL: Repository, https://github.com/lucidrains/x-evolution
@@ -1,6 +1,6 @@
  [project]
  name = "x-evolution"
- version = "0.1.14"
+ version = "0.1.16"
  description = "x-evolution"
  authors = [
      { name = "Phil Wang", email = "lucidrains@gmail.com" }
@@ -45,3 +45,5 @@ def test_evo_strat(

  evo_strat('evolve', 1)
  evo_strat('more.evolve', 1)
+
+ fitnesses = evo_strat('more.evolve', 2, rollback_model_at_end = True)
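For context, here is a minimal sketch, not part of the package diff above, of how the new rollback_model_at_end flag could be used: run a short burst of generations, collect the per-generation fitnesses, and rely on the rollback so the wrapped model ends the call with the same weights it started with. probe_fitness and the 'probe.model' filename are hypothetical, and the weight comparison assumes EvoStrategy keeps a live reference to the module it was constructed with so the rollback restores it in place.

import torch
from copy import deepcopy

def probe_fitness(evo_strat, model, num_generations = 2):
    # copy the weights only to illustrate that the rollback restores them
    before = deepcopy(model.state_dict())

    fitnesses = evo_strat(
        'probe.model',                  # hypothetical checkpoint filename prefix
        num_generations,
        rollback_model_at_end = True
    )

    # after the run, the wrapped model should be back to its pre-call weights
    after = model.state_dict()
    assert all(torch.equal(before[k].cpu(), after[k].cpu()) for k in before)

    return fitnesses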
@@ -80,6 +80,8 @@ from x_mlps_pytorch.residual_normed_mlp import ResidualNormedMLP

  actor = ResidualNormedMLP(dim_in = 8, dim = 24, depth = 2, residual_every = 1, dim_out = 4)

+ from torch.optim.lr_scheduler import CosineAnnealingLR
+
  evo_strat = EvoStrategy(
      actor,
      environment = LunarEnvironment(repeats = 2),
@@ -91,7 +93,10 @@ evo_strat = EvoStrategy(
      learned_noise_scale = True,
      use_sigma_optimizer = True,
      learning_rate = 1e-3,
-     noise_scale_learning_rate = 1e-4
+     noise_scale_learning_rate = 1e-4,
+     use_scheduler = True,
+     scheduler_klass = CosineAnnealingLR,
+     scheduler_kwargs = dict(T_max = 50_000)
  )

  evo_strat()
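The three new README options feed a standard PyTorch learning-rate scheduler. A minimal sketch, outside the package, of the pattern they presumably map onto internally, namely scheduler_klass(optimizer, **scheduler_kwargs) stepped alongside the optimizer:

import torch
from torch import nn
from torch.optim import Adam
from torch.optim.lr_scheduler import CosineAnnealingLR

param = nn.Parameter(torch.zeros(10))
optimizer = Adam([param], lr = 1e-3)

scheduler_klass = CosineAnnealingLR
scheduler_kwargs = dict(T_max = 50_000)

# the scheduler wraps the optimizer and is stepped once per update,
# annealing the learning rate along a cosine curve over T_max steps
scheduler = scheduler_klass(optimizer, **scheduler_kwargs)

for _ in range(3):                      # stand-in for the generation loop
    loss = param.square().sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    scheduler.step()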
@@ -366,11 +366,17 @@ class EvoStrategy(Module):
      self,
      filename = 'evolved.model',
      num_generations = None,
-     disable_distributed = False
+     disable_distributed = False,
+     rollback_model_at_end = False
  ):

      model = self.noisable_model.to(self.device)

+     # maybe save model for rolling back (for meta-evo)
+
+     if rollback_model_at_end:
+         self.checkpoint('initial.model')
+
      # maybe sigmas

      if self.learned_noise_scale:
@@ -515,7 +521,7 @@ class EvoStrategy(Module):
      if self.learned_noise_scale:
          packed_sigma, _ = pack(list(self.sigmas), '*')
          avg_sigma = packed_sigma.mean().item()
-         msg += f' | avg sigma: {avg_sigma:.3f}'
+         msg += f' | average sigma: {avg_sigma:.3f}'

      self.print(msg)

@@ -533,8 +539,17 @@ class EvoStrategy(Module):

      self.print('evolution complete')

+     # final checkpoint
+
      self.checkpoint(f'{filename}.final.{generation}')

+     # maybe rollback
+
+     if rollback_model_at_end:
+         orig_state_dict = torch.load(str(self.checkpoint_folder / 'initial.model.pt'), weights_only = True)
+
+         self.model.load_state_dict(orig_state_dict)
+
      # return fitnesses across generations
      # for meta-evolutionary (nesting EvoStrategy within the environment of another and optimizing some meta-network)

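The source comments tie the rollback to meta-evolution. A hedged sketch of the nesting it enables (InnerEvoEnvironment and its interface are hypothetical, not from the package): an outer strategy treats an entire inner evolution run as its environment, and rollback_model_at_end = True keeps every evaluation starting from the same inner weights.

class InnerEvoEnvironment:
    # wraps an inner EvoStrategy so an outer strategy can score meta-parameters

    def __init__(self, inner_evo_strat):
        self.inner = inner_evo_strat

    def __call__(self, meta_model):
        # ... apply meta_model's outputs to the inner strategy's hyperparameters here ...

        fitnesses = self.inner(
            'inner.model',
            num_generations = 2,
            rollback_model_at_end = True    # inner weights restored after the run
        )

        # reduce the per-generation fitnesses to a scalar meta-fitness;
        # taking the last generation's entry is one simple choice
        return fitnesses[-1]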