evolutionary-policy-optimization 0.0.24__py3-none-any.whl → 0.0.26__py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- evolutionary_policy_optimization/epo.py +47 -0
- {evolutionary_policy_optimization-0.0.24.dist-info → evolutionary_policy_optimization-0.0.26.dist-info}/METADATA +1 -1
- evolutionary_policy_optimization-0.0.26.dist-info/RECORD +7 -0
- evolutionary_policy_optimization-0.0.24.dist-info/RECORD +0 -7
- {evolutionary_policy_optimization-0.0.24.dist-info → evolutionary_policy_optimization-0.0.26.dist-info}/WHEEL +0 -0
- {evolutionary_policy_optimization-0.0.24.dist-info → evolutionary_policy_optimization-0.0.26.dist-info}/licenses/LICENSE +0 -0
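The substantive change in this release is a new firefly_step method added to LatentGenePool in epo.py (+47 lines), applying a firefly-algorithm update to the pool of latent genes; the METADATA and RECORD diffs below are the routine version bump that accompanies it. A minimal usage sketch follows; only the firefly_step signature is taken from the diff, while the top-level export and the constructor arguments (num_latents, dim_latent) are assumptions about the surrounding library:

    import torch
    from evolutionary_policy_optimization import LatentGenePool  # assumed top-level export

    # hypothetical constructor arguments - not shown in this diff
    pool = LatentGenePool(num_latents = 16, dim_latent = 32)

    fitness = torch.randn(16)  # one fitness value per latent, per the assert in firefly_step

    with torch.no_grad():  # the new method is not wrapped in no_grad, and the latents may be trainable
        pool.firefly_step(fitness)                           # moves the latents in place
        moved = pool.firefly_step(fitness, inplace = False)  # or return the moved latents instead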
evolutionary_policy_optimization/epo.py

@@ -372,6 +372,53 @@ class LatentGenePool(Module):
 
         self.should_run_genetic_algorithm = should_run_genetic_algorithm
 
+    def firefly_step(
+        self,
+        fitness,
+        beta0 = 2.,          # exploitation factor, moving fireflies of low light intensity to high
+        gamma = 1.,          # controls light intensity decay over distance - setting this to zero will make firefly equivalent to vanilla PSO
+        alpha = 0.1,         # exploration factor
+        alpha_decay = 0.995, # exploration decay each step
+        inplace = True,
+    ):
+        islands = self.num_islands
+        fireflies = self.latents # the latents are the fireflies
+
+        assert fitness.shape[0] == fireflies.shape[0]
+
+        fitness = rearrange(fitness, '(i p) -> i p', i = islands)
+        fireflies = rearrange(fireflies, '(i p) ... -> i p ...', i = islands)
+
+        # fireflies with lower light intensity (high cost) moves towards the higher intensity (lower cost)
+
+        move_mask = einx.less('i x, i y -> i x y', fitness, fitness)
+
+        # get vectors of fireflies to one another
+        # calculate distance and the beta
+
+        delta_positions = einx.subtract('i y ... d, i x ... d -> i x y ... d', fireflies, fireflies)
+
+        distance = delta_positions.norm(dim = -1)
+
+        betas = beta0 * (-gamma * distance ** 2).exp()
+
+        # move the fireflies according to attraction
+
+        fireflies += einsum(move_mask, betas, delta_positions, 'i x y, i x y ..., i x y ... -> i x ...')
+
+        # merge back the islands
+
+        fireflies = rearrange(fireflies, 'i p ... -> (i p) ...')
+
+        # maybe fireflies on hypersphere
+
+        fireflies = self.maybe_l2norm(fireflies)
+
+        if not inplace:
+            return fireflies
+
+        self.latents.copy_(fireflies)
+
     @torch.no_grad()
     # non-gradient optimization, at least, not on the individual level (taken care of by rl component)
     def genetic_algorithm_step(
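The added method implements the attraction rule of the firefly algorithm: within each island, a firefly with lower fitness moves toward every brighter one, with a pull of beta0 * exp(-gamma * d^2) that decays with squared distance d. Note that alpha and alpha_decay are accepted by the new signature, but no exploration (random-walk) term appears in the added lines. Below is a self-contained sketch of the single-island attraction step in plain torch, mirroring the einx/einsum expressions above; the function name is hypothetical:

    import torch

    def firefly_attraction(positions, fitness, beta0 = 2., gamma = 1.):
        # delta[x, y] points from firefly x toward firefly y
        delta = positions.unsqueeze(0) - positions.unsqueeze(1)  # (n, n, d)
        distance = delta.norm(dim = -1)                          # (n, n)

        # attraction strength decays with squared distance
        betas = beta0 * (-gamma * distance ** 2).exp()

        # firefly x only moves toward y when y is fitter (brighter)
        move_mask = fitness.unsqueeze(1) < fitness.unsqueeze(0)  # [x, y] = fitness[x] < fitness[y]

        # accumulate the pull from every brighter firefly, as in the einsum above
        step = (move_mask.unsqueeze(-1) * betas.unsqueeze(-1) * delta).sum(dim = 1)
        return positions + step

    positions = torch.randn(8, 4)  # 8 fireflies in a 4-dimensional latent space
    fitness = torch.randn(8)       # one light intensity per firefly
    positions = firefly_attraction(positions, fitness)

With gamma = 0 the betas collapse to the constant beta0, which is why the parameter comment notes the update then becomes equivalent to a vanilla PSO-style move.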
{evolutionary_policy_optimization-0.0.24.dist-info → evolutionary_policy_optimization-0.0.26.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: evolutionary-policy-optimization
-Version: 0.0.24
+Version: 0.0.26
 Summary: EPO - Pytorch
 Project-URL: Homepage, https://pypi.org/project/evolutionary-policy-optimization/
 Project-URL: Repository, https://github.com/lucidrains/evolutionary-policy-optimization
evolutionary_policy_optimization-0.0.26.dist-info/RECORD

@@ -0,0 +1,7 @@
+evolutionary_policy_optimization/__init__.py,sha256=Qavcia0n13jjaWIS_LPW7QrxSLT_BBeKujCjF9kQjbA,133
+evolutionary_policy_optimization/epo.py,sha256=zYKRKUkvFdxgHkc2yduN76Hph3asWX33mnpDF3isDfo,22019
+evolutionary_policy_optimization/experimental.py,sha256=ktBKxRF27Qsj7WIgBpYlWXqMVxO9zOx2oD1JuDYRAwM,548
+evolutionary_policy_optimization-0.0.26.dist-info/METADATA,sha256=l24aFXZu4kp1oxZeIdFTUw1mwkyzln9C64S3HNqebF4,4958
+evolutionary_policy_optimization-0.0.26.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+evolutionary_policy_optimization-0.0.26.dist-info/licenses/LICENSE,sha256=1yCiA9b5nhslTavxPjsQAO-wpOnwJR9-l8LTVi7GJuk,1066
+evolutionary_policy_optimization-0.0.26.dist-info/RECORD,,
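Each RECORD row has the form path,sha256=<digest>,<size>, where the digest is the unpadded urlsafe-base64 SHA-256 of the file and the size is in bytes; the epo.py row growing from 20353 to 22019 bytes reflects the added method. A minimal sketch for recomputing such a row from a local file, following the wheel RECORD format:

    import base64, hashlib
    from pathlib import Path

    def record_row(path):
        # path,sha256=<urlsafe b64 digest with '=' padding stripped>,<size in bytes>
        data = Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b'=').decode()
        return f'{path},sha256={digest},{len(data)}'

    print(record_row('evolutionary_policy_optimization/epo.py'))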
evolutionary_policy_optimization-0.0.24.dist-info/RECORD

@@ -1,7 +0,0 @@
-evolutionary_policy_optimization/__init__.py,sha256=Qavcia0n13jjaWIS_LPW7QrxSLT_BBeKujCjF9kQjbA,133
-evolutionary_policy_optimization/epo.py,sha256=-kQgrnnOLiCOZ-6EroO057tDx0sS7TQro92cjJhSbZU,20353
-evolutionary_policy_optimization/experimental.py,sha256=ktBKxRF27Qsj7WIgBpYlWXqMVxO9zOx2oD1JuDYRAwM,548
-evolutionary_policy_optimization-0.0.24.dist-info/METADATA,sha256=d3imh1p1-nPpNGhD8cReLdL07_-oHZs3YqJaOEJi1TM,4958
-evolutionary_policy_optimization-0.0.24.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-evolutionary_policy_optimization-0.0.24.dist-info/licenses/LICENSE,sha256=1yCiA9b5nhslTavxPjsQAO-wpOnwJR9-l8LTVi7GJuk,1066
-evolutionary_policy_optimization-0.0.24.dist-info/RECORD,,
{evolutionary_policy_optimization-0.0.24.dist-info → evolutionary_policy_optimization-0.0.26.dist-info}/WHEEL

File without changes

{evolutionary_policy_optimization-0.0.24.dist-info → evolutionary_policy_optimization-0.0.26.dist-info}/licenses/LICENSE

File without changes