evolutionary-policy-optimization 0.1.14__py3-none-any.whl → 0.1.15__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- evolutionary_policy_optimization/epo.py +55 -8
- {evolutionary_policy_optimization-0.1.14.dist-info → evolutionary_policy_optimization-0.1.15.dist-info}/METADATA +10 -1
- {evolutionary_policy_optimization-0.1.14.dist-info → evolutionary_policy_optimization-0.1.15.dist-info}/RECORD +5 -5
- {evolutionary_policy_optimization-0.1.14.dist-info → evolutionary_policy_optimization-0.1.15.dist-info}/WHEEL +0 -0
- {evolutionary_policy_optimization-0.1.14.dist-info → evolutionary_policy_optimization-0.1.15.dist-info}/licenses/LICENSE +0 -0
```diff
--- a/evolutionary_policy_optimization/epo.py
+++ b/evolutionary_policy_optimization/epo.py
@@ -287,6 +287,38 @@ class PowerLawDist(Module):
 
         return self.values[sampled]
 
+# layer integrated memory
+
+class DynamicLIMe(Module):
+    def __init__(
+        self,
+        dim,
+        num_layers
+    ):
+        super().__init__()
+        self.num_layers = num_layers
+
+        self.to_weights = nn.Sequential(
+            nn.RMSNorm(dim),
+            nn.Linear(dim, num_layers),
+            nn.ReLU()
+        )
+
+    def forward(
+        self,
+        x,
+        hiddens
+    ):
+
+        if not is_tensor(hiddens):
+            hiddens = stack(hiddens)
+
+        assert hiddens.shape[0] == self.num_layers, f'expected hiddens to have {self.num_layers} layers but received {tuple(hiddens.shape)} instead (first dimension must be layers)'
+
+        weights = self.to_weights(x)
+
+        return einsum(hiddens, weights, 'l b d, b l -> b d')
+
 # simple MLP networks, but with latent variables
 # the latent variables are the "genes" with the rest of the network as the scaffold for "gene expression" - as suggested in the paper
 
```
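For orientation (commentary, not part of the diff): the new `DynamicLIMe` module implements layer-integrated memory in the spirit of the LIMe paper cited in the README change below. Given the current input and the stack of every earlier layer's hiddens, it produces one non-negative weight per layer and returns the weighted mix. A minimal shape-check sketch, assuming `einsum` comes from einops and `nn.RMSNorm` from torch ≥ 2.4, as the diff's calls suggest:

```python
import torch
from torch import nn
from einops import einsum

batch, dim, num_layers = 4, 16, 3

# the same weighting network DynamicLIMe builds internally
to_weights = nn.Sequential(
    nn.RMSNorm(dim),
    nn.Linear(dim, num_layers),
    nn.ReLU()
)

x = torch.randn(batch, dim)                    # current layer input
hiddens = torch.randn(num_layers, batch, dim)  # stacked earlier hiddens, layers first

weights = to_weights(x)                        # (batch, num_layers), non-negative
mixed = einsum(hiddens, weights, 'l b d, b l -> b d')

assert mixed.shape == (batch, dim)
```

The next two hunks wire this module into the `MLP` class.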
```diff
@@ -316,15 +348,22 @@ class MLP(Module):
 
         layers = []
 
-        for
+        for ind in range(depth):
+            is_first = ind == 0
+
+            lime = DynamicLIMe(dim, num_layers = ind + 1) if not is_first else None
+
             layer = nn.Sequential(
-                nn.
+                nn.RMSNorm(dim),
                 nn.Linear(dim, dim_hidden),
                 nn.SiLU(),
                 nn.Linear(dim_hidden, dim),
             )
 
-            layers.append(
+            layers.append(ModuleList([
+                lime,
+                layer
+            ]))
 
         # modules across layers
 
```
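A side note on the construction above (not from the diff): wrapping each `(lime, layer)` pair in an `nn.ModuleList` registers both as submodules, and PyTorch accepts `None` entries, which is how the first layer, having no earlier hiddens to draw on, carries no LIMe module:

```python
from torch import nn

# None is a legal ModuleList entry, so the first (lime, layer) pair
# can hold a placeholder where later pairs hold a DynamicLIMe
pair = nn.ModuleList([None, nn.Linear(16, 16)])

lime, layer = pair   # unpacks exactly like the loop in the forward pass below
assert lime is None
```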
```diff
@@ -354,10 +393,18 @@ class MLP(Module):
 
         # layers
 
-
-
+        prev_layer_inputs = [x]
+
+        for lime, layer in self.layers:
+
+            layer_inp = x
+
+            if exists(lime):
+                layer_inp = lime(x, prev_layer_inputs)
+
+            x = layer(layer_inp) + x
 
-
+            prev_layer_inputs.append(x)
 
         return x
 
```
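Putting the two hunks together (a hedged sketch, not the library's code): each layer now receives either the raw residual stream or a LIMe-mixed view over every earlier representation, and each output is appended to a growing list for later layers to draw on. `TinyLIMeMLP` below is a hypothetical, stripped-down analogue of the diff's `MLP` (no latent conditioning), under the same torch/einops assumptions as above:

```python
import torch
from torch import nn, stack
from einops import einsum

class TinyLIMeMLP(nn.Module):
    # stripped-down echo of the diff's forward pass, without latents
    def __init__(self, dim, depth):
        super().__init__()
        self.layers = nn.ModuleList([
            nn.ModuleList([
                # layer ind sees ind + 1 prior hiddens (input + earlier outputs)
                None if ind == 0 else nn.Sequential(
                    nn.RMSNorm(dim),
                    nn.Linear(dim, ind + 1),
                    nn.ReLU()
                ),
                nn.Sequential(
                    nn.RMSNorm(dim),
                    nn.Linear(dim, dim * 4),
                    nn.SiLU(),
                    nn.Linear(dim * 4, dim)
                )
            ]) for ind in range(depth)
        ])

    def forward(self, x):
        prev_layer_inputs = [x]

        for lime, layer in self.layers:
            layer_inp = x

            if lime is not None:
                hiddens = stack(prev_layer_inputs)   # (layers, batch, dim)
                weights = lime(x)                    # (batch, layers)
                layer_inp = einsum(hiddens, weights, 'l b d, b l -> b d')

            x = layer(layer_inp) + x                 # residual connection
            prev_layer_inputs.append(x)

        return x

mlp = TinyLIMeMLP(dim = 16, depth = 3)
assert mlp(torch.randn(2, 16)).shape == (2, 16)
```

The remaining epo.py hunks swap the final norms of `Actor` and `Critic` to `nn.RMSNorm`, matching the norm used inside the new layers.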
```diff
@@ -385,7 +432,7 @@ class Actor(Module):
         self.mlp = MLP(dim = dim, depth = mlp_depth, dim_latent = dim_latent)
 
         self.to_out = nn.Sequential(
-            nn.
+            nn.RMSNorm(dim),
             nn.Linear(dim, num_actions, bias = False),
         )
 
@@ -426,7 +473,7 @@ class Critic(Module):
 
         self.mlp = MLP(dim = dim, depth = mlp_depth, dim_latent = dim_latent)
 
-        self.final_norm = nn.
+        self.final_norm = nn.RMSNorm(dim)
 
         self.to_pred = HLGaussLayer(
             dim = dim,
```
````diff
--- a/evolutionary_policy_optimization-0.1.14.dist-info/METADATA
+++ b/evolutionary_policy_optimization-0.1.15.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: evolutionary-policy-optimization
-Version: 0.1.14
+Version: 0.1.15
 Summary: EPO - Pytorch
 Project-URL: Homepage, https://pypi.org/project/evolutionary-policy-optimization/
 Project-URL: Repository, https://github.com/lucidrains/evolutionary-policy-optimization
@@ -226,4 +226,13 @@ agent.load('./agent.pt')
 }
 ```
 
+```bibtex
+@inproceedings{Gerasimov2025YouDN,
+    title = {You Do Not Fully Utilize Transformer's Representation Capacity},
+    author = {Gleb Gerasimov and Yaroslav Aksenov and Nikita Balagansky and Viacheslav Sinii and Daniil Gavrilov},
+    year = {2025},
+    url = {https://api.semanticscholar.org/CorpusID:276317819}
+}
+```
+
 *Evolution is cleverer than you are.* - Leslie Orgel
````
```diff
--- a/evolutionary_policy_optimization-0.1.14.dist-info/RECORD
+++ b/evolutionary_policy_optimization-0.1.15.dist-info/RECORD
@@ -1,10 +1,10 @@
 evolutionary_policy_optimization/__init__.py,sha256=NyiYDYU7DlpmOTM7xiBQET3r1WwX0ebrgMCBLSQrW3c,288
 evolutionary_policy_optimization/distributed.py,sha256=7KgZdeS_wxBHo_du9XZFB1Cu318J-Bp66Xdr6Log_20,2423
 evolutionary_policy_optimization/env_wrappers.py,sha256=bDL06o9_b1iW6k3fw2xifnOnYlzs643tdW6Yv2gsIdw,803
-evolutionary_policy_optimization/epo.py,sha256=
+evolutionary_policy_optimization/epo.py,sha256=iUxd7gbT1GPGwso4utTaxgtjcxvvGNA8AGGUpSOImTM,47108
 evolutionary_policy_optimization/experimental.py,sha256=-IgqjJ_Wk_CMB1y9YYWpoYqTG9GZHAS6kbRdTluVevg,1563
 evolutionary_policy_optimization/mock_env.py,sha256=TLyyRm6tOD0Kdn9QqJJQriaSnsR-YmNQHo4OohmZFG4,1410
-evolutionary_policy_optimization-0.1.
-evolutionary_policy_optimization-0.1.
-evolutionary_policy_optimization-0.1.
-evolutionary_policy_optimization-0.1.
+evolutionary_policy_optimization-0.1.15.dist-info/METADATA,sha256=e8ofJe5rpGIyEiMd3mJBU-2VjOfFJ8TpGGv7adSKjRM,7962
+evolutionary_policy_optimization-0.1.15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+evolutionary_policy_optimization-0.1.15.dist-info/licenses/LICENSE,sha256=1yCiA9b5nhslTavxPjsQAO-wpOnwJR9-l8LTVi7GJuk,1066
+evolutionary_policy_optimization-0.1.15.dist-info/RECORD,,
```
WHEEL and licenses/LICENSE: file without changes.