heavyball 0.23.1__py3-none-any.whl → 0.23.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- heavyball/delayed_psgd.py +2 -2
- heavyball/foreach_adamw.py +2 -2
- heavyball/foreach_adopt.py +2 -2
- heavyball/foreach_laprop.py +2 -2
- heavyball/foreach_sfadamw.py +2 -3
- heavyball/palm_foreach_sfadamw.py +2 -2
- heavyball/precond_schedule_sfpsoap.py +2 -2
- heavyball/schedule_free_palm_foreach_soap.py +3 -3
- heavyball/utils.py +42 -19
- {heavyball-0.23.1.dist-info → heavyball-0.23.3.dist-info}/METADATA +1 -1
- heavyball-0.23.3.dist-info/RECORD +24 -0
- heavyball-0.23.1.dist-info/RECORD +0 -24
- {heavyball-0.23.1.dist-info → heavyball-0.23.3.dist-info}/LICENSE +0 -0
- {heavyball-0.23.1.dist-info → heavyball-0.23.3.dist-info}/WHEEL +0 -0
- {heavyball-0.23.1.dist-info → heavyball-0.23.3.dist-info}/top_level.txt +0 -0
heavyball/delayed_psgd.py
CHANGED
@@ -8,10 +8,10 @@ import torch
 from heavyball.utils import stochastic_lerp_, beta_debias, stochastic_add_
 
 from .utils import update_param_, warmup, psgd_precond_grad, init_Q_exprs, trust_region_clip_, PSGDBase, \
-    triu_to_line, line_to_triu, promote, _compilable_update_
+    triu_to_line, line_to_triu, promote, _compilable_update_, decorator_knowngood
 
 
-@
+@decorator_knowngood
 def _compilable_psgd_precond_grad_(q, exprs, ea, p, lr, weight_decay, clip_fn, caution, grad):
     new = psgd_precond_grad(False, exprs, ea, *q)
     _compilable_update_([p], clip_fn([new]), weight_decay, stochastic_add_, lr, caution, [grad])
heavyball/foreach_adamw.py
CHANGED
@@ -2,10 +2,10 @@ import torch
 import torch.optim
 from heavyball.utils import copy_stochastic_list_
 
-from .utils import warmup, exp_avg_sq_, beta_debias, update_param_, StatefulOptimizer, promote
+from .utils import warmup, exp_avg_sq_, beta_debias, update_param_, StatefulOptimizer, promote, decorator_knowngood
 
 
-@
+@decorator_knowngood
 def _compilable_step_(y, grad, exp_avg_sq, exp_avg, beta1, beta2, step, lr, eps, decay, caution):
     g32, exp_avg32, exp_avg_sq32 = [list(map(promote, x)) for x in [grad, exp_avg, exp_avg_sq]]
 
heavyball/foreach_adopt.py
CHANGED
@@ -2,10 +2,10 @@ import torch
 import torch.optim
 from heavyball.utils import copy_stochastic_list_
 
-from .utils import warmup, beta_debias, update_param_, StatefulOptimizer, promote
+from .utils import warmup, beta_debias, update_param_, StatefulOptimizer, promote, decorator_knowngood
 
 
-@
+@decorator_knowngood
 def _compilable_step_(y, grad, exp_avg_sq, exp_avg, beta1, beta2, step, lr, eps, decay, caution):
     g32, exp_avg32, exp_avg_sq32 = [list(map(promote, x)) for x in [grad, exp_avg, exp_avg_sq]]
     update_param_(y, exp_avg, lr, decay, caution=caution, grad=g32)
heavyball/foreach_laprop.py
CHANGED
@@ -1,10 +1,10 @@
 import torch
 import torch.optim
 
-from .utils import warmup, exp_avg_sq_, beta_debias, update_param_, StatefulOptimizer, promote, copy_stochastic_list_
+from .utils import warmup, exp_avg_sq_, beta_debias, update_param_, StatefulOptimizer, promote, copy_stochastic_list_, decorator_knowngood
 
 
-@
+@decorator_knowngood
 def _compilable_step_(y, grad, exp_avg_sq, exp_avg, beta1, beta2, step, lr, eps, decay, caution):
     g32, exp_avg32, exp_avg_sq32 = [list(map(promote, x)) for x in [grad, exp_avg, exp_avg_sq]]
 
heavyball/foreach_sfadamw.py
CHANGED
@@ -1,11 +1,10 @@
 import torch
 import torch.optim
-from heavyball.utils import get_ckp1, copy_stochastic_list_
 
-from .utils import warmup, ScheduleFree, exp_avg_sq_, beta_debias, promote, _compilable_schedule_free_
+from .utils import get_ckp1, copy_stochastic_list_, warmup, ScheduleFree, exp_avg_sq_, beta_debias, promote, _compilable_schedule_free_, decorator_knowngood
 
 
-@
+@decorator_knowngood
 def _compilable_step_(y, grad, exp_avg_sq, z, beta1, beta2, step, ckp1, eps, decay, lr):
     old_debiased2 = beta_debias(beta2, step)
 
heavyball/palm_foreach_sfadamw.py
CHANGED
@@ -2,10 +2,10 @@ import torch
 import torch.optim
 
 from .utils import warmup, ScheduleFree, exp_avg_sq_, beta_debias, get_ckp1, promote, \
-    _compilable_schedule_free_, copy_stochastic_list_
+    _compilable_schedule_free_, copy_stochastic_list_, decorator_knowngood
 
 
-@
+@decorator_knowngood
 def _compilable_step_(y, grad, exp_avg_sq, z, beta1, beta2, step, ckp1, eps, decay, lr):
     old_debiased2 = beta_debias(beta2, step)
 
heavyball/precond_schedule_sfpsoap.py
CHANGED
@@ -4,10 +4,10 @@ import torch
 
 from .utils import init_preconditioner, update_preconditioner, project, set_, adaptive_gradient_clipping_, exp_avg_sq_, \
     beta_debias, schedule_free_, warmup, ScheduleFree, precond_schedule, copy_stochastic_list_, \
-    promote
+    promote, decorator_knowngood
 
 
-@
+@decorator_knowngood
 def _compilable_exp_avg_sq_(exp_avg_sq, grad_projected, old_debiased2, eps):
     eas32, gp32 = [list(map(promote, x)) for x in (exp_avg_sq, grad_projected)]
     denom = exp_avg_sq_(eas32, gp32, old_debiased2, eps)
heavyball/schedule_free_palm_foreach_soap.py
CHANGED
@@ -1,13 +1,13 @@
 import random
 
 import torch
-from heavyball.utils import mars_correction
 
 from .utils import init_preconditioner, update_preconditioner, project, set_, adaptive_gradient_clipping_, exp_avg_sq_, \
-    beta_debias, schedule_free_, warmup, ScheduleFree, copy_stochastic_list_, promote
+    beta_debias, schedule_free_, warmup, ScheduleFree, copy_stochastic_list_, promote, decorator_knowngood, \
+    mars_correction
 
 
-@
+@decorator_knowngood
 def _compilable_exp_avg_sq_(exp_avg_sq, grad_projected, old_debiased2, eps):
     eas32, gp32 = [list(map(promote, x)) for x in (exp_avg_sq, grad_projected)]
     denom = exp_avg_sq_(eas32, gp32, old_debiased2, eps)
heavyball/utils.py
CHANGED
@@ -10,8 +10,11 @@ import torch
 from torch import Tensor
 from torch.backends import cudnn, opt_einsum
 from torch.utils._pytree import tree_map
+from torch._dynamo.exc import TorchDynamoException
 
-compile_mode =
+compile_mode = "max-autotune-no-cudagraphs"
+dynamic = False
+compile_mode_recommended_to_none = None
 zeroth_power_mode = 'qr'  # 'qr' is baseline, 'newtonschulz' converges better and faster, 'eigh' is perfect but slow
 
 
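The new module-level knobs make compilation configurable at runtime. A hedged sketch of how a caller might read and override them (the attribute names come from the hunk above; driving them this way is an assumption, not documented API):

import heavyball.utils

# Defaults introduced in this release, per the diff: autotune known-good
# kernels without CUDA graphs, trace with static shapes, and leave the
# plain `decorator` call sites uncompiled.
print(heavyball.utils.compile_mode)                      # "max-autotune-no-cudagraphs"
print(heavyball.utils.dynamic)                           # False
print(heavyball.utils.compile_mode_recommended_to_none)  # None

# Setting compile_mode to None makes decorator_knowngood skip torch.compile
# entirely (see the decorator body below), which can help while debugging.
heavyball.utils.compile_mode = None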
@@ -24,7 +27,22 @@ def decorator(func):
             return func(*args, **kwargs)
         nonlocal compiled
         if compiled is None:
-            compiled = torch.compile(func, fullgraph=True, dynamic=
+            compiled = torch.compile(func, fullgraph=True, dynamic=dynamic, mode=compile_mode_recommended_to_none)
+        return compiled(*args, **kwargs)
+
+    return _fn
+
+
+def decorator_knowngood(func):
+    compiled = None
+
+    @functools.wraps(func)
+    def _fn(*args, **kwargs):
+        if compile_mode is None:
+            return func(*args, **kwargs)
+        nonlocal compiled
+        if compiled is None:
+            compiled = torch.compile(func, fullgraph=True, dynamic=dynamic, mode=compile_mode)
 return compiled(*args, **kwargs)
 
     return _fn
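Both decorators follow the same lazy-compile pattern: compile on first call, cache the compiled artifact in a closure, and fall back to eager execution when the relevant mode flag is None. A self-contained sketch of the pattern, mirroring the hunk above (the decorated axpy_ function is hypothetical, standing in for heavyball's _compilable_* kernels):

import functools
import torch

compile_mode = "max-autotune-no-cudagraphs"  # set to None to run eagerly
dynamic = False

def decorator_knowngood(func):
    compiled = None

    @functools.wraps(func)
    def _fn(*args, **kwargs):
        if compile_mode is None:  # escape hatch: no torch.compile at all
            return func(*args, **kwargs)
        nonlocal compiled
        if compiled is None:  # compile lazily, once, on first use
            compiled = torch.compile(func, fullgraph=True, dynamic=dynamic, mode=compile_mode)
        return compiled(*args, **kwargs)

    return _fn

@decorator_knowngood
def axpy_(x: torch.Tensor, y: torch.Tensor, alpha: float):
    x.add_(y, alpha=alpha)  # in-place fused update, compiled on first call

The closure caches one compiled artifact per decorated function, so only the first call pays the compilation cost; later calls dispatch straight to the compiled graph.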
@@ -39,7 +57,7 @@ def warmup(lr: float, step: int, warmup_steps: int):
     return lr * step / warmup_steps
 
 
-@
+@decorator_knowngood
 def _compilable_schedule_free_(p: List[Tensor], z: List[Tensor], ckp1: Tensor, grad: List[Tensor], lr: Tensor, beta1: Tensor):
     p32, z32, g32 = [promote(x) for x in (p, z, grad)]
     for p_, z_, g_ in zip(p32, z32, g32):
@@ -139,7 +157,7 @@ def beta_debias(beta, step):
     return 1 - (1 - beta) / (1 - beta ** step)
 
 
-@
+@decorator_knowngood
 def _compilable_exp_avg_sq_(state: List[Tensor], grad: List[Tensor], beta2: Tensor, eps: Tensor, out: List[Optional[Tensor]]):
     torch._foreach_mul_(state, beta2)
     [s.addcmul_(g, g, value=1 - beta2) for s, g in zip(state, grad)]
@@ -175,7 +193,7 @@ def adaptive_gradient_clipping_(parameters: List[Tensor], gradients: List[Tensor
 def is_compiling():
     try:
         return torch.compiler.is_compiling()
-    except
+    except TorchDynamoException:
         return True
 
 
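For reference, the repaired helper now only treats a Dynamo-specific failure as "currently compiling". A minimal standalone sketch, assuming the installed torch exposes torch.compiler.is_compiling and torch._dynamo.exc.TorchDynamoException:

import torch
from torch._dynamo.exc import TorchDynamoException

def is_compiling() -> bool:
    try:
        return torch.compiler.is_compiling()
    except TorchDynamoException:
        # Only a Dynamo tracing error is read as "inside a compile"; the old
        # bare `except` would also have swallowed unrelated exceptions.
        return True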
@@ -339,7 +357,7 @@ def get_orthogonal_matrix(mat):
     return final
 
 
-@
+@decorator_knowngood
 def _compilable_stochastic_lerp_(x: List[Tensor], y: List[Tensor], a: Union[float, int, Tensor]):
     for x_, y_ in zip(x, y):
         x32 = promote(x_)
@@ -368,7 +386,7 @@ def scalar_guard(x, ref):
     return x
 
 
-@
+@decorator_knowngood
 def _compilable_stochastic_add_(x: List[Tensor], y: List[Tensor], alpha: Union[float, int, Tensor]):
     for x_, y_ in zip(x, y):
         x32 = promote(x_)
@@ -595,7 +613,9 @@ class StatefulOptimizer(torch.optim.Optimizer):
         else:
             with torch.enable_grad():
                 loss = closure()
-
+
+        # we assume that parameters are constant and that there are no excessive recompiles
+        with torch.no_grad(), torch._dynamo.utils.disable_cache_limit():
             for top_group in self.param_groups:
                 for group in self.get_groups(top_group):
                     self._step(group)
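torch._dynamo.utils.disable_cache_limit() (used above) is a context manager that temporarily lifts Dynamo's recompile-cache limit, so a compiled step invoked once per parameter group can keep as many graph variants as it needs instead of silently falling back to eager. A hedged sketch of the same pattern outside the optimizer; step_ and the shapes are hypothetical:

import torch

@torch.compile(fullgraph=True)
def step_(p: torch.Tensor, g: torch.Tensor, lr: float):
    p.add_(g, alpha=-lr)  # stand-in for an optimizer update kernel

params = [torch.zeros(n) for n in (8, 16, 32)]
grads = [torch.ones(n) for n in (8, 16, 32)]

# Each new shape can trigger a recompile; past the default cache limit,
# Dynamo would stop compiling this function. Lifting the limit keeps every
# variant compiled, matching the "no excessive recompiles" comment above.
with torch.no_grad(), torch._dynamo.utils.disable_cache_limit():
    for p, g in zip(params, grads):
        step_(p, g, 0.1)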
@@ -643,7 +663,7 @@ def copy_stochastic_list_(target: List[Tensor], source: List[Tensor]):
         copy_stochastic_(t, s)
 
 
-@
+@decorator_knowngood
 def _compilable_exp_avg_(exp_avg: List[Tensor], exp_avg_sq: List[Tensor], grad: List[Tensor],
                          grad_projected: List[Tensor], beta1: Tensor, beta2: Tensor, step: Tensor):
     beta1 = beta_debias(beta1, step)
@@ -667,7 +687,7 @@ def exp_avg_(exp_avg: List[Tensor], exp_avg_sq: List[Tensor], grad: List[Tensor]
     return denom
 
 
-@
+@decorator_knowngood
 def _compilable_copy_stochastic_(target: Tensor, source: Tensor):
     """Taken as-is from https://github.com/pytorch/pytorch/issues/120376#issuecomment-1974828905"""
     # create a random 16 bit integer
@@ -691,7 +711,7 @@ def copy_stochastic_(target: Tensor, source: Tensor):
     set_(target, source)
 
 
-@
+@decorator_knowngood
 def _compilable_update_(p: List[Tensor], u: List[Tensor], decay: Tensor, add_fn: callable, lr: Tensor, caution: bool,
                         g: List[Optional[Tensor]]):
     u = [u_.view_as(p_) for u_, p_ in zip(u, p)]
@@ -852,7 +872,7 @@ def psgd_lb(A, max_abs):
     return x
 
 
-@
+@decorator_knowngood
 def psgd_update_precond(Q, exprs, G, precond_lr, tiny, oq, store_triu_as_line):
     """Update Kronecker product preconditioner Q with pair (V, G)."""
     exprA, exprGs, _ = exprs
@@ -885,7 +905,7 @@ def psgd_update_precond(Q, exprs, G, precond_lr, tiny, oq, store_triu_as_line):
     stochastic_add_([o], [term1], -1)
 
 
-@
+@decorator_knowngood
 def psgd_precond_grad(inplace: bool, exprs: str, grad: Tensor, *preconds: Tensor):
     """Precondition gradient G with preconditioner Q."""
     md = min_dtype(preconds)
@@ -1030,12 +1050,15 @@ class PSGDBase(StatefulOptimizer):
 
 
 # TODO: Figure out why this sometimes crashes
-
+#@decorator_knowngood
 def _compilable_precond_grad_cached_(ea: Tensor, expr: str, param: Tensor, lr: Tensor, weight_decay: Tensor,
                                      clip_fn: callable, caution: bool, grad: Optional[Tensor], *cached_q: Tensor):
-    md = min_dtype(cached_q + [ea])
-
-
+    md = min_dtype(list(cached_q) + [ea])
+    args = [q.to(md) for q in cached_q]
+    args = args + [ea.to(md)]
+    new = torch.einsum(expr, *args)
+    new = new.to(torch.float32)
+    _compilable_update_([param], clip_fn([new]), weight_decay, stochastic_add_, lr, caution, [grad])
 
 
 def precond_grad_cached_(cached_q: List[Tensor], ea: Tensor, expr: str, param: Tensor, lr: float, weight_decay: float,
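The fix this hunk makes is visible in the signature: cached_q is a *cached_q parameter and therefore a tuple, and in plain Python tuple + list raises TypeError, so the old min_dtype(cached_q + [ea]) would fail whenever the function ran eagerly; list(cached_q) makes the concatenation legal. The rewrite also upcasts the einsum result to float32 before the parameter update. A two-line illustration of the type error:

def f(*cached_q):
    return cached_q

assert isinstance(f(1, 2), tuple)
# f(1, 2) + [3]        -> TypeError: can only concatenate tuple (not "list") to tuple
# list(f(1, 2)) + [3]  -> [1, 2, 3]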
@@ -1044,7 +1067,7 @@ def precond_grad_cached_(cached_q: List[Tensor], ea: Tensor, expr: str, param: T
     _compilable_precond_grad_cached_(ea, expr, param, lr, weight_decay, clip_fn, caution, grad, *cached_q)
 
 
-@
+@decorator_knowngood
 def _compilable_mars_correction_(g: Tensor, old_g: Tensor, a: Tensor):
     g_copy = [g_.clone() for g_ in g]
     _compilable_stochastic_lerp_(g, old_g, a)
@@ -1058,7 +1081,7 @@ def mars_correction(g, old_g, beta1, gamma):
     _compilable_mars_correction_(g, old_g, a)
 
 
-@
+@decorator_knowngood
 def _compilable_cautioning_(g: Tensor, update: Tensor):
     mask = (g * update) > 0
     update.masked_fill_(~mask, 0)
heavyball-0.23.3.dist-info/RECORD
ADDED
@@ -0,0 +1,24 @@
+heavyball/__init__.py,sha256=icHYN-MGsmHkLUlHCMcZkOlwY7GT63_ayR_a5iPKmzM,2226
+heavyball/cached_delayed_psgd_kron.py,sha256=n3wIOhrop0Ls4MZ0kXpwGuImp1jzPs6VGdxIlPyoYdQ,6827
+heavyball/cached_psgd_kron.py,sha256=KCLsfvj9qh_2FNwRTdWM3zjnt2oGHfsf4Y341rPcceI,6778
+heavyball/delayed_psgd.py,sha256=xaAPNqE5Pg476fqXjST11Bi0zrZ8KjjU5h_NPUdwlZk,6295
+heavyball/foreach_adamw.py,sha256=IdcP5ggNB2SVDK3iNrNKGTGlEwWn18H77ClqCnJGB74,2786
+heavyball/foreach_adopt.py,sha256=NzHYoeiq1pFKn1RPHiVG2vJsHES30Blh5v2ypOWP2uQ,3508
+heavyball/foreach_laprop.py,sha256=myb0uwC-oZqYqeVSozas2JNMlbUkLCAMrVB9ZP4QOKQ,2794
+heavyball/foreach_sfadamw.py,sha256=B8xyL8Qxul4G1rsxMv8ZMlkYh1gaTpeCvCgkubaBAhE,3013
+heavyball/foreach_soap.py,sha256=7B_dP2Hm_xqwpBQiPYkv_c6eoRnU1dV2VZfvSoa4uJ8,4729
+heavyball/p_adam.py,sha256=8BlZ6YoaDXawMiRbCxo0Kd5_0-pAn0MQIhL0LHNaRBs,6315
+heavyball/palm_foreach_sfadamw.py,sha256=QzNXZOXEH6ufEPbnPg8ixn19WpVr4OhDreqnxIwcBVM,3336
+heavyball/palm_foreach_soap.py,sha256=IknGm_CzrqDIFEoCkejxjoZ4sfIy6RSoInqlMUOYLB4,6156
+heavyball/precond_schedule_foreach_soap.py,sha256=bJ2ifPFa8zEP9GO8eBpqZzsmP7p_iQkkCkllNeEMHPU,4892
+heavyball/precond_schedule_palm_foreach_soap.py,sha256=4dT9f134-Faq2KuCMCHzMtrkMO-es5p_DYS1of5yF-s,6428
+heavyball/precond_schedule_sfpsoap.py,sha256=ey-mUIjAy9ny5vJac0vRZHUXgef1bc7u7_-4hRkM4Rs,7491
+heavyball/psgd_kron.py,sha256=4eiGPXAFjvGIXLdiai1UJfAvTozAV1TXaE9UGkE4BLc,6051
+heavyball/pure_psgd.py,sha256=344NdVNHwUFX3fU2R1S_Xh9SXAML3E4ryHr7xfMh9Cc,5076
+heavyball/schedule_free_palm_foreach_soap.py,sha256=irvlIXF-oABpWWycZPMV-JG9XTiXSlgHtrM-ygfATic,7207
+heavyball/utils.py,sha256=kClbLP7CECCrqjf7VYAuWuLDDW17JdgFQpSMWVnIU6o,39559
+heavyball-0.23.3.dist-info/LICENSE,sha256=CGdGJim64YifGmUVPaeyRsxkvyExtClswhRNIp8FY_U,1322
+heavyball-0.23.3.dist-info/METADATA,sha256=q3Df5J-g84JSwTEgIOKKILM0NgHzkkEblqcrtbUVQwA,11926
+heavyball-0.23.3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+heavyball-0.23.3.dist-info/top_level.txt,sha256=SzCxSVg_qCUPA4kZObW3Zyo4v-d_mMOD-p7a-WXTl2E,10
+heavyball-0.23.3.dist-info/RECORD,,
heavyball-0.23.1.dist-info/RECORD
DELETED
@@ -1,24 +0,0 @@
-heavyball/__init__.py,sha256=icHYN-MGsmHkLUlHCMcZkOlwY7GT63_ayR_a5iPKmzM,2226
-heavyball/cached_delayed_psgd_kron.py,sha256=n3wIOhrop0Ls4MZ0kXpwGuImp1jzPs6VGdxIlPyoYdQ,6827
-heavyball/cached_psgd_kron.py,sha256=KCLsfvj9qh_2FNwRTdWM3zjnt2oGHfsf4Y341rPcceI,6778
-heavyball/delayed_psgd.py,sha256=z_Y1eYr2upVt_FsyCIv91yTFJY6yqvHsI8S2mOpqdv8,6334
-heavyball/foreach_adamw.py,sha256=uawSbGGUD2E1RtcwspP83yQNElERdGX-diqCI5e8FqE,2825
-heavyball/foreach_adopt.py,sha256=DFEaPswVzdHcbxC-mirsf_okM_HR6r34PDUTty5CrUE,3547
-heavyball/foreach_laprop.py,sha256=J4Vms0nAOMh3GQtAOPyrYOe5WtpzokVv25b9oDnwc2A,2833
-heavyball/foreach_sfadamw.py,sha256=HWbLekY5BloHDIgrN2J0a7IolZCt8Ah2xkLAU_-5oSc,3079
-heavyball/foreach_soap.py,sha256=7B_dP2Hm_xqwpBQiPYkv_c6eoRnU1dV2VZfvSoa4uJ8,4729
-heavyball/p_adam.py,sha256=8BlZ6YoaDXawMiRbCxo0Kd5_0-pAn0MQIhL0LHNaRBs,6315
-heavyball/palm_foreach_sfadamw.py,sha256=E8raxrBIkSmTEGFzwnfWxKwDJjBQE2vdsmyqfc8aL_A,3375
-heavyball/palm_foreach_soap.py,sha256=IknGm_CzrqDIFEoCkejxjoZ4sfIy6RSoInqlMUOYLB4,6156
-heavyball/precond_schedule_foreach_soap.py,sha256=bJ2ifPFa8zEP9GO8eBpqZzsmP7p_iQkkCkllNeEMHPU,4892
-heavyball/precond_schedule_palm_foreach_soap.py,sha256=4dT9f134-Faq2KuCMCHzMtrkMO-es5p_DYS1of5yF-s,6428
-heavyball/precond_schedule_sfpsoap.py,sha256=FOR-axwlkSN7IHZWYYUVFfjSFCLxc_NdiTlb-n5gmgs,7530
-heavyball/psgd_kron.py,sha256=4eiGPXAFjvGIXLdiai1UJfAvTozAV1TXaE9UGkE4BLc,6051
-heavyball/pure_psgd.py,sha256=344NdVNHwUFX3fU2R1S_Xh9SXAML3E4ryHr7xfMh9Cc,5076
-heavyball/schedule_free_palm_foreach_soap.py,sha256=0WT_gvTKymqLQzYT6ewDgCmpDq-HgMAewipw1QvyQYA,7267
-heavyball/utils.py,sha256=8XE-z5T7FkbPlfo8Dh9dfoH8UsE-HgjDiJCD_XHkT54,39526
-heavyball-0.23.1.dist-info/LICENSE,sha256=CGdGJim64YifGmUVPaeyRsxkvyExtClswhRNIp8FY_U,1322
-heavyball-0.23.1.dist-info/METADATA,sha256=eE1t-LDRa2ajLlXzITHLzyOt3elr9t4gxaOk55m6pj8,11926
-heavyball-0.23.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-heavyball-0.23.1.dist-info/top_level.txt,sha256=SzCxSVg_qCUPA4kZObW3Zyo4v-d_mMOD-p7a-WXTl2E,10
-heavyball-0.23.1.dist-info/RECORD,,
{heavyball-0.23.1.dist-info → heavyball-0.23.3.dist-info}/LICENSE
File without changes
{heavyball-0.23.1.dist-info → heavyball-0.23.3.dist-info}/WHEEL
File without changes
{heavyball-0.23.1.dist-info → heavyball-0.23.3.dist-info}/top_level.txt
File without changes