heavyball 0.23.1-py3-none-any.whl → 0.23.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
heavyball/delayed_psgd.py CHANGED
@@ -8,10 +8,10 @@ import torch
 from heavyball.utils import stochastic_lerp_, beta_debias, stochastic_add_
 
 from .utils import update_param_, warmup, psgd_precond_grad, init_Q_exprs, trust_region_clip_, PSGDBase, \
-    triu_to_line, line_to_triu, promote, _compilable_update_
+    triu_to_line, line_to_triu, promote, _compilable_update_, decorator_knowngood
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_psgd_precond_grad_(q, exprs, ea, p, lr, weight_decay, clip_fn, caution, grad):
     new = psgd_precond_grad(False, exprs, ea, *q)
     _compilable_update_([p], clip_fn([new]), weight_decay, stochastic_add_, lr, caution, [grad])
heavyball/foreach_adamw.py CHANGED
@@ -2,10 +2,10 @@ import torch
 import torch.optim
 from heavyball.utils import copy_stochastic_list_
 
-from .utils import warmup, exp_avg_sq_, beta_debias, update_param_, StatefulOptimizer, promote
+from .utils import warmup, exp_avg_sq_, beta_debias, update_param_, StatefulOptimizer, promote, decorator_knowngood
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_step_(y, grad, exp_avg_sq, exp_avg, beta1, beta2, step, lr, eps, decay, caution):
     g32, exp_avg32, exp_avg_sq32 = [list(map(promote, x)) for x in [grad, exp_avg, exp_avg_sq]]
 
heavyball/foreach_adopt.py CHANGED
@@ -2,10 +2,10 @@ import torch
 import torch.optim
 from heavyball.utils import copy_stochastic_list_
 
-from .utils import warmup, beta_debias, update_param_, StatefulOptimizer, promote
+from .utils import warmup, beta_debias, update_param_, StatefulOptimizer, promote, decorator_knowngood
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_step_(y, grad, exp_avg_sq, exp_avg, beta1, beta2, step, lr, eps, decay, caution):
     g32, exp_avg32, exp_avg_sq32 = [list(map(promote, x)) for x in [grad, exp_avg, exp_avg_sq]]
     update_param_(y, exp_avg, lr, decay, caution=caution, grad=g32)
heavyball/foreach_laprop.py CHANGED
@@ -1,10 +1,10 @@
 import torch
 import torch.optim
 
-from .utils import warmup, exp_avg_sq_, beta_debias, update_param_, StatefulOptimizer, promote, copy_stochastic_list_
+from .utils import warmup, exp_avg_sq_, beta_debias, update_param_, StatefulOptimizer, promote, copy_stochastic_list_, decorator_knowngood
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_step_(y, grad, exp_avg_sq, exp_avg, beta1, beta2, step, lr, eps, decay, caution):
     g32, exp_avg32, exp_avg_sq32 = [list(map(promote, x)) for x in [grad, exp_avg, exp_avg_sq]]
 
heavyball/foreach_sfadamw.py CHANGED
@@ -1,11 +1,10 @@
 import torch
 import torch.optim
-from heavyball.utils import get_ckp1, copy_stochastic_list_
 
-from .utils import warmup, ScheduleFree, exp_avg_sq_, beta_debias, promote, _compilable_schedule_free_
+from .utils import get_ckp1, copy_stochastic_list_, warmup, ScheduleFree, exp_avg_sq_, beta_debias, promote, _compilable_schedule_free_, decorator_knowngood
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_step_(y, grad, exp_avg_sq, z, beta1, beta2, step, ckp1, eps, decay, lr):
     old_debiased2 = beta_debias(beta2, step)
 
heavyball/palm_foreach_sfadamw.py CHANGED
@@ -2,10 +2,10 @@ import torch
 import torch.optim
 
 from .utils import warmup, ScheduleFree, exp_avg_sq_, beta_debias, get_ckp1, promote, \
-    _compilable_schedule_free_, copy_stochastic_list_
+    _compilable_schedule_free_, copy_stochastic_list_, decorator_knowngood
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_step_(y, grad, exp_avg_sq, z, beta1, beta2, step, ckp1, eps, decay, lr):
     old_debiased2 = beta_debias(beta2, step)
 
heavyball/precond_schedule_sfpsoap.py CHANGED
@@ -4,10 +4,10 @@ import torch
 
 from .utils import init_preconditioner, update_preconditioner, project, set_, adaptive_gradient_clipping_, exp_avg_sq_, \
     beta_debias, schedule_free_, warmup, ScheduleFree, precond_schedule, copy_stochastic_list_, \
-    promote
+    promote, decorator_knowngood
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_exp_avg_sq_(exp_avg_sq, grad_projected, old_debiased2, eps):
     eas32, gp32 = [list(map(promote, x)) for x in (exp_avg_sq, grad_projected)]
     denom = exp_avg_sq_(eas32, gp32, old_debiased2, eps)
heavyball/schedule_free_palm_foreach_soap.py CHANGED
@@ -1,13 +1,13 @@
 import random
 
 import torch
-from heavyball.utils import mars_correction
 
 from .utils import init_preconditioner, update_preconditioner, project, set_, adaptive_gradient_clipping_, exp_avg_sq_, \
-    beta_debias, schedule_free_, warmup, ScheduleFree, copy_stochastic_list_, promote
+    beta_debias, schedule_free_, warmup, ScheduleFree, copy_stochastic_list_, promote, decorator_knowngood, \
+    mars_correction
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_exp_avg_sq_(exp_avg_sq, grad_projected, old_debiased2, eps):
     eas32, gp32 = [list(map(promote, x)) for x in (exp_avg_sq, grad_projected)]
     denom = exp_avg_sq_(eas32, gp32, old_debiased2, eps)
heavyball/utils.py CHANGED
@@ -8,10 +8,13 @@ from typing import List, Optional, Tuple, Callable, Union
 import numpy as np
 import torch
 from torch import Tensor
+from torch._dynamo.exc import TorchDynamoException
 from torch.backends import cudnn, opt_einsum
 from torch.utils._pytree import tree_map
 
-compile_mode = None
+compile_mode = "max-autotune-no-cudagraphs"
+dynamic = False
+compile_mode_recommended_to_none = None
 zeroth_power_mode = 'qr'  # 'qr' is baseline, 'newtonschulz' converges better and faster, 'eigh' is perfect but slow
 
 
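The three module-level knobs added above are read lazily by the compile decorators in the next hunk, on the first call of each wrapped helper. A minimal usage sketch, assuming these globals are intended to be overridden by downstream code before the first optimizer step:

    import heavyball.utils as hbu

    # Fall back to eager execution: both decorators skip torch.compile entirely
    # when compile_mode is None (see the decorator bodies below).
    hbu.compile_mode = None

    # Alternatively, keep compilation but allow dynamic shapes.
    hbu.compile_mode = "max-autotune-no-cudagraphs"
    hbu.dynamic = True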
@@ -20,11 +23,26 @@ def decorator(func):
 
     @functools.wraps(func)
     def _fn(*args, **kwargs):
-        if compile_mode is None:
+        if is_compiling() or compile_mode is None:
             return func(*args, **kwargs)
         nonlocal compiled
         if compiled is None:
-            compiled = torch.compile(func, fullgraph=True, dynamic=False, mode=compile_mode)
+            compiled = torch.compile(fullgraph=True, dynamic=dynamic, mode=compile_mode_recommended_to_none)(func)
+        return compiled(*args, **kwargs)
+
+    return _fn
+
+
+def decorator_knowngood(func: Callable):
+    compiled = None
+
+    @functools.wraps(func)
+    def _fn(*args, **kwargs):
+        if is_compiling() or compile_mode is None:
+            return func(*args, **kwargs)
+        nonlocal compiled
+        if compiled is None:
+            compiled = torch.compile(fullgraph=True, dynamic=dynamic, mode=compile_mode)(func)
         return compiled(*args, **kwargs)
 
     return _fn
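For illustration only (hypothetical user code, not part of the package): decorator_knowngood defers torch.compile until the wrapped helper is first called, so it can stand in for the inline @torch.compile decorators it replaces throughout this release:

    import torch
    from heavyball.utils import decorator_knowngood

    @decorator_knowngood
    def fused_axpy_(xs, ys, alpha):
        # Compiled with fullgraph=True and mode="max-autotune-no-cudagraphs" on the
        # first invocation; runs eagerly if heavyball.utils.compile_mode is None.
        torch._foreach_add_(xs, ys, alpha=alpha)

    xs = [torch.randn(8) for _ in range(3)]
    ys = [torch.randn(8) for _ in range(3)]
    fused_axpy_(xs, ys, 0.5)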
@@ -39,8 +57,9 @@ def warmup(lr: float, step: int, warmup_steps: int):
     return lr * step / warmup_steps
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
-def _compilable_schedule_free_(p: List[Tensor], z: List[Tensor], ckp1: Tensor, grad: List[Tensor], lr: Tensor, beta1: Tensor):
+@decorator_knowngood
+def _compilable_schedule_free_(p: List[Tensor], z: List[Tensor], ckp1: Tensor, grad: List[Tensor], lr: Tensor,
+                               beta1: Tensor):
     p32, z32, g32 = [promote(x) for x in (p, z, grad)]
     for p_, z_, g_ in zip(p32, z32, g32):
         p_.lerp_(z_, ckp1)
@@ -139,8 +158,9 @@ def beta_debias(beta, step):
     return 1 - (1 - beta) / (1 - beta ** step)
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
-def _compilable_exp_avg_sq_(state: List[Tensor], grad: List[Tensor], beta2: Tensor, eps: Tensor, out: List[Optional[Tensor]]):
+@decorator_knowngood
+def _compilable_exp_avg_sq_(state: List[Tensor], grad: List[Tensor], beta2: Tensor, eps: Tensor,
+                            out: List[Optional[Tensor]]):
     torch._foreach_mul_(state, beta2)
     [s.addcmul_(g, g, value=1 - beta2) for s, g in zip(state, grad)]
     denom = torch._foreach_sqrt(state)
@@ -175,7 +195,7 @@ def adaptive_gradient_clipping_(parameters: List[Tensor], gradients: List[Tensor
 def is_compiling():
     try:
         return torch.compiler.is_compiling()
-    except AttributeError:
+    except TorchDynamoException:
         return True
 
 
@@ -339,7 +359,7 @@ def get_orthogonal_matrix(mat):
     return final
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_stochastic_lerp_(x: List[Tensor], y: List[Tensor], a: Union[float, int, Tensor]):
     for x_, y_ in zip(x, y):
         x32 = promote(x_)
@@ -368,7 +388,7 @@ def scalar_guard(x, ref):
     return x
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_stochastic_add_(x: List[Tensor], y: List[Tensor], alpha: Union[float, int, Tensor]):
     for x_, y_ in zip(x, y):
         x32 = promote(x_)
@@ -595,7 +615,9 @@ class StatefulOptimizer(torch.optim.Optimizer):
         else:
             with torch.enable_grad():
                 loss = closure()
-        with torch.no_grad():
+
+        # we assume that parameters are constant and that there are no excessive recompiles
+        with torch.no_grad(), torch._dynamo.utils.disable_cache_limit():
             for top_group in self.param_groups:
                 for group in self.get_groups(top_group):
                     self._step(group)
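Usage sketch (assuming heavyball exposes a ForeachAdamW optimizer, as in prior releases): every _step now runs under both torch.no_grad() and torch._dynamo.utils.disable_cache_limit(), so a training loop that calls step() repeatedly with fixed parameter shapes should not hit dynamo's recompile cache limit even though many small compiled helpers execute per step:

    import torch
    import heavyball

    model = torch.nn.Linear(4, 4)
    opt = heavyball.ForeachAdamW(model.parameters(), lr=1e-3)

    for _ in range(10):
        loss = model(torch.randn(2, 4)).sum()
        loss.backward()
        opt.step()      # executes under no_grad() + disable_cache_limit() per the hunk above
        opt.zero_grad()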
@@ -643,7 +665,7 @@ def copy_stochastic_list_(target: List[Tensor], source: List[Tensor]):
         copy_stochastic_(t, s)
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_exp_avg_(exp_avg: List[Tensor], exp_avg_sq: List[Tensor], grad: List[Tensor],
                          grad_projected: List[Tensor], beta1: Tensor, beta2: Tensor, step: Tensor):
     beta1 = beta_debias(beta1, step)
@@ -667,7 +689,7 @@ def exp_avg_(exp_avg: List[Tensor], exp_avg_sq: List[Tensor], grad: List[Tensor]
     return denom
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_copy_stochastic_(target: Tensor, source: Tensor):
     """Taken as-is from https://github.com/pytorch/pytorch/issues/120376#issuecomment-1974828905"""
     # create a random 16 bit integer
@@ -691,7 +713,7 @@ def copy_stochastic_(target: Tensor, source: Tensor):
     set_(target, source)
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_update_(p: List[Tensor], u: List[Tensor], decay: Tensor, add_fn: callable, lr: Tensor, caution: bool,
                         g: List[Optional[Tensor]]):
     u = [u_.view_as(p_) for u_, p_ in zip(u, p)]
@@ -852,7 +874,7 @@ def psgd_lb(A, max_abs):
     return x
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def psgd_update_precond(Q, exprs, G, precond_lr, tiny, oq, store_triu_as_line):
     """Update Kronecker product preconditioner Q with pair (V, G)."""
     exprA, exprGs, _ = exprs
@@ -885,7 +907,7 @@ def psgd_update_precond(Q, exprs, G, precond_lr, tiny, oq, store_triu_as_line):
         stochastic_add_([o], [term1], -1)
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def psgd_precond_grad(inplace: bool, exprs: str, grad: Tensor, *preconds: Tensor):
     """Precondition gradient G with preconditioner Q."""
     md = min_dtype(preconds)
@@ -1030,12 +1052,15 @@ class PSGDBase(StatefulOptimizer):
 
 
 # TODO: Figure out why this sometimes crashes
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+# @decorator_knowngood
 def _compilable_precond_grad_cached_(ea: Tensor, expr: str, param: Tensor, lr: Tensor, weight_decay: Tensor,
                                      clip_fn: callable, caution: bool, grad: Optional[Tensor], *cached_q: Tensor):
-    md = min_dtype(cached_q + [ea])
-    new = torch.einsum(expr, *[c_.to(md) for c_ in cached_q], ea.to(md)).to(torch.float32)
-    update_param_([param], clip_fn([new]), lr, weight_decay, caution=caution, grad=grad)
+    md = min_dtype(list(cached_q) + [ea])
+    args = [q.to(md) for q in cached_q]
+    args = args + [ea.to(md)]
+    new = torch.einsum(expr, *args)
+    new = new.to(torch.float32)
+    _compilable_update_([param], clip_fn([new]), weight_decay, stochastic_add_, lr, caution, [grad])
 
 
 def precond_grad_cached_(cached_q: List[Tensor], ea: Tensor, expr: str, param: Tensor, lr: float, weight_decay: float,
@@ -1044,7 +1069,7 @@ def precond_grad_cached_(cached_q: List[Tensor], ea: Tensor, expr: str, param: T
     _compilable_precond_grad_cached_(ea, expr, param, lr, weight_decay, clip_fn, caution, grad, *cached_q)
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_mars_correction_(g: Tensor, old_g: Tensor, a: Tensor):
     g_copy = [g_.clone() for g_ in g]
     _compilable_stochastic_lerp_(g, old_g, a)
@@ -1058,7 +1083,7 @@ def mars_correction(g, old_g, beta1, gamma):
     _compilable_mars_correction_(g, old_g, a)
 
 
-@torch.compile(mode='max-autotune-no-cudagraphs', fullgraph=True, dynamic=False)
+@decorator_knowngood
 def _compilable_cautioning_(g: Tensor, update: Tensor):
     mask = (g * update) > 0
     update.masked_fill_(~mask, 0)
heavyball-0.23.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: heavyball
-Version: 0.23.1
+Version: 0.23.4
 Summary: Efficient optimizers
 Home-page: https://github.com/clashluke/heavyball
 Author: Lucas Nestler
heavyball-0.23.4.dist-info/RECORD ADDED
@@ -0,0 +1,24 @@
+heavyball/__init__.py,sha256=icHYN-MGsmHkLUlHCMcZkOlwY7GT63_ayR_a5iPKmzM,2226
+heavyball/cached_delayed_psgd_kron.py,sha256=n3wIOhrop0Ls4MZ0kXpwGuImp1jzPs6VGdxIlPyoYdQ,6827
+heavyball/cached_psgd_kron.py,sha256=KCLsfvj9qh_2FNwRTdWM3zjnt2oGHfsf4Y341rPcceI,6778
+heavyball/delayed_psgd.py,sha256=xaAPNqE5Pg476fqXjST11Bi0zrZ8KjjU5h_NPUdwlZk,6295
+heavyball/foreach_adamw.py,sha256=IdcP5ggNB2SVDK3iNrNKGTGlEwWn18H77ClqCnJGB74,2786
+heavyball/foreach_adopt.py,sha256=NzHYoeiq1pFKn1RPHiVG2vJsHES30Blh5v2ypOWP2uQ,3508
+heavyball/foreach_laprop.py,sha256=myb0uwC-oZqYqeVSozas2JNMlbUkLCAMrVB9ZP4QOKQ,2794
+heavyball/foreach_sfadamw.py,sha256=B8xyL8Qxul4G1rsxMv8ZMlkYh1gaTpeCvCgkubaBAhE,3013
+heavyball/foreach_soap.py,sha256=7B_dP2Hm_xqwpBQiPYkv_c6eoRnU1dV2VZfvSoa4uJ8,4729
+heavyball/p_adam.py,sha256=8BlZ6YoaDXawMiRbCxo0Kd5_0-pAn0MQIhL0LHNaRBs,6315
+heavyball/palm_foreach_sfadamw.py,sha256=QzNXZOXEH6ufEPbnPg8ixn19WpVr4OhDreqnxIwcBVM,3336
+heavyball/palm_foreach_soap.py,sha256=IknGm_CzrqDIFEoCkejxjoZ4sfIy6RSoInqlMUOYLB4,6156
+heavyball/precond_schedule_foreach_soap.py,sha256=bJ2ifPFa8zEP9GO8eBpqZzsmP7p_iQkkCkllNeEMHPU,4892
+heavyball/precond_schedule_palm_foreach_soap.py,sha256=4dT9f134-Faq2KuCMCHzMtrkMO-es5p_DYS1of5yF-s,6428
+heavyball/precond_schedule_sfpsoap.py,sha256=ey-mUIjAy9ny5vJac0vRZHUXgef1bc7u7_-4hRkM4Rs,7491
+heavyball/psgd_kron.py,sha256=4eiGPXAFjvGIXLdiai1UJfAvTozAV1TXaE9UGkE4BLc,6051
+heavyball/pure_psgd.py,sha256=344NdVNHwUFX3fU2R1S_Xh9SXAML3E4ryHr7xfMh9Cc,5076
+heavyball/schedule_free_palm_foreach_soap.py,sha256=irvlIXF-oABpWWycZPMV-JG9XTiXSlgHtrM-ygfATic,7207
+heavyball/utils.py,sha256=FFZLqq_bnQUDXOMBO_hBu32yNMHi18W13wxlOJ0Q_78,39665
+heavyball-0.23.4.dist-info/LICENSE,sha256=CGdGJim64YifGmUVPaeyRsxkvyExtClswhRNIp8FY_U,1322
+heavyball-0.23.4.dist-info/METADATA,sha256=ebfSVWG2CeKxSfE5Ru0VipLE23DQiQKOmODVdlFW4aY,11926
+heavyball-0.23.4.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+heavyball-0.23.4.dist-info/top_level.txt,sha256=SzCxSVg_qCUPA4kZObW3Zyo4v-d_mMOD-p7a-WXTl2E,10
+heavyball-0.23.4.dist-info/RECORD,,
heavyball-0.23.1.dist-info/RECORD REMOVED
@@ -1,24 +0,0 @@
-heavyball/__init__.py,sha256=icHYN-MGsmHkLUlHCMcZkOlwY7GT63_ayR_a5iPKmzM,2226
-heavyball/cached_delayed_psgd_kron.py,sha256=n3wIOhrop0Ls4MZ0kXpwGuImp1jzPs6VGdxIlPyoYdQ,6827
-heavyball/cached_psgd_kron.py,sha256=KCLsfvj9qh_2FNwRTdWM3zjnt2oGHfsf4Y341rPcceI,6778
-heavyball/delayed_psgd.py,sha256=z_Y1eYr2upVt_FsyCIv91yTFJY6yqvHsI8S2mOpqdv8,6334
-heavyball/foreach_adamw.py,sha256=uawSbGGUD2E1RtcwspP83yQNElERdGX-diqCI5e8FqE,2825
-heavyball/foreach_adopt.py,sha256=DFEaPswVzdHcbxC-mirsf_okM_HR6r34PDUTty5CrUE,3547
-heavyball/foreach_laprop.py,sha256=J4Vms0nAOMh3GQtAOPyrYOe5WtpzokVv25b9oDnwc2A,2833
-heavyball/foreach_sfadamw.py,sha256=HWbLekY5BloHDIgrN2J0a7IolZCt8Ah2xkLAU_-5oSc,3079
-heavyball/foreach_soap.py,sha256=7B_dP2Hm_xqwpBQiPYkv_c6eoRnU1dV2VZfvSoa4uJ8,4729
-heavyball/p_adam.py,sha256=8BlZ6YoaDXawMiRbCxo0Kd5_0-pAn0MQIhL0LHNaRBs,6315
-heavyball/palm_foreach_sfadamw.py,sha256=E8raxrBIkSmTEGFzwnfWxKwDJjBQE2vdsmyqfc8aL_A,3375
-heavyball/palm_foreach_soap.py,sha256=IknGm_CzrqDIFEoCkejxjoZ4sfIy6RSoInqlMUOYLB4,6156
-heavyball/precond_schedule_foreach_soap.py,sha256=bJ2ifPFa8zEP9GO8eBpqZzsmP7p_iQkkCkllNeEMHPU,4892
-heavyball/precond_schedule_palm_foreach_soap.py,sha256=4dT9f134-Faq2KuCMCHzMtrkMO-es5p_DYS1of5yF-s,6428
-heavyball/precond_schedule_sfpsoap.py,sha256=FOR-axwlkSN7IHZWYYUVFfjSFCLxc_NdiTlb-n5gmgs,7530
-heavyball/psgd_kron.py,sha256=4eiGPXAFjvGIXLdiai1UJfAvTozAV1TXaE9UGkE4BLc,6051
-heavyball/pure_psgd.py,sha256=344NdVNHwUFX3fU2R1S_Xh9SXAML3E4ryHr7xfMh9Cc,5076
-heavyball/schedule_free_palm_foreach_soap.py,sha256=0WT_gvTKymqLQzYT6ewDgCmpDq-HgMAewipw1QvyQYA,7267
-heavyball/utils.py,sha256=8XE-z5T7FkbPlfo8Dh9dfoH8UsE-HgjDiJCD_XHkT54,39526
-heavyball-0.23.1.dist-info/LICENSE,sha256=CGdGJim64YifGmUVPaeyRsxkvyExtClswhRNIp8FY_U,1322
-heavyball-0.23.1.dist-info/METADATA,sha256=eE1t-LDRa2ajLlXzITHLzyOt3elr9t4gxaOk55m6pj8,11926
-heavyball-0.23.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-heavyball-0.23.1.dist-info/top_level.txt,sha256=SzCxSVg_qCUPA4kZObW3Zyo4v-d_mMOD-p7a-WXTl2E,10
-heavyball-0.23.1.dist-info/RECORD,,