heavyball 0.23.3__py3-none-any.whl → 0.24.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
heavyball/cached_delayed_psgd_kron.py CHANGED
@@ -86,7 +86,7 @@ class ForeachCachedDelayedPSGDKron(PSGDBase):
             state = self.state_(p)
 
             if 'Q' not in state:
-                state["exp_avg"] = torch.zeros_like(g, dtype=storage_dtype)
+                state["exp_avg"] = torch.zeros_like(g, dtype=storage_dtype, memory_format=torch.preserve_format)
                 Q, state["exprs"] = init_Q_exprs(p, precond_init_scale, max_size_triangular, min_ndim_triangular,
                                                  memory_save_mode, dtype=q_dtype)
                 state['Q'] = triu_to_line(Q) if store_triu_as_line else Q
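The change above recurs throughout this release: every optimizer state buffer (exp_avg, exp_avg_sq, z) is now allocated with an explicit memory_format=torch.preserve_format, and the same keyword is added to the torch.clone calls that initialize the schedule-free z buffers. Per the PyTorch documentation, preserve_format is already the default for torch.zeros_like and torch.clone, so the keyword chiefly makes explicit that state buffers keep the parameter's memory layout (for example channels_last) rather than being forced into a contiguous one. A minimal sketch, not taken from heavyball, of what this guarantees:

import torch

# Minimal sketch (not heavyball code): a channels_last parameter and its state buffer.
p = torch.randn(8, 3, 32, 32).to(memory_format=torch.channels_last)

# With preserve_format, the zero-initialized state inherits the parameter's strides,
# so later foreach updates operate on identically laid-out tensors.
exp_avg = torch.zeros_like(p, dtype=torch.bfloat16, memory_format=torch.preserve_format)

assert p.is_contiguous(memory_format=torch.channels_last)
assert exp_avg.is_contiguous(memory_format=torch.channels_last)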
heavyball/cached_psgd_kron.py CHANGED
@@ -83,7 +83,7 @@ class ForeachCachedPSGDKron(PSGDBase):
             state = self.state_(p)
 
             if 'Q' not in state:
-                state["exp_avg"] = torch.zeros_like(g, dtype=storage_dtype)
+                state["exp_avg"] = torch.zeros_like(g, dtype=storage_dtype, memory_format=torch.preserve_format)
                 Q, state["exprs"] = init_Q_exprs(p, precond_init_scale, max_size_triangular, min_ndim_triangular,
                                                  memory_save_mode, dtype=q_dtype)
                 state['Q'] = triu_to_line(Q) if store_triu_as_line else Q
heavyball/delayed_psgd.py CHANGED
@@ -89,7 +89,7 @@ class ForeachDelayedPSGD(PSGDBase):
             state = self.state_(p)
 
             if 'Q' not in state:
-                state["exp_avg"] = torch.zeros_like(g, dtype=storage_dtype)
+                state["exp_avg"] = torch.zeros_like(g, dtype=storage_dtype, memory_format=torch.preserve_format)
                 Q, state["exprs"] = init_Q_exprs(p, precond_init_scale, max_size_triangular, min_ndim_triangular,
                                                  memory_save_mode, dtype=q_dtype)
                 state["Q"] = triu_to_line(Q) if store_triu_as_line else Q
heavyball/foreach_adamw.py CHANGED
@@ -45,8 +45,8 @@ class ForeachAdamW(StatefulOptimizer):
 
         for p in active_p:
             if 'exp_avg' not in self.state_(p):
-                self.state_(p)['exp_avg'] = torch.zeros_like(p.data, dtype=storage_dtype)
-                self.state_(p)['exp_avg_sq'] = torch.zeros_like(p.data, dtype=storage_dtype)
+                self.state_(p)['exp_avg'] = torch.zeros_like(p.data, dtype=storage_dtype, memory_format=torch.preserve_format)
+                self.state_(p)['exp_avg_sq'] = torch.zeros_like(p.data, dtype=storage_dtype, memory_format=torch.preserve_format)
 
         y, grad, exp_avg_sq, exp_avg = zip(
             *[(p.data, p.grad, self.state_(p)['exp_avg_sq'], self.state_(p)['exp_avg']) for p in active_p])
heavyball/foreach_adopt.py CHANGED
@@ -51,8 +51,8 @@ class ForeachADOPT(StatefulOptimizer):
 
         for p in active_p:
             if 'exp_avg' not in self.state_(p):
-                self.state_(p)['exp_avg'] = torch.zeros_like(p.data, dtype=storage_dtype)
-                self.state_(p)['exp_avg_sq'] = torch.zeros_like(p.data, dtype=storage_dtype)
+                self.state_(p)['exp_avg'] = torch.zeros_like(p.data, dtype=storage_dtype, memory_format=torch.preserve_format)
+                self.state_(p)['exp_avg_sq'] = torch.zeros_like(p.data, dtype=storage_dtype, memory_format=torch.preserve_format)
 
         y, grad, exp_avg_sq, exp_avg = zip(
             *[(p.data, p.grad, self.state_(p)['exp_avg_sq'], self.state_(p)['exp_avg']) for p in active_p])
heavyball/foreach_laprop.py CHANGED
@@ -47,8 +47,8 @@ class ForeachLaProp(StatefulOptimizer):
 
         for p in active_p:
             if 'exp_avg' not in self.state_(p):
-                self.state_(p)['exp_avg'] = torch.zeros_like(p.data, dtype=storage_dtype)
-                self.state_(p)['exp_avg_sq'] = torch.zeros_like(p.data, dtype=storage_dtype)
+                self.state_(p)['exp_avg'] = torch.zeros_like(p.data, dtype=storage_dtype, memory_format=torch.preserve_format)
+                self.state_(p)['exp_avg_sq'] = torch.zeros_like(p.data, dtype=storage_dtype, memory_format=torch.preserve_format)
 
         y, grad, exp_avg_sq, exp_avg = zip(
             *[(p.data, p.grad, self.state_(p)['exp_avg_sq'], self.state_(p)['exp_avg']) #
heavyball/foreach_sfadamw.py CHANGED
@@ -50,8 +50,8 @@ class ForeachSFAdamW(ScheduleFree):
 
         for p in active_p:
             if 'z' not in self.state_(p):
-                self.state_(p)['z'] = torch.clone(p.data)
-                self.state_(p)['exp_avg_sq'] = torch.zeros_like(p.data, dtype=storage_dtype)
+                self.state_(p)['z'] = torch.clone(p.data, memory_format=torch.preserve_format)
+                self.state_(p)['exp_avg_sq'] = torch.zeros_like(p.data, dtype=storage_dtype, memory_format=torch.preserve_format)
 
         y, grad, exp_avg_sq, z = zip(*[(p.data, p.grad, self.state_(p)['exp_avg_sq'], self.state_(p)['z']) #
                                        for p in active_p])
heavyball/foreach_soap.py CHANGED
@@ -48,8 +48,8 @@ class ForeachSOAP(StatefulOptimizer):
             step = state['step'] = state.get("step", -1) + 1
 
             if "exp_avg" not in state:
-                state["exp_avg"] = torch.zeros_like(g, dtype=torch.float32)
-                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32)
+                state["exp_avg"] = torch.zeros_like(g, dtype=torch.float32, memory_format=torch.preserve_format)
+                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32, memory_format=torch.preserve_format)
                 init_preconditioner(g, state, max_precond_dim, precondition_1d)
                 update_preconditioner(g, state, max_precond_dim, precondition_1d, 0, True)
                 continue  # first step is skipped so that we never use the current gradients in the projection.
heavyball/p_adam.py CHANGED
@@ -81,8 +81,8 @@ class ForeachPaLMPAdam(PSGDBase):
             state = self.state_(p)
 
             if 'Q' not in state:
-                state['exp_avg'] = torch.zeros_like(g, dtype=storage_dtype)
-                state['exp_avg_sq'] = torch.zeros_like(g, dtype=storage_dtype)
+                state['exp_avg'] = torch.zeros_like(g, dtype=storage_dtype, memory_format=torch.preserve_format)
+                state['exp_avg_sq'] = torch.zeros_like(g, dtype=storage_dtype, memory_format=torch.preserve_format)
                 Q, state["exprs"] = init_Q_exprs(p, precond_init_scale, max_size_triangular, min_ndim_triangular,
                                                  memory_save_mode, dtype=q_dtype)
                 state['Q'] = triu_to_line(Q) if store_triu_as_line else Q
heavyball/palm_foreach_sfadamw.py CHANGED
@@ -54,8 +54,8 @@ class PaLMForeachSFAdamW(ScheduleFree):
 
         for p in active_p:
             if 'z' not in self.state_(p):
-                self.state_(p)['z'] = torch.clone(p.data)
-                self.state_(p)['exp_avg_sq'] = torch.zeros_like(p.data, dtype=storage_dtype)
+                self.state_(p)['z'] = torch.clone(p.data, memory_format=torch.preserve_format)
+                self.state_(p)['exp_avg_sq'] = torch.zeros_like(p.data, dtype=storage_dtype, memory_format=torch.preserve_format)
 
         # Decay the first moment running average coefficient
         beta2 = 1 - (k + 1) ** -group['beta2_scale']
heavyball/palm_foreach_soap.py CHANGED
@@ -56,8 +56,8 @@ class PaLMForeachSOAP(StatefulOptimizer):
             step = state['step'] = state.get("step", -1) + 1
 
             if "exp_avg" not in state:
-                state["exp_avg"] = torch.zeros_like(g, dtype=torch.float32)
-                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32)
+                state["exp_avg"] = torch.zeros_like(g, dtype=torch.float32, memory_format=torch.preserve_format)
+                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32, memory_format=torch.preserve_format)
                 init_preconditioner(g, state, max_precond_dim, precondition_1d)
                 update_preconditioner(g, state, max_precond_dim, precondition_1d, 0, True)
                 continue  # first step is skipped so that we never use the current gradients in the projection.
heavyball/precond_schedule_foreach_soap.py CHANGED
@@ -50,8 +50,8 @@ class PrecondScheduleForeachSOAP(StatefulOptimizer):
             step = state['step'] = state.get("step", -1) + 1
 
             if "exp_avg" not in state:
-                state["exp_avg"] = torch.zeros_like(g, dtype=torch.float32)
-                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32)
+                state["exp_avg"] = torch.zeros_like(g, dtype=torch.float32, memory_format=torch.preserve_format)
+                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32, memory_format=torch.preserve_format)
                 init_preconditioner(g, state, max_precond_dim, precondition_1d)
                 update_preconditioner(g, state, max_precond_dim, precondition_1d, 0, True)
                 continue  # first step is skipped so that we never use the current gradients in the projection.
heavyball/precond_schedule_palm_foreach_soap.py CHANGED
@@ -58,8 +58,8 @@ class PrecondSchedulePaLMForeachSOAP(StatefulOptimizer):
             step = state['step'] = state.get("step", -1) + 1
 
             if "exp_avg" not in state:
-                state["exp_avg"] = torch.zeros_like(g, dtype=torch.float32)
-                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32)
+                state["exp_avg"] = torch.zeros_like(g, dtype=torch.float32, memory_format=torch.preserve_format)
+                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32, memory_format=torch.preserve_format)
                 init_preconditioner(g, state, max_precond_dim, precondition_1d)
                 update_preconditioner(g, state, max_precond_dim, precondition_1d, 0, True)
                 continue  # first step is skipped so that we never use the current gradients in the projection.
heavyball/precond_schedule_sfpsoap.py CHANGED
@@ -96,8 +96,8 @@ class PrecondScheduleSFPaLMSOAP(ScheduleFree):
             state = self.state_(p)
 
             if "z" not in state:
-                state["z"] = torch.clone(p.data)
-                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32)
+                state["z"] = torch.clone(p.data, memory_format=torch.preserve_format)
+                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32, memory_format=torch.preserve_format)
                 init_preconditioner(g, state, max_precond_dim, precondition_1d)
                 update_preconditioner(g, state, max_precond_dim, precondition_1d, 0, True)
                 continue  # first step is skipped so that we never use the current gradients in the projection.
heavyball/psgd_kron.py CHANGED
@@ -84,7 +84,7 @@ class ForeachPSGDKron(PSGDBase):
             state = self.state_(p)
 
             if 'Q' not in state:
-                state["exp_avg"] = torch.zeros_like(g, dtype=storage_dtype)
+                state["exp_avg"] = torch.zeros_like(g, dtype=storage_dtype, memory_format=torch.preserve_format)
                 Q, state["exprs"] = init_Q_exprs(p, precond_init_scale, max_size_triangular, min_ndim_triangular,
                                                  memory_save_mode, dtype=q_dtype)
                 state['Q'] = triu_to_line(Q) if store_triu_as_line else Q
heavyball/schedule_free_palm_foreach_soap.py CHANGED
@@ -90,7 +90,7 @@ class SFPaLMForeachSOAP(ScheduleFree):
 
             if "z" not in state:
                 state["z"] = torch.clone(p).float()
-                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32)
+                state["exp_avg_sq"] = torch.zeros_like(g, dtype=torch.float32, memory_format=torch.preserve_format)
                 if mars:
                     state['mars_prev_grad'] = g.clone()
                 init_preconditioner(g, state, max_precond_dim, precondition_1d)
heavyball/utils.py CHANGED
@@ -8,9 +8,9 @@ from typing import List, Optional, Tuple, Callable, Union
 import numpy as np
 import torch
 from torch import Tensor
+from torch._dynamo.exc import TorchDynamoException
 from torch.backends import cudnn, opt_einsum
 from torch.utils._pytree import tree_map
-from torch._dynamo.exc import TorchDynamoException
 
 compile_mode = "max-autotune-no-cudagraphs"
 dynamic = False
@@ -23,26 +23,26 @@ def decorator(func):
 
     @functools.wraps(func)
     def _fn(*args, **kwargs):
-        if compile_mode is None:
+        if is_compiling() or compile_mode is None:
             return func(*args, **kwargs)
         nonlocal compiled
         if compiled is None:
-            compiled = torch.compile(func, fullgraph=True, dynamic=dynamic, mode=compile_mode_recommended_to_none)
+            compiled = torch.compile(fullgraph=True, dynamic=dynamic, mode=compile_mode_recommended_to_none)(func)
         return compiled(*args, **kwargs)
 
     return _fn
 
 
-def decorator_knowngood(func):
+def decorator_knowngood(func: Callable):
     compiled = None
 
     @functools.wraps(func)
     def _fn(*args, **kwargs):
-        if compile_mode is None:
+        if is_compiling() or compile_mode is None:
             return func(*args, **kwargs)
         nonlocal compiled
         if compiled is None:
-            compiled = torch.compile(func, fullgraph=True, dynamic=dynamic, mode=compile_mode)
+            compiled = torch.compile(fullgraph=True, dynamic=dynamic, mode=compile_mode)(func)
         return compiled(*args, **kwargs)
 
     return _fn
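Both decorators now skip compilation when the call is already running under TorchDynamo, and they build the compiled callable as torch.compile(...)(func) instead of torch.compile(func, ...); the two spellings produce the same compiled wrapper, since torch.compile called without a function returns a decorator. The is_compiling() guard referenced here is not shown in this hunk; a rough sketch of such a helper, assuming it only asks PyTorch whether a trace is active, could look like:

import torch

def is_compiling() -> bool:
    # Hypothetical stand-in for the guard used above (not heavyball's actual helper):
    # True while TorchDynamo is tracing, so the decorators fall back to the eager
    # function instead of starting a nested compilation.
    try:
        return torch.compiler.is_compiling()
    except AttributeError:  # very old PyTorch without torch.compiler.is_compiling
        return False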
@@ -58,12 +58,13 @@ def warmup(lr: float, step: int, warmup_steps: int):
 
 
 @decorator_knowngood
-def _compilable_schedule_free_(p: List[Tensor], z: List[Tensor], ckp1: Tensor, grad: List[Tensor], lr: Tensor, beta1: Tensor):
-    p32, z32, g32 = [promote(x) for x in (p, z, grad)]
+def _compilable_schedule_free_(p: List[Tensor], z: List[Tensor], ckp1: Tensor, grad: List[Tensor], lr: Tensor,
+                               beta1: Tensor):
+    p32, z32, g32 = [list(map(promote, x)) for x in (p, z, grad)]
     for p_, z_, g_ in zip(p32, z32, g32):
         p_.lerp_(z_, ckp1)
         p_.add_(g_, alpha=lr * (beta1 * (1 - ckp1) - 1))
-        z_.add(g_, alpha=-lr)
+        z_.add_(g_, alpha=-lr)
     copy_stochastic_list_(p, p32)
     copy_stochastic_list_(z, z32)
 
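This hunk fixes two bugs in _compilable_schedule_free_: promote is now mapped over every tensor in the p, z, and grad lists rather than being applied to the list objects themselves, and the z update uses the in-place add_ instead of add, whose out-of-place result was previously discarded. A two-line illustration of the add/add_ difference:

import torch

z = torch.zeros(3)
g = torch.ones(3)

z.add(g, alpha=-0.1)   # out-of-place: returns a new tensor, z stays all zeros
z.add_(g, alpha=-0.1)  # in-place: z becomes tensor([-0.1000, -0.1000, -0.1000])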
@@ -158,7 +159,8 @@ def beta_debias(beta, step):
 
 
 @decorator_knowngood
-def _compilable_exp_avg_sq_(state: List[Tensor], grad: List[Tensor], beta2: Tensor, eps: Tensor, out: List[Optional[Tensor]]):
+def _compilable_exp_avg_sq_(state: List[Tensor], grad: List[Tensor], beta2: Tensor, eps: Tensor,
+                            out: List[Optional[Tensor]]):
     torch._foreach_mul_(state, beta2)
     [s.addcmul_(g, g, value=1 - beta2) for s, g in zip(state, grad)]
     denom = torch._foreach_sqrt(state)
@@ -1050,7 +1052,7 @@ class PSGDBase(StatefulOptimizer):
 
 
 # TODO: Figure out why this sometimes crashes
-#@decorator_knowngood
+# @decorator_knowngood
 def _compilable_precond_grad_cached_(ea: Tensor, expr: str, param: Tensor, lr: Tensor, weight_decay: Tensor,
                                      clip_fn: callable, caution: bool, grad: Optional[Tensor], *cached_q: Tensor):
     md = min_dtype(list(cached_q) + [ea])
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: heavyball
-Version: 0.23.3
+Version: 0.24.0
 Summary: Efficient optimizers
 Home-page: https://github.com/clashluke/heavyball
 Author: Lucas Nestler
@@ -0,0 +1,24 @@
+heavyball/__init__.py,sha256=icHYN-MGsmHkLUlHCMcZkOlwY7GT63_ayR_a5iPKmzM,2226
+heavyball/cached_delayed_psgd_kron.py,sha256=cHwVDq-_284_eMt09rAq26D_8fv3N0e0wdN1woCHU1M,6864
+heavyball/cached_psgd_kron.py,sha256=ttg6bemNDRpCJBV3aJg2DSyVfsfTMZAnhErgwC2jXlw,6815
+heavyball/delayed_psgd.py,sha256=yHy83YQ_PKWtwQq1R_OVyj3cjmcbsZAXX1M-hGyciss,6332
+heavyball/foreach_adamw.py,sha256=K4xTes4drylAqaqWky8O_Bg_mmbAmcHZ5DEBs5vMD-s,2860
+heavyball/foreach_adopt.py,sha256=fHnbEqvKKc5IKPDWC9Qo9PiISSjj1MEViy0Jb3BRgZQ,3582
+heavyball/foreach_laprop.py,sha256=EXkwFQ-H7hHWLmiNUsxUcmXhzNNLMjieHjfOlY_6kmo,2868
+heavyball/foreach_sfadamw.py,sha256=TeWf0nKXQEFcz02rADYRJenDM9mX1dGHhvILLks6OW8,3087
+heavyball/foreach_soap.py,sha256=408jRysE9ek0ea-TphhSBMTa9zcjkgMX3qlx8qTCt34,4803
+heavyball/p_adam.py,sha256=qEcuU8VEc35vaWAXjT0O65vfCuNn_3ttwL4RlJKN3Xw,6389
+heavyball/palm_foreach_sfadamw.py,sha256=1qOr-uniSmI1sNCJc1SnvyKH5iFu80Z6H5h93lDTwcE,3410
+heavyball/palm_foreach_soap.py,sha256=cExM9nTC3zAgsRr42VOIMWNwYA4dAJaA8-pIo7SWilc,6230
+heavyball/precond_schedule_foreach_soap.py,sha256=EL_Z-v5l7BC98QgI-Zg9iyM77TAreVgD5Zln59ewGoI,4966
+heavyball/precond_schedule_palm_foreach_soap.py,sha256=HWo2t7yY-_n4pPGmDiELccy0jdELTVhdlH-eyFBih5k,6502
+heavyball/precond_schedule_sfpsoap.py,sha256=KUKdZzd336w24zPRcqwRatj7IVmd1Us0a_VuzASluIo,7565
+heavyball/psgd_kron.py,sha256=PtTe6eR547Y-4CvgjpchgkQsr_kWr4AN-uY9L_JO_C8,6088
+heavyball/pure_psgd.py,sha256=344NdVNHwUFX3fU2R1S_Xh9SXAML3E4ryHr7xfMh9Cc,5076
+heavyball/schedule_free_palm_foreach_soap.py,sha256=KTQY37MZH7YnOSTLKY8uVySUXxWXbFVUA1QXN3iv8Ds,7244
+heavyball/utils.py,sha256=12DfrpBDiHAdFxN3cA3BA9tcailHw8wl5QTzEn4As98,39677
+heavyball-0.24.0.dist-info/LICENSE,sha256=CGdGJim64YifGmUVPaeyRsxkvyExtClswhRNIp8FY_U,1322
+heavyball-0.24.0.dist-info/METADATA,sha256=ZL_FTyrobNlPxhtgdVH6kZ9aD_jIq0SGALp7jo0BbiI,11926
+heavyball-0.24.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+heavyball-0.24.0.dist-info/top_level.txt,sha256=SzCxSVg_qCUPA4kZObW3Zyo4v-d_mMOD-p7a-WXTl2E,10
+heavyball-0.24.0.dist-info/RECORD,,
@@ -1,24 +0,0 @@
-heavyball/__init__.py,sha256=icHYN-MGsmHkLUlHCMcZkOlwY7GT63_ayR_a5iPKmzM,2226
-heavyball/cached_delayed_psgd_kron.py,sha256=n3wIOhrop0Ls4MZ0kXpwGuImp1jzPs6VGdxIlPyoYdQ,6827
-heavyball/cached_psgd_kron.py,sha256=KCLsfvj9qh_2FNwRTdWM3zjnt2oGHfsf4Y341rPcceI,6778
-heavyball/delayed_psgd.py,sha256=xaAPNqE5Pg476fqXjST11Bi0zrZ8KjjU5h_NPUdwlZk,6295
-heavyball/foreach_adamw.py,sha256=IdcP5ggNB2SVDK3iNrNKGTGlEwWn18H77ClqCnJGB74,2786
-heavyball/foreach_adopt.py,sha256=NzHYoeiq1pFKn1RPHiVG2vJsHES30Blh5v2ypOWP2uQ,3508
-heavyball/foreach_laprop.py,sha256=myb0uwC-oZqYqeVSozas2JNMlbUkLCAMrVB9ZP4QOKQ,2794
-heavyball/foreach_sfadamw.py,sha256=B8xyL8Qxul4G1rsxMv8ZMlkYh1gaTpeCvCgkubaBAhE,3013
-heavyball/foreach_soap.py,sha256=7B_dP2Hm_xqwpBQiPYkv_c6eoRnU1dV2VZfvSoa4uJ8,4729
-heavyball/p_adam.py,sha256=8BlZ6YoaDXawMiRbCxo0Kd5_0-pAn0MQIhL0LHNaRBs,6315
-heavyball/palm_foreach_sfadamw.py,sha256=QzNXZOXEH6ufEPbnPg8ixn19WpVr4OhDreqnxIwcBVM,3336
-heavyball/palm_foreach_soap.py,sha256=IknGm_CzrqDIFEoCkejxjoZ4sfIy6RSoInqlMUOYLB4,6156
-heavyball/precond_schedule_foreach_soap.py,sha256=bJ2ifPFa8zEP9GO8eBpqZzsmP7p_iQkkCkllNeEMHPU,4892
-heavyball/precond_schedule_palm_foreach_soap.py,sha256=4dT9f134-Faq2KuCMCHzMtrkMO-es5p_DYS1of5yF-s,6428
-heavyball/precond_schedule_sfpsoap.py,sha256=ey-mUIjAy9ny5vJac0vRZHUXgef1bc7u7_-4hRkM4Rs,7491
-heavyball/psgd_kron.py,sha256=4eiGPXAFjvGIXLdiai1UJfAvTozAV1TXaE9UGkE4BLc,6051
-heavyball/pure_psgd.py,sha256=344NdVNHwUFX3fU2R1S_Xh9SXAML3E4ryHr7xfMh9Cc,5076
-heavyball/schedule_free_palm_foreach_soap.py,sha256=irvlIXF-oABpWWycZPMV-JG9XTiXSlgHtrM-ygfATic,7207
-heavyball/utils.py,sha256=kClbLP7CECCrqjf7VYAuWuLDDW17JdgFQpSMWVnIU6o,39559
-heavyball-0.23.3.dist-info/LICENSE,sha256=CGdGJim64YifGmUVPaeyRsxkvyExtClswhRNIp8FY_U,1322
-heavyball-0.23.3.dist-info/METADATA,sha256=q3Df5J-g84JSwTEgIOKKILM0NgHzkkEblqcrtbUVQwA,11926
-heavyball-0.23.3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-heavyball-0.23.3.dist-info/top_level.txt,sha256=SzCxSVg_qCUPA4kZObW3Zyo4v-d_mMOD-p7a-WXTl2E,10
-heavyball-0.23.3.dist-info/RECORD,,