heavyball 0.24.0__tar.gz → 0.24.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. {heavyball-0.24.0 → heavyball-0.24.2}/PKG-INFO +1 -1
  2. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/utils.py +10 -1
  3. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball.egg-info/PKG-INFO +1 -1
  4. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball.egg-info/SOURCES.txt +1 -0
  5. {heavyball-0.24.0 → heavyball-0.24.2}/setup.py +1 -1
  6. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_channels_last.py +3 -2
  7. heavyball-0.24.2/test/test_hook.py +51 -0
  8. {heavyball-0.24.0 → heavyball-0.24.2}/LICENSE +0 -0
  9. {heavyball-0.24.0 → heavyball-0.24.2}/README.md +0 -0
  10. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/__init__.py +0 -0
  11. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/cached_delayed_psgd_kron.py +0 -0
  12. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/cached_psgd_kron.py +0 -0
  13. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/delayed_psgd.py +0 -0
  14. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/foreach_adamw.py +0 -0
  15. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/foreach_adopt.py +0 -0
  16. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/foreach_laprop.py +0 -0
  17. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/foreach_sfadamw.py +0 -0
  18. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/foreach_soap.py +0 -0
  19. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/p_adam.py +0 -0
  20. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/palm_foreach_sfadamw.py +0 -0
  21. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/palm_foreach_soap.py +0 -0
  22. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/precond_schedule_foreach_soap.py +0 -0
  23. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/precond_schedule_palm_foreach_soap.py +0 -0
  24. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/precond_schedule_sfpsoap.py +0 -0
  25. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/psgd_kron.py +0 -0
  26. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/pure_psgd.py +0 -0
  27. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball/schedule_free_palm_foreach_soap.py +0 -0
  28. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball.egg-info/dependency_links.txt +0 -0
  29. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball.egg-info/requires.txt +0 -0
  30. {heavyball-0.24.0 → heavyball-0.24.2}/heavyball.egg-info/top_level.txt +0 -0
  31. {heavyball-0.24.0 → heavyball-0.24.2}/setup.cfg +0 -0
  32. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_bf16_params.py +0 -0
  33. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_bf16_q.py +0 -0
  34. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_bf16_storage.py +0 -0
  35. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_caution.py +0 -0
  36. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_closure.py +0 -0
  37. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_ema.py +0 -0
  38. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_foreach.py +0 -0
  39. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_mars.py +0 -0
  40. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_memory.py +0 -0
  41. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_merge.py +0 -0
  42. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_no_grad.py +0 -0
  43. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_psgd.py +0 -0
  44. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_soap.py +0 -0
  45. {heavyball-0.24.0 → heavyball-0.24.2}/test/test_stochastic_updates.py +0 -0
{heavyball-0.24.0 → heavyball-0.24.2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: heavyball
-Version: 0.24.0
+Version: 0.24.2
 Summary: Efficient optimizers
 Home-page: https://github.com/clashluke/heavyball
 Author: Lucas Nestler

{heavyball-0.24.0 → heavyball-0.24.2}/heavyball/utils.py
@@ -134,7 +134,7 @@ def dim_merger(grad, max_precond_dim, split: bool = False):
     if curr_shape > 1 or len(new_shape) == 0:
         new_shape.append(curr_shape)
 
-    new_grad = grad.view(new_shape)
+    new_grad = grad.reshape(new_shape)  # needs to be .reshape() due to channels_last
     if not split:
         return new_grad
 
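
Note (illustrative, not part of the diff): the .view() → .reshape() change is needed because a tensor in torch.channels_last memory format has NHWC strides, which .view() cannot reinterpret without copying, while .reshape() falls back to a copy in that case. A minimal Python sketch of the failure mode:

import torch

g = torch.randn(8, 16, 4, 4).to(memory_format=torch.channels_last)
try:
    g.view(8, -1)  # raises RuntimeError: view size is not compatible with input tensor's size and stride
except RuntimeError:
    pass
flat = g.reshape(8, -1)  # works: reshape() materializes a contiguous copy when the strides do not allow a view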

{heavyball-0.24.0 → heavyball-0.24.2}/heavyball/utils.py
@@ -1129,3 +1129,12 @@ def merge_group(group, *tensors):
         append_or_extend(out, dim_merger(t, group['max_size_triangular'] if 'max_size_triangular' in group else group[
             'max_precond_dim'], group.get('split', False)))
     return out
+
+
+def hook_optimizer_into_model(model, optimizer, *args, **kwargs):
+    def _step(p: Tensor, o: torch.optim.Optimizer):
+        o.step()
+        o.zero_grad()
+
+    for p in model.parameters():
+        p.register_post_accumulate_grad_hook(functools.partial(_step, o=optimizer([p], *args, **kwargs)))
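
Note (usage sketch, not part of the diff): the new hook_optimizer_into_model helper builds one optimizer instance per parameter and registers it through PyTorch's Tensor.register_post_accumulate_grad_hook, so each parameter is stepped and its gradient cleared as soon as that gradient finishes accumulating inside backward(). A minimal example, assuming heavyball.ForeachAdamW as the optimizer class:

import torch
from torch import nn

import heavyball
from heavyball.utils import hook_optimizer_into_model

model = nn.Linear(128, 128).cuda()
hook_optimizer_into_model(model, heavyball.ForeachAdamW, lr=1e-3)

loss = model(torch.randn(32, 128, device='cuda')).square().mean()
loss.backward()  # parameters are stepped inside backward(); no explicit step()/zero_grad() calls

Because each gradient is consumed and cleared during the backward pass, this pattern can lower peak gradient memory, at the cost of holding one optimizer instance per parameter.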

{heavyball-0.24.0 → heavyball-0.24.2}/heavyball.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: heavyball
-Version: 0.24.0
+Version: 0.24.2
 Summary: Efficient optimizers
 Home-page: https://github.com/clashluke/heavyball
 Author: Lucas Nestler

{heavyball-0.24.0 → heavyball-0.24.2}/heavyball.egg-info/SOURCES.txt
@@ -33,6 +33,7 @@ test/test_channels_last.py
 test/test_closure.py
 test/test_ema.py
 test/test_foreach.py
+test/test_hook.py
 test/test_mars.py
 test/test_memory.py
 test/test_merge.py

{heavyball-0.24.0 → heavyball-0.24.2}/setup.py
@@ -10,7 +10,7 @@ setuptools.setup(
     name='heavyball',
     license='BSD',
     description='Efficient optimizers',
-    version='0.24.0',
+    version='0.24.2',
     long_description=README,
     url='https://github.com/clashluke/heavyball',
     packages=setuptools.find_packages(),

{heavyball-0.24.0 → heavyball-0.24.2}/test/test_channels_last.py
@@ -11,6 +11,7 @@ from heavyball.utils import clean, set_torch
 from torch import nn
 from torch._dynamo import config
 
+heavyball.utils.compile_mode = 'default'
 config.cache_size_limit = 128
 
 
@@ -29,14 +30,14 @@ def test_foreach(opt, size, depth: int, iterations: int = 32, outer_iterations:
         losses.append([])
 
         for i in range(outer_iterations):
-            model = nn.Sequential(*[nn.Conv2d(size, size, 1) for _ in range(depth)]).cuda()
+            model = nn.Sequential(*[nn.Conv2d(size, size, 3) for _ in range(depth)]).cuda()
             if is_channels_last:
                 model.to(memory_format=torch.channels_last)
 
             o = get_optim(opt, model.parameters(), lr=1e-3, weight_decay=1e-4, warmup_steps=16)
 
             for _ in range(iterations):
-                loss = model(torch.randn((1024, size, 1, 1), device='cuda')).square().mean()
+                loss = model(torch.randn((1024, size, 4, 4), device='cuda')).square().mean()
                 loss.backward()
                 o.step()
                 o.zero_grad()
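
Note (illustrative check, not part of the test suite): the test moves from 1x1 kernels on 1x1 inputs to 3x3 kernels on 4x4 inputs because with a 1x1 spatial extent the channels_last layout coincides with the default contiguous layout, so the old test never exercised the non-contiguous path that the .reshape() fix in dim_merger addresses:

import torch

x1 = torch.randn(2, 8, 1, 1).to(memory_format=torch.channels_last)
x4 = torch.randn(2, 8, 4, 4).to(memory_format=torch.channels_last)
print(x1.is_contiguous())  # True: with 1x1 spatial dims, NHWC and NCHW orders coincide
print(x4.is_contiguous())  # False: genuine channels_last strides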

heavyball-0.24.2/test/test_hook.py (new file)
@@ -0,0 +1,51 @@
+import os
+
+os.environ["TORCH_LOGS"] = "+recompiles"
+
+import heavyball
+import heavyball.utils
+import pytest
+import torch
+from benchmark.utils import get_optim
+from heavyball.utils import clean, set_torch, hook_optimizer_into_model
+from torch import nn
+from torch._dynamo import config
+
+heavyball.utils.compile_mode = 'default'
+config.cache_size_limit = 128
+
+
+@pytest.mark.parametrize("opt", heavyball.__all__)
+@pytest.mark.parametrize("size,depth", [(128, 1)])
+def test_foreach(opt, size, depth: int, iterations: int = 128, outer_iterations: int = 1):
+    set_torch()
+    opt = getattr(heavyball, opt)
+
+    peaks = []
+    losses = []
+
+    for use_hook in [False, True]:
+        torch.manual_seed(0x2131290)
+        peaks.append([])
+        losses.append([])
+
+        for i in range(outer_iterations):
+            model = nn.Sequential(*[nn.Linear(size, size) for _ in range(depth)]).cuda()
+
+            if use_hook:
+                hook_optimizer_into_model(model, opt, lr=1e-3, weight_decay=1e-4, warmup_steps=16)
+            else:
+                o = get_optim(opt, model.parameters(), lr=1e-3, weight_decay=1e-4, warmup_steps=16)
+            for _ in range(iterations):
+                loss = model(torch.randn((1024, size), device='cuda')).square().mean()
+                loss.backward()
+                if not use_hook:
+                    o.step()
+                    o.zero_grad()
+                losses[-1].append(loss.detach())
+
+            clean()
+
+    for i, (l0, l1) in enumerate(zip(*losses)):
+        print(i, l0.item(), l1.item())
+        assert torch.allclose(l0.float(), l1.float(), rtol=0.1)