torchzero-0.4.1-py3-none-any.whl → torchzero-0.4.2-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Files changed (60)
  1. torchzero/__init__.py +3 -1
  2. torchzero/_minimize/__init__.py +0 -0
  3. torchzero/_minimize/methods.py +95 -0
  4. torchzero/_minimize/minimize.py +518 -0
  5. torchzero/core/__init__.py +5 -5
  6. torchzero/core/chain.py +2 -1
  7. torchzero/core/functional.py +2 -1
  8. torchzero/core/module.py +75 -4
  9. torchzero/core/transform.py +6 -5
  10. torchzero/linalg/eigh.py +116 -68
  11. torchzero/linalg/linear_operator.py +1 -0
  12. torchzero/linalg/orthogonalize.py +60 -5
  13. torchzero/linalg/sketch.py +39 -0
  14. torchzero/modules/__init__.py +1 -0
  15. torchzero/modules/adaptive/adagrad.py +2 -0
  16. torchzero/modules/adaptive/adam.py +5 -1
  17. torchzero/modules/adaptive/adan.py +3 -0
  18. torchzero/modules/adaptive/ggt.py +20 -18
  19. torchzero/modules/adaptive/lion.py +3 -1
  20. torchzero/modules/adaptive/mars.py +6 -5
  21. torchzero/modules/adaptive/msam.py +3 -0
  22. torchzero/modules/adaptive/rmsprop.py +2 -0
  23. torchzero/modules/adaptive/rprop.py +9 -7
  24. torchzero/modules/adaptive/shampoo.py +9 -1
  25. torchzero/modules/adaptive/soap.py +32 -29
  26. torchzero/modules/basis/__init__.py +2 -0
  27. torchzero/modules/basis/ggt_basis.py +199 -0
  28. torchzero/modules/basis/soap_basis.py +254 -0
  29. torchzero/modules/clipping/ema_clipping.py +32 -27
  30. torchzero/modules/clipping/growth_clipping.py +1 -0
  31. torchzero/modules/experimental/__init__.py +1 -6
  32. torchzero/modules/experimental/coordinate_momentum.py +2 -0
  33. torchzero/modules/experimental/cubic_adam.py +4 -0
  34. torchzero/modules/grad_approximation/__init__.py +3 -2
  35. torchzero/modules/least_squares/gn.py +6 -0
  36. torchzero/modules/misc/gradient_accumulation.py +1 -0
  37. torchzero/modules/misc/misc.py +6 -0
  38. torchzero/modules/momentum/averaging.py +6 -0
  39. torchzero/modules/momentum/momentum.py +4 -0
  40. torchzero/modules/ops/__init__.py +0 -1
  41. torchzero/modules/ops/accumulate.py +4 -0
  42. torchzero/modules/ops/higher_level.py +6 -1
  43. torchzero/modules/second_order/inm.py +4 -0
  44. torchzero/modules/second_order/newton.py +11 -3
  45. torchzero/modules/second_order/newton_cg.py +7 -3
  46. torchzero/modules/second_order/nystrom.py +14 -19
  47. torchzero/modules/second_order/rsn.py +37 -6
  48. torchzero/modules/trust_region/trust_region.py +2 -1
  49. torchzero/utils/benchmarks/logistic.py +33 -18
  50. torchzero/utils/params.py +13 -1
  51. torchzero/utils/tensorlist.py +2 -2
  52. {torchzero-0.4.1.dist-info → torchzero-0.4.2.dist-info}/METADATA +1 -1
  53. {torchzero-0.4.1.dist-info → torchzero-0.4.2.dist-info}/RECORD +56 -53
  54. torchzero/modules/experimental/adanystrom.py +0 -258
  55. torchzero/modules/experimental/common_directions_whiten.py +0 -142
  56. torchzero/modules/experimental/eigen_sr1.py +0 -182
  57. torchzero/modules/experimental/eigengrad.py +0 -207
  58. torchzero/modules/{experimental → grad_approximation}/spsa1.py +0 -0
  59. {torchzero-0.4.1.dist-info → torchzero-0.4.2.dist-info}/WHEEL +0 -0
  60. {torchzero-0.4.1.dist-info → torchzero-0.4.2.dist-info}/top_level.txt +0 -0

torchzero/modules/experimental/eigengrad.py
@@ -1,207 +0,0 @@
-# pylint: disable = non-ascii-name
-from collections.abc import Mapping
-
-import torch
-
-from ...core import Chainable, TensorTransform
-from ...linalg.eigh import eigh_plus_uuT, regularize_eigh
-from ...linalg.orthogonalize import OrthogonalizeMethod, orthogonalize
-from ...linalg.linear_operator import Eigendecomposition
-from ..adaptive.lre_optimizers import LREOptimizerBase
-
-
-def _eigengrad_update_state_(state: dict, setting: Mapping, L_new: torch.Tensor | None, Q_new: torch.Tensor | None):
-    """stores L, Q, L_reg, Q_reg and reprojects the eigenbasis optimizer (this is also used in other eigen-based modules)"""
-    if (L_new is not None) and (Q_new is not None):
-
-        # re-orthogonalize
-        orthogonalize_interval = setting["orthogonalize_interval"]
-        if orthogonalize_interval is not None:
-            Q_step = state.get("Q_step", 0)
-            state["Q_step"] = Q_step + 1
-            if Q_step % orthogonalize_interval == 0:
-                Q_new = orthogonalize(Q_new, method=setting["orthogonalize_method"])
-
-        # take absolute value (for hessian)
-        if setting.get("abs", False):
-            L_new = L_new.abs()
-
-        # store
-        state["L"] = L_new
-        state["Q"] = Q_new
-
-        # absolute value for matmul
-        if setting.get("mm_abs", False):
-            L_new = L_new.abs()
-
-        # regularize for matmul
-        # this second round of regularization is only used for preconditioning
-        # and doesn't affect the accumulator
-        L_reg_new, Q_reg_new = regularize_eigh(L=L_new, Q=Q_new,
-                                               truncate=setting["mm_truncate"],
-                                               tol=setting["mm_tol"],
-                                               damping=setting["mm_damping"],
-                                               rdamping=setting["mm_rdamping"],
-                                               )
-
-        # print(f'{state["L_reg"] = }, {L_reg_new = }')
-
-        # reproject eigenbasis optimizer
-        if (L_reg_new is not None) and (Q_reg_new is not None):
-            eigenbasis_optimizer: LREOptimizerBase | None = setting["eigenbasis_optimizer"]
-            if eigenbasis_optimizer is not None:
-                eigenbasis_optimizer.reproject(L_old=state["L_reg"], Q_old=state["Q_reg"], L_new=L_reg_new,
-                                               Q_new=Q_reg_new, state=state["eigenbasis_state"])
-
-            state["L_reg"] = L_reg_new
-            state["Q_reg"] = Q_reg_new
-
-
-def eigengrad_apply(
-    tensor: torch.Tensor,
-    L_reg: torch.Tensor,
-    Q_reg: torch.Tensor,
-    beta: float | None,
-    step: int | None,
-    debias: bool,
-    id_reg: float | None,
-    eigenbasis_optimizer: LREOptimizerBase | None,
-    eigenbasis_state: dict,
-
-    whiten_fn=torch.sqrt
-):
-    # debias
-    if debias:
-        assert beta is not None and step is not None
-        L_reg = L_reg / (1 - beta ** step)
-
-    # step with eigenbasis optimizer
-    if eigenbasis_optimizer is not None:
-        if (id_reg is not None) and (id_reg != 0):
-            raise RuntimeError("id_reg is not compatible with eigenbasis_optimizer")
-
-        update = eigenbasis_optimizer.step(tensor.ravel(), L=L_reg, Q=Q_reg, state=eigenbasis_state)
-        return update.view_as(tensor)
-
-    # or just whiten
-    # L_reg = L_reg.clip(min=torch.finfo(L_reg.dtype).tiny * 2)
-
-    if id_reg is None or id_reg == 0:
-        G = Eigendecomposition(whiten_fn(L_reg), Q_reg, use_nystrom=False)
-        dir = G.solve(tensor.ravel())
-
-    else:
-        G = Eigendecomposition(whiten_fn(L_reg), Q_reg, use_nystrom=True)
-        dir = G.solve_plus_diag(tensor.ravel(), diag=id_reg)
-
-    return dir.view_as(tensor)
-
-class Eigengrad(TensorTransform):
-    """A rank-1 symmetric update to a low-rank eigendecomposition is cheap to compute,
-    so this module stores the covariance matrix as a low-rank eigendecomposition.
-
-
-    Args:
-        rank (int): maximum allowed rank
-        beta (float, optional): beta for covariance matrix exponential moving average. Defaults to 0.95.
-        eig_tol (float, optional):
-            removes eigenvalues this much smaller than the largest eigenvalue when updating the preconditioner. Defaults to 1e-5.
-        damping (float, optional):
-            added to eigenvalues when updating the preconditioner. Defaults to 0.
-        rdamping (float, optional):
-            added to eigenvalues when updating the preconditioner, relative to the largest eigenvalue. Defaults to 0.
-        mm_tol (float, optional):
-            removes eigenvalues this much smaller than the largest eigenvalue when computing the update. Defaults to 0.
-        mm_truncate (int | None, optional):
-            uses top k eigenvalues to compute the update. Defaults to None.
-        mm_damping (float, optional):
-            added to eigenvalues when computing the update. Defaults to 1e-4.
-        mm_rdamping (float, optional):
-            added to eigenvalues when computing the update, relative to the largest eigenvalue. Defaults to 0.
-        id_reg (float, optional):
-            multiplier to the identity matrix added to the preconditioner before computing the update.
-            If this value is given, the solution from Nyström sketch-and-solve will be used to compute the update.
-            This value can't be too small (i.e. less than 1e-5) or the solver will be very unstable. Defaults to None.
-        column_space_tol (float, optional):
-            tolerance for deciding if a new eigenvector is within the column space of the covariance matrix. Defaults to 1e-9.
-        concat_params (bool, optional):
-            whether to precondition all parameters at once if True, or each separately if False. Defaults to True.
-        update_freq (int, optional): update frequency. Defaults to 1.
-        inner (Chainable | None, optional): inner modules. Defaults to None.
-
-    """
-
-    def __init__(
-        self,
-        rank: int = 100,
-        beta=0.95,
-        eig_tol: float | None = 1e-5,
-        damping: float = 0,
-        rdamping: float = 0,
-        mm_tol: float = 0,
-        mm_truncate: int | None = None,
-        mm_damping: float = 1e-4,
-        mm_rdamping: float = 0,
-        id_reg: float | None = None,
-        column_space_tol=1e-9,
-
-        orthogonalize_interval: int | None = None,
-        orthogonalize_method: OrthogonalizeMethod = 'qr',
-
-        eigenbasis_optimizer: LREOptimizerBase | None = None,
-        concat_params: bool = True,
-        update_freq: int = 1,
-        inner: Chainable | None = None,
-    ):
-        defaults = locals().copy()
-        for k in ["self", "concat_params", "inner", "update_freq"]:
-            del defaults[k]
-
-        super().__init__(defaults, concat_params=concat_params, inner=inner, update_freq=update_freq)
-
-    def single_tensor_update(self, tensor, param, grad, loss, state, setting):
-        state["step"] = state.get("step", 0) + 1
-        beta = setting["beta"]
-
-        if "L" not in state:
-            # for uu^T, u is eigenvector and u^T u is eigenvalue
-            norm = torch.linalg.vector_norm(tensor).clip(min=torch.finfo(tensor.dtype).tiny * 2)  # pylint:disable=not-callable
-
-            state["L"] = state["L_reg"] = (tensor.dot(tensor).unsqueeze(0) / norm)  # (rank,)
-            state["Q"] = state["Q_reg"] = tensor.unsqueeze(-1) / norm  # (m, rank)
-
-        else:
-            try:
-                L = state["L"]
-                Q = state["Q"]
-
-                # compute new factors
-                L_new, Q_new = eigh_plus_uuT(L*beta, Q, tensor, alpha=(1-beta), tol=setting["column_space_tol"], retry_float64=True)
-
-                # truncate/regularize new factors (those go into the accumulator)
-                L_new, Q_new = regularize_eigh(L=L_new, Q=Q_new, truncate=setting["rank"], tol=setting["eig_tol"],
-                                               damping=setting["damping"], rdamping=setting["rdamping"])
-
-                _eigengrad_update_state_(state=state, setting=setting, L_new=L_new, Q_new=Q_new)
-
-            except torch.linalg.LinAlgError:
-                pass
-
-    def single_tensor_apply(self, tensor, param, grad, loss, state, setting):
-        if "L_reg" not in state:
-            return tensor.clip(-0.1, 0.1)
-
-        if "eigenbasis_state" not in state:
-            state["eigenbasis_state"] = {}
-
-        return eigengrad_apply(
-            tensor=tensor,
-            L_reg=state["L_reg"],
-            Q_reg=state["Q_reg"],
-            beta=setting["beta"],
-            step=state["step"],
-            debias=True,
-            id_reg=setting["id_reg"],
-            eigenbasis_optimizer=setting["eigenbasis_optimizer"],
-            eigenbasis_state=state["eigenbasis_state"]
-        )
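
For context: the deleted Eigengrad module maintained an exponential moving average of the gradient covariance directly as a low-rank eigendecomposition, adding one rank-1 term g gᵀ per step via eigh_plus_uuT and then whitening the gradient in that eigenbasis. Below is a minimal sketch, in plain PyTorch, of one standard way to implement both operations. It is not torchzero's implementation; the helper names and the eps/tol defaults are illustrative, and C = Q diag(L) Qᵀ with orthonormal Q is assumed.

import torch

def eigh_plus_rank1(L, Q, u, alpha, beta, tol=1e-9):
    # Sketch: eigendecomposition of beta*C + alpha*u u^T for C = Q diag(L) Q^T,
    # assuming Q (m, r) has orthonormal columns and L has shape (r,).
    # Hypothetical helper, not torchzero's eigh_plus_uuT.
    w = Q.T @ u                              # coordinates of u in the eigenbasis
    res = u - Q @ w                          # residual of u outside span(Q)
    res_norm = torch.linalg.vector_norm(res)

    if res_norm > tol:
        # u leaves the column space: extend the basis with the normalized
        # residual direction, which has eigenvalue 0 in C
        Q_ext = torch.cat([Q, (res / res_norm).unsqueeze(-1)], dim=-1)  # (m, r+1)
        w_ext = torch.cat([w, res_norm.unsqueeze(0)])                   # (r+1,)
        L_ext = torch.cat([L, L.new_zeros(1)])                          # (r+1,)
    else:
        Q_ext, w_ext, L_ext = Q, w, L

    # in the (extended) basis the whole update is a small symmetric problem
    S = beta * torch.diag(L_ext) + alpha * torch.outer(w_ext, w_ext)
    L_new, V = torch.linalg.eigh(S)          # ascending eigenvalues
    return L_new, Q_ext @ V                  # rotate the basis back to R^m

def whiten(L, Q, g, eps=1e-12):
    # apply C^(-1/2) within the eigenbasis: Q diag(L^(-1/2)) Q^T g; components
    # of g outside span(Q) are dropped by this rank-r operator
    return Q @ ((Q.T @ g) / L.clamp(min=eps).sqrt())

Truncating back to the top `rank` eigenpairs after each update (which is what regularize_eigh appears to do, with damping added to the eigenvalues) keeps the per-step cost at O(r³) for the small eigh plus O(m·r²) for the basis rotation, rather than O(m³) for a dense covariance.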