ista-daslab-optimizers 1.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. ista_daslab_optimizers/__init__.py +6 -0
  2. ista_daslab_optimizers/acdc/__init__.py +5 -0
  3. ista_daslab_optimizers/acdc/acdc.py +387 -0
  4. ista_daslab_optimizers/acdc/wd_scheduler.py +31 -0
  5. ista_daslab_optimizers/dense_mfac/__init__.py +5 -0
  6. ista_daslab_optimizers/dense_mfac/dense_core_mfac.py +164 -0
  7. ista_daslab_optimizers/dense_mfac/dense_mfac.py +93 -0
  8. ista_daslab_optimizers/fft_low_rank/dct_adamw.py +351 -0
  9. ista_daslab_optimizers/fft_low_rank/fft_projector.py +192 -0
  10. ista_daslab_optimizers/fft_low_rank/trion.py +242 -0
  11. ista_daslab_optimizers/ista_optimizer/__init__.py +5 -0
  12. ista_daslab_optimizers/ista_optimizer/ista_optimizer.py +36 -0
  13. ista_daslab_optimizers/micro_adam/__init__.py +5 -0
  14. ista_daslab_optimizers/micro_adam/micro_adam.py +402 -0
  15. ista_daslab_optimizers/sparse_mfac/__init__.py +7 -0
  16. ista_daslab_optimizers/sparse_mfac/sparse_core_mfac_w_ef.py +226 -0
  17. ista_daslab_optimizers/sparse_mfac/sparse_mfac.py +87 -0
  18. ista_daslab_optimizers/tools.py +218 -0
  19. ista_daslab_optimizers/utils/dct.py +45 -0
  20. ista_daslab_optimizers/utils/global_cache.py +45 -0
  21. ista_daslab_optimizers/utils/matrix_storage.py +58 -0
  22. ista_daslab_optimizers/utils/newton_schulz_triton.py +374 -0
  23. ista_daslab_optimizers/utils/quantizers.py +71 -0
  24. ista_daslab_optimizers/utils/schedulers.py +41 -0
  25. ista_daslab_optimizers-1.1.8.dist-info/METADATA +333 -0
  26. ista_daslab_optimizers-1.1.8.dist-info/RECORD +29 -0
  27. ista_daslab_optimizers-1.1.8.dist-info/WHEEL +5 -0
  28. ista_daslab_optimizers-1.1.8.dist-info/licenses/LICENSE +201 -0
  29. ista_daslab_optimizers-1.1.8.dist-info/top_level.txt +1 -0
ista_daslab_optimizers/sparse_mfac/sparse_mfac.py
@@ -0,0 +1,87 @@
+ import wandb
+ import torch
+ from .sparse_core_mfac_w_ef import SparseCoreMFACwithEF
+ from ..tools import get_first_device, get_gpus, get_weights_and_gradients, update_model, get_gpu_mem_usage
+
+
+ class SparseMFAC(torch.optim.Optimizer):
+     def __init__(self, params, lr: float, damp: float, m: int, k_init: float, weight_decay: float, use_bf16: bool):
+         super(SparseMFAC, self).__init__(params, dict(lr=lr, weight_decay=weight_decay))
+         self.lr = lr
+         self.weight_decay = weight_decay
+         self.m = m
+         self.damp = damp
+         self.use_bf16 = use_bf16
+         self.k_init = k_init
+
+         self.device = get_first_device()
+         self.d = sum([p.numel() for group in self.param_groups for p in group['params']])
+
+         ##### Sparse M-FAC preconditioner
+         self.core_mfac = SparseCoreMFACwithEF(
+             m=self.m,
+             d=self.d,
+             k_init=self.k_init,
+             dev=self.device,
+             gpus=[self.device] if torch.distributed.is_initialized() else get_gpus(),
+             damp=damp,
+             use_bf16=use_bf16)
+
+         ##### scalar variables
+         self.steps = 0
+         self.log_interval = 100
+         self.grad_norms_sum = 0
+
+         self.wandb_data = dict()
+         self.cos = torch.nn.CosineSimilarity(dim=0, eps=1e-6)
+
+     @torch.no_grad()
+     def step(self, closure=None):
+         self.steps += 1
+
+         loss = None
+         if closure is not None:
+             with torch.enable_grad():
+                 loss = closure()
+
+         ##################################################
+         ########## [1] GET GRADIENT
+         ##################################################
+         g_dense = get_weights_and_gradients(self.param_groups, get_weights=False, get_grad=True, grad_bf16=self.use_bf16)
+         norm_g_dense = g_dense.norm(p=2)
+         self.grad_norms_sum += norm_g_dense
+
+         ##################################################
+         ########## [2] PRECONDITION
+         ##################################################
+         update = self.core_mfac.apply_ef_then_update_buffer_then_precondition(g_dense)
+
+         ##################################################
+         ########## [3] UPDATE THE MODEL
+         ##################################################
+         lr = self.param_groups[0]['lr']
+
+         update_model(
+             params=self.param_groups,
+             update=update,
+             weight_decay=self.weight_decay,
+             alpha=None,
+             multiply_wd_w_lr=True)
+
+         ##################################################
+         ########## LOGS
+         ##################################################
+         if self.log_interval > 0 and self.steps % self.log_interval == 0:
+             norm_error = self.core_mfac.error.norm(p=2)
+             self.wandb_data.update({
+                 'epoch/step': self.steps,
+                 'epoch/norm_g': norm_g_dense,
+                 'epoch/norm_error': norm_error,
+                 'epoch/ef_norm_div_grad_norm_sum': norm_error / self.grad_norms_sum,
+                 'epoch/norm_u': update.norm(p=2),
+                 'epoch/gpu_mem_usage': get_gpu_mem_usage(),
+             })
+             self.wandb_data.update(self.core_mfac.wandb_data)
+             wandb.log(self.wandb_data)
+
+         return loss
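
For context, here is a minimal usage sketch of the SparseMFAC optimizer defined above. This is not taken from the package documentation: the import path assumes the package re-exports SparseMFAC at the top level, the hyperparameter values are arbitrary illustrations, and a CUDA device plus an initialized wandb run are assumed (step() calls wandb.log every log_interval steps).

import torch
import wandb
from ista_daslab_optimizers import SparseMFAC  # assumed top-level re-export

wandb.init(project='sparse-mfac-demo', mode='offline')  # step() logs via wandb.log

model = torch.nn.Linear(128, 10).cuda()
opt = SparseMFAC(
    model.parameters(),
    lr=1e-3,            # illustrative values, not tuned recommendations
    damp=1e-6,
    m=1024,             # number of gradients kept in the M-FAC history buffer
    k_init=0.01,        # assumed meaning: fraction of entries kept by the Top-K projection
    weight_decay=0.0,
    use_bf16=False,
)

x = torch.randn(32, 128, device='cuda')
y = torch.randint(0, 10, (32,), device='cuda')

opt.zero_grad()
loss = torch.nn.functional.cross_entropy(model(x), y)
loss.backward()
opt.step()
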
ista_daslab_optimizers/tools.py
@@ -0,0 +1,218 @@
+ import os
+ import gpustat
+ import torch
+ from enum import Enum
+ from importlib import import_module
+ import ista_daslab_cuda_tools
+
+ def get_cuda_capability(device=0):
+     cc = torch.cuda.get_device_capability(device)  # tuple, for example (8, 6) for CUDA Capability 8.6
+     return f'{cc[0]}{cc[1]}'
+
+ class CopyDirection(Enum):
+     k2d = 0
+     d2k = 1
+
+ class Strategy(Enum):
+     """Apply Top-K globally"""
+     GLOBAL = 1
+
+     """Apply Top-K in blocks of specific size"""
+     BLOCK = 2
+
+     @staticmethod
+     def factory(name: str):
+         if name == 'gl': return Strategy.GLOBAL
+         if name == 'bl': return Strategy.BLOCK
+         raise RuntimeError('Invalid strategy name')
+
+ def get_first_device():
+     if not torch.cuda.is_available():
+         return torch.device('cpu')
+     if torch.distributed.is_initialized():
+         return torch.device(f'cuda:{torch.distributed.get_rank()}')
+     return torch.device('cuda:0')
+
+
+ def get_gpus():
+     if not torch.cuda.is_available():
+         return ['cpu']
+     device = get_first_device()
+     if torch.cuda.device_count() == 1:
+         return [device]
+
+     return [
+         torch.device(f'cuda:{i}')
+         for i in range(len(os.environ['CUDA_VISIBLE_DEVICES'].split(',')))
+     ]
+
+ def get_gpu_mem_usage():
+     """
+     This method returns the GPU memory usage of the current process.
+     It uses gpustat to query the GPUs visible to the current process (via CUDA_VISIBLE_DEVICES).
+
+     gpustat usage:
+         stat = gpustat.new_query().gpus  # a list with one entry per GPU, indexed from 0
+         stat[i] (GPU #i) has the following keys:
+             - 'index'
+             - 'uuid'
+             - 'name'
+             - 'temperature.gpu'
+             - 'fan.speed'
+             - 'utilization.gpu'
+             - 'utilization.enc'
+             - 'utilization.dec'
+             - 'power.draw'
+             - 'enforced.power.limit'
+             - 'memory.used'
+             - 'memory.total'
+             - 'processes'
+         Among these keys, only 'processes' is used here.
+         stat[i].processes is a list of dicts, where each dict describes one process currently running on GPU #i:
+             - 'username'
+             - 'command'
+             - 'full_command'
+             - 'gpu_memory_usage'
+             - 'cpu_percent'
+             - 'cpu_memory_usage'
+             - 'pid'
+     """
+     gpus = gpustat.new_query().gpus
+     gids = list(map(int, os.environ['CUDA_VISIBLE_DEVICES'].split(',')))
+     gpu_mem = sum([int(proc['gpu_memory_usage']) for gid in gids for proc in gpus[gid]['processes'] if int(proc['pid']) == os.getpid()])
+     return gpu_mem
+
+ def block_split(model_size, block_size):
+     if model_size < block_size:
+         return 1, model_size
+     ### this is the shorter version that only returns the number of full blocks of size "block_size"
+     ### and the starting position of the last (and smallest) block
+     blocks_count = int(model_size / block_size)
+     start_index_last_block = model_size - model_size % block_size
+     return blocks_count, start_index_last_block
+
+ def get_weights_and_gradients(params, get_weights, get_grad=True, grad_bf16=False):
+     """
+     This method returns:
+         - w: the raw weights collected from the model, if get_weights=True
+         - g: the gradients (without weight decay added), if get_grad=True
+     """
+     w, g = [], []
+     for group in params:
+         for p in group['params']:
+             if p.grad is None or not p.requires_grad:
+                 continue
+
+             if get_weights:
+                 w.append(p.reshape(-1))
+             if get_grad:
+                 if grad_bf16:
+                     if p.grad.dtype != torch.bfloat16:
+                         g.append(p.grad.reshape(-1).to(dtype=torch.bfloat16))
+                     else:
+                         g.append(p.grad.reshape(-1))
+                 else:
+                     g.append(p.grad.reshape(-1))
+
+     if get_weights and get_grad:
+         return torch.cat(w), torch.cat(g)
+     if get_weights:
+         return torch.cat(w)
+     if get_grad:
+         return torch.cat(g)
+     raise RuntimeError(f'invalid combination of parameters: {get_weights=}, {get_grad=}')
+
+
+ def update_model(params, update, weight_decay=0, alpha=None, multiply_wd_w_lr=False):
+     """
+     Applies the flat `update` to the model, parameter by parameter.
+     When alpha=None, the step size is -lr from the parameter group.
+     """
+     count = 0
+     for group in params:
+         lr = group['lr']
+         wd = group.get('weight_decay', weight_decay)  # if the param group does not define weight decay, use the externally provided one
+         for p in group['params']:
+             if p.grad is None:
+                 continue
+             u = update[count:(count + p.numel())].reshape(p.shape).to(p.device)
+             if wd > 0:
+                 if multiply_wd_w_lr:
+                     p.mul_(1 - lr * wd)
+                 else:
+                     p.mul_(1 - wd)
+             p.add_(u, alpha=-lr if alpha is None else alpha)
+             count += p.numel()
+
+ class KernelVersionsManager:
+     def __init__(self, version_SP, version_LCG, m, d, d_block_size):
+         self.version_SP = version_SP
+         self.version_LCG = version_LCG
+         self.m = m
+         self.d = d
+         self.d_block_size = d_block_size
+
+         self.BLOCK_INDEX = 0
+         self.THREAD_INDEX = 1
+
+         # set number of blocks (initially None) based on the number of threads (see page 80 in the PhD #8)
+         # if self.d > 300_000_000:
+         #     print(f'Model size is larger than 300M. Switching SP version from {self.version_SP} to 252')
+         #     self.version_SP = 252
+
+         self.SP_BLOCKS_THREADS = {
+             23: [self.m, self.m],
+             # 24: [1024, 1024],
+             # 251: [None, 1024],
+             # 252: [None, self.m],
+             # 261: [None, 128],
+             # 262: [None, 128],
+             # 272: [None, 1024],
+         }
+
+         self.LCG_BLOCKS_THREADS = {
+             # 42: [68, 256],
+             # 43: [117, 32],
+             51: [None, 1024],
+             # 524: [None, 128],
+             # 53: [None, 128],
+             # 54: [None, 128],
+         }
+
+         self.set_blocks_count(self.SP_BLOCKS_THREADS, self.version_SP, op='SP')
+         self.set_blocks_count(self.LCG_BLOCKS_THREADS, self.version_LCG, op='LCG')
+         # self.SP_BLOCKS_THREADS[self.version_SP][0] = 10
+
+     def set_blocks_count(self, op_blocks_threads, op_version, op):
+         """
+         Safety measure: for small models, too many thread blocks might be launched and most of them
+         would process data out of the bounds of the arrays out, indices and values.
+         """
+         def div_inc(a, b):
+             r = a // b
+             return (r + 1) if (a % b > 0) else r
+
+         if op_blocks_threads[op_version][self.BLOCK_INDEX] is None:
+             blocks_count = div_inc(self.d, self.d_block_size)
+             op_max_blocks = ista_daslab_cuda_tools.get_sm_count()
+             op_required_blocks = min(blocks_count, op_max_blocks)
+             if op_required_blocks < op_max_blocks:
+                 print(f'Maximum number of blocks for {op} is {op_max_blocks}, but this model requires only {op_required_blocks}')
+                 # return op_required_blocks
+             op_blocks_threads[op_version][self.BLOCK_INDEX] = op_required_blocks
+             op_blocks_threads[op_version][self.BLOCK_INDEX] = op_max_blocks
+
+         print(f'{op_blocks_threads=}, {op_version=}, {op=}, {op_blocks_threads[op_version][self.BLOCK_INDEX]=}')
+
+     def get_SP_blocks(self):
+         return self.SP_BLOCKS_THREADS[self.version_SP][self.BLOCK_INDEX]
+
+     def get_SP_threads(self):
+         return self.SP_BLOCKS_THREADS[self.version_SP][self.THREAD_INDEX]
+
+     def get_LCG_blocks(self):
+         return self.LCG_BLOCKS_THREADS[self.version_LCG][self.BLOCK_INDEX]
+
+     def get_LCG_threads(self):
+         return self.LCG_BLOCKS_THREADS[self.version_LCG][self.THREAD_INDEX]
+
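
To make the flatten/apply contract of get_weights_and_gradients and update_model above concrete, here is a small sketch of how they compose into a plain SGD step with decoupled weight decay. The toy model and hyperparameters are assumptions for illustration only, and importing ista_daslab_optimizers.tools requires the compiled ista_daslab_cuda_tools extension to be installed.

import torch
from ista_daslab_optimizers.tools import get_weights_and_gradients, update_model, block_split

model = torch.nn.Linear(4, 2)
groups = [{'params': list(model.parameters()), 'lr': 0.1, 'weight_decay': 0.01}]

model(torch.randn(8, 4)).pow(2).mean().backward()

# Flatten all gradients into one 1-D tensor (weights are skipped, fp32 is kept).
g = get_weights_and_gradients(groups, get_weights=False, get_grad=True, grad_bf16=False)

# p <- (1 - lr * wd) * p - lr * g for every parameter, consuming the flat update left to right.
update_model(params=groups, update=g, weight_decay=0.01, alpha=None, multiply_wd_w_lr=True)

# block_split returns (number of full blocks, start index of the trailing partial block).
print(block_split(model_size=10, block_size=4))  # -> (2, 8)
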
ista_daslab_optimizers/utils/dct.py
@@ -0,0 +1,45 @@
+ import torch
+ import math
+
+ from ista_daslab_optimizers.utils.global_cache import GlobalCache
+
+ def dct3_matrix(n, dtype, device):
+     """
+     This function returns the orthogonal transformation matrix for the Discrete Cosine Transform (DCT-3).
+     """
+     lin = torch.arange(n)
+     I = lin.repeat(n, 1).to(torch.float)
+     Q = math.sqrt(2 / n) * torch.cos(torch.pi * (I.t() * (2. * I + 1.)) / (2. * n))
+     del lin, I
+     Q[0, :] *= math.sqrt(0.5)
+     return Q.to(device=device, dtype=dtype)
+
+ def dct_type2_makhoul(X):
+     N = X.shape[1]
+
+     if GlobalCache.contains(category='perm', key=N):
+         perm = GlobalCache.get(category='perm', key=N)
+     else:
+         even_idx = torch.arange(0, N, 2)         # 0, 2, 4, ...
+         odd_idx = torch.arange(1, N, 2).flip(0)  # last odd → first odd
+         perm = torch.cat([even_idx, odd_idx]).to(X.device)
+
+         GlobalCache.add(category='perm', key=N, item=perm)
+     #
+     # X_input = X[:, perm]
+     # if X_input.dtype != torch.float:
+     #     X_input = X_input.to(torch.float)
+     # X_fft = torch.fft.fft(X_input, dim=1)
+
+     X_fft = torch.fft.fft(X[:, perm].contiguous(), dim=1)
+
+     if GlobalCache.contains(category='twiddle', key=N):
+         W = GlobalCache.get(category='twiddle', key=N)
+     else:
+         W = 2 * torch.exp((-1j * torch.pi * torch.arange(N, device=X.device) / (2 * N)))
+         W[0] /= math.sqrt(4 * N)
+         W[1:] /= math.sqrt(2 * N)
+
+         GlobalCache.add(category='twiddle', key=N, item=W.reshape(1, N))
+
+     return (X_fft * W).real
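
A small sanity-check sketch tying the two routines above together: with the orthonormal scaling used here, dct_type2_makhoul applied row-wise should agree with multiplication by the transpose of the matrix returned by dct3_matrix (the orthonormal DCT-2 and DCT-3 matrices are transposes of each other). The sizes and tolerance below are arbitrary, and the snippet assumes the package imports cleanly on CPU.

import torch
from ista_daslab_optimizers.utils.dct import dct3_matrix, dct_type2_makhoul

torch.manual_seed(0)
B, N = 3, 16
X = torch.randn(B, N)

Q = dct3_matrix(N, dtype=torch.float32, device='cpu')

Y_fft = dct_type2_makhoul(X)  # single-FFT (Makhoul) DCT-2 of each row
Y_mat = X @ Q.t()             # the same transform as an explicit matrix product

print(torch.allclose(Y_fft, Y_mat, atol=1e-5))             # expected: True
print(torch.allclose(Q @ Q.t(), torch.eye(N), atol=1e-5))  # Q is orthogonal
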
ista_daslab_optimizers/utils/global_cache.py
@@ -0,0 +1,45 @@
+ class GlobalCache:
+     _instance = None
+
+     @staticmethod
+     def init():
+         if GlobalCache._instance is None:
+             GlobalCache._instance = GlobalCache()
+
+     @staticmethod
+     def contains(category, key):
+         return GlobalCache.get_instance()._contains(category, key)
+
+     @staticmethod
+     def get_instance():
+         if GlobalCache._instance is None:
+             GlobalCache.init()
+         return GlobalCache._instance
+
+     @staticmethod
+     def get(category, key):
+         return GlobalCache.get_instance()._get(category, key)
+
+     @staticmethod
+     def add(category, key, item):
+         return GlobalCache.get_instance()._add(category, key, item)
+
+     def __init__(self):
+         self.storage = dict()
+
+     def _contains(self, category, key):
+         if category not in self.storage:
+             return False
+         return key in self.storage[category]
+
+     def _add(self, category, key, item):
+         if category not in self.storage:
+             self.storage[category] = { key: item }
+         elif key not in self.storage[category]:
+             self.storage[category][key] = item
+
+     def _get(self, category, key):
+         # print(self.storage)
+         if self._contains(category, key):
+             return self.storage[category][key]
+         raise ValueError(f'GlobalCache does not contain category {category} and/or key {key}')
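
A brief usage sketch of the GlobalCache singleton above; the category name, key, and cached tensor are arbitrary examples.

import torch
from ista_daslab_optimizers.utils.global_cache import GlobalCache

# All access goes through the static methods; the singleton instance is created lazily.
if not GlobalCache.contains(category='perm', key=8):
    GlobalCache.add(category='perm', key=8, item=torch.arange(8).flip(0))

print(GlobalCache.get(category='perm', key=8))  # tensor([7, 6, 5, 4, 3, 2, 1, 0])

# add() is a no-op for an existing (category, key) pair, so cached items are never overwritten.
GlobalCache.add(category='perm', key=8, item=torch.zeros(8))
print(GlobalCache.get(category='perm', key=8))  # still the flipped arange
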
ista_daslab_optimizers/utils/matrix_storage.py
@@ -0,0 +1,58 @@
+ import math
+ import torch
+ import torch.distributed as dist
+ from ista_daslab_optimizers.utils.dct import dct3_matrix
+ from fast_hadamard_transform import hadamard_transform  # assumption: hadamard_transform comes from the fast_hadamard_transform package (not imported in the released file)
+
+ PROJ_DCT = 'dct'
+ PROJ_HDM = 'hdm'
+
+ ALL_PROJ = [
+     PROJ_DCT, # DCT projection
+     PROJ_HDM, # Hadamard projection
+ ]
+
+ class MatrixStorage:
+     """
+     This singleton class stores a dictionary where:
+         - keys = the matrix size
+         - values = the corresponding orthogonal matrix of the DCT-3 or Hadamard transform of the size stored in the key
+     """
+     _instance = None
+
+     @staticmethod
+     def init():
+         if MatrixStorage._instance is None:
+             MatrixStorage._instance = MatrixStorage()
+
+     @staticmethod
+     def get_instance():
+         if MatrixStorage._instance is None:
+             MatrixStorage.init()
+         return MatrixStorage._instance
+
+     @staticmethod
+     def get_matrix(size, proj, dtype):
+         return MatrixStorage.get_instance()._get_matrix(size, proj, dtype)
+
+     @staticmethod
+     def add_matrix(size, proj, dtype):
+         return MatrixStorage.get_instance()._add_matrix(size, proj, dtype)
+
+     def __init__(self):
+         self.storage = dict()
+         self.dtype = None
+         self.device = f'cuda:{dist.get_rank()}' if dist.is_initialized() else 'cuda:0'
+
+     def _add_matrix(self, size, proj, dtype):
+         if size not in self.storage:
+             if proj == PROJ_DCT:
+                 # dct3_matrix expects (n, dtype, device); the matrix is built directly on the storage device
+                 self.storage[size] = dct3_matrix(size, dtype=dtype, device=self.device)
+             elif proj == PROJ_HDM:
+                 self.storage[size] = hadamard_transform(torch.eye(size).to(device=self.device, dtype=dtype), scale=1. / math.sqrt(size))
+             else:
+                 raise RuntimeError(f'Projection {proj} is currently not supported!')
+
+     def _get_matrix(self, size, proj, dtype):
+         if size not in self.storage:
+             assert dtype is not None
+             self._add_matrix(size, proj, dtype)
+         return self.storage[size]
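
Finally, a hedged sketch of how MatrixStorage can be used. It assumes a CUDA device (the constructor hard-codes a cuda device string) and the corrected imports shown above. Note that the cache key is only the matrix size, so the first call for a given size fixes both the projection type and the dtype.

import torch
from ista_daslab_optimizers.utils.matrix_storage import MatrixStorage, PROJ_DCT

# The first call for size=64 builds the DCT matrix on the storage device and caches it.
Q = MatrixStorage.get_matrix(size=64, proj=PROJ_DCT, dtype=torch.bfloat16)
print(Q.shape, Q.dtype, Q.device)  # torch.Size([64, 64]) torch.bfloat16 cuda:0

# Later calls with the same size return the cached matrix, regardless of proj/dtype.
Q2 = MatrixStorage.get_matrix(size=64, proj=PROJ_DCT, dtype=torch.float32)
print(Q is Q2)  # True
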