fusion-bench 0.2.24__py3-none-any.whl → 0.2.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. fusion_bench/__init__.py +152 -42
  2. fusion_bench/dataset/__init__.py +27 -4
  3. fusion_bench/dataset/clip_dataset.py +2 -2
  4. fusion_bench/method/__init__.py +10 -1
  5. fusion_bench/method/classification/__init__.py +27 -2
  6. fusion_bench/method/classification/image_classification_finetune.py +214 -0
  7. fusion_bench/method/opcm/opcm.py +1 -0
  8. fusion_bench/method/pwe_moe/module.py +0 -2
  9. fusion_bench/method/tall_mask/task_arithmetic.py +2 -2
  10. fusion_bench/mixins/__init__.py +2 -0
  11. fusion_bench/mixins/pyinstrument.py +174 -0
  12. fusion_bench/mixins/simple_profiler.py +106 -23
  13. fusion_bench/modelpool/__init__.py +2 -0
  14. fusion_bench/modelpool/base_pool.py +77 -14
  15. fusion_bench/modelpool/clip_vision/modelpool.py +56 -19
  16. fusion_bench/modelpool/resnet_for_image_classification.py +208 -0
  17. fusion_bench/models/__init__.py +35 -9
  18. fusion_bench/optim/__init__.py +40 -2
  19. fusion_bench/optim/lr_scheduler/__init__.py +27 -1
  20. fusion_bench/optim/muon.py +339 -0
  21. fusion_bench/programs/__init__.py +2 -0
  22. fusion_bench/programs/fabric_fusion_program.py +2 -2
  23. fusion_bench/programs/fusion_program.py +271 -0
  24. fusion_bench/tasks/clip_classification/__init__.py +15 -0
  25. fusion_bench/utils/__init__.py +167 -21
  26. fusion_bench/utils/lazy_imports.py +91 -12
  27. fusion_bench/utils/lazy_state_dict.py +55 -5
  28. fusion_bench/utils/misc.py +104 -13
  29. fusion_bench/utils/packages.py +4 -0
  30. fusion_bench/utils/path.py +7 -0
  31. fusion_bench/utils/pylogger.py +6 -0
  32. fusion_bench/utils/rich_utils.py +1 -0
  33. fusion_bench/utils/state_dict_arithmetic.py +935 -162
  34. {fusion_bench-0.2.24.dist-info → fusion_bench-0.2.25.dist-info}/METADATA +1 -1
  35. {fusion_bench-0.2.24.dist-info → fusion_bench-0.2.25.dist-info}/RECORD +48 -34
  36. fusion_bench_config/method/classification/image_classification_finetune.yaml +16 -0
  37. fusion_bench_config/method/classification/image_classification_finetune_test.yaml +6 -0
  38. fusion_bench_config/model_fusion.yaml +45 -0
  39. fusion_bench_config/modelpool/ResNetForImageClassfication/transformers/resnet152_cifar10.yaml +14 -0
  40. fusion_bench_config/modelpool/ResNetForImageClassfication/transformers/resnet152_cifar100.yaml +14 -0
  41. fusion_bench_config/modelpool/ResNetForImageClassfication/transformers/resnet18_cifar10.yaml +14 -0
  42. fusion_bench_config/modelpool/ResNetForImageClassfication/transformers/resnet18_cifar100.yaml +14 -0
  43. fusion_bench_config/modelpool/ResNetForImageClassfication/transformers/resnet50_cifar10.yaml +14 -0
  44. fusion_bench_config/modelpool/ResNetForImageClassfication/transformers/resnet50_cifar100.yaml +14 -0
  45. {fusion_bench-0.2.24.dist-info → fusion_bench-0.2.25.dist-info}/WHEEL +0 -0
  46. {fusion_bench-0.2.24.dist-info → fusion_bench-0.2.25.dist-info}/entry_points.txt +0 -0
  47. {fusion_bench-0.2.24.dist-info → fusion_bench-0.2.25.dist-info}/licenses/LICENSE +0 -0
  48. {fusion_bench-0.2.24.dist-info → fusion_bench-0.2.25.dist-info}/top_level.txt +0 -0
@@ -1,323 +1,1065 @@
1
1
  from collections import OrderedDict
2
2
  from numbers import Number
3
- from typing import Callable, Dict, List, Literal, Union, cast
3
+ from typing import Callable, Dict, List, Literal, Optional, Union, cast
4
4
 
5
5
  import torch
6
6
  from torch import Tensor
7
7
  from tqdm.auto import tqdm
8
8
 
9
- from .parameters import check_parameters_all_equal
10
9
  from .type import BoolStateDictType, StateDictType
11
10
 
11
# Public API of this module.
__all__ = [
    "ArithmeticStateDict",
    "state_dicts_check_keys",
    "state_dict_to_device",
    "num_params_of_state_dict",
    "state_dict_flatten",
    "state_dict_avg",
    "state_dict_sub",
    "state_dict_add",
    "state_dict_add_scalar",
    "state_dict_mul",
    "state_dict_div",
    "state_dict_power",
    "state_dict_interpolation",
    "state_dict_sum",
    "state_dict_weighted_sum",
    "state_dict_diff_abs",
    "state_dict_binary_mask",
    "state_dict_hadamard_product",
]
31
+
32
+
33
class ArithmeticStateDict(OrderedDict):
    """An ``OrderedDict`` of tensors with operator-overloaded arithmetic.

    The operators delegate to the module-level ``state_dict_*`` helpers, so
    ``+``, ``-``, ``*``, ``/``, ``**`` and ``@`` (Hadamard product) all work
    on whole state dicts while every ``OrderedDict`` feature stays available.

    Examples:
        >>> sd1 = ArithmeticStateDict({'weight': torch.tensor([1.0, 2.0]), 'bias': torch.tensor([0.5])})
        >>> sd2 = ArithmeticStateDict({'weight': torch.tensor([2.0, 3.0]), 'bias': torch.tensor([1.0])})
        >>> result = sd1 + sd2  # Element-wise addition
        >>> result = sd1 - sd2  # Element-wise subtraction
        >>> result = sd1 * 2.0  # Scalar multiplication
        >>> result = sd1 / 2.0  # Scalar division
        >>> result = sd1 @ sd2  # Hadamard product
    """

    def __init__(self, *args, **kwargs):
        """Initialize with the same interface as ``OrderedDict``."""
        super().__init__(*args, **kwargs)

    def __add__(
        self, other: "Union[ArithmeticStateDict, StateDictType, Number]"
    ) -> "ArithmeticStateDict":
        """Element-wise addition with another state dict, or a scalar shift.

        Args:
            other: Another state dict, or a scalar added to every tensor.

        Returns:
            A new ``ArithmeticStateDict`` holding the element-wise sum.
        """
        if isinstance(other, (int, float, Number)):
            return ArithmeticStateDict(state_dict_add_scalar(self, other))
        if isinstance(other, (dict, OrderedDict)):
            return ArithmeticStateDict(state_dict_add(self, other, strict=True))
        raise TypeError(
            f"Cannot add ArithmeticStateDict with {type(other).__name__}"
        )

    def __radd__(
        self, other: "Union[ArithmeticStateDict, StateDictType, Number]"
    ) -> "ArithmeticStateDict":
        """Right addition (``other + self``).

        The ``other == 0`` fast path lets built-in ``sum`` (which starts at
        0) work on a sequence of ``ArithmeticStateDict``s.
        """
        if other == 0:  # sum() starts with 0 by default
            return self
        if isinstance(other, (int, float, Number)):
            return self.__add__(other)  # scalar addition is commutative
        if isinstance(other, (dict, OrderedDict)):
            return self.__add__(other)
        raise TypeError(
            f"Cannot add {type(other).__name__} with ArithmeticStateDict"
        )

    def __sub__(
        self, other: "Union[ArithmeticStateDict, StateDictType, Number]"
    ) -> "ArithmeticStateDict":
        """Element-wise subtraction with another state dict or a scalar.

        Args:
            other: Another state dict, or a scalar subtracted from every tensor.

        Returns:
            A new ``ArithmeticStateDict`` holding the element-wise difference.
        """
        if isinstance(other, (int, float, Number)):
            # Scalar subtraction is addition of the negated scalar.
            return ArithmeticStateDict(state_dict_add_scalar(self, -other))
        if isinstance(other, (dict, OrderedDict)):
            return ArithmeticStateDict(state_dict_sub(self, other, strict=True))
        raise TypeError(
            f"Cannot subtract {type(other).__name__} from ArithmeticStateDict"
        )

    def __rsub__(
        self, other: "Union[ArithmeticStateDict, StateDictType, Number]"
    ) -> "ArithmeticStateDict":
        """Right subtraction (``other - self``)."""
        if isinstance(other, (int, float, Number)):
            # scalar - state_dict: subtract every tensor from the scalar.
            return ArithmeticStateDict(
                (name, other - value) for name, value in self.items()
            )
        if isinstance(other, (dict, OrderedDict)):
            return ArithmeticStateDict(state_dict_sub(other, self, strict=True))
        raise TypeError(
            f"Cannot subtract ArithmeticStateDict from {type(other).__name__}"
        )

    def __mul__(
        self, scalar: "Union[Number, ArithmeticStateDict, StateDictType]"
    ) -> "ArithmeticStateDict":
        """Scalar multiplication, or Hadamard product with another state dict.

        Args:
            scalar: A scalar multiplier, or a state dict for an element-wise
                (Hadamard) product.

        Returns:
            A new ``ArithmeticStateDict`` with the result.
        """
        if isinstance(scalar, (int, float, Number)):
            return ArithmeticStateDict(state_dict_mul(self, scalar))
        if isinstance(scalar, (dict, OrderedDict)):
            return ArithmeticStateDict(state_dict_hadamard_product(self, scalar))
        raise TypeError(
            f"Cannot multiply ArithmeticStateDict with {type(scalar).__name__}"
        )

    def __rmul__(
        self, scalar: "Union[Number, ArithmeticStateDict, StateDictType]"
    ) -> "ArithmeticStateDict":
        """Right multiplication (``scalar * self``); multiplication commutes."""
        return self.__mul__(scalar)

    def __truediv__(self, scalar: "Number") -> "ArithmeticStateDict":
        """Scalar division.

        Args:
            scalar: The divisor applied to every tensor.

        Returns:
            A new ``ArithmeticStateDict`` with every tensor divided by ``scalar``.

        Raises:
            ZeroDivisionError: If ``scalar`` is zero.
            TypeError: If ``scalar`` is not a number.
        """
        if not isinstance(scalar, (int, float, Number)):
            raise TypeError(
                f"Cannot divide ArithmeticStateDict by {type(scalar).__name__}"
            )
        return ArithmeticStateDict(state_dict_div(self, scalar))

    def __pow__(self, exponent: "Number") -> "ArithmeticStateDict":
        """Element-wise power.

        Args:
            exponent: The exponent applied to every tensor element.

        Returns:
            A new ``ArithmeticStateDict`` with element-wise powers.
        """
        if not isinstance(exponent, (int, float, Number)):
            raise TypeError(
                f"Cannot raise ArithmeticStateDict to power of {type(exponent).__name__}"
            )
        return ArithmeticStateDict(state_dict_power(self, exponent))

    def __matmul__(
        self, other: "Union[ArithmeticStateDict, StateDictType]"
    ) -> "ArithmeticStateDict":
        """Hadamard (element-wise) product via the ``@`` operator.

        Args:
            other: Another state dict to multiply element-wise.

        Returns:
            A new ``ArithmeticStateDict`` with the Hadamard product.
        """
        if not isinstance(other, (dict, OrderedDict)):
            raise TypeError(
                f"Cannot compute Hadamard product with {type(other).__name__}"
            )
        return ArithmeticStateDict(state_dict_hadamard_product(self, other))

    def __rmatmul__(
        self, other: "Union[ArithmeticStateDict, StateDictType]"
    ) -> "ArithmeticStateDict":
        """Right ``@`` (``other @ self``); the Hadamard product commutes."""
        return self.__matmul__(other)

    def __iadd__(
        self, other: "Union[ArithmeticStateDict, StateDictType, Number]"
    ) -> "ArithmeticStateDict":
        """In-place addition with a scalar or another state dict.

        NOTE(review): with a dict operand, keys absent from ``other`` are
        silently left unchanged (unlike strict ``__add__``).
        """
        if isinstance(other, (int, float, Number)):
            for name in self:
                self[name] = self[name] + other
        elif isinstance(other, (dict, OrderedDict)):
            for name in self:
                if name in other:
                    self[name] = self[name] + other[name]
        else:
            raise TypeError(f"Cannot add {type(other).__name__} to ArithmeticStateDict")
        return self

    def __isub__(
        self, other: "Union[ArithmeticStateDict, StateDictType, Number]"
    ) -> "ArithmeticStateDict":
        """In-place subtraction with a scalar or another state dict.

        NOTE(review): with a dict operand, keys absent from ``other`` are
        silently left unchanged.
        """
        if isinstance(other, (int, float, Number)):
            for name in self:
                self[name] = self[name] - other
        elif isinstance(other, (dict, OrderedDict)):
            for name in self:
                if name in other:
                    self[name] = self[name] - other[name]
        else:
            raise TypeError(
                f"Cannot subtract {type(other).__name__} from ArithmeticStateDict"
            )
        return self

    def __imul__(
        self, scalar: "Union[Number, ArithmeticStateDict, StateDictType]"
    ) -> "ArithmeticStateDict":
        """In-place multiplication by a scalar or element-wise by a dict."""
        if isinstance(scalar, (int, float, Number)):
            for name in self:
                self[name] = self[name] * scalar
        elif isinstance(scalar, (dict, OrderedDict)):
            for name in self:
                if name in scalar:
                    self[name] = self[name] * scalar[name]
        else:
            raise TypeError(
                f"Cannot multiply ArithmeticStateDict with {type(scalar).__name__}"
            )
        return self

    def __itruediv__(self, scalar: "Number") -> "ArithmeticStateDict":
        """In-place scalar division; rejects non-numbers and zero."""
        if not isinstance(scalar, (int, float, Number)):
            raise TypeError(
                f"Cannot divide ArithmeticStateDict by {type(scalar).__name__}"
            )
        if scalar == 0:
            raise ZeroDivisionError("Cannot divide by zero")
        for name in self:
            self[name] = self[name] / scalar
        return self

    def __ipow__(self, exponent: "Number") -> "ArithmeticStateDict":
        """In-place element-wise power."""
        if not isinstance(exponent, (int, float, Number)):
            raise TypeError(
                f"Cannot raise ArithmeticStateDict to power of {type(exponent).__name__}"
            )
        for name in self:
            self[name] = self[name] ** exponent
        return self

    def abs(self) -> "ArithmeticStateDict":
        """Return a new ``ArithmeticStateDict`` with element-wise absolute values."""
        return ArithmeticStateDict(
            (name, torch.abs(value)) for name, value in self.items()
        )

    def sqrt(self) -> "ArithmeticStateDict":
        """Return a new ``ArithmeticStateDict`` with element-wise square roots."""
        return ArithmeticStateDict(
            (name, torch.sqrt(value)) for name, value in self.items()
        )

    def sum(self) -> "ArithmeticStateDict":
        """Return ``self`` unchanged.

        NOTE(review): despite the name, no reduction is performed; this hook
        exists for when ``sum()`` is applied to a list of
        ``ArithmeticStateDict``s.
        """
        return self

    def to_device(
        self,
        device: "Union[torch.device, str]",
        copy: bool = False,
        inplace: bool = False,
    ) -> "ArithmeticStateDict":
        """Move all tensors to ``device``.

        Args:
            device: Target device.
            copy: Whether to force a copy.
            inplace: Whether to modify this instance in place.

        Returns:
            An ``ArithmeticStateDict`` whose tensors live on ``device``.
        """
        if inplace:
            for name, value in self.items():
                self[name] = value.to(device, non_blocking=True, copy=copy)
            return self
        return ArithmeticStateDict(
            (name, value.to(device, non_blocking=True, copy=copy))
            for name, value in self.items()
        )

    def clone(self) -> "ArithmeticStateDict":
        """Return a deep copy with every tensor cloned."""
        return ArithmeticStateDict(
            (name, value.clone()) for name, value in self.items()
        )

    def detach(self) -> "ArithmeticStateDict":
        """Return a copy with every tensor detached from the autograd graph."""
        return ArithmeticStateDict(
            (name, value.detach()) for name, value in self.items()
        )

    def num_params(self) -> int:
        """Return the total number of scalar elements across all tensors."""
        return sum(value.numel() for value in self.values())

    @classmethod
    def from_state_dict(cls, state_dict: "StateDictType") -> "ArithmeticStateDict":
        """Build an ``ArithmeticStateDict`` from a plain state dict.

        Args:
            state_dict: A regular state dictionary.

        Returns:
            A new ``ArithmeticStateDict`` holding the same entries.
        """
        return cls(state_dict)

    @classmethod
    def weighted_sum(
        cls,
        state_dicts: "List[Union[ArithmeticStateDict, StateDictType]]",
        weights: "List[float]",
    ) -> "ArithmeticStateDict":
        """Compute a weighted sum of several state dicts.

        Args:
            state_dicts: State dicts to combine.
            weights: One weight per state dict.

        Returns:
            A new ``ArithmeticStateDict`` holding the weighted sum.
        """
        return cls(state_dict_weighted_sum(state_dicts, weights))

    @classmethod
    def average(
        cls, state_dicts: "List[Union[ArithmeticStateDict, StateDictType]]"
    ) -> "ArithmeticStateDict":
        """Compute the element-wise average of several state dicts.

        Args:
            state_dicts: State dicts to average.

        Returns:
            A new ``ArithmeticStateDict`` holding the average.
        """
        return cls(state_dict_avg(state_dicts))
461
+
462
def _validate_state_dict_list_not_empty(state_dicts: "List[StateDictType]") -> None:
    """Ensure ``state_dicts`` is a non-empty sequence of non-empty dicts.

    Args:
        state_dicts: Sequence of state dictionaries to validate.

    Raises:
        TypeError: If ``state_dicts`` is ``None``, is not a list/tuple, or an
            entry is not a dictionary.
        ValueError: If the sequence is empty, or any entry is ``None``/empty.
    """
    if state_dicts is None:
        raise TypeError("state_dicts cannot be None")
    if not isinstance(state_dicts, (list, tuple)):
        raise TypeError(
            f"Expected list or tuple of state dicts, got {type(state_dicts).__name__}"
        )
    if not state_dicts:
        raise ValueError("The list of state_dicts must not be empty")

    for idx, entry in enumerate(state_dicts):
        if entry is None:
            raise ValueError(f"State dict at index {idx} is None")
        if not isinstance(entry, (dict, OrderedDict)):
            raise TypeError(
                f"Item at index {idx} is not a dictionary, got {type(entry).__name__}"
            )
        if not entry:
            raise ValueError(f"State dict at index {idx} is empty")
494
+
495
def _validate_state_dict_same_keys(state_dicts: "List[StateDictType]") -> None:
    """Validate that all state dicts share keys and compatible tensors.

    Every state dict is compared against the first one: key sets must match
    exactly, and for each key the tensors must agree in Python type, shape
    and dtype. A device mismatch only emits a warning, since arithmetic may
    still succeed after a transfer.

    Args:
        state_dicts: List of state dictionaries to validate.

    Raises:
        ValueError: If key sets differ, shapes/dtypes differ, or exactly one
            of a tensor pair is None.
        TypeError: If corresponding tensors have different Python types.
    """
    # Hoisted to function scope: previously re-imported inside the innermost
    # comparison loop on every device-mismatch warning.
    import warnings

    # Nothing to cross-check with fewer than two state dicts.
    if not state_dicts:
        return
    if len(state_dicts) < 2:
        return

    reference_state_dict = state_dicts[0]
    reference_keys = set(reference_state_dict.keys())

    if not reference_keys:
        raise ValueError("Reference state dict (index 0) has no keys")

    for i, state_dict in enumerate(state_dicts[1:], 1):
        current_keys = set(state_dict.keys())

        # Check for missing keys
        missing_keys = reference_keys - current_keys
        if missing_keys:
            raise ValueError(
                f"State dict at index {i} is missing keys: {sorted(missing_keys)}"
            )

        # Check for extra keys
        extra_keys = current_keys - reference_keys
        if extra_keys:
            raise ValueError(
                f"State dict at index {i} has extra keys: {sorted(extra_keys)}"
            )

        # Check tensor shapes and dtypes for compatibility
        for key in reference_keys:
            ref_tensor = reference_state_dict[key]
            curr_tensor = state_dict[key]

            # Both None is acceptable; exactly one None is an error.
            if ref_tensor is None and curr_tensor is None:
                continue
            if ref_tensor is None or curr_tensor is None:
                raise ValueError(
                    f"Tensor None mismatch for key '{key}' at index {i}: "
                    f"one is None, the other is not"
                )

            if not isinstance(curr_tensor, type(ref_tensor)):
                raise TypeError(
                    f"Tensor type mismatch for key '{key}' at index {i}: "
                    f"expected {type(ref_tensor).__name__}, got {type(curr_tensor).__name__}"
                )

            if hasattr(ref_tensor, "shape") and hasattr(curr_tensor, "shape"):
                if ref_tensor.shape != curr_tensor.shape:
                    raise ValueError(
                        f"Shape mismatch for key '{key}' at index {i}: "
                        f"expected {ref_tensor.shape}, got {curr_tensor.shape}"
                    )

            if hasattr(ref_tensor, "dtype") and hasattr(curr_tensor, "dtype"):
                if ref_tensor.dtype != curr_tensor.dtype:
                    raise ValueError(
                        f"Dtype mismatch for key '{key}' at index {i}: "
                        f"expected {ref_tensor.dtype}, got {curr_tensor.dtype}"
                    )

            # Device mismatch is tolerated: warn but do not fail.
            if (
                hasattr(ref_tensor, "device")
                and hasattr(curr_tensor, "device")
                and ref_tensor.device != curr_tensor.device
            ):
                warnings.warn(
                    f"Device mismatch for key '{key}' at index {i}: "
                    f"reference on {ref_tensor.device}, current on {curr_tensor.device}. "
                    f"This may cause issues during arithmetic operations."
                )
+
584
def _validate_list_lengths_equal(
    list1: "List",
    list2: "List",
    name1: str = "the first list",
    name2: str = "the second list",
) -> None:
    """Validate that two sequences are well-formed and equal in length.

    Args:
        list1: First sequence to compare.
        list2: Second sequence to compare.
        name1: Descriptive name for the first sequence (used in messages).
        name2: Descriptive name for the second sequence (used in messages).

    Raises:
        TypeError: If either argument is not a list/tuple or a name is not a string.
        ValueError: If lengths differ, both sequences are empty, or a numeric
            sequence contains NaN/inf.
    """
    if not isinstance(name1, str) or not isinstance(name2, str):
        raise TypeError("List names must be strings")
    if list1 is None or list2 is None:
        raise TypeError("Lists cannot be None")
    if not isinstance(list1, (list, tuple)):
        raise TypeError(f"{name1} must be a list or tuple, got {type(list1).__name__}")
    if not isinstance(list2, (list, tuple)):
        raise TypeError(f"{name2} must be a list or tuple, got {type(list2).__name__}")
    if not list1 and not list2:
        raise ValueError(f"Both {name1} and {name2} are empty")

    len1, len2 = len(list1), len(list2)
    if len1 != len2:
        raise ValueError(
            f"Length mismatch: {name1} has {len1} items, " f"{name2} has {len2} items"
        )

    # NOTE(review): the numeric scan below is gated only on list1's first
    # element (original behavior preserved).
    if list1 and hasattr(list1[0], "__float__"):
        try:
            import math

            for seq, label in ((list1, name1), (list2, name2)):
                for idx, item in enumerate(seq):
                    if isinstance(item, (int, float)) and (
                        math.isnan(item) or math.isinf(item)
                    ):
                        raise ValueError(
                            f"{label} contains invalid numeric value at index {idx}: {item}"
                        )
        except (TypeError, AttributeError):
            # Numeric validation is best-effort; skip it on odd element types.
            pass
+
648
+
649
def state_dict_to_device(
    state_dict: "StateDictType",
    device: "Union[torch.device, str]",
    copy: bool = False,
    inplace: bool = False,
) -> "StateDictType":
    """Move every tensor of a state dict to the given device.

    Args:
        state_dict: The state dictionary whose tensors are moved.
        device: Target device for the tensors.
        copy: Force a copy even when a tensor is already on ``device``.
        inplace: Mutate ``state_dict`` instead of building a new dict.

    Returns:
        The state dict (the input itself when ``inplace``) with all tensors
        on ``device``.
    """
    target = state_dict if inplace else OrderedDict()
    for name, value in state_dict.items():
        target[name] = cast(Tensor, value).to(device, non_blocking=True, copy=copy)
    return target
 
29
678
 
30
def state_dicts_check_keys(state_dicts: "List[StateDictType]") -> None:
    """Assert that every state dictionary shares the same key set.

    Args:
        state_dicts: The state dictionaries to cross-check.

    Raises:
        ValueError: If the list is empty or the key sets disagree.
    """
    # Delegate to the shared validators so error messages stay consistent.
    _validate_state_dict_list_not_empty(state_dicts)
    _validate_state_dict_same_keys(state_dicts)
 
46
692
 
47
693
def num_params_of_state_dict(state_dict: "StateDictType") -> int:
    """Count the total number of parameters stored in a state dict.

    Args:
        state_dict: The state dict whose parameters are counted.

    Returns:
        The total element count over all tensors.
    """
    total = 0
    for value in state_dict.values():
        total += value.numel()
    return total
 
59
705
 
60
def state_dict_flatten(state_dict: "StateDictType") -> Tensor:
    """Concatenate every tensor of a state dict into one 1-D tensor.

    Args:
        state_dict: The state dict to flatten; iteration order of the dict
            determines the layout of the result.

    Returns:
        A single flat tensor holding all parameters.
    """
    flat_parts = [value.reshape(-1) for value in state_dict.values()]
    return torch.cat(flat_parts)
75
718
 
76
719
def state_dict_avg(state_dicts: "List[StateDictType]") -> "StateDictType":
    """Compute the element-wise mean over a list of state dicts.

    Args:
        state_dicts: State dicts to average; all must share keys and shapes.

    Returns:
        A state dict holding the averaged parameters.

    Raises:
        ValueError: If the list is empty or the state dicts disagree on keys.
    """
    _validate_state_dict_list_not_empty(state_dicts)
    _validate_state_dict_same_keys(state_dicts)

    count = len(state_dicts)

    # Zero-initialize an accumulator shaped like the first state dict.
    mean = OrderedDict(
        (key, torch.zeros_like(value)) for key, value in state_dicts[0].items()
    )

    # Accumulate every state dict, then normalize by the count.
    for sd in state_dicts:
        for key in mean:
            mean[key] += sd[key]
    for key in mean:
        mean[key] /= count

    return mean
 
100
753
 
101
754
def state_dict_sub(
    a: "StateDictType",
    b: "StateDictType",
    strict: bool = True,
    device: "Optional[Union[torch.device, str]]" = None,
) -> "StateDictType":
    """Compute the element-wise difference between two state dicts (a - b).

    Args:
        a: The first state dict (minuend).
        b: The second state dict (subtrahend).
        strict: Whether to require exact key matching between state dicts.
        device: Optional device to move the result tensors to.

    Returns:
        A state dict containing the element-wise differences. Keys follow the
        iteration order of ``a``.

    Raises:
        ValueError: If strict=True and the state dicts have different keys or
            incompatible tensor shapes.
        TypeError: If tensors have incompatible types.
    """
    result = OrderedDict()

    if strict:
        _validate_state_dict_same_keys([a, b])
        keys_to_process = a.keys()
    else:
        # FIX: iterate in `a`'s order instead of an unordered set
        # intersection, so the result's key order is deterministic.
        shared = set(b.keys())
        keys_to_process = [key for key in a.keys() if key in shared]

    for key in keys_to_process:
        result_tensor = a[key] - b[key]
        if device is not None:
            result_tensor = result_tensor.to(device, non_blocking=True)
        result[key] = result_tensor

    return result
 
126
792
 
127
793
  def state_dict_add(
128
794
  a: StateDictType,
129
795
  b: StateDictType,
130
796
  strict: bool = True,
131
- device=None,
797
+ device: Optional[Union[torch.device, str]] = None,
132
798
  show_pbar: bool = False,
133
799
  ) -> StateDictType:
134
800
  """
135
- Returns the sum of two state dicts.
801
+ Compute the element-wise sum of two state dicts.
136
802
 
137
803
  Args:
138
- a (Dict): The first state dict.
139
- b (Dict): The second state dict.
140
- strict (bool): Whether to check if the keys of the two state dicts are the same.
804
+ a: The first state dict.
805
+ b: The second state dict.
806
+ strict: Whether to require exact key matching between state dicts.
807
+ device: Optional device to move the result tensors to.
808
+ show_pbar: Whether to show a progress bar during computation.
141
809
 
142
810
  Returns:
143
- Dict: The sum of the two state dicts.
811
+ A state dict containing the element-wise sums.
812
+
813
+ Raises:
814
+ ValueError: If strict=True and the state dicts have different parameters.
144
815
  """
145
- ans = {}
816
+ result = OrderedDict()
817
+
146
818
  if strict:
147
- check_parameters_all_equal([a, b])
148
- for key in tqdm(tuple(a.keys())) if show_pbar else a:
149
- ans[key] = a[key] + b[key]
819
+ _validate_state_dict_same_keys([a, b])
820
+ keys_to_process = a.keys()
150
821
  else:
151
- for key in tqdm(tuple(a.keys())) if show_pbar else a:
152
- if key in b:
153
- ans[key] = a[key] + b[key]
822
+ keys_to_process = set(a.keys()) & set(b.keys())
823
+
824
+ keys_iter = (
825
+ tqdm(keys_to_process, desc="Adding state dicts")
826
+ if show_pbar
827
+ else keys_to_process
828
+ )
829
+
830
+ for key in keys_iter:
831
+ if key in b: # This check is redundant when strict=True but harmless
832
+ result[key] = a[key] + b[key]
833
+
154
834
  if device is not None:
155
- ans = to_device(ans, device)
156
- return ans
835
+ result = state_dict_to_device(result, device)
157
836
 
837
+ return result
158
838
 
159
def state_dict_add_scalar(state_dict: StateDictType, scalar: Number) -> StateDictType:
    """
    Add a scalar value to every parameter in a state dict.

    Args:
        state_dict: The state dict to read from (not modified).
        scalar: The scalar value added to each parameter.

    Returns:
        A new state dict whose entries are `tensor + scalar`, in the same key order.
    """
    shifted = OrderedDict()
    for name, tensor in state_dict.items():
        shifted[name] = tensor + scalar
    return shifted
164
852
 
165
853
 
166
854
def state_dict_mul(state_dict: StateDictType, scalar: float) -> StateDictType:
    """
    Multiply every parameter in a state dict by a scalar.

    Args:
        state_dict: The state dict to read from (not modified).
        scalar: The scalar each parameter is multiplied by.

    Returns:
        A new state dict whose entries are `scalar * tensor`, in the same key order.
    """
    scaled = OrderedDict()
    for name, tensor in state_dict.items():
        scaled[name] = scalar * tensor
    return scaled
181
866
 
182
867
 
183
868
def state_dict_div(
    state_dict: StateDictType, scalar: float, show_pbar: bool = False
) -> StateDictType:
    """
    Divide every parameter in a state dict by a scalar.

    Args:
        state_dict: The state dict to read from (not modified).
        scalar: The scalar each parameter is divided by.
        show_pbar: Whether to display a progress bar while iterating.

    Returns:
        A new state dict whose entries are `tensor / scalar`, in the same key order.

    Raises:
        ZeroDivisionError: If `scalar` is zero.
    """
    # Fail fast instead of silently producing inf/nan tensors.
    if scalar == 0:
        raise ZeroDivisionError("Cannot divide state dict by zero")

    keys = state_dict.keys()
    if show_pbar:
        keys = tqdm(keys, desc="Dividing state dict")

    quotient = OrderedDict()
    for name in keys:
        quotient[name] = state_dict[name] / scalar
    return quotient
200
894
 
201
895
 
202
896
def state_dict_power(state_dict: StateDictType, p: float) -> StateDictType:
    """
    Raise every parameter in a state dict to the power `p`.

    Args:
        state_dict: The state dict to read from (not modified).
        p: The exponent applied element-wise to each parameter.

    Returns:
        A new state dict whose entries are `tensor ** p`, in the same key order.
    """
    powered = OrderedDict()
    for name, tensor in state_dict.items():
        powered[name] = tensor**p
    return powered
217
908
 
218
909
 
219
910
def state_dict_interpolation(
    state_dicts: List[StateDictType], scalars: List[float]
) -> StateDictType:
    """
    Interpolate between multiple state dicts using specified scalar weights.

    Args:
        state_dicts: List of state dicts to interpolate between.
        scalars: List of scalar weights for interpolation.

    Returns:
        A state dict containing the interpolated parameters.

    Raises:
        ValueError: If the lists have different lengths or are empty, or if state dicts have different keys.
    """
    _validate_state_dict_list_not_empty(state_dicts)
    _validate_list_lengths_equal(state_dicts, scalars, "state_dicts", "scalars")
    _validate_state_dict_same_keys(state_dicts)

    # Start every entry at zero, matching the first state dict's shapes/dtypes.
    blended = OrderedDict(
        (key, torch.zeros_like(tensor)) for key, tensor in state_dicts[0].items()
    )

    # Add each state dict's weighted contribution in place.
    for source, coeff in zip(state_dicts, scalars):
        for key in blended:
            blended[key] += coeff * source[key]

    return blended
246
942
 
247
943
 
248
944
def state_dict_sum(state_dicts: List[StateDictType]) -> StateDictType:
    """
    Compute the element-wise sum of multiple state dicts.

    Args:
        state_dicts: List of state dicts to sum.

    Returns:
        A state dict containing the element-wise sums.

    Raises:
        ValueError: If the list is empty or state dicts have different keys.
    """
    _validate_state_dict_list_not_empty(state_dicts)
    _validate_state_dict_same_keys(state_dicts)

    # Zero-initialized accumulator shaped after the first state dict.
    total = OrderedDict(
        (key, torch.zeros_like(tensor)) for key, tensor in state_dicts[0].items()
    )

    # Fold every state dict into the accumulator in place.
    for source in state_dicts:
        for key in total:
            total[key] += source[key]

    return total
269
972
 
270
973
 
271
974
def state_dict_weighted_sum(
    state_dicts: List[StateDictType],
    weights: List[float],
    device: Optional[Union[torch.device, str]] = None,
) -> StateDictType:
    """
    Compute the weighted sum of multiple state dicts.

    Handles sparse input tensors: accumulation is performed densely, and the
    result is converted back to sparse only when it is mostly zeros.

    Args:
        state_dicts: List of state dicts to combine.
        weights: List of weights for the weighted sum.
        device: Optional device to move the result tensors to.

    Returns:
        A state dict containing the weighted sum of parameters.

    Raises:
        ValueError: If the lists have different lengths or are empty, or if state dicts have different keys.
    """
    _validate_state_dict_list_not_empty(state_dicts)
    _validate_list_lengths_equal(state_dicts, weights, "state_dicts", "weights")
    _validate_state_dict_same_keys(state_dicts)

    weighted_sum_state_dict = OrderedDict()

    # Single pass per key: initialize, accumulate, move, and (maybe) re-sparsify.
    for key in state_dicts[0]:
        # Sparsity is decided from the FIRST state dict's tensor only; later
        # sparse tensors under the same key are densified on the fly below.
        ref_tensor = state_dicts[0][key]
        is_sparse = ref_tensor.is_sparse if hasattr(ref_tensor, "is_sparse") else False

        # Accumulate in dense layout even for sparse inputs — in-place adds on
        # a dense buffer are cheap and avoid repeated sparse coalescing.
        if is_sparse:
            result_tensor = torch.zeros_like(ref_tensor).to_dense()
        else:
            result_tensor = torch.zeros_like(ref_tensor)

        # Accumulate weighted contributions in a single loop.
        for state_dict, weight in zip(state_dicts, weights):
            tensor = state_dict[key]

            # Fast paths for the common weights 0 and 1.
            if weight == 0.0:
                continue  # Skip zero weights entirely.
            elif weight == 1.0:
                result_tensor += tensor  # Avoid the multiply for unit weights.
            else:
                # Densify sparse addends so `weight * tensor` and the in-place
                # add stay on the dense code path.
                if is_sparse and hasattr(tensor, "is_sparse") and tensor.is_sparse:
                    result_tensor += weight * tensor.to_dense()
                else:
                    result_tensor += weight * tensor

        # Move once per finished tensor, not once per accumulation step.
        if device is not None:
            result_tensor = result_tensor.to(device, non_blocking=True)

        # Convert back to sparse only when the original was sparse AND the
        # result is mostly zeros, so the conversion actually saves memory.
        # NOTE(review): this makes the output layout data-dependent — a caller
        # that fed sparse tensors may get a dense tensor back; confirm callers
        # tolerate either layout.
        if is_sparse and hasattr(result_tensor, "to_sparse"):
            try:
                if result_tensor.numel() > 0:
                    sparsity_ratio = (result_tensor == 0).float().mean().item()
                    if sparsity_ratio > 0.5:  # Convert back if >50% zeros.
                        result_tensor = result_tensor.to_sparse()
            except (RuntimeError, AttributeError):
                # Best-effort: if conversion fails, keep the dense result.
                pass

        weighted_sum_state_dict[key] = result_tensor

    return weighted_sum_state_dict
305
1048
 
306
1049
 
307
1050
def state_dict_diff_abs(a: StateDictType, b: StateDictType) -> StateDictType:
    """
    Compute the element-wise absolute difference between two state dicts.

    Args:
        a: The first state dict.
        b: The second state dict.

    Returns:
        A state dict containing `|a[key] - b[key]|` for each key.
    """
    # Delegate the subtraction (and its strict key checking) to state_dict_sub,
    # then take absolute values entry by entry.
    result = OrderedDict()
    for key, delta in state_dict_sub(a, b).items():
        result[key] = delta.abs()
    return result
321
1063
 
322
1064
 
323
1065
  def state_dict_binary_mask(
@@ -327,18 +1069,28 @@ def state_dict_binary_mask(
327
1069
  Literal["greater", "less", "equal", "not_equal"],
328
1070
  Callable[[Tensor, Tensor], torch.BoolTensor],
329
1071
  ] = "greater",
1072
+ strict: bool = True,
1073
+ show_pbar: bool = False,
330
1074
  ) -> BoolStateDictType:
331
1075
  """
332
- Returns the binary mask of elements in a compared to elements in b using the provided comparison function.
1076
+ Create binary masks by comparing elements in two state dicts.
333
1077
 
334
1078
  Args:
335
- a (StateDictType): The first state dict.
336
- b (StateDictType): The second state dict.
337
- compare_fn (Union[Literal["greater", "less", "equal", "not_equal"], Callable[[Tensor, Tensor], Tensor]]): A function that takes two tensors and returns a boolean tensor.
338
- Defaults to greater than comparison (x > y).
1079
+ a: The first state dict.
1080
+ b: The second state dict.
1081
+ compare_fn: Comparison function to use. Can be a string literal
1082
+ ("greater", "less", "equal", "not_equal") or a callable
1083
+ that takes two tensors and returns a boolean tensor.
1084
+ strict: Whether to require exact key matching between state dicts.
1085
+ show_pbar: Whether to show a progress bar during computation.
339
1086
 
340
1087
  Returns:
341
- StateDictType: A dictionary containing binary masks (0 or 1) based on the comparison.
1088
+ A dictionary containing boolean masks based on the comparison.
1089
+
1090
+ Raises:
1091
+ ValueError: If compare_fn is not a valid string or callable, or if strict=True
1092
+ and the state dicts have different keys or incompatible tensor shapes.
1093
+ TypeError: If tensors have incompatible types.
342
1094
  """
343
1095
  compare_fn_dict = {
344
1096
  "greater": lambda x, y: x > y,
@@ -346,31 +1098,52 @@ def state_dict_binary_mask(
346
1098
  "equal": lambda x, y: x == y,
347
1099
  "not_equal": lambda x, y: x != y,
348
1100
  }
1101
+
349
1102
  if isinstance(compare_fn, str):
1103
+ if compare_fn not in compare_fn_dict:
1104
+ raise ValueError(
1105
+ f"Invalid compare_fn string: {compare_fn}. Must be one of {list(compare_fn_dict.keys())}"
1106
+ )
350
1107
  compare_fn = compare_fn_dict[compare_fn]
351
1108
  elif not callable(compare_fn):
352
1109
  raise ValueError(
353
1110
  f"compare_fn must be a string or a callable, but got {type(compare_fn)}"
354
1111
  )
355
1112
 
356
- mask = OrderedDict()
357
- for key in a:
358
- mask[key] = compare_fn(a[key], b[key])
359
- return mask
1113
+ result = OrderedDict()
360
1114
 
1115
+ if strict:
1116
+ _validate_state_dict_same_keys([a, b])
1117
+ keys_to_process = a.keys()
1118
+ else:
1119
+ keys_to_process = set(a.keys()) & set(b.keys())
1120
+
1121
+ keys_iter = (
1122
+ tqdm(keys_to_process, desc="Creating binary masks")
1123
+ if show_pbar
1124
+ else keys_to_process
1125
+ )
1126
+
1127
+ for key in keys_iter:
1128
+ result[key] = compare_fn(a[key], b[key])
361
1129
 
362
- def state_dict_hadmard_product(a: StateDictType, b: StateDictType) -> StateDictType:
1130
+ return result
1131
+
1132
+
1133
+ def state_dict_hadamard_product(a: StateDictType, b: StateDictType) -> StateDictType:
363
1134
  """
364
- Returns the Hadamard product of two state dicts, i.e. element-wise product.
1135
+ Compute the Hadamard product (element-wise multiplication) of two state dicts.
365
1136
 
366
1137
  Args:
367
- a (StateDictType): The first state dict.
368
- b (StateDictType): The second state dict.
1138
+ a: The first state dict.
1139
+ b: The second state dict.
369
1140
 
370
1141
  Returns:
371
- StateDictType: The Hadamard product of the two state dicts.
1142
+ A state dict containing the element-wise products.
1143
+
1144
+ Raises:
1145
+ ValueError: If the state dicts have different keys or incompatible tensor shapes.
1146
+ TypeError: If tensors have incompatible types.
372
1147
  """
373
- ans = OrderedDict()
374
- for key in a:
375
- ans[key] = a[key] * b[key]
376
- return ans
1148
+ _validate_state_dict_same_keys([a, b])
1149
+ return OrderedDict((key, a[key] * b[key]) for key in a)