ema-pytorch 0.3.2__tar.gz → 0.3.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ema-pytorch might be problematic. Click here for more details.
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/PKG-INFO +1 -1
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/README.md +13 -0
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/ema_pytorch/ema_pytorch.py +5 -0
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/ema_pytorch.egg-info/PKG-INFO +1 -1
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/setup.py +1 -1
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/LICENSE +0 -0
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/ema_pytorch/__init__.py +0 -0
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/ema_pytorch.egg-info/SOURCES.txt +0 -0
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/ema_pytorch.egg-info/dependency_links.txt +0 -0
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/ema_pytorch.egg-info/requires.txt +0 -0
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/ema_pytorch.egg-info/top_level.txt +0 -0
- {ema-pytorch-0.3.2 → ema-pytorch-0.3.3}/setup.cfg +0 -0
|
@@ -52,3 +52,16 @@ ema_output = ema(data)
|
|
|
52
52
|
## Todo
|
|
53
53
|
|
|
54
54
|
- [ ] address the issue of annealing EMA to 1 near the end of training for BYOL https://github.com/lucidrains/byol-pytorch/issues/82
|
|
55
|
+
|
|
56
|
+
## Citations
|
|
57
|
+
|
|
58
|
+
```bibtex
|
|
59
|
+
@article{Karras2023AnalyzingAI,
|
|
60
|
+
title = {Analyzing and Improving the Training Dynamics of Diffusion Models},
|
|
61
|
+
author = {Tero Karras and Miika Aittala and Jaakko Lehtinen and Janne Hellsten and Timo Aila and Samuli Laine},
|
|
62
|
+
journal = {ArXiv},
|
|
63
|
+
year = {2023},
|
|
64
|
+
volume = {abs/2312.02696},
|
|
65
|
+
url = {https://api.semanticscholar.org/CorpusID:265659032}
|
|
66
|
+
}
|
|
67
|
+
```
|
|
@@ -69,6 +69,8 @@ class EMA(Module):
|
|
|
69
69
|
self._beta = beta
|
|
70
70
|
self.karras_beta = karras_beta
|
|
71
71
|
|
|
72
|
+
self.is_frozen = beta == 1.
|
|
73
|
+
|
|
72
74
|
# whether to include the online model within the module tree, so that state_dict also saves it
|
|
73
75
|
|
|
74
76
|
self.include_online_model = include_online_model
|
|
@@ -202,6 +204,9 @@ class EMA(Module):
|
|
|
202
204
|
|
|
203
205
|
@torch.no_grad()
|
|
204
206
|
def update_moving_average(self, ma_model, current_model):
|
|
207
|
+
if self.is_frozen:
|
|
208
|
+
return
|
|
209
|
+
|
|
205
210
|
copy, lerp = self.inplace_copy, self.inplace_lerp
|
|
206
211
|
current_decay = self.get_current_decay()
|
|
207
212
|
|
|
@@ -3,7 +3,7 @@ from setuptools import setup, find_packages
|
|
|
3
3
|
setup(
|
|
4
4
|
name = 'ema-pytorch',
|
|
5
5
|
packages = find_packages(exclude=[]),
|
|
6
|
-
version = '0.3.2',
|
|
6
|
+
version = '0.3.3',
|
|
7
7
|
license='MIT',
|
|
8
8
|
description = 'Easy way to keep track of exponential moving average version of your pytorch module',
|
|
9
9
|
author = 'Phil Wang',
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|