ema-pytorch 0.3.1__tar.gz → 0.3.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ema-pytorch has been flagged as potentially problematic. See the package's security advisory page for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ema-pytorch
3
- Version: 0.3.1
3
+ Version: 0.3.2
4
4
  Summary: Easy way to keep track of exponential moving average version of your pytorch module
5
5
  Home-page: https://github.com/lucidrains/ema-pytorch
6
6
  Author: Phil Wang
@@ -43,7 +43,7 @@ class EMA(Module):
43
43
 
44
44
  Args:
45
45
  inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1.
46
- power (float): Exponential factor of EMA warmup. Default: 1.
46
+ power (float): Exponential factor of EMA warmup. Default: 2/3.
47
47
  min_value (float): The minimum EMA decay rate. Default: 0.
48
48
  """
49
49
 
@@ -53,6 +53,7 @@ class EMA(Module):
53
53
  model: Module,
54
54
  ema_model: Optional[Module] = None, # if your model has lazylinears or other types of non-deepcopyable modules, you can pass in your own ema model
55
55
  beta = 0.9999,
56
+ karras_beta = False, # if True, uses the karras time dependent beta
56
57
  update_after_step = 100,
57
58
  update_every = 10,
58
59
  inv_gamma = 1.0,
@@ -65,7 +66,8 @@ class EMA(Module):
65
66
  allow_different_devices = False # if the EMA model is on a different device (say CPU), automatically move the tensor
66
67
  ):
67
68
  super().__init__()
68
- self.beta = beta
69
+ self._beta = beta
70
+ self.karras_beta = karras_beta
69
71
 
70
72
  # whether to include the online model within the module tree, so that state_dict also saves it
71
73
 
@@ -127,6 +129,13 @@ class EMA(Module):
127
129
  @property
128
130
  def model(self):
129
131
  return self.online_model if self.include_online_model else self.online_model[0]
132
+
133
+ @property
134
+ def beta(self):
135
+ if self.karras_beta:
136
+ return (1 - 1 / (self.step + 1)) ** (1 + self.power)
137
+
138
+ return self._beta
130
139
 
131
140
  def eval(self):
132
141
  return self.ema_model.eval()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ema-pytorch
3
- Version: 0.3.1
3
+ Version: 0.3.2
4
4
  Summary: Easy way to keep track of exponential moving average version of your pytorch module
5
5
  Home-page: https://github.com/lucidrains/ema-pytorch
6
6
  Author: Phil Wang
@@ -3,7 +3,7 @@ from setuptools import setup, find_packages
3
3
  setup(
4
4
  name = 'ema-pytorch',
5
5
  packages = find_packages(exclude=[]),
6
- version = '0.3.1',
6
+ version = '0.3.2',
7
7
  license='MIT',
8
8
  description = 'Easy way to keep track of exponential moving average version of your pytorch module',
9
9
  author = 'Phil Wang',
File without changes
File without changes
File without changes