gradboard 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gradboard has been flagged as potentially problematic; further details are available from the registry's advisory page for this release.

gradboard/__init__.py CHANGED
@@ -1,3 +1,3 @@
1
- from . import cycle
1
+ from . import cycles
2
2
  from . import optimiser
3
- from . import schedule
3
+ from . import scheduler
@@ -188,8 +188,8 @@ class Cycle:
188
188
  max_area = 0
189
189
  ascent_steps = 0
190
190
  descent_steps = 0
191
- total_up_gradient = 0
192
- total_down_gradient = 0
191
+ avg_up_gradient = 0
192
+ avg_down_gradient = 0
193
193
  total_gradient = 0
194
194
  previous_lr = None
195
195
  for s in range(self.total_steps):
@@ -200,11 +200,11 @@ class Cycle:
200
200
  pass
201
201
  elif previous_lr > height:
202
202
  descent_steps += 1
203
- total_down_gradient += height - previous_lr
203
+ avg_down_gradient += height - previous_lr
204
204
  total_gradient += height - previous_lr
205
205
  elif previous_lr < height:
206
206
  ascent_steps += 1
207
- total_up_gradient += height - previous_lr
207
+ avg_up_gradient += height - previous_lr
208
208
  total_gradient += height - previous_lr
209
209
  else:
210
210
  total_gradient += height
@@ -213,12 +213,8 @@ class Cycle:
213
213
  "area": total_area / max_area,
214
214
  "pc_ascent": round(ascent_steps / self.total_steps, 3),
215
215
  "pc_descent": round(descent_steps / self.total_steps, 3),
216
- "avg_up_gradient": round(
217
- total_up_gradient / ascent_steps if ascent_steps > 0 else 0.0, 3
218
- ),
219
- "avg_down_gradient": round(
220
- total_down_gradient / descent_steps if descent_steps > 0 else 0.0, 3
221
- ),
216
+ "avg_up_gradient": round(avg_up_gradient, 3),
217
+ "avg_down_gradient": round(avg_down_gradient, 3),
222
218
  "avg_gradient": round(-(self.high - self.low) / self.total_steps, 3),
223
219
  }
224
220
 
@@ -10,7 +10,7 @@ from scipy.ndimage import gaussian_filter1d
10
10
 
11
11
  from torch.amp import GradScaler
12
12
 
13
- from .cycle import Cycle
13
+ from .cycles import Cycle
14
14
 
15
15
 
16
16
  class PASS:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: gradboard
3
- Version: 0.1.3
3
+ Version: 0.1.5
4
4
  Summary: Easily snowboard down gnarly loss gradients
5
5
  License: MIT
6
6
  Author: Nicholas Bailey
@@ -0,0 +1,8 @@
1
+ gradboard/__init__.py,sha256=57AkHusYwLCsusiVnajH5pMFKioRCj-3IjF9qpdOzE0,69
2
+ gradboard/cycles.py,sha256=8mdqRDCVI6kUEqCcLOo99pzwMngsRNZI-JwrYucpadQ,8927
3
+ gradboard/optimiser.py,sha256=Iw1piQkQkWdNUW8d9f3iPrvP9qEGVxEKmerK7ATDgOI,6189
4
+ gradboard/scheduler.py,sha256=mNz6tEniA3L3hzeNvwqTM2XunfXwSqs8NahYI8gcm-A,6689
5
+ gradboard-0.1.5.dist-info/LICENSE,sha256=0BAzJE5BqQ7Iixp_AFdB2W1uO-HCRX-Qfun8PHt6yVM,1073
6
+ gradboard-0.1.5.dist-info/METADATA,sha256=bF8jq98LCay9RpiSJXObTk490h9IaoSBtzBoPEMmrQQ,2173
7
+ gradboard-0.1.5.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
8
+ gradboard-0.1.5.dist-info/RECORD,,
@@ -1,8 +0,0 @@
1
- gradboard/__init__.py,sha256=sLjLo-olnO7cbIEphFI9Bc24I4Nv2bw3VfI2m-hjmEs,67
2
- gradboard/cycle.py,sha256=I0iN0aeckniyK7glVnq3mnVuFE_M30lZCQ09MR2QnOs,9089
3
- gradboard/optimiser.py,sha256=Iw1piQkQkWdNUW8d9f3iPrvP9qEGVxEKmerK7ATDgOI,6189
4
- gradboard/schedule.py,sha256=F5L8HT281VuskK2-2dpfP0zBM6XCqxyCXcTc-CDZa1o,6688
5
- gradboard-0.1.3.dist-info/LICENSE,sha256=0BAzJE5BqQ7Iixp_AFdB2W1uO-HCRX-Qfun8PHt6yVM,1073
6
- gradboard-0.1.3.dist-info/METADATA,sha256=tipi9Kkuz8EY-zNoliGQT_QCdBHkCqi_4vvi2WFqc2s,2173
7
- gradboard-0.1.3.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
8
- gradboard-0.1.3.dist-info/RECORD,,