rxnn-0.2.70-py3-none-any.whl → rxnn-0.2.71-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
rxnn/training/mrl.py CHANGED
@@ -607,21 +607,38 @@ class MRLTrainer:
          print(f"Encoder grad norm - total: {encoder_total:.6f}, mean: {encoder_mean:.6f}")
          print(f"Decoder grad norm - total: {decoder_total:.6f}, mean: {decoder_mean:.6f}")
          print(f"Memory attention grad norm - total: {mem_att_total:.6f}, mean: {mem_att_mean:.6f}")
-         # decoder's cross att
-         dec_x_att_norms = [get_gradient_norms(layer.memory_cross_attention)[1] for layer in self.actor.decoder.model.layers]
-         print(f"Decoder cross-att mean norm: {(sum(dec_x_att_norms) / len(dec_x_att_norms)):.6f}, all: {dec_x_att_norms}")

+         dec_x_att_norms = [get_gradient_norms(layer.memory_cross_attention)[1] for layer in self.actor.decoder.model.layers]
          mem_att_norms = [get_gradient_norms(layer)[1] for layer in self.actor.memory_attention.model.attention_layers]
-         print(f"Memory attention layers mean norm: {(sum(mem_att_norms) / len(mem_att_norms)):.6f}, all: {mem_att_norms}")
-
          enc_ff_norms = [get_gradient_norms(layer.ff)[1] for layer in self.actor.encoder.model.layers]
-         print(f"Encoder ff mean norm: {(sum(enc_ff_norms) / len(enc_ff_norms)):.6f}, all: {enc_ff_norms}")
-
          enc_self_att_norms = [get_gradient_norms(layer.attention)[1] for layer in self.actor.encoder.model.layers]
-         print(f"Encoder self-att mean norm: {(sum(enc_self_att_norms) / len(enc_self_att_norms)):.6f}, all: {enc_self_att_norms}")
+         enc_x_att_norms = [get_gradient_norms(layer.memory_cross_attention)[1] for layer in
+                            self.actor.encoder.model.layers]
+
+         calc_mean = lambda x: sum(x) / len(x)
+
+         dec_x_att_norms_mean = calc_mean(dec_x_att_norms)
+         mem_att_norms_mean = calc_mean(mem_att_norms)
+         enc_ff_norms_mean = calc_mean(enc_ff_norms)
+         enc_self_att_norms_mean = calc_mean(enc_self_att_norms)
+         enc_x_att_norms_mean = calc_mean(enc_x_att_norms)
+
+         print(f"Decoder cross-att mean norm: {dec_x_att_norms_mean:.6f}, all: {dec_x_att_norms}")
+         print(f"Memory attention layers mean norm: {mem_att_norms_mean:.6f}, all: {mem_att_norms}")
+         print(f"Encoder ff mean norm: {enc_ff_norms_mean:.6f}, all: {enc_ff_norms}")
+         print(f"Encoder self-att mean norm: {enc_self_att_norms_mean:.6f}, all: {enc_self_att_norms}")
+         print(f"Encoder cross-att mean norm: {enc_x_att_norms_mean:.6f}, all: {enc_x_att_norms}")
+
+         if self.writer is not None:
+             self.writer.add_scalar('Gradient/encoder', encoder_mean, self.global_step['train'])
+             self.writer.add_scalar('Gradient/decoder', decoder_mean, self.global_step['train'])
+             self.writer.add_scalar('Gradient/mem-att', mem_att_mean, self.global_step['train'])
+             self.writer.add_scalar('Gradient/decoder x-att', dec_x_att_norms_mean, self.global_step['train'])
+             self.writer.add_scalar('Gradient/mem-att layers', mem_att_norms_mean, self.global_step['train'])
+             self.writer.add_scalar('Gradient/encoder ff', enc_ff_norms_mean, self.global_step['train'])
+             self.writer.add_scalar('Gradient/encoder self-att', enc_self_att_norms_mean, self.global_step['train'])
+             self.writer.add_scalar('Gradient/encoder x-att', enc_x_att_norms_mean, self.global_step['train'])

-         enc_att_norms = [get_gradient_norms(layer.memory_cross_attention)[1] for layer in self.actor.encoder.model.layers]
-         print(f"Encoder cross-att mean norm: {(sum(enc_att_norms) / len(enc_att_norms)):.6f}, all: {enc_att_norms}")

     def update_actor(self, state: tuple[TokenizedDict, TokenizedDict, TokenizedDict], action: TokenizedDict,
                      advantages: torch.Tensor, old_log_probs: torch.Tensor, epoch: int) -> float:
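
Note: `get_gradient_norms` is defined outside this hunk; from the `[1]` indexing above, it evidently returns a `(total, mean)` pair of gradient norms for a module. A minimal sketch of such a helper, assuming PyTorch modules and L2 norms (the real implementation lives elsewhere in rxnn):

from torch import nn

def get_gradient_norms(module: nn.Module) -> tuple[float, float]:
    # Hypothetical reconstruction: L2 norm of each parameter's gradient,
    # reduced to the (total, mean) pair consumed via [1] in the diff above.
    norms = [p.grad.norm(2).item() for p in module.parameters() if p.grad is not None]
    total = sum(norms)
    mean = total / len(norms) if norms else 0.0
    return total, mean

The refactor separates collection (the per-layer `*_norms` lists) from reporting, and the new `self.writer` block mirrors each printed mean into TensorBoard via the standard `SummaryWriter.add_scalar(tag, scalar_value, global_step)` call, so per-submodule gradient flow can be compared across training steps instead of being scraped from stdout.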
rxnn-0.2.71.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: rxnn
- Version: 0.2.70
+ Version: 0.2.71
  Summary: RxNN: Reactive Neural Networks Platform
  License: Apache-2.0
  Keywords: deep-learning,ai,machine-learning
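
The only METADATA change is the version bump. For completeness, a consumer can read this field at runtime with the standard library:

from importlib.metadata import version

# Prints '0.2.71' once the new wheel is installed.
print(version('rxnn'))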
rxnn-0.2.71.dist-info/RECORD CHANGED
@@ -17,7 +17,7 @@ rxnn/training/callbacks.py,sha256=rS8leuVFPVVfE5Zc8DMkUZhRIPN-vpPbUjowXE5TSBw,36
  rxnn/training/dataset.py,sha256=ruU6k33pQmpTqhxpjLFNdDJnCjcrBcGeFOzJqFahJDM,51880
  rxnn/training/ddp.py,sha256=VsNBjn3cY-uUj8hbsW7oKvb0_ZKnXnJ2KgObm-Mr9i4,836
  rxnn/training/models.py,sha256=ILkcqBV1MImnULnq-YDSSEf8cUdEbUgQaH0FRTsa4LA,9069
- rxnn/training/mrl.py,sha256=eIMfR0Rp7d_nvrgP9E2F_o7h2Suc0IWTUP0AXHqp-6Q,66282
+ rxnn/training/mrl.py,sha256=Ntkti6DDKipKa-AwTvo1WDOdIXOL3uXOhT-Xx29wR-w,67369
  rxnn/training/reward.py,sha256=uiSsBXmjMw2yv-1Bssy3RTlpU6zP8ape3490Sl-aT0M,16144
  rxnn/training/rl.py,sha256=hWtExxY-_pAmTOGYxyCNounUbaGWvLDVltC4sRC7MN4,7175
  rxnn/training/scheduler.py,sha256=LcjU35mEwz2U5x3U6tLfeeYlBqMxbFSxYzJYuXkWbSY,1408
@@ -33,7 +33,7 @@ rxnn/transformers/moe.py,sha256=j6jEx6Ip0zttlUZKKn82azxo95lkLZs-H2GLSMD88hY,5859
  rxnn/transformers/positional.py,sha256=1PjcJybUzeQlIKJI4tahAGZcYgCRCL0otxs7mpsNuzM,4410
  rxnn/transformers/sampler.py,sha256=t6iiQTdLQ0TakUWnnhKkb5DKF2F_9-thXHBydDF3fxg,17389
  rxnn/utils.py,sha256=ihb6OTyDtPiocB_lOvnq7eOkjjpCkgs8wxvXUBNQ7mM,996
- rxnn-0.2.70.dist-info/LICENSE,sha256=C8coDFIUYuOcke4JLPwTqahQUCyXyGq6WOaigOkx8tY,11275
- rxnn-0.2.70.dist-info/METADATA,sha256=jrbxT7UcwiXy63xX3TDBD1V84INrbArX362nafwkp98,60420
- rxnn-0.2.70.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- rxnn-0.2.70.dist-info/RECORD,,
+ rxnn-0.2.71.dist-info/LICENSE,sha256=C8coDFIUYuOcke4JLPwTqahQUCyXyGq6WOaigOkx8tY,11275
+ rxnn-0.2.71.dist-info/METADATA,sha256=7BHHcFtImjPB57X2eRLgO4IFOSBNb7GOR5ytMaCttkI,60420
+ rxnn-0.2.71.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ rxnn-0.2.71.dist-info/RECORD,,
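
Each RECORD line follows the wheel format `path,sha256=<digest>,<size>`, where the digest is an unpadded URL-safe base64 SHA-256 of the file; the size bump from 66282 to 67369 bytes matches the logging code added to mrl.py above. A short sketch that reproduces such an entry:

import base64
import hashlib

def record_line(path: str) -> str:
    # Wheel RECORD entry: path, urlsafe-base64 sha256 without '=' padding, byte size.
    data = open(path, 'rb').read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b'=').decode()
    return f"{path},sha256={digest},{len(data)}"

# Run against the unpacked 0.2.71 wheel, record_line('rxnn/training/mrl.py') should
# reproduce "rxnn/training/mrl.py,sha256=Ntkti6DDKipKa-AwTvo1WDOdIXOL3uXOhT-Xx29wR-w,67369".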