rxnn 0.2.70__tar.gz → 0.2.71__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. {rxnn-0.2.70 → rxnn-0.2.71}/PKG-INFO +1 -1
  2. {rxnn-0.2.70 → rxnn-0.2.71}/pyproject.toml +1 -1
  3. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/mrl.py +27 -10
  4. {rxnn-0.2.70 → rxnn-0.2.71}/LICENSE +0 -0
  5. {rxnn-0.2.70 → rxnn-0.2.71}/README.md +0 -0
  6. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/.DS_Store +0 -0
  7. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/__init__.py +0 -0
  8. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/experimental/__init__.py +0 -0
  9. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/experimental/attention.py +0 -0
  10. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/experimental/models.py +0 -0
  11. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/experimental/moe.py +0 -0
  12. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/memory/__init__.py +0 -0
  13. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/memory/attention.py +0 -0
  14. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/memory/norm.py +0 -0
  15. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/memory/stm.py +0 -0
  16. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/rxt/__init__.py +0 -0
  17. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/rxt/models.py +0 -0
  18. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/__init__.py +0 -0
  19. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/base.py +0 -0
  20. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/bml.py +0 -0
  21. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/callbacks.py +0 -0
  22. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/dataset.py +0 -0
  23. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/ddp.py +0 -0
  24. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/models.py +0 -0
  25. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/reward.py +0 -0
  26. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/rl.py +0 -0
  27. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/scheduler.py +0 -0
  28. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/tokenizer.py +0 -0
  29. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/utils.py +0 -0
  30. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/transformers/__init__.py +0 -0
  31. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/transformers/attention.py +0 -0
  32. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/transformers/ff.py +0 -0
  33. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/transformers/layers.py +0 -0
  34. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/transformers/mask.py +0 -0
  35. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/transformers/models.py +0 -0
  36. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/transformers/moe.py +0 -0
  37. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/transformers/positional.py +0 -0
  38. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/transformers/sampler.py +0 -0
  39. {rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/utils.py +0 -0
{rxnn-0.2.70 → rxnn-0.2.71}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.2.70
+Version: 0.2.71
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
{rxnn-0.2.70 → rxnn-0.2.71}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.2.70"
+version = "0.2.71"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
{rxnn-0.2.70 → rxnn-0.2.71}/src/rxnn/training/mrl.py
@@ -607,21 +607,38 @@ class MRLTrainer:
             print(f"Encoder grad norm - total: {encoder_total:.6f}, mean: {encoder_mean:.6f}")
             print(f"Decoder grad norm - total: {decoder_total:.6f}, mean: {decoder_mean:.6f}")
             print(f"Memory attention grad norm - total: {mem_att_total:.6f}, mean: {mem_att_mean:.6f}")
-            # decoder's cross att
-            dec_x_att_norms = [get_gradient_norms(layer.memory_cross_attention)[1] for layer in self.actor.decoder.model.layers]
-            print(f"Decoder cross-att mean norm: {(sum(dec_x_att_norms) / len(dec_x_att_norms)):.6f}, all: {dec_x_att_norms}")
 
+            dec_x_att_norms = [get_gradient_norms(layer.memory_cross_attention)[1] for layer in self.actor.decoder.model.layers]
             mem_att_norms = [get_gradient_norms(layer)[1] for layer in self.actor.memory_attention.model.attention_layers]
-            print(f"Memory attention layers mean norm: {(sum(mem_att_norms) / len(mem_att_norms)):.6f}, all: {mem_att_norms}")
-
             enc_ff_norms = [get_gradient_norms(layer.ff)[1] for layer in self.actor.encoder.model.layers]
-            print(f"Encoder ff mean norm: {(sum(enc_ff_norms) / len(enc_ff_norms)):.6f}, all: {enc_ff_norms}")
-
             enc_self_att_norms = [get_gradient_norms(layer.attention)[1] for layer in self.actor.encoder.model.layers]
-            print(f"Encoder self-att mean norm: {(sum(enc_self_att_norms) / len(enc_self_att_norms)):.6f}, all: {enc_self_att_norms}")
+            enc_x_att_norms = [get_gradient_norms(layer.memory_cross_attention)[1] for layer in
+                               self.actor.encoder.model.layers]
+
+            calc_mean = lambda x: sum(x) / len(x)
+
+            dec_x_att_norms_mean = calc_mean(dec_x_att_norms)
+            mem_att_norms_mean = calc_mean(mem_att_norms)
+            enc_ff_norms_mean = calc_mean(enc_ff_norms)
+            enc_self_att_norms_mean = calc_mean(enc_self_att_norms)
+            enc_x_att_norms_mean = calc_mean(enc_x_att_norms)
+
+            print(f"Decoder cross-att mean norm: {dec_x_att_norms_mean:.6f}, all: {dec_x_att_norms}")
+            print(f"Memory attention layers mean norm: {mem_att_norms_mean:.6f}, all: {mem_att_norms}")
+            print(f"Encoder ff mean norm: {enc_ff_norms_mean:.6f}, all: {enc_ff_norms}")
+            print(f"Encoder self-att mean norm: {enc_self_att_norms_mean:.6f}, all: {enc_self_att_norms}")
+            print(f"Encoder cross-att mean norm: {enc_x_att_norms_mean:.6f}, all: {enc_x_att_norms}")
+
+            if self.writer is not None:
+                self.writer.add_scalar('Gradient/encoder', encoder_mean, self.global_step['train'])
+                self.writer.add_scalar('Gradient/decoder', decoder_mean, self.global_step['train'])
+                self.writer.add_scalar('Gradient/mem-att', mem_att_mean, self.global_step['train'])
+                self.writer.add_scalar('Gradient/decoder x-att', dec_x_att_norms_mean, self.global_step['train'])
+                self.writer.add_scalar('Gradient/mem-att layers', mem_att_norms_mean, self.global_step['train'])
+                self.writer.add_scalar('Gradient/encoder ff', enc_ff_norms_mean, self.global_step['train'])
+                self.writer.add_scalar('Gradient/encoder self-att', enc_self_att_norms_mean, self.global_step['train'])
+                self.writer.add_scalar('Gradient/encoder x-att', enc_x_att_norms_mean, self.global_step['train'])
 
-            enc_att_norms = [get_gradient_norms(layer.memory_cross_attention)[1] for layer in self.actor.encoder.model.layers]
-            print(f"Encoder cross-att mean norm: {(sum(enc_att_norms) / len(enc_att_norms)):.6f}, all: {enc_att_norms}")
 
     def update_actor(self, state: tuple[TokenizedDict, TokenizedDict, TokenizedDict], action: TokenizedDict,
                      advantages: torch.Tensor, old_log_probs: torch.Tensor, epoch: int) -> float:
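
The only substantive change in this release is the gradient-diagnostics block in MRLTrainer shown above: per-layer mean gradient norms are collected once per module group, averaged with a small helper, printed, and now also logged to TensorBoard through self.writer.add_scalar. The sketch below illustrates that pattern in isolation. The exact signature of rxnn's get_gradient_norms is not shown in this diff, so the version here (returning a (total, mean) pair of L2 norms) and the stand-in layers are assumptions for illustration only.

# Sketch only (not part of the diff): minimal self-contained version of the
# pattern above, assuming self.writer is a torch.utils.tensorboard.SummaryWriter.
import torch
from torch.utils.tensorboard import SummaryWriter


def get_gradient_norms(module: torch.nn.Module) -> tuple[float, float]:
    # Collect per-parameter L2 gradient norms and reduce them to (total, mean).
    norms = [p.grad.norm(2).item() for p in module.parameters() if p.grad is not None]
    total = sum(norms)
    mean = total / len(norms) if norms else 0.0
    return total, mean


# Stand-in for a stack of cross-attention modules, with gradients populated
# by a dummy backward pass.
layers = torch.nn.ModuleList([torch.nn.Linear(8, 8) for _ in range(4)])
loss = sum(layer(torch.randn(2, 8)).sum() for layer in layers)
loss.backward()

calc_mean = lambda x: sum(x) / len(x)
dec_x_att_norms = [get_gradient_norms(layer)[1] for layer in layers]

writer = SummaryWriter()
writer.add_scalar('Gradient/decoder x-att', calc_mean(dec_x_att_norms), 0)
writer.close()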