rxnn 0.2.2__tar.gz → 0.2.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {rxnn-0.2.2 → rxnn-0.2.3}/PKG-INFO +1 -1
  2. {rxnn-0.2.2 → rxnn-0.2.3}/pyproject.toml +1 -1
  3. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/callbacks.py +2 -2
  4. {rxnn-0.2.2 → rxnn-0.2.3}/LICENSE +0 -0
  5. {rxnn-0.2.2 → rxnn-0.2.3}/README.md +0 -0
  6. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/.DS_Store +0 -0
  7. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/__init__.py +0 -0
  8. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/experimental/__init__.py +0 -0
  9. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/experimental/attention.py +0 -0
  10. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/experimental/models.py +0 -0
  11. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/experimental/moe.py +0 -0
  12. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/memory/__init__.py +0 -0
  13. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/memory/attention.py +0 -0
  14. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/memory/norm.py +0 -0
  15. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/memory/stm.py +0 -0
  16. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/rxt/__init__.py +0 -0
  17. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/rxt/models.py +0 -0
  18. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/__init__.py +0 -0
  19. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/base.py +0 -0
  20. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/bml.py +0 -0
  21. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/dataset.py +0 -0
  22. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/models.py +0 -0
  23. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/mrl.py +0 -0
  24. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/reward.py +0 -0
  25. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/rl.py +0 -0
  26. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/scheduler.py +0 -0
  27. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/tokenizer.py +0 -0
  28. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/training/utils.py +0 -0
  29. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/transformers/__init__.py +0 -0
  30. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/transformers/attention.py +0 -0
  31. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/transformers/ff.py +0 -0
  32. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/transformers/layers.py +0 -0
  33. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/transformers/mask.py +0 -0
  34. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/transformers/models.py +0 -0
  35. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/transformers/moe.py +0 -0
  36. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/transformers/positional.py +0 -0
  37. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/transformers/sampler.py +0 -0
  38. {rxnn-0.2.2 → rxnn-0.2.3}/src/rxnn/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: rxnn
3
- Version: 0.2.2
3
+ Version: 0.2.3
4
4
  Summary: RxNN: Reactive Neural Networks Platform
5
5
  License: Apache-2.0
6
6
  Keywords: deep-learning,ai,machine-learning
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
4
4
 
5
5
  [tool.poetry]
6
6
  name = "rxnn"
7
- version = "0.2.2"
7
+ version = "0.2.3"
8
8
  description = "RxNN: Reactive Neural Networks Platform"
9
9
 
10
10
  license = "Apache-2.0"
@@ -557,7 +557,7 @@ class MrlPrintCallback(MrlTrainerCallback):
557
557
  def on_epoch_start(self, actor: nn.Module, epoch: int, stage_epochs: int, curriculum_config: dict,
558
558
  global_epoch: int, global_epochs: int) -> None:
559
559
  print(
560
- f'Starting epoch {epoch}/{stage_epochs} (stage) | {global_epoch}/{global_epochs} (global) for {curriculum_config['steps']} steps in {curriculum_config['strategy']} strategy.')
560
+ f'Starting epoch {epoch}/{stage_epochs} (stage) | {global_epoch}/{global_epochs} (global) for {curriculum_config["steps"]} steps in {curriculum_config["strategy"]} strategy.')
561
561
 
562
562
  def on_epoch_end(self, actor: nn.Module, epoch: int, stage_epochs: int, policy_loss: float,
563
563
  critic_loss: float, global_epoch: int, global_epochs: int) -> None:
@@ -580,7 +580,7 @@ class MrlPrintCallback(MrlTrainerCallback):
580
580
  print(f'Epoch {epoch} | Step {step} - updated policy loss {critic_loss}')
581
581
 
582
582
  def on_training_end(self, actor: nn.Module, critic: nn.Module, curriculum_config: dict) -> None:
583
- print(f'Finished training for {curriculum_config['steps']} steps in {curriculum_config['strategy']} strategy.')
583
+ print(f'Finished training for {curriculum_config["steps"]} steps in {curriculum_config["strategy"]} strategy.')
584
584
 
585
585
  def on_eval_end(self, actor: nn.Module, critic: nn.Module, epoch: int, eval_mean_reward: float) -> None:
586
586
  print(f'Eval epoch {epoch} - mean reward {eval_mean_reward}')
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes