rxnn 0.1.15__tar.gz → 0.1.17__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {rxnn-0.1.15 → rxnn-0.1.17}/PKG-INFO +24 -1
  2. {rxnn-0.1.15 → rxnn-0.1.17}/README.md +24 -0
  3. {rxnn-0.1.15 → rxnn-0.1.17}/pyproject.toml +1 -1
  4. rxnn-0.1.17/src/rxnn/experimental/attention.py +743 -0
  5. rxnn-0.1.17/src/rxnn/experimental/models.py +116 -0
  6. rxnn-0.1.17/src/rxnn/experimental/moe.py +206 -0
  7. rxnn-0.1.17/src/rxnn/transformers/moe.py +131 -0
  8. rxnn-0.1.15/src/rxnn/experimental/attention.py +0 -380
  9. rxnn-0.1.15/src/rxnn/transformers/moe.py +0 -185
  10. {rxnn-0.1.15 → rxnn-0.1.17}/LICENSE +0 -0
  11. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/__init__.py +0 -0
  12. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/experimental/__init__.py +0 -0
  13. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/memory/__init__.py +0 -0
  14. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/memory/norm.py +0 -0
  15. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/memory/stm.py +0 -0
  16. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/rxt/__init__.py +0 -0
  17. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/rxt/models.py +0 -0
  18. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/training/__init__.py +0 -0
  19. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/training/base.py +0 -0
  20. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/training/bml.py +0 -0
  21. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/training/callbacks.py +0 -0
  22. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/training/dataset.py +0 -0
  23. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/training/scheduler.py +0 -0
  24. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/training/tokenizer.py +0 -0
  25. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/transformers/__init__.py +0 -0
  26. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/transformers/attention.py +0 -0
  27. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/transformers/ff.py +0 -0
  28. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/transformers/layers.py +0 -0
  29. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/transformers/mask.py +0 -0
  30. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/transformers/models.py +0 -0
  31. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/transformers/positional.py +0 -0
  32. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/transformers/sampler.py +0 -0
  33. {rxnn-0.1.15 → rxnn-0.1.17}/src/rxnn/utils.py +0 -0
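The bulk of this release is new code that the listing above only summarizes: experimental MoE attention (`src/rxnn/experimental/attention.py`, +743 lines), experimental models and MoE (`src/rxnn/experimental/moe.py`, +206), and a rewritten `src/rxnn/transformers/moe.py` (+131, replacing the old 185-line version). For orientation only, below is a minimal sketch of the top-k expert routing pattern a Mixture-of-Experts layer like this typically implements; the class and method names are hypothetical, not the package's actual API.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TopKMoeRouterSketch(nn.Module):
    """Hypothetical top-k MoE router - illustrates the pattern, not rxnn's actual moe.py."""

    def __init__(self, dim: int, num_experts: int, top_k: int = 2):
        super().__init__()
        self.top_k = top_k
        self.gate = nn.Linear(dim, num_experts, bias=False)  # learned routing scores

    def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        # x: (num_tokens, dim) - flattened (batch * seq_len) token embeddings
        probs = F.softmax(self.gate(x), dim=-1)                 # (num_tokens, num_experts)
        weights, indices = probs.topk(self.top_k, dim=-1)       # keep the k best experts per token
        weights = weights / weights.sum(dim=-1, keepdim=True)   # renormalize kept weights
        return weights, indices
```

Each token would then be dispatched to its `indices` experts and the expert outputs combined with `weights`; a production layer typically also adds a load-balancing auxiliary loss, which this sketch omits.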
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.15
+Version: 0.1.17
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
@@ -53,6 +53,29 @@ that's generating Infinite Chain-of-Thoughts and is communicating in push-based
 Reactive communication patterns in RxNN models are adapted to handle the asynchronous nature of the model - after it finishes generating
 a sequence, it still has to process it and save it in memory, but this can be done in the background.
 
+## Release plan
+We are working on three new reactive architectures that progressively advance from language models to awareness models:
+- Reactive Transformer: Reactive Language Model (RLM) with Short-Term Memory
+- Preactor: extends the Reactive Transformer with additional Long-Term Memory, providing theoretically infinite context (only
+the length of a single message is limited) and the ability to learn from interactions (Live Learning)
+- Reactor: AGI awareness model & Strong Reactive Neural Network that works in an infinite reasoning loop and doesn't require explicit human commands
+
+Each new architecture builds on the previous one, adding new features/abilities. They will be progressively
+released with subsequent versions of the **RxNN** framework:
+- 0.1.x: Reactive Transformer base models, Base Model Learning (pre-training/fine-tuning) & Transformers extensions (MoE Attention, Short-Term Memory, etc.)
+- 0.2.x: Memory Reinforcement Learning (MRL) for Short-Term Memory & Reactive Transformer, Attention-based Memory System details
+- 0.3.x: Reinforcement Learning from Human Feedback for Reactive models (RxRLHF), basic Tensor Reactive
+Extensions (TRX/Rust) for the full Reactive Transformer, RxT-Alpha release (+ following models - RxT-Beta, etc.)
+- 0.4.x: Preactor base models, Tensor Database (TDB/Rust) for Long-Term Memory, mxRAG/revRAG subsystems
+- 0.5.x: MRL for Long-Term Memory & Preactor, Live Learning for Preactor, PRx-Alpha release (+ following models - PRx-Beta, etc.)
+- 0.6.x: Reactor base models, full TRX implementation, Receptors & Effectors Reactive RNNs
+- 0.7.x: Behavioral Reinforcement Learning (BRL) for Reactor's Infinite Chain-of-Thoughts, Continuous Live Learning for Reactor
+- 0.8.x: Rx-Alpha release
+- 0.9.x: Rx-Beta release
+- 1.0.0: Reactor AGI official release (Expert, Assistant & Utility class models)
+- 1.x.x: Multimodal reactive models (could be released earlier, depending on progress)
+- 2.0.0: Real-Time Vision Reactor - Worker class models
+- x.x.x: ...and more!
 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/
@@ -26,3 +26,27 @@ that's generating Infinite Chain-of-Thoughts and is communicating in push-based
 
 Reactive communication patterns in RxNN models are adapted to handle the asynchronous nature of the model - after it finishes generating
 a sequence, it still has to process it and save it in memory, but this can be done in the background.
+
+## Release plan
+We are working on three new reactive architectures that progressively advance from language models to awareness models:
+- Reactive Transformer: Reactive Language Model (RLM) with Short-Term Memory
+- Preactor: extends the Reactive Transformer with additional Long-Term Memory, providing theoretically infinite context (only
+the length of a single message is limited) and the ability to learn from interactions (Live Learning)
+- Reactor: AGI awareness model & Strong Reactive Neural Network that works in an infinite reasoning loop and doesn't require explicit human commands
+
+Each new architecture builds on the previous one, adding new features/abilities. They will be progressively
+released with subsequent versions of the **RxNN** framework:
+- 0.1.x: Reactive Transformer base models, Base Model Learning (pre-training/fine-tuning) & Transformers extensions (MoE Attention, Short-Term Memory, etc.)
+- 0.2.x: Memory Reinforcement Learning (MRL) for Short-Term Memory & Reactive Transformer, Attention-based Memory System details
+- 0.3.x: Reinforcement Learning from Human Feedback for Reactive models (RxRLHF), basic Tensor Reactive
+Extensions (TRX/Rust) for the full Reactive Transformer, RxT-Alpha release (+ following models - RxT-Beta, etc.)
+- 0.4.x: Preactor base models, Tensor Database (TDB/Rust) for Long-Term Memory, mxRAG/revRAG subsystems
+- 0.5.x: MRL for Long-Term Memory & Preactor, Live Learning for Preactor, PRx-Alpha release (+ following models - PRx-Beta, etc.)
+- 0.6.x: Reactor base models, full TRX implementation, Receptors & Effectors Reactive RNNs
+- 0.7.x: Behavioral Reinforcement Learning (BRL) for Reactor's Infinite Chain-of-Thoughts, Continuous Live Learning for Reactor
+- 0.8.x: Rx-Alpha release
+- 0.9.x: Rx-Beta release
+- 1.0.0: Reactor AGI official release (Expert, Assistant & Utility class models)
+- 1.x.x: Multimodal reactive models (could be released earlier, depending on progress)
+- 2.0.0: Real-Time Vision Reactor - Worker class models
+- x.x.x: ...and more!
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.1.15"
+version = "0.1.17"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"