bit-ttt-engine 0.6.0__tar.gz → 0.6.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/PKG-INFO +58 -31
  2. bit_ttt_engine-0.6.2/README_PYPI.md +99 -0
  3. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/Cargo.toml +9 -2
  4. bit_ttt_engine-0.6.2/crates/rust_engine/LICENSE +21 -0
  5. bit_ttt_engine-0.6.2/crates/rust_engine/README_PYPI.md +99 -0
  6. bit_ttt_engine-0.6.2/crates/rust_engine/examples/basic_generate.rs +134 -0
  7. bit_ttt_engine-0.6.2/crates/rust_engine/examples/interactive_chat.rs +161 -0
  8. bit_ttt_engine-0.6.2/crates/rust_engine/examples/model_info.rs +113 -0
  9. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/lib.rs +9 -0
  10. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/gguf_model.rs +5 -2
  11. bit_ttt_engine-0.6.2/crates/rust_engine/src/python.rs +988 -0
  12. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/pyproject.toml +1 -0
  13. bit_ttt_engine-0.6.0/README_PYPI.md +0 -73
  14. bit_ttt_engine-0.6.0/crates/rust_engine/README_PYPI.md +0 -73
  15. bit_ttt_engine-0.6.0/crates/rust_engine/src/python.rs +0 -432
  16. {bit_ttt_engine-0.6.0/crates/rust_engine → bit_ttt_engine-0.6.2}/LICENSE +0 -0
  17. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/.cargo/config.toml +0 -0
  18. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/CHANGELOG.md +0 -0
  19. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/Cargo.lock +0 -0
  20. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/README.md +0 -0
  21. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/build.rs +0 -0
  22. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/cortex_rust.pyi +0 -0
  23. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/docs/paged_attention_quality_investigation.md +0 -0
  24. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/examples/benchmark.rs +0 -0
  25. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/examples/cuda_test.rs +0 -0
  26. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/examples/debug_load.rs +0 -0
  27. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/examples/e2e_benchmark.rs +0 -0
  28. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/examples/python_sanity_check.py +0 -0
  29. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/examples/ttt_benchmark.rs +0 -0
  30. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/bench_4bit_gpu.rs +0 -0
  31. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/bench_cpu_kernel.rs +0 -0
  32. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/bench_gemm_4bit.rs +0 -0
  33. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/bench_sizes.rs +0 -0
  34. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/bench_tinyllama.rs +0 -0
  35. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/detect_model.rs +0 -0
  36. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/fast_download.rs +0 -0
  37. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/quick_gen.rs +0 -0
  38. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/run_4bit_llama.rs +0 -0
  39. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/test_13b.rs +0 -0
  40. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/test_4bit_inference.rs +0 -0
  41. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/test_cuda_gemm.rs +0 -0
  42. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/bin/test_memory.rs +0 -0
  43. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/device_utils.rs +0 -0
  44. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/download.rs +0 -0
  45. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/error.rs +0 -0
  46. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/eval/mod.rs +0 -0
  47. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/eval/perplexity.rs +0 -0
  48. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/adaptive_bit_op.cu +0 -0
  49. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/adaptive_bit_op.ptx +0 -0
  50. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/bit_op.cu +0 -0
  51. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/bit_op.ptx +0 -0
  52. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/cpu.rs +0 -0
  53. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/cuda.rs +0 -0
  54. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/fused_ops.cu +0 -0
  55. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/fused_ops.ptx +0 -0
  56. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/fused_ops.rs +0 -0
  57. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/matmul_4bit.cu +0 -0
  58. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/matmul_4bit.ptx +0 -0
  59. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/matmul_4bit.rs +0 -0
  60. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/mod.rs +0 -0
  61. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/packing.rs +0 -0
  62. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/packing_4bit.rs +0 -0
  63. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/paged_attention.cu +0 -0
  64. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/paged_attention.ptx +0 -0
  65. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/kernels/paged_attention.rs +0 -0
  66. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers/adaptive_linear.rs +0 -0
  67. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers/attention.rs +0 -0
  68. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers/bit_linear.rs +0 -0
  69. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers/flash_attention.rs +0 -0
  70. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers/isomorphic.rs +0 -0
  71. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers/kv_cache/mod.rs +0 -0
  72. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers/linear_4bit.rs +0 -0
  73. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers/rms_norm.rs +0 -0
  74. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers/swiglu.rs +0 -0
  75. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers/ttt.rs +0 -0
  76. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/layers.rs +0 -0
  77. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/block.rs +0 -0
  78. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/config.rs +0 -0
  79. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/config_common.rs +0 -0
  80. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/detector.rs +0 -0
  81. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/gguf_loader.rs +0 -0
  82. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/llama/bitllama.rs +0 -0
  83. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/llama/llama_fp16.rs +0 -0
  84. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/llama/mod.rs +0 -0
  85. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/llama_4bit.rs +0 -0
  86. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model/unified.rs +0 -0
  87. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/model.rs +0 -0
  88. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/optim/mod.rs +0 -0
  89. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/optim/schedule_free.rs +0 -0
  90. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/pack/install.rs +0 -0
  91. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/pack/lib.rs +0 -0
  92. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/pack/reader.rs +0 -0
  93. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/pack/types.rs +0 -0
  94. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/pack/verify.rs +0 -0
  95. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/pack/writer.rs +0 -0
  96. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/paged_attention/block_manager.rs +0 -0
  97. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/paged_attention/cache_engine.rs +0 -0
  98. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/paged_attention/mod.rs +0 -0
  99. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/scheduler/mod.rs +0 -0
  100. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/speculative/mod.rs +0 -0
  101. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/tests/attention_test.rs +0 -0
  102. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/tests/bit_linear_test.rs +0 -0
  103. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/tests/format_diagnosis.rs +0 -0
  104. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/tests/isomorphic_test.rs +0 -0
  105. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/tests/ttt_test.rs +0 -0
  106. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/src/wasm.rs +0 -0
  107. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/tests/accuracy_test.rs +0 -0
  108. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/tests/bitllama_e2e.rs +0 -0
  109. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/tests/common.rs +0 -0
  110. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/tests/gguf_e2e.rs +0 -0
  111. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/tests/load_direct_benchmark.rs +0 -0
  112. {bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/tests/load_packed_e2e.rs +0 -0
{bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: bit-ttt-engine
-Version: 0.6.0
+Version: 0.6.2
 Classifier: Development Status :: 4 - Beta
 Classifier: Programming Language :: Rust
 Classifier: Programming Language :: Python :: Implementation :: CPython
@@ -10,6 +10,7 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 License-File: LICENSE
 Summary: 1.58-bit Quantization + Test-Time Training (TTT) Implementation in Pure Rust
 Keywords: llm,rust,ttt,quantization,ai
+Home-Page: https://github.com/imonoonoko/Bit-TTT-Engine
 Author: imonoonoko
 License: MIT
 Requires-Python: >=3.8
@@ -21,14 +22,13 @@ Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
 [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
 [![PyPI](https://img.shields.io/pypi/v/bit-ttt-engine.svg)](https://pypi.org/project/bit-ttt-engine/)
 
-**1.58-bit Quantization + Test-Time Training (TTT)** Implementation in Pure Rust.
-
-This package provides Python bindings for the Bit-TTT Engine, allowing you to run ultra-light ternary LLMs with real-time adaptation.
+**4-bit Quantization + Test-Time Training (TTT)** Implementation in Pure Rust.
 
 ## ✨ Features
-1. **Ultra-Light**: Runs large LLMs on cheap hardware using **1.58-bit (ternary) weights**.
-2. **Adaptive (TTT)**: Learns *while* inferring, adapting to context in real-time.
+1. **Fast**: **40 tokens/second** on GPU (RTX 4060 Ti).
+2. **Adaptive (TTT)**: Learns *while* inferring - unique to Bit-TTT!
 3. **Pure Rust**: High performance with minimal dependencies.
+4. **Easy**: Load GGUF models directly.
 
 ## 🚀 Installation
 
@@ -36,13 +36,60 @@ This package provides Python bindings for the Bit-TTT Engine, allowing you to ru
 pip install bit-ttt-engine
 ```
 
-## 💻 Usage
+## 💻 Quick Start (GGUF Models)
+
+```python
+from cortex_rust import GgufModel
+
+# Load model
+model = GgufModel("model.gguf", tokenizer="tokenizer.json")
+
+# Generate text
+output = model.generate(
+    "Hello, how are you?",
+    max_tokens=50,
+    temperature=0.7
+)
+print(output)
+
+# Streaming output
+model.generate_with_callback(
+    "Tell me a story",
+    lambda t: print(t, end="", flush=True),
+    max_tokens=100
+)
+```
+
+## 🧠 TTT (Test-Time Training)
+
+**TTT makes the model adapt during inference** - something no other local LLM can do!
+
+```python
+from cortex_rust import GgufModel
+
+model = GgufModel("model.gguf", tokenizer="tokenizer.json")
+
+# Enable TTT
+model.enable_ttt(layers=4, learning_rate=0.1)
+
+# Without TTT: Pass 1 == Pass 2 (same output)
+# With TTT: Pass 1 != Pass 2 (model is learning!)
+
+out1 = model.generate("My name is Alice.", max_tokens=20)
+out2 = model.generate("My name is Alice.", max_tokens=20)
+print(f"Different: {out1 != out2}") # True!
+
+# TTT controls
+model.disable_ttt()
+model.reset_ttt_state()
+print(model.ttt_enabled) # False
+```
+
+## 🏗️ Legacy API (BitLlama)
 
 ```python
 import cortex_rust
-import json
 
-# Initialize Configuration
 config = cortex_rust.BitLlamaConfig(
     vocab_size=32000,
     hidden_dim=512,
@@ -50,34 +97,14 @@ config = cortex_rust.BitLlamaConfig(
     inner_lr=0.001
 )
 
-# Initialize Model (Inference)
 model = cortex_rust.BitLlama(
     config=config,
     checkpoint_path="path/to/model.safetensors",
-    device="cpu", # or "cuda"
+    device="cpu",
     tokenizer_path="path/to/tokenizer.json"
 )
 
-# Generate Text
-output = model.generate(prompt="Hello, world!", max_tokens=50)
-print(output)
-```
-
-## 🏗️ Training (TTT)
-
-```python
-trainer = cortex_rust.PyTrainer(
-    config=config,
-    checkpoint_path="path/to/model.safetensors",
-    device="cuda"
-)
-
-# Single training step
-loss = trainer.train_step(input_ids=[...], targets=[...])
-print(f"Loss: {loss}")
-
-# Save checkpoint
-trainer.save_checkpoint("model_updated.safetensors")
+output = model.generate(prompt="Hello!", max_tokens=50)
 ```
 
 ## 📖 Documentation
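
Note: the "40 tokens/second" figure added to the feature list above is tied to one GPU (RTX 4060 Ti). A rough way to check throughput on your own hardware is to time the streaming API. This is a minimal sketch, assuming the `GgufModel` interface exactly as shown in the updated README; it also assumes (not documented above) that the callback fires once per generated token, and the model path and prompt are placeholders.

```python
import time
from cortex_rust import GgufModel

model = GgufModel("model.gguf", tokenizer="tokenizer.json")

count = [0]  # mutable counter the callback can update

def on_token(t):
    count[0] += 1  # assumption: one callback invocation per generated token

start = time.perf_counter()
model.generate_with_callback("Benchmark prompt", on_token, max_tokens=100)
elapsed = time.perf_counter() - start
print(f"{count[0] / elapsed:.1f} tokens/second")
```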
bit_ttt_engine-0.6.2/README_PYPI.md

@@ -0,0 +1,99 @@
+# Bit-TTT Engine: High-Performance Brain Core
+
+[![Rust](https://img.shields.io/badge/rust-1.70+-orange.svg)](https://www.rust-lang.org/)
+[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
+[![PyPI](https://img.shields.io/pypi/v/bit-ttt-engine.svg)](https://pypi.org/project/bit-ttt-engine/)
+
+**4-bit Quantization + Test-Time Training (TTT)** Implementation in Pure Rust.
+
+## ✨ Features
+1. **Fast**: **40 tokens/second** on GPU (RTX 4060 Ti).
+2. **Adaptive (TTT)**: Learns *while* inferring - unique to Bit-TTT!
+3. **Pure Rust**: High performance with minimal dependencies.
+4. **Easy**: Load GGUF models directly.
+
+## 🚀 Installation
+
+```bash
+pip install bit-ttt-engine
+```
+
+## 💻 Quick Start (GGUF Models)
+
+```python
+from cortex_rust import GgufModel
+
+# Load model
+model = GgufModel("model.gguf", tokenizer="tokenizer.json")
+
+# Generate text
+output = model.generate(
+    "Hello, how are you?",
+    max_tokens=50,
+    temperature=0.7
+)
+print(output)
+
+# Streaming output
+model.generate_with_callback(
+    "Tell me a story",
+    lambda t: print(t, end="", flush=True),
+    max_tokens=100
+)
+```
+
+## 🧠 TTT (Test-Time Training)
+
+**TTT makes the model adapt during inference** - something no other local LLM can do!
+
+```python
+from cortex_rust import GgufModel
+
+model = GgufModel("model.gguf", tokenizer="tokenizer.json")
+
+# Enable TTT
+model.enable_ttt(layers=4, learning_rate=0.1)
+
+# Without TTT: Pass 1 == Pass 2 (same output)
+# With TTT: Pass 1 != Pass 2 (model is learning!)
+
+out1 = model.generate("My name is Alice.", max_tokens=20)
+out2 = model.generate("My name is Alice.", max_tokens=20)
+print(f"Different: {out1 != out2}") # True!
+
+# TTT controls
+model.disable_ttt()
+model.reset_ttt_state()
+print(model.ttt_enabled) # False
+```
+
+## 🏗️ Legacy API (BitLlama)
+
+```python
+import cortex_rust
+
+config = cortex_rust.BitLlamaConfig(
+    vocab_size=32000,
+    hidden_dim=512,
+    num_layers=12,
+    inner_lr=0.001
+)
+
+model = cortex_rust.BitLlama(
+    config=config,
+    checkpoint_path="path/to/model.safetensors",
+    device="cpu",
+    tokenizer_path="path/to/tokenizer.json"
+)
+
+output = model.generate(prompt="Hello!", max_tokens=50)
+```
+
+## 📖 Documentation
+For more details, please visit the [GitHub repository](https://github.com/imonoonoko/Bit-TTT-Engine).
+
+## 🙏 Acknowledgments
+This project incorporates ideas and techniques inspired by the DroPE method published by Sakana AI.
+
+## 💖 License
+MIT License
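
Note: in the TTT demo above, `out1 != out2` because the first `generate()` call updates the TTT state that the second call then runs on. For a controlled comparison, the documented `reset_ttt_state()` can restore a clean baseline between passes. A minimal sketch, assuming the API surface shown in the README, that generation is deterministic for a fixed state (greedy decoding), and that `reset_ttt_state()` clears all adaptation:

```python
from cortex_rust import GgufModel

model = GgufModel("model.gguf", tokenizer="tokenizer.json")
model.enable_ttt(layers=4, learning_rate=0.1)
prompt = "My name is Alice."

# Control: reset state before each pass, so neither run sees prior adaptation.
model.reset_ttt_state()
base1 = model.generate(prompt, max_tokens=20)
model.reset_ttt_state()
base2 = model.generate(prompt, max_tokens=20)
print(f"Reset between passes, identical: {base1 == base2}")  # expected True

# Treatment: no reset, so pass 2 runs on weights adapted during pass 1.
model.reset_ttt_state()
ttt1 = model.generate(prompt, max_tokens=20)
ttt2 = model.generate(prompt, max_tokens=20)
print(f"No reset, identical: {ttt1 == ttt2}")  # expected False
```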
{bit_ttt_engine-0.6.0 → bit_ttt_engine-0.6.2}/crates/rust_engine/Cargo.toml

@@ -1,8 +1,15 @@
 [package]
 name = "cortex_rust"
-version = "0.6.0"
+version = "0.6.2"
 edition = "2021"
-readme = "README.md"
+description = "High-performance LLM inference with 4-bit quantization and Test-Time Training (TTT)"
+license = "MIT"
+repository = "https://github.com/imonoonoko/Bit-TTT-Engine"
+homepage = "https://github.com/imonoonoko/Bit-TTT-Engine"
+documentation = "https://docs.rs/cortex_rust"
+readme = "README_PYPI.md"
+keywords = ["llm", "rust", "ttt", "quantization", "inference"]
+categories = ["science", "algorithms"]
 
 [lib]
 crate-type = ["cdylib", "rlib"]
bit_ttt_engine-0.6.2/crates/rust_engine/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2026 imonoonoko
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
bit_ttt_engine-0.6.2/crates/rust_engine/README_PYPI.md

@@ -0,0 +1,99 @@
+# Bit-TTT Engine: High-Performance Brain Core
+
+[![Rust](https://img.shields.io/badge/rust-1.70+-orange.svg)](https://www.rust-lang.org/)
+[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
+[![PyPI](https://img.shields.io/pypi/v/bit-ttt-engine.svg)](https://pypi.org/project/bit-ttt-engine/)
+
+**4-bit Quantization + Test-Time Training (TTT)** Implementation in Pure Rust.
+
+## ✨ Features
+1. **Fast**: **40 tokens/second** on GPU (RTX 4060 Ti).
+2. **Adaptive (TTT)**: Learns *while* inferring - unique to Bit-TTT!
+3. **Pure Rust**: High performance with minimal dependencies.
+4. **Easy**: Load GGUF models directly.
+
+## 🚀 Installation
+
+```bash
+pip install bit-ttt-engine
+```
+
+## 💻 Quick Start (GGUF Models)
+
+```python
+from cortex_rust import GgufModel
+
+# Load model
+model = GgufModel("model.gguf", tokenizer="tokenizer.json")
+
+# Generate text
+output = model.generate(
+    "Hello, how are you?",
+    max_tokens=50,
+    temperature=0.7
+)
+print(output)
+
+# Streaming output
+model.generate_with_callback(
+    "Tell me a story",
+    lambda t: print(t, end="", flush=True),
+    max_tokens=100
+)
+```
+
+## 🧠 TTT (Test-Time Training)
+
+**TTT makes the model adapt during inference** - something no other local LLM can do!
+
+```python
+from cortex_rust import GgufModel
+
+model = GgufModel("model.gguf", tokenizer="tokenizer.json")
+
+# Enable TTT
+model.enable_ttt(layers=4, learning_rate=0.1)
+
+# Without TTT: Pass 1 == Pass 2 (same output)
+# With TTT: Pass 1 != Pass 2 (model is learning!)
+
+out1 = model.generate("My name is Alice.", max_tokens=20)
+out2 = model.generate("My name is Alice.", max_tokens=20)
+print(f"Different: {out1 != out2}") # True!
+
+# TTT controls
+model.disable_ttt()
+model.reset_ttt_state()
+print(model.ttt_enabled) # False
+```
+
+## 🏗️ Legacy API (BitLlama)
+
+```python
+import cortex_rust
+
+config = cortex_rust.BitLlamaConfig(
+    vocab_size=32000,
+    hidden_dim=512,
+    num_layers=12,
+    inner_lr=0.001
+)
+
+model = cortex_rust.BitLlama(
+    config=config,
+    checkpoint_path="path/to/model.safetensors",
+    device="cpu",
+    tokenizer_path="path/to/tokenizer.json"
+)
+
+output = model.generate(prompt="Hello!", max_tokens=50)
+```
+
+## 📖 Documentation
+For more details, please visit the [GitHub repository](https://github.com/imonoonoko/Bit-TTT-Engine).
+
+## 🙏 Acknowledgments
+This project incorporates ideas and techniques inspired by the DroPE method published by Sakana AI.
+
+## 💖 License
+MIT License
bit_ttt_engine-0.6.2/crates/rust_engine/examples/basic_generate.rs

@@ -0,0 +1,134 @@
+//! Basic Text Generation Example
+//!
+//! Simple example showing how to generate text with a GGUF model.
+//!
+//! Usage:
+//!   cargo run --release --example basic_generate -- \
+//!     --model path/to/model.gguf --prompt "Hello, world!"
+
+use anyhow::{Context, Result};
+use candle_core::{Device, Tensor};
+use cortex_rust::GgufModel;
+use std::path::PathBuf;
+
+fn parse_args() -> (PathBuf, String, usize) {
+    let args: Vec<String> = std::env::args().collect();
+    let mut model_path = PathBuf::from("model.gguf");
+    let mut prompt = "Hello, how are you?".to_string();
+    let mut max_tokens = 50usize;
+
+    let mut i = 1;
+    while i < args.len() {
+        match args[i].as_str() {
+            "--model" | "-m" => {
+                if i + 1 < args.len() {
+                    model_path = PathBuf::from(&args[i + 1]);
+                    i += 1;
+                }
+            }
+            "--prompt" | "-p" => {
+                if i + 1 < args.len() {
+                    prompt = args[i + 1].clone();
+                    i += 1;
+                }
+            }
+            "--tokens" | "-t" => {
+                if i + 1 < args.len() {
+                    max_tokens = args[i + 1].parse().unwrap_or(50);
+                    i += 1;
+                }
+            }
+            "--help" | "-h" => {
+                println!("Usage: basic_generate [OPTIONS]");
+                println!();
+                println!("Options:");
+                println!(" -m, --model <PATH> Path to GGUF model");
+                println!(" -p, --prompt <TEXT> Prompt text");
+                println!(" -t, --tokens <N> Max tokens to generate [default: 50]");
+                std::process::exit(0);
+            }
+            _ => {}
+        }
+        i += 1;
+    }
+
+    (model_path, prompt, max_tokens)
+}
+
+/// Simple greedy sampling
+fn sample_greedy(logits: &Tensor) -> Result<u32> {
+    let (batch, seq_len, _) = logits.dims3()?;
+    let last_logits = logits.narrow(1, seq_len - 1, 1)?.squeeze(1)?;
+    let token_ids = last_logits.argmax(1)?;
+    let token_id = if batch == 1 {
+        token_ids.squeeze(0)?.to_scalar::<u32>()?
+    } else {
+        token_ids.get(0)?.to_scalar::<u32>()?
+    };
+    Ok(token_id)
+}
+
+/// Simple byte-level tokenization (for demo purposes)
+fn tokenize(text: &str) -> Vec<i64> {
+    text.bytes().map(|b| b as i64).collect()
+}
+
+fn main() -> Result<()> {
+    let (model_path, prompt, max_tokens) = parse_args();
+
+    println!("🚀 Bit-TTT-Engine Basic Generation Example");
+    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
+    println!("Model: {:?}", model_path);
+    println!("Prompt: \"{}\"", prompt);
+    println!("Tokens: {}", max_tokens);
+    println!();
+
+    // Load model
+    println!("📦 Loading model...");
+    let device = Device::Cpu;
+    let mut model = GgufModel::load(&model_path, &device)
+        .context("Failed to load model")?;
+    println!("✅ Model loaded!");
+    println!(" Layers: {}", model.config().num_layers);
+    println!(" Hidden: {}", model.config().hidden_dim);
+    println!();
+
+    // Tokenize
+    let mut tokens = tokenize(&prompt);
+    let input = Tensor::from_vec(tokens.clone(), (1, tokens.len()), &device)?;
+
+    // Prefill
+    println!("⚡ Generating...");
+    let logits = model.forward(&input, 0)?;
+    let first_token = sample_greedy(&logits)?;
+    tokens.push(first_token as i64);
+
+    // Generate
+    for _ in 0..max_tokens {
+        let pos = tokens.len() - 1;
+        let input = Tensor::from_vec(vec![tokens[pos]], (1, 1), &device)?;
+        let logits = model.forward(&input, pos)?;
+        let next_token = sample_greedy(&logits)?;
+        tokens.push(next_token as i64);
+    }
+
+    // Decode (simple byte->char)
+    let output: String = tokens.iter()
+        .skip(prompt.len())
+        .filter_map(|&t| {
+            if t >= 0 && t < 256 {
+                Some(t as u8 as char)
+            } else {
+                None
+            }
+        })
+        .collect();
+
+    println!();
+    println!("📝 Output:");
+    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
+    println!("{}", output);
+    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
+
+    Ok(())
+}
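
Note: `sample_greedy` above always takes the argmax, so output is deterministic. The `temperature` parameter exposed by the Python bindings corresponds to the standard softmax-temperature technique; the following self-contained Python sketch illustrates that technique in general, and is not the engine's actual sampler:

```python
import math
import random

def sample_with_temperature(logits: list[float], temperature: float) -> int:
    """Sample a token id from raw logits scaled by temperature."""
    if temperature <= 0:
        # Degenerate case: greedy argmax, like sample_greedy in the Rust example.
        return max(range(len(logits)), key=lambda i: logits[i])
    # Softmax over temperature-scaled logits (max-subtracted for numerical stability).
    scaled = [x / temperature for x in logits]
    m = max(scaled)
    exps = [math.exp(s - m) for s in scaled]
    total = sum(exps)
    # Lower temperature sharpens the distribution; higher flattens it.
    return random.choices(range(len(logits)), weights=[e / total for e in exps], k=1)[0]

print(sample_with_temperature([2.0, 1.0, 0.1], temperature=0.7))
```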
bit_ttt_engine-0.6.2/crates/rust_engine/examples/interactive_chat.rs

@@ -0,0 +1,161 @@
+//! Interactive Chat Example
+//!
+//! A simple REPL for chatting with a GGUF model.
+//!
+//! Usage:
+//!   cargo run --release --example interactive_chat -- --model path/to/model.gguf
+
+use anyhow::{Context, Result};
+use candle_core::{Device, Tensor};
+use cortex_rust::GgufModel;
+use std::io::{self, BufRead, Write};
+use std::path::PathBuf;
+
+fn parse_args() -> (PathBuf, usize) {
+    let args: Vec<String> = std::env::args().collect();
+    let mut model_path = PathBuf::from("model.gguf");
+    let mut max_tokens = 100usize;
+
+    let mut i = 1;
+    while i < args.len() {
+        match args[i].as_str() {
+            "--model" | "-m" => {
+                if i + 1 < args.len() {
+                    model_path = PathBuf::from(&args[i + 1]);
+                    i += 1;
+                }
+            }
+            "--tokens" | "-t" => {
+                if i + 1 < args.len() {
+                    max_tokens = args[i + 1].parse().unwrap_or(100);
+                    i += 1;
+                }
+            }
+            "--help" | "-h" => {
+                println!("Interactive Chat - Bit-TTT-Engine");
+                println!();
+                println!("Usage: interactive_chat [OPTIONS]");
+                println!();
+                println!("Options:");
+                println!(" -m, --model <PATH> Path to GGUF model");
+                println!(" -t, --tokens <N> Max tokens per response [default: 100]");
+                std::process::exit(0);
+            }
+            _ => {}
+        }
+        i += 1;
+    }
+
+    (model_path, max_tokens)
+}
+
+fn sample_greedy(logits: &Tensor) -> Result<u32> {
+    let (batch, seq_len, _) = logits.dims3()?;
+    let last_logits = logits.narrow(1, seq_len - 1, 1)?.squeeze(1)?;
+    let token_ids = last_logits.argmax(1)?;
+    let token_id = if batch == 1 {
+        token_ids.squeeze(0)?.to_scalar::<u32>()?
+    } else {
+        token_ids.get(0)?.to_scalar::<u32>()?
+    };
+    Ok(token_id)
+}
+
+fn tokenize(text: &str) -> Vec<i64> {
+    text.bytes().map(|b| b as i64).collect()
+}
+
+fn generate(model: &mut GgufModel, prompt: &str, max_tokens: usize, device: &Device) -> Result<String> {
+    let mut tokens = tokenize(prompt);
+    let input = Tensor::from_vec(tokens.clone(), (1, tokens.len()), device)?;
+
+    // Prefill
+    let logits = model.forward(&input, 0)?;
+    let first_token = sample_greedy(&logits)?;
+    tokens.push(first_token as i64);
+
+    // Generate
+    for _ in 0..max_tokens {
+        let pos = tokens.len() - 1;
+        let input = Tensor::from_vec(vec![tokens[pos]], (1, 1), device)?;
+        let logits = model.forward(&input, pos)?;
+        let next_token = sample_greedy(&logits)?;
+
+        // Stop on newline or special tokens
+        if next_token == 10 || next_token == 0 {
+            break;
+        }
+        tokens.push(next_token as i64);
+    }
+
+    // Decode
+    let output: String = tokens.iter()
+        .skip(prompt.len())
+        .filter_map(|&t| {
+            if t >= 32 && t < 127 {
+                Some(t as u8 as char)
+            } else {
+                None
+            }
+        })
+        .collect();
+
+    Ok(output)
+}
+
+fn main() -> Result<()> {
+    let (model_path, max_tokens) = parse_args();
+
+    println!("╔════════════════════════════════════════════╗");
+    println!("║ 🤖 Bit-TTT-Engine Interactive Chat ║");
+    println!("╚════════════════════════════════════════════╝");
+    println!();
+    println!("Model: {:?}", model_path);
+    println!("Type 'quit' or 'exit' to end.");
+    println!();
+
+    // Load model
+    println!("📦 Loading model...");
+    let device = Device::Cpu;
+    let mut model = GgufModel::load(&model_path, &device)
+        .context("Failed to load model")?;
+    println!("✅ Ready!");
+    println!();
+
+    let stdin = io::stdin();
+    let mut stdout = io::stdout();
+
+    loop {
+        print!("You: ");
+        stdout.flush()?;
+
+        let mut input = String::new();
+        stdin.lock().read_line(&mut input)?;
+        let input = input.trim();
+
+        if input.is_empty() {
+            continue;
+        }
+
+        if input.eq_ignore_ascii_case("quit") || input.eq_ignore_ascii_case("exit") {
+            println!("👋 Goodbye!");
+            break;
+        }
+
+        // Reset cache for new conversation
+        model.reset_cache();
+
+        // Generate response
+        match generate(&mut model, input, max_tokens, &device) {
+            Ok(response) => {
+                println!("🤖: {}", response.trim());
+            }
+            Err(e) => {
+                println!("❌ Error: {}", e);
+            }
+        }
+        println!();
+    }
+
+    Ok(())
+}
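
Note: the same REPL fits in a few lines through the Python bindings, since `GgufModel` handles tokenization and sampling internally. A minimal sketch, assuming the API shown in the README above; unlike the Rust example, it performs no per-turn cache reset, because the bindings documented above expose no equivalent of `reset_cache()`:

```python
from cortex_rust import GgufModel

model = GgufModel("model.gguf", tokenizer="tokenizer.json")

while True:
    user = input("You: ").strip()
    if not user:
        continue
    if user.lower() in ("quit", "exit"):
        print("👋 Goodbye!")
        break
    print("🤖: ", end="", flush=True)
    # Stream tokens as they arrive, mirroring the Rust example's output.
    model.generate_with_callback(user, lambda t: print(t, end="", flush=True), max_tokens=100)
    print()
```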