entroplain 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/docs/USAGE.md ADDED
@@ -0,0 +1,302 @@
1
+ # Entroplain Documentation
2
+
3
+ ## Installation
4
+
5
+ ### pip (Python)
6
+
7
+ ```bash
8
+ # Core package
9
+ pip install entroplain
10
+
11
+ # With provider support
12
+ pip install "entroplain[openai]"
13
+ pip install "entroplain[anthropic]"
14
+ pip install "entroplain[all]"
15
+ ```
16
+
17
+ ### npm (Node.js)
18
+
19
+ ```bash
20
+ npm install entroplain
21
+ ```
22
+
23
+ ### From Source
24
+
25
+ ```bash
26
+ git clone https://github.com/entroplain/entroplain.git
27
+ cd entroplain
28
+ pip install -e .
29
+ ```
30
+
31
+ ---
32
+
33
+ ## Quick Start
34
+
35
+ ```python
36
+ from entroplain import EntropyMonitor
37
+
38
+ monitor = EntropyMonitor()
39
+
40
+ # Track tokens with entropy
41
+ monitor.track("The", 0.8)
42
+ monitor.track("answer", 0.5)
43
+ monitor.track("is", 0.2)
44
+
45
+ # Check convergence
46
+ if monitor.should_exit():
47
+ print("Reasoning complete!")
48
+ ```
49
+
50
+ ---
51
+
52
+ ## How It Works
53
+
54
+ ### 1. Entropy Calculation
55
+
56
+ For each token, we calculate **Shannon entropy** from the model's output distribution:
57
+
58
+ ```
59
+ H = -Σ p(x) * log₂(p(x))
60
+ ```
61
+
62
+ Where `p(x)` is the probability of token x.
63
+
64
+ ### 2. Valley Detection
65
+
66
+ A **valley** is a local minimum in the entropy trajectory:
67
+
68
+ ```
69
+ Token: A B C D E
70
+ Entropy: 0.8 0.3* 0.7 0.4* 0.9
71
+ ↑ ↑
72
+ Valley 1 Valley 2
73
+ ```
74
+
75
+ Valleys indicate moments when the model was confident about the next token — reasoning milestones.
76
+
77
+ ### 3. Exit Conditions
78
+
79
+ | Condition | When to Exit |
80
+ |-----------|--------------|
81
+ | `entropy_drop` | Entropy < threshold |
82
+ | `valleys_plateau` | Valley count stabilizes |
83
+ | `velocity_zero` | Entropy change < threshold |
84
+ | `combined` | (entropy_low OR valleys_plateau) AND velocity_stable |
85
+
86
+ ---
87
+
88
+ ## Configuration
89
+
90
+ ```python
91
+ monitor = EntropyMonitor(
92
+ # Exit when entropy drops below this
93
+ entropy_threshold=0.15,
94
+
95
+ # Require at least N valleys
96
+ min_valleys=2,
97
+
98
+ # Exit when velocity < this
99
+ velocity_threshold=0.05,
100
+
101
+ # Don't exit before N tokens
102
+ min_tokens=50,
103
+
104
+ # Exit condition strategy
105
+ exit_condition="combined" # or "entropy_drop", "valleys_plateau", "velocity_zero"
106
+ )
107
+ ```
108
+
109
+ ### Environment Variables
110
+
111
+ ```bash
112
+ # Provider API keys
113
+ ENTROPLAIN_OPENAI_API_KEY=sk-...
114
+ ENTROPLAIN_ANTHROPIC_API_KEY=sk-ant-...
115
+ ENTROPLAIN_NVIDIA_API_KEY=nvapi-...
116
+ ENTROPLAIN_GOOGLE_API_KEY=...
117
+
118
+ # Local models
119
+ ENTROPLAIN_LOCAL_PROVIDER=ollama
120
+ ENTROPLAIN_LOCAL_MODEL=llama3.1
121
+ ```
122
+
123
+ ---
124
+
125
+ ## Provider Examples
126
+
127
+ ### OpenAI
128
+
129
+ ```python
130
+ from openai import OpenAI
131
+ from entroplain import EntropyMonitor
132
+
133
+ client = OpenAI()
134
+ monitor = EntropyMonitor()
135
+
136
+ response = client.chat.completions.create(
137
+ model="gpt-4o",
138
+ messages=[{"role": "user", "content": "Explain quantum computing"}],
139
+ logprobs=True,
140
+ top_logprobs=5,
141
+ stream=True
142
+ )
143
+
144
+ for chunk in response:
145
+ if chunk.choices[0].logprobs:
146
+ for content in chunk.choices[0].logprobs.content:
147
+ entropy = monitor.calculate_entropy(
148
+ [lp.logprob for lp in content.top_logprobs]
149
+ )
150
+ monitor.track(content.token, entropy)
151
+
152
+ if monitor.should_exit():
153
+ break
154
+ ```
155
+
156
+ ### NVIDIA NIM
157
+
158
+ ```python
159
+ from entroplain import NVIDIAProvider, EntropyMonitor
160
+
161
+ provider = NVIDIAProvider()
162
+ monitor = EntropyMonitor()
163
+
164
+ for token in provider.stream_with_entropy(
165
+ model="meta/llama-3.1-70b-instruct",
166
+ messages=[{"role": "user", "content": "Hello"}]
167
+ ):
168
+ monitor.track(token.token, token.entropy)
169
+
170
+ if monitor.should_exit():
171
+ print("Early exit!")
172
+ break
173
+ ```
174
+
175
+ ### Ollama (Local)
176
+
177
+ ```python
178
+ from entroplain import OllamaProvider, EntropyMonitor
179
+
180
+ provider = OllamaProvider()
181
+ monitor = EntropyMonitor()
182
+
183
+ for token in provider.stream_with_entropy(
184
+ model="llama3.1",
185
+ prompt="Think step by step..."
186
+ ):
187
+ print(token.token, end="")
188
+ monitor.track(token.token, token.entropy)
189
+ ```
190
+
191
+ ---
192
+
193
+ ## Agent Framework Integration
194
+
195
+ ### OpenClaw
196
+
197
+ ```yaml
198
+ # In your agent config
199
+ entropy_monitor:
200
+ enabled: true
201
+ entropy_threshold: 0.15
202
+ min_valleys: 2
203
+
204
+ hooks:
205
+ on_token: entroplain.hooks.track_entropy
206
+ on_exit_check: entroplain.hooks.early_exit
207
+ ```
208
+
209
+ ### Claude Code
210
+
211
+ ```json
212
+ {
213
+ "hooks": {
214
+ "on_token": "entroplain.hooks.track_entropy",
215
+ "on_converge": "entroplain.hooks.early_exit"
216
+ }
217
+ }
218
+ ```
219
+
220
+ ### Custom Agent
221
+
222
+ ```python
223
+ from entroplain.hooks import EntropyHook
224
+
225
+ hook = EntropyHook(config={"entropy_threshold": 0.15})
226
+
227
+ # In your agent loop
228
+ for token, entropy in your_agent.generate():
229
+ result = hook.on_token(token, entropy)
230
+
231
+ if result["should_exit"]:
232
+ print(f"Exiting early at token {result['index']}")
233
+ break
234
+ ```
235
+
236
+ ---
237
+
238
+ ## CLI Reference
239
+
240
+ ```bash
241
+ # Analyze entropy trajectory
242
+ entroplain analyze "What is 2+2?" --model gpt-4o --output results.json
243
+
244
+ # Stream with early exit
245
+ entroplain stream "Solve: x^2=16" --exit-on-converge --threshold 0.15
246
+
247
+ # Run benchmarks
248
+ entroplain benchmark --problems gsm8k --output benchmark.json
249
+
250
+ # Visualize trajectory
251
+ entroplain visualize results.json --output entropy_plot.png
252
+ ```
253
+
254
+ ---
255
+
256
+ ## Research
257
+
258
+ ### Key Findings
259
+
260
+ | Metric | Easy | Medium | Hard |
261
+ |--------|------|--------|------|
262
+ | Avg Valleys | 61.3 | 53.0 | 70.2 |
263
+ | Avg Entropy | 0.376 | 0.327 | 0.295 |
264
+ | Avg Velocity | 0.485 | 0.439 | 0.410 |
265
+
266
+ **H1 Supported:** Harder problems have more entropy valleys (correlates with reasoning complexity)
267
+
268
+ **H2 Supported:** Entropy velocity differs by difficulty (useful for crystallization detection)
269
+
270
+ ### Paper
271
+
272
+ See [`paper.md`](../paper.md) for the full research proposal.
273
+
274
+ ---
275
+
276
+ ## Troubleshooting
277
+
278
+ ### "No logprobs returned"
279
+
280
+ Ensure your API request includes:
281
+ - OpenAI/NVIDIA: `logprobs=True, top_logprobs=5`
282
+ - Anthropic: `logprobs=True`
283
+ - Gemini: `response_logprobs=True`
284
+
285
+ ### "Entropy is always 0"
286
+
287
+ Some providers don't expose logprobs in streaming mode. Try non-streaming or check provider docs.
288
+
289
+ ### "should_exit() always returns False"
290
+
291
+ Check your thresholds:
292
+ - `entropy_threshold` too low?
293
+ - `min_valleys` too high?
294
+ - `min_tokens` not reached?
295
+
296
+ ---
297
+
298
+ ## Support
299
+
300
+ - **Issues:** https://github.com/entroplain/entroplain/issues
301
+ - **Discord:** https://discord.gg/entroplain
302
+ - **Docs:** https://entroplain.ai/docs
@@ -0,0 +1,30 @@
1
+ """
2
+ Entroplain — Entropy-based early exit for efficient agent reasoning.
3
+ """
4
+
5
# Package metadata: semantic version string and author attribution.
__version__ = "0.1.0"
__author__ = "Entroplain Contributors"
7
+
8
+ from .monitor import EntropyMonitor, calculate_entropy
9
+ from .providers import (
10
+ OpenAIProvider,
11
+ AnthropicProvider,
12
+ GeminiProvider,
13
+ NVIDIAProvider,
14
+ OllamaProvider,
15
+ LlamaCppProvider,
16
+ )
17
+ from .hooks import track_entropy, early_exit
18
+
19
# Explicit public API: the names re-exported by `from entroplain import *`
# and considered stable for downstream users.
__all__ = [
    "EntropyMonitor",
    "calculate_entropy",
    "OpenAIProvider",
    "AnthropicProvider",
    "GeminiProvider",
    "NVIDIAProvider",
    "OllamaProvider",
    "LlamaCppProvider",
    "track_entropy",
    "early_exit",
]
@@ -0,0 +1,152 @@
1
+ """
2
+ CLI interface for Entroplain.
3
+ """
4
+
5
+ import argparse
6
+ import json
7
+ import sys
8
+ from typing import Optional
9
+
10
+
11
def analyze_command(args):
    """Run one prompt through the provider and report entropy statistics.

    Streams the model response token by token, feeds each token's entropy
    into an EntropyMonitor, prints a summary, and optionally writes the
    full trajectory to a JSON file (``args.output``).
    """
    # Imported lazily so the CLI can show help without provider deps.
    from entroplain import EntropyMonitor, NVIDIAProvider

    provider = NVIDIAProvider()
    monitor = EntropyMonitor()

    # Truncate long prompts in the banner for readability.
    preview = args.prompt[:50] + ("..." if len(args.prompt) > 50 else "")
    divider = "-" * 60
    print(f"Analyzing: {preview}")
    print(f"Model: {args.model}")
    print(divider)

    chunks = []
    stream = provider.stream_with_entropy(
        model=args.model,
        messages=[{"role": "user", "content": args.prompt}],
        max_tokens=args.max_tokens,
    )
    for token_data in stream:
        monitor.track(token_data.token, token_data.entropy)
        chunks.append(token_data.token)
        print(token_data.token, end="", flush=True)
    full_response = "".join(chunks)

    print("\n")
    print(divider)
    print("ENTROPY ANALYSIS")
    print(divider)

    stats = monitor.get_stats()
    print(f"Total tokens: {stats['token_count']}")
    print(f"Valleys: {stats['valley_count']}")
    print(f"Mean entropy: {stats['mean_entropy']:.4f}")
    print(f"Min entropy: {stats['min_entropy']:.4f}")
    print(f"Max entropy: {stats['max_entropy']:.4f}")
    print(f"Final entropy: {stats['current_entropy']:.4f}")
    print(f"Final velocity: {stats['current_velocity']:.4f}")

    if args.output:
        payload = {
            "prompt": args.prompt,
            "model": args.model,
            "response": full_response,
            "stats": stats,
            "trajectory": monitor.get_trajectory(),
        }
        with open(args.output, "w") as f:
            json.dump(payload, f, indent=2)
        print(f"\nResults saved to: {args.output}")
54
+
55
+
56
def stream_command(args):
    """Stream a response, optionally aborting once reasoning converges."""
    # Imported lazily so the CLI can show help without provider deps.
    from entroplain import EntropyMonitor, NVIDIAProvider

    provider = NVIDIAProvider()
    monitor = EntropyMonitor(
        entropy_threshold=args.threshold,
        min_valleys=args.min_valleys
    )

    # Truncate long prompts in the banner for readability.
    preview = args.prompt[:50] + ("..." if len(args.prompt) > 50 else "")
    print(f"Streaming: {preview}")
    print(f"Exit threshold: {args.threshold}, min valleys: {args.min_valleys}")
    print("-" * 60)

    chunks = []
    exited_early = False

    stream = provider.stream_with_entropy(
        model=args.model,
        messages=[{"role": "user", "content": args.prompt}],
        max_tokens=args.max_tokens,
    )
    for token_data in stream:
        monitor.track(token_data.token, token_data.entropy)
        chunks.append(token_data.token)
        print(token_data.token, end="", flush=True)

        # Abort mid-stream once the monitor reports convergence.
        if args.exit_on_converge and monitor.should_exit():
            print("\n\n[EARLY EXIT] Reasoning converged")
            exited_early = True
            break

    if not exited_early:
        print("\n\n[COMPLETE] Full response generated")

    stats = monitor.get_stats()
    print(f"Tokens: {stats['token_count']}, Valleys: {stats['valley_count']}, Entropy: {stats['current_entropy']:.4f}")
90
+
91
+
92
def benchmark_command(args):
    """Placeholder for the benchmark subcommand (not yet implemented)."""
    for line in (
        "Benchmark mode not yet implemented",
        "Coming soon: GSM8K, HotpotQA, custom problem sets",
    ):
        print(line)
96
+
97
+
98
def visualize_command(args):
    """Placeholder for the visualize subcommand (not yet implemented)."""
    for line in (
        "Visualization mode not yet implemented",
        "Coming soon: matplotlib plots, interactive charts",
    ):
        print(line)
102
+
103
+
104
def main():
    """Entry point for the ``entroplain`` command-line interface.

    Builds the argument parser with four subcommands and dispatches to
    the matching handler; with no/unknown subcommand it prints usage and
    exits with status 1.
    """
    parser = argparse.ArgumentParser(
        description="Entroplain — Entropy-based early exit for efficient agent reasoning"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # analyze: one-shot entropy trajectory report
    analyze = subparsers.add_parser("analyze", help="Analyze a prompt's entropy trajectory")
    analyze.add_argument("prompt", help="The prompt to analyze")
    analyze.add_argument("--model", default="meta/llama-3.1-70b-instruct", help="Model to use")
    analyze.add_argument("--max-tokens", type=int, default=512, help="Maximum tokens")
    analyze.add_argument("--output", help="Output file for results (JSON)")

    # stream: live streaming with optional early exit
    stream = subparsers.add_parser("stream", help="Stream with early exit")
    stream.add_argument("prompt", help="The prompt to stream")
    stream.add_argument("--model", default="meta/llama-3.1-70b-instruct", help="Model to use")
    stream.add_argument("--max-tokens", type=int, default=1024, help="Maximum tokens")
    stream.add_argument("--threshold", type=float, default=0.15, help="Entropy exit threshold")
    stream.add_argument("--min-valleys", type=int, default=2, help="Minimum valleys before exit")
    stream.add_argument("--exit-on-converge", action="store_true", help="Exit when reasoning converges")

    # benchmark: placeholder subcommand
    benchmark = subparsers.add_parser("benchmark", help="Run benchmark tests")
    benchmark.add_argument("--problems", default="gsm8k", help="Problem set to use")
    benchmark.add_argument("--output", default="results.json", help="Output file")

    # visualize: placeholder subcommand
    visualize = subparsers.add_parser("visualize", help="Visualize entropy trajectory")
    visualize.add_argument("input", help="Input JSON file from analyze")
    visualize.add_argument("--output", default="entropy_plot.png", help="Output image file")

    args = parser.parse_args()

    # Dispatch table instead of an if/elif chain; a missing command falls
    # through to help + exit(1), same as the original branch structure.
    handlers = {
        "analyze": analyze_command,
        "stream": stream_command,
        "benchmark": benchmark_command,
        "visualize": visualize_command,
    }
    handler = handlers.get(args.command)
    if handler is None:
        parser.print_help()
        sys.exit(1)
    handler(args)
149
+
150
+
151
# Run the CLI when this module is executed directly as a script.
if __name__ == "__main__":
    main()
@@ -0,0 +1,183 @@
1
+ """
2
+ Hooks for agent framework integration.
3
+ """
4
+
5
+ from typing import Dict, Any, Optional
6
+ from .monitor import EntropyMonitor
7
+
8
+
9
# Module-level state shared by the functional hook API below.
# _monitor is created lazily by init_hooks()/track_entropy();
# _config remembers the last configuration passed to init_hooks().
_monitor: Optional[EntropyMonitor] = None
_config: Dict[str, Any] = {}
12
+
13
+
14
def init_hooks(config: Optional[Dict[str, Any]] = None):
    """
    Initialize entropy hooks for agent frameworks.

    Creates (or replaces) the module-level EntropyMonitor used by
    track_entropy() and early_exit().

    Args:
        config: Configuration for the entropy monitor. Recognized keys:
            ``entropy_threshold`` (default 0.15), ``min_valleys`` (default 2),
            ``velocity_threshold`` (default 0.05), ``min_tokens`` (default 50).
            Passing None (or an empty dict) uses all defaults.
    """
    global _monitor, _config

    _config = config or {}
    # Missing keys fall back to the library defaults.
    _monitor = EntropyMonitor(
        entropy_threshold=_config.get("entropy_threshold", 0.15),
        min_valleys=_config.get("min_valleys", 2),
        velocity_threshold=_config.get("velocity_threshold", 0.05),
        min_tokens=_config.get("min_tokens", 50)
    )
30
+
31
+
32
def track_entropy(token: str, entropy: float) -> Dict[str, Any]:
    """
    Record one token's entropy with the shared monitor.

    Intended as an ``on_token`` callback in agent frameworks. The shared
    monitor is created lazily with default settings on first use.

    Args:
        token: The generated token text.
        entropy: Entropy value computed for this token.

    Returns:
        Dict describing the tracked point (index, valley flag, velocity),
        whether generation should stop, and the monitor's current stats.
    """
    if _monitor is None:
        init_hooks()

    point = _monitor.track(token, entropy)

    result = {
        "token": token,
        "entropy": entropy,
        "index": point.index,
        "is_valley": point.is_valley,
        "velocity": point.velocity,
        "should_exit": _monitor.should_exit(),
        "stats": _monitor.get_stats(),
    }
    return result
61
+
62
+
63
def early_exit() -> bool:
    """
    Report whether the shared monitor considers reasoning converged.

    Intended as an exit-condition hook in agent frameworks.

    Returns:
        True once convergence criteria are met; False when no monitor has
        been initialized yet or convergence has not been reached.
    """
    return False if _monitor is None else _monitor.should_exit()
78
+
79
+
80
def reset_hooks():
    """Clear all tracked state on the shared hook monitor, if one exists."""
    # Truthiness check kept as-is; a monitor that is absent (None) is skipped.
    if _monitor:
        _monitor.reset()
86
+
87
+
88
def get_monitor() -> Optional[EntropyMonitor]:
    """Return the shared EntropyMonitor, or None if hooks are uninitialized."""
    return _monitor
91
+
92
+
93
+ # OpenClaw integration
94
def openclaw_config() -> Dict[str, Any]:
    """
    Build an OpenClaw agent configuration for entropy monitoring.

    Thresholds come from the config last passed to init_hooks(), falling
    back to the library defaults when unset.

    Returns:
        Dict with OpenClaw config structure.
    """
    hook_paths = {
        "on_token": "entroplain.hooks.track_entropy",
        "on_exit_check": "entroplain.hooks.early_exit",
    }
    monitor_section = {
        "enabled": True,
        "exit_threshold": _config.get("entropy_threshold", 0.15),
        "min_valleys": _config.get("min_valleys", 2),
        "velocity_threshold": _config.get("velocity_threshold", 0.05),
        "hooks": hook_paths,
    }
    return {"entropy_monitor": monitor_section}
113
+
114
+
115
+ # Claude Code integration
116
def claude_code_hooks() -> Dict[str, str]:
    """
    Build a Claude Code hook configuration for entropy monitoring.

    Returns:
        Dict mapping hook names to entroplain hook function paths plus the
        threshold config last passed to init_hooks() (library defaults
        when unset).
    """
    hook_paths = {
        "on_token": "entroplain.hooks.track_entropy",
        "on_converge": "entroplain.hooks.early_exit",
    }
    threshold_section = {
        "entropy_threshold": _config.get("entropy_threshold", 0.15),
        "min_valleys": _config.get("min_valleys", 2),
    }
    return {"hooks": hook_paths, "config": threshold_section}
133
+
134
+
135
+ # Generic agent framework integration
136
class EntropyHook:
    """
    Class-based hook for frameworks that prefer class instances.

    Wraps a private EntropyMonitor so multiple hooks can coexist without
    sharing the module-level monitor used by track_entropy()/early_exit().

    Usage:
        hook = EntropyHook(config={"entropy_threshold": 0.15})

        # In your agent loop
        for token in agent.generate():
            result = hook.on_token(token, entropy)
            if result["should_exit"]:
                break
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Create a monitor from ``config``; missing keys use library defaults."""
        self.config = config or {}
        self.monitor = EntropyMonitor(
            entropy_threshold=self.config.get("entropy_threshold", 0.15),
            min_valleys=self.config.get("min_valleys", 2),
            velocity_threshold=self.config.get("velocity_threshold", 0.05),
            min_tokens=self.config.get("min_tokens", 50)
        )

    def on_token(self, token: str, entropy: float) -> Dict[str, Any]:
        """Track one token; return point info, exit decision, and stats."""
        point = self.monitor.track(token, entropy)

        return {
            "token": token,
            "entropy": entropy,
            "index": point.index,
            "is_valley": point.is_valley,
            "velocity": point.velocity,
            "should_exit": self.monitor.should_exit(),
            "stats": self.monitor.get_stats()
        }

    def should_exit(self) -> bool:
        """Check if reasoning has converged."""
        return self.monitor.should_exit()

    def reset(self):
        """Reset the monitor state."""
        self.monitor.reset()

    def get_stats(self) -> Dict[str, Any]:
        """Get current statistics."""
        return self.monitor.get_stats()