anneal-context 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. anneal_context-0.1.0/.github/workflows/ci.yml +58 -0
  2. anneal_context-0.1.0/.github/workflows/publish.yml +36 -0
  3. anneal_context-0.1.0/.gitignore +14 -0
  4. anneal_context-0.1.0/DEVOPS.md +42 -0
  5. anneal_context-0.1.0/PKG-INFO +156 -0
  6. anneal_context-0.1.0/README.md +138 -0
  7. anneal_context-0.1.0/examples/.gitkeep +0 -0
  8. anneal_context-0.1.0/pyproject.toml +40 -0
  9. anneal_context-0.1.0/skills/claude-code/anneal.md +81 -0
  10. anneal_context-0.1.0/skills/gemini-cli/anneal.md +24 -0
  11. anneal_context-0.1.0/src/anneal/__init__.py +0 -0
  12. anneal_context-0.1.0/src/anneal/assembly/__init__.py +0 -0
  13. anneal_context-0.1.0/src/anneal/assembly/budget_manager.py +25 -0
  14. anneal_context-0.1.0/src/anneal/assembly/result_builder.py +92 -0
  15. anneal_context-0.1.0/src/anneal/formulation/__init__.py +0 -0
  16. anneal_context-0.1.0/src/anneal/formulation/candidate_generator.py +132 -0
  17. anneal_context-0.1.0/src/anneal/formulation/coefficient_builder.py +71 -0
  18. anneal_context-0.1.0/src/anneal/formulation/context_qubo_builder.py +48 -0
  19. anneal_context-0.1.0/src/anneal/graph/__init__.py +0 -0
  20. anneal_context-0.1.0/src/anneal/graph/base.py +67 -0
  21. anneal_context-0.1.0/src/anneal/graph/code_review_graph.py +114 -0
  22. anneal_context-0.1.0/src/anneal/graph/detector.py +67 -0
  23. anneal_context-0.1.0/src/anneal/graph/graphify.py +94 -0
  24. anneal_context-0.1.0/src/anneal/server.py +207 -0
  25. anneal_context-0.1.0/src/anneal/tracing.py +112 -0
  26. anneal_context-0.1.0/tests/__init__.py +0 -0
  27. anneal_context-0.1.0/tests/assembly/__init__.py +0 -0
  28. anneal_context-0.1.0/tests/assembly/test_budget_manager.py +45 -0
  29. anneal_context-0.1.0/tests/assembly/test_result_builder.py +73 -0
  30. anneal_context-0.1.0/tests/formulation/__init__.py +0 -0
  31. anneal_context-0.1.0/tests/formulation/test_candidate_generator.py +89 -0
  32. anneal_context-0.1.0/tests/formulation/test_coefficient_builder.py +94 -0
  33. anneal_context-0.1.0/tests/formulation/test_context_qubo_builder.py +57 -0
  34. anneal_context-0.1.0/tests/graph/__init__.py +0 -0
  35. anneal_context-0.1.0/tests/graph/test_base.py +40 -0
  36. anneal_context-0.1.0/tests/graph/test_code_review_graph.py +77 -0
  37. anneal_context-0.1.0/tests/graph/test_detector.py +71 -0
  38. anneal_context-0.1.0/tests/graph/test_graphify.py +63 -0
  39. anneal_context-0.1.0/tests/test_server.py +69 -0
  40. anneal_context-0.1.0/uv.lock +2027 -0
@@ -0,0 +1,58 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ pull_request:
7
+ branches: [main]
8
+
9
+ jobs:
10
+ test:
11
+ runs-on: ubuntu-latest
12
+ strategy:
13
+ matrix:
14
+ python-version: ["3.11", "3.12", "3.13"]
15
+
16
+ steps:
17
+ - uses: actions/checkout@v4
18
+
19
+ - name: Install uv
20
+ uses: astral-sh/setup-uv@v4
21
+
22
+ - name: Set up Python ${{ matrix.python-version }}
23
+ run: uv python install ${{ matrix.python-version }}
24
+
25
+ - name: Create venv and install
26
+ run: |
27
+ uv venv
28
+ uv pip install "spinchain @ git+https://github.com/ameyakhot/spinchain.git"
29
+ uv pip install -e ".[dev]"
30
+
31
+ - name: Lint
32
+ run: uv run ruff check src/ tests/
33
+
34
+ - name: Test
35
+ run: uv run python -m pytest tests/ -v --tb=short
36
+
37
+ test-spinchain-main:
38
+ name: Test against SpinChain main
39
+ runs-on: ubuntu-latest
40
+ continue-on-error: true
41
+
42
+ steps:
43
+ - uses: actions/checkout@v4
44
+
45
+ - name: Install uv
46
+ uses: astral-sh/setup-uv@v4
47
+
48
+ - name: Set up Python
49
+ run: uv python install 3.12
50
+
51
+ - name: Create venv and install
52
+ run: |
53
+ uv venv
54
+ uv pip install "spinchain @ git+https://github.com/ameyakhot/spinchain.git@main"
55
+ uv pip install -e ".[dev]"
56
+
57
+ - name: Test
58
+ run: uv run python -m pytest tests/ -v --tb=short
@@ -0,0 +1,36 @@
1
+ name: Publish to PyPI
2
+
3
+ on:
4
+ release:
5
+ types: [published]
6
+
7
+ jobs:
8
+ publish:
9
+ runs-on: ubuntu-latest
10
+ environment: pypi
11
+ permissions:
12
+ id-token: write
13
+
14
+ steps:
15
+ - uses: actions/checkout@v4
16
+
17
+ - name: Install uv
18
+ uses: astral-sh/setup-uv@v4
19
+
20
+ - name: Set up Python
21
+ run: uv python install 3.12
22
+
23
+ - name: Create venv and install
24
+ run: |
25
+ uv venv
26
+ uv pip install spinchain
27
+ uv pip install -e ".[dev]"
28
+
29
+ - name: Run tests before publish
30
+ run: uv run python -m pytest tests/ -v --tb=short
31
+
32
+ - name: Build package
33
+ run: uv build
34
+
35
+ - name: Publish to PyPI
36
+ uses: pypa/gh-action-pypi-publish@release/v1
@@ -0,0 +1,14 @@
1
+ # Local config (project-specific, not for repo)
2
+ .anneal/
3
+ CLAUDE.md
4
+
5
+ # Python
6
+ __pycache__/
7
+ *.py[cod]
8
+ .venv/
9
+ dist/
10
+ *.egg-info/
11
+ .ruff_cache/
12
+
13
+ # Test artifacts
14
+ .pytest_cache/
@@ -0,0 +1,42 @@
1
+ # Anneal DevOps
2
+
3
+ Full DevOps documentation lives in the SpinChain repo:
4
+ `spinchain/docs/superpowers/specs/2026-04-13-devops-workflow.md`
5
+
6
+ It covers the complete workflow for both SpinChain and Anneal:
7
+ - Project architecture and relationship
8
+ - Local development setup
9
+ - CI/CD pipelines
10
+ - Release and publish process
11
+ - Cross-repo dependency management
12
+ - PyPI trusted publisher setup
13
+
14
+ ## Quick Reference
15
+
16
+ | Item | Value |
17
+ |------|-------|
18
+ | PyPI name | `anneal-context` |
19
+ | Import name | `anneal` |
20
+ | Entry point | `anneal-server` |
21
+ | Depends on | `spinchain>=0.1.0` |
22
+ | CI | `.github/workflows/ci.yml` — test + lint + SpinChain canary |
23
+ | Publish | `.github/workflows/publish.yml` — triggers on GitHub Release |
24
+ | SpinChain public API | `spinchain/PUBLIC_API.md` |
25
+
26
+ ## Local Dev
27
+
28
+ ```bash
29
+ cd ~/quantum/anneal
30
+ source .venv/bin/activate
31
+ uv pip install -e ~/quantum/spinchain # editable spinchain
32
+ uv pip install -e ".[dev]"
33
+ python -m pytest tests/ -v
34
+ ruff check src/ tests/
35
+ ```
36
+
37
+ ## Release
38
+
39
+ 1. Ensure SpinChain is published to PyPI first (if new version needed)
40
+ 2. Update version in `pyproject.toml`
41
+ 3. `git commit`, `git tag vX.Y.Z`, `git push origin main --tags`
42
+ 4. Create GitHub Release from tag → publish.yml runs automatically
@@ -0,0 +1,156 @@
1
+ Metadata-Version: 2.4
2
+ Name: anneal-context
3
+ Version: 0.1.0
4
+ Summary: Optimal context selection for AI coding assistants using QUBO/Ising formulation
5
+ Author-email: Ameya Khot <ameyakhot18@gmail.com>
6
+ License: MIT
7
+ Requires-Python: >=3.11
8
+ Requires-Dist: dimod>=0.12.0
9
+ Requires-Dist: dwave-neal>=0.6.0
10
+ Requires-Dist: mcp>=1.0.0
11
+ Requires-Dist: numpy>=1.26.0
12
+ Requires-Dist: scipy>=1.12.0
13
+ Provides-Extra: dev
14
+ Requires-Dist: pytest>=8.0.0; extra == 'dev'
15
+ Requires-Dist: ruff>=0.4.0; extra == 'dev'
16
+ Requires-Dist: spinchain; extra == 'dev'
17
+ Description-Content-Type: text/markdown
18
+
19
+ # Anneal
20
+
21
+ Optimal context selection for AI coding assistants. Cooling random context down to exactly what your AI needs.
22
+
23
+ Anneal reads your codebase's structural graph (from Graphify or code-review-graph),
24
+ formulates "which chunks are optimal?" as a QUBO problem, solves with simulated
25
+ annealing, and returns the minimum context set for your task.
26
+
27
+ ## How It Works
28
+
29
+ 1. Reads codebase graph (Graphify `graph.json` or code-review-graph SQLite)
30
+ 2. Generates candidate chunks (keyword matching + graph topology)
31
+ 3. Formulates QUBO: minimize token cost, maximize relevance, reward dependency coverage
32
+ 4. Solves via simulated annealing (SpinChain engine)
33
+ 5. Returns stability-ranked, dependency-ordered chunks within your token budget
34
+
35
+ ## Requirements
36
+
37
+ - Python 3.11+
38
+ - At least one graph tool: [Graphify](https://github.com/safishamsi/graphify) or [code-review-graph](https://github.com/nicholasgasior/code-review-graph)
39
+
40
+ ## Installation
41
+
42
+ ```bash
43
+ pip install anneal-context
44
+ ```
45
+
46
+ Or with uv:
47
+ ```bash
48
+ uv tool install anneal-context
49
+ ```
50
+
51
+ ## Setup
52
+
53
+ **1. Install a graph tool** (required):
54
+
55
+ ```bash
56
+ # code-review-graph
57
+ npx code-review-graph install
58
+
59
+ # Graphify (Claude Code)
60
+ /plugin marketplace add safishamsi/graphify && /graphify
61
+ ```
62
+
63
+ **2. Create `.anneal/config.toml`** in your project root:
64
+
65
+ ```toml
66
+ [budget]
67
+ default_tokens = 5000
68
+ strategy = "balanced" # "minimal" | "balanced" | "thorough"
69
+
70
+ [solver]
71
+ backend = "simulated-annealing"
72
+ num_reads = 100
73
+ num_sweeps = 1000
74
+ ```
75
+
76
+ Add `.anneal/` to your `.gitignore`.
77
+
78
+ ## MCP Server Setup
79
+
80
+ ### Claude Code
81
+
82
+ Add to `.claude/settings.json`:
83
+ ```json
84
+ {
85
+ "mcpServers": {
86
+ "anneal": {
87
+ "command": "anneal-server"
88
+ }
89
+ }
90
+ }
91
+ ```
92
+
93
+ ### Gemini CLI
94
+
95
+ Add to `~/.gemini/settings.json`:
96
+ ```json
97
+ {
98
+ "mcpServers": {
99
+ "anneal": {
100
+ "command": "anneal-server"
101
+ }
102
+ }
103
+ }
104
+ ```
105
+
106
+ ### OpenAI Codex CLI
107
+
108
+ Add to `~/.codex/config.toml`:
109
+ ```toml
110
+ [[mcp_servers]]
111
+ name = "anneal"
112
+ command = "anneal-server"
113
+ ```
114
+
115
+ ### Cursor / VS Code + Copilot / Aider
116
+
117
+ Any MCP-compatible client: run `anneal-server` via stdio transport.
118
+
119
+ ## Tools
120
+
121
+ ### `get_optimal_context`
122
+
123
+ ```
124
+ Parameters:
125
+ task_description: str -- what you want to do
126
+ token_budget: int | None -- max tokens (default: 5000)
127
+ include_files: list[str] -- always include these paths
128
+ exclude_files: list[str] -- never include these paths
129
+ strategy: str -- "balanced" | "minimal" | "thorough"
130
+
131
+ Returns:
132
+ selected_chunks: list[{path, content, relevance_score, tokens}]
133
+ total_tokens: int
134
+ budget_utilization: float
135
+ stability_score: float
136
+ dependency_graph: dict
137
+ ```
138
+
139
+ ### `get_status`
140
+
141
+ Returns graph source availability, node counts, solver config.
142
+
143
+ ## Development
144
+
145
+ ```bash
146
+ git clone https://github.com/ameyakhot/anneal
147
+ cd anneal
148
+ uv venv && source .venv/bin/activate
149
+ uv pip install -e /path/to/spinchain
150
+ uv pip install -e ".[dev]"
151
+ python -m pytest tests/ -v
152
+ ```
153
+
154
+ ## License
155
+
156
+ MIT
@@ -0,0 +1,138 @@
1
+ # Anneal
2
+
3
+ Optimal context selection for AI coding assistants. Cooling random context down to exactly what your AI needs.
4
+
5
+ Anneal reads your codebase's structural graph (from Graphify or code-review-graph),
6
+ formulates "which chunks are optimal?" as a QUBO problem, solves with simulated
7
+ annealing, and returns the minimum context set for your task.
8
+
9
+ ## How It Works
10
+
11
+ 1. Reads codebase graph (Graphify `graph.json` or code-review-graph SQLite)
12
+ 2. Generates candidate chunks (keyword matching + graph topology)
13
+ 3. Formulates QUBO: minimize token cost, maximize relevance, reward dependency coverage
14
+ 4. Solves via simulated annealing (SpinChain engine)
15
+ 5. Returns stability-ranked, dependency-ordered chunks within your token budget
16
+
17
+ ## Requirements
18
+
19
+ - Python 3.11+
20
+ - At least one graph tool: [Graphify](https://github.com/safishamsi/graphify) or [code-review-graph](https://github.com/nicholasgasior/code-review-graph)
21
+
22
+ ## Installation
23
+
24
+ ```bash
25
+ pip install anneal-context
26
+ ```
27
+
28
+ Or with uv:
29
+ ```bash
30
+ uv tool install anneal-context
31
+ ```
32
+
33
+ ## Setup
34
+
35
+ **1. Install a graph tool** (required):
36
+
37
+ ```bash
38
+ # code-review-graph
39
+ npx code-review-graph install
40
+
41
+ # Graphify (Claude Code)
42
+ /plugin marketplace add safishamsi/graphify && /graphify
43
+ ```
44
+
45
+ **2. Create `.anneal/config.toml`** in your project root:
46
+
47
+ ```toml
48
+ [budget]
49
+ default_tokens = 5000
50
+ strategy = "balanced" # "minimal" | "balanced" | "thorough"
51
+
52
+ [solver]
53
+ backend = "simulated-annealing"
54
+ num_reads = 100
55
+ num_sweeps = 1000
56
+ ```
57
+
58
+ Add `.anneal/` to your `.gitignore`.
59
+
60
+ ## MCP Server Setup
61
+
62
+ ### Claude Code
63
+
64
+ Add to `.claude/settings.json`:
65
+ ```json
66
+ {
67
+ "mcpServers": {
68
+ "anneal": {
69
+ "command": "anneal-server"
70
+ }
71
+ }
72
+ }
73
+ ```
74
+
75
+ ### Gemini CLI
76
+
77
+ Add to `~/.gemini/settings.json`:
78
+ ```json
79
+ {
80
+ "mcpServers": {
81
+ "anneal": {
82
+ "command": "anneal-server"
83
+ }
84
+ }
85
+ }
86
+ ```
87
+
88
+ ### OpenAI Codex CLI
89
+
90
+ Add to `~/.codex/config.toml`:
91
+ ```toml
92
+ [[mcp_servers]]
93
+ name = "anneal"
94
+ command = "anneal-server"
95
+ ```
96
+
97
+ ### Cursor / VS Code + Copilot / Aider
98
+
99
+ Any MCP-compatible client: run `anneal-server` via stdio transport.
100
+
101
+ ## Tools
102
+
103
+ ### `get_optimal_context`
104
+
105
+ ```
106
+ Parameters:
107
+ task_description: str -- what you want to do
108
+ token_budget: int | None -- max tokens (default: 5000)
109
+ include_files: list[str] -- always include these paths
110
+ exclude_files: list[str] -- never include these paths
111
+ strategy: str -- "balanced" | "minimal" | "thorough"
112
+
113
+ Returns:
114
+ selected_chunks: list[{path, content, relevance_score, tokens}]
115
+ total_tokens: int
116
+ budget_utilization: float
117
+ stability_score: float
118
+ dependency_graph: dict
119
+ ```
120
+
121
+ ### `get_status`
122
+
123
+ Returns graph source availability, node counts, solver config.
124
+
125
+ ## Development
126
+
127
+ ```bash
128
+ git clone https://github.com/ameyakhot/anneal
129
+ cd anneal
130
+ uv venv && source .venv/bin/activate
131
+ uv pip install -e /path/to/spinchain
132
+ uv pip install -e ".[dev]"
133
+ python -m pytest tests/ -v
134
+ ```
135
+
136
+ ## License
137
+
138
+ MIT
File without changes
@@ -0,0 +1,40 @@
1
+ [project]
2
+ name = "anneal-context"
3
+ version = "0.1.0"
4
+ description = "Optimal context selection for AI coding assistants using QUBO/Ising formulation"
5
+ readme = "README.md"
6
+ authors = [
7
+ { name = "Ameya Khot", email = "ameyakhot18@gmail.com" }
8
+ ]
9
+ license = { text = "MIT" }
10
+ requires-python = ">=3.11"
11
+ dependencies = [
12
+ "mcp>=1.0.0",
13
+ "dimod>=0.12.0",
14
+ "dwave-neal>=0.6.0",
15
+ "numpy>=1.26.0",
16
+ "scipy>=1.12.0",
17
+ ]
18
+
19
+ [project.optional-dependencies]
20
+ dev = [
21
+ "pytest>=8.0.0",
22
+ "ruff>=0.4.0",
23
+ "spinchain",
24
+ ]
25
+
26
+ [project.scripts]
27
+ anneal-server = "anneal.server:main"
28
+
29
+ [build-system]
30
+ requires = ["hatchling"]
31
+ build-backend = "hatchling.build"
32
+
33
+ [tool.hatch.build.targets.wheel]
34
+ packages = ["src/anneal"]
35
+
36
+ [tool.ruff]
37
+ line-length = 100
38
+
39
+ [tool.pytest.ini_options]
40
+ pythonpath = ["src"]
@@ -0,0 +1,81 @@
1
+ # Anneal — Optimal Context Selection
2
+
3
+ Use this skill to get the optimal code context before starting any task. Anneal
4
+ uses QUBO optimization over the codebase dependency graph to select the minimum
5
+ token set that maximizes task relevance.
6
+
7
+ ## When to Use
8
+
9
+ Call `mcp__anneal__get_optimal_context` at the start of any coding task before
10
+ reading files. This replaces manual file discovery.
11
+
12
+ ## Usage
13
+
14
+ ### Get optimal context
15
+ ```
16
+ mcp__anneal__get_optimal_context(
17
+ task_description="<describe what you want to do>",
18
+ token_budget=5000,
19
+ strategy="balanced",
20
+ include_files=[],
21
+ exclude_files=[],
22
+ )
23
+ ```
24
+
25
+ ### Check status
26
+ ```
27
+ mcp__anneal__get_status()
28
+ ```
29
+
30
+ ## First-Time Setup
31
+
32
+ If `get_optimal_context` returns an error about no graph sources, install one:
33
+
34
+ **code-review-graph** (recommended):
35
+ ```
36
+ npx code-review-graph install
37
+ ```
38
+
39
+ **Graphify:**
40
+ ```
41
+ /plugin marketplace add safishamsi/graphify
42
+ /graphify
43
+ ```
44
+
45
+ Then create `.anneal/config.toml` in your project root:
46
+ ```toml
47
+ [budget]
48
+ default_tokens = 5000
49
+ strategy = "balanced"
50
+
51
+ [solver]
52
+ backend = "simulated-annealing"
53
+ num_reads = 100
54
+ num_sweeps = 1000
55
+ ```
56
+
57
+ ## Installation (Claude Code)
58
+
59
+ Add to your MCP config:
60
+ ```json
61
+ {
62
+ "mcpServers": {
63
+ "anneal": {
64
+ "command": "anneal-server",
65
+ "args": []
66
+ }
67
+ }
68
+ }
69
+ ```
70
+
71
+ Or with uvx (no install):
72
+ ```json
73
+ {
74
+ "mcpServers": {
75
+ "anneal": {
76
+ "command": "uvx",
77
+ "args": ["anneal-context"]
78
+ }
79
+ }
80
+ }
81
+ ```
@@ -0,0 +1,24 @@
1
+ # Anneal — Gemini CLI Setup
2
+
3
+ ## Installation
4
+
5
+ ```bash
6
+ pip install anneal-context
7
+ ```
8
+
9
+ Add to `~/.gemini/settings.json`:
10
+ ```json
11
+ {
12
+ "mcpServers": {
13
+ "anneal": {
14
+ "command": "anneal-server"
15
+ }
16
+ }
17
+ }
18
+ ```
19
+
20
+ ## Usage
21
+
22
+ ```
23
+ @anneal get_optimal_context task_description="add unit tests for auth module"
24
+ ```
File without changes
File without changes
@@ -0,0 +1,25 @@
1
+ # src/anneal/assembly/budget_manager.py
2
+ """Token counting and hard budget enforcement."""
3
+
4
+ from __future__ import annotations
5
+ from anneal.formulation.candidate_generator import Candidate
6
+
7
_CHARS_PER_TOKEN = 4  # rough heuristic: ~4 chars per token (no tokenizer dependency)


class BudgetManager:
    """Approximate token counting and hard token-budget enforcement.

    Token counts use a chars-per-token heuristic rather than a real
    tokenizer; trimming is a greedy selection by relevance score.
    """

    def count_tokens(self, text: str) -> int:
        """Return an approximate token count for ``text``.

        Uses the ~4-characters-per-token heuristic. ``len`` is never
        negative, so no lower-bound clamp is needed (the previous
        ``max(0, ...)`` guard was redundant and has been removed).
        """
        return len(text) // _CHARS_PER_TOKEN

    def trim_to_budget(self, candidates: list[Candidate], budget: int) -> list[Candidate]:
        """Return a subset of ``candidates`` whose total tokens fit ``budget``.

        If the full set already fits, the input list is returned as-is
        (same object, same order). Otherwise candidates are taken greedily
        in descending ``relevance_score`` order; any candidate that would
        overflow the budget is skipped, so a smaller, lower-relevance
        candidate later in the ranking may still be included.
        """
        total = sum(c.tokens for c in candidates)
        if total <= budget:
            return candidates
        selected: list[Candidate] = []
        used = 0
        # Greedy knapsack: most relevant first, skip anything that overflows.
        for cand in sorted(candidates, key=lambda c: -c.relevance_score):
            if used + cand.tokens <= budget:
                selected.append(cand)
                used += cand.tokens
        return selected