1bcoder 0.1.3__tar.gz → 0.1.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93) hide show
  1. {1bcoder-0.1.3 → 1bcoder-0.1.5/1bcoder.egg-info}/PKG-INFO +402 -50
  2. {1bcoder-0.1.3 → 1bcoder-0.1.5}/1bcoder.egg-info/SOURCES.txt +16 -1
  3. 1bcoder-0.1.3/README.md → 1bcoder-0.1.5/PKG-INFO +416 -49
  4. 1bcoder-0.1.3/1bcoder.egg-info/PKG-INFO → 1bcoder-0.1.5/README.md +1628 -1291
  5. 1bcoder-0.1.5/_bcoder_data/agents/compact.txt +15 -0
  6. 1bcoder-0.1.5/_bcoder_data/agents/concepts.txt +21 -0
  7. 1bcoder-0.1.5/_bcoder_data/agents/scan.txt +31 -0
  8. 1bcoder-0.1.5/_bcoder_data/aliases.txt +16 -0
  9. 1bcoder-0.1.5/_bcoder_data/doc/OLLAMA_SERVER_PARAM.md +170 -0
  10. 1bcoder-0.1.5/_bcoder_data/proc/action-required.py +42 -0
  11. 1bcoder-0.1.5/_bcoder_data/proc/assist.py +48 -0
  12. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/grounding-check.py +2 -1
  13. 1bcoder-0.1.5/_bcoder_data/proc/md.py +22 -0
  14. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/mdx.py +11 -1
  15. 1bcoder-0.1.5/_bcoder_data/proc/pattern-gate.py +28 -0
  16. 1bcoder-0.1.5/_bcoder_data/proc/scan-save.py +19 -0
  17. 1bcoder-0.1.5/_bcoder_data/proc/tempctx-cut.py +19 -0
  18. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/profiles.txt +7 -0
  19. 1bcoder-0.1.5/_bcoder_data/scripts/auto-bkup.txt +4 -0
  20. 1bcoder-0.1.5/_bcoder_data/scripts/edit-control.txt +6 -0
  21. 1bcoder-0.1.5/_bcoder_data/scripts/simargl-cli_index_files.txt +3 -0
  22. 1bcoder-0.1.5/_bcoder_data/scripts/simargl-cli_index_units.txt +3 -0
  23. 1bcoder-0.1.5/_bcoder_data/scripts/simargl-cli_search.txt +4 -0
  24. {1bcoder-0.1.3 → 1bcoder-0.1.5}/chat.py +8302 -6688
  25. {1bcoder-0.1.3 → 1bcoder-0.1.5}/map_index.py +1 -1
  26. {1bcoder-0.1.3 → 1bcoder-0.1.5}/pyproject.toml +1 -1
  27. 1bcoder-0.1.5/tests/test_utils.py +143 -0
  28. 1bcoder-0.1.3/_bcoder_data/aliases.txt +0 -13
  29. 1bcoder-0.1.3/_bcoder_data/proc/md.py +0 -14
  30. {1bcoder-0.1.3 → 1bcoder-0.1.5}/1bcoder.egg-info/dependency_links.txt +0 -0
  31. {1bcoder-0.1.3 → 1bcoder-0.1.5}/1bcoder.egg-info/entry_points.txt +0 -0
  32. {1bcoder-0.1.3 → 1bcoder-0.1.5}/1bcoder.egg-info/requires.txt +0 -0
  33. {1bcoder-0.1.3 → 1bcoder-0.1.5}/1bcoder.egg-info/top_level.txt +0 -0
  34. {1bcoder-0.1.3 → 1bcoder-0.1.5}/LICENSE +0 -0
  35. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/__init__.py +0 -0
  36. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/agents/advance.txt +0 -0
  37. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/agents/ask.txt +0 -0
  38. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/agents/fill.txt +0 -0
  39. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/agents/planning.txt +0 -0
  40. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/agents/sqlite.txt +0 -0
  41. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/doc/MCP.md +0 -0
  42. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/doc/PARAM.md +0 -0
  43. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/doc/PROC.md +0 -0
  44. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/map.txt +0 -0
  45. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/add-save.py +0 -0
  46. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/collect-files.py +0 -0
  47. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/ctx_cut.py +0 -0
  48. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/extract-code.py +0 -0
  49. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/extract-files.py +0 -0
  50. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/extract-list.py +0 -0
  51. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/regexp-extract.py +0 -0
  52. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/rude_words.py +0 -0
  53. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/secret_check.py +0 -0
  54. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/proc/sql_readonly_guard.py +0 -0
  55. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/prompts/analysis.txt +0 -0
  56. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/prompts/sumarise.txt +0 -0
  57. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/prompts.txt +0 -0
  58. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/AddFunction.txt +0 -0
  59. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/AskProject.txt +0 -0
  60. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/CheckRequirements.txt +0 -0
  61. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/DockerMySQL.txt +0 -0
  62. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/DockerNginx.txt +0 -0
  63. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/DockerPython.txt +0 -0
  64. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/DockerStack.txt +0 -0
  65. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/DuckDuckGoInstant.txt +0 -0
  66. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/EnvTemplate.txt +0 -0
  67. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/Explain.txt +0 -0
  68. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/ExploreProjectStructure.txt +0 -0
  69. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/GitIgnorePython.txt +0 -0
  70. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/MySQLDump.txt +0 -0
  71. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/NewScript.txt +0 -0
  72. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/PipFreeze.txt +0 -0
  73. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/PyPI.txt +0 -0
  74. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/Refactor.txt +0 -0
  75. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/RunAndFix.txt +0 -0
  76. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/SQLiteSchema.txt +0 -0
  77. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/WikiPage.txt +0 -0
  78. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/WikiSearch.txt +0 -0
  79. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/parallel_call.txt +0 -0
  80. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/personal/content/create-regular-content.txt +0 -0
  81. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/personal/content/plan.txt +0 -0
  82. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/personal/test/collect-data-from-test-environment.txt +0 -0
  83. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/plan.txt +0 -0
  84. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/remote/create-content-on-remote-server.txt +0 -0
  85. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/set_ctx.txt +0 -0
  86. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/team-map-worker.txt +0 -0
  87. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/team-search-worker.txt +0 -0
  88. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/team-summarize.txt +0 -0
  89. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/team-tree-worker.txt +0 -0
  90. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/scripts/test.txt +0 -0
  91. {1bcoder-0.1.3 → 1bcoder-0.1.5}/_bcoder_data/teams/code-analysis.yaml +0 -0
  92. {1bcoder-0.1.3 → 1bcoder-0.1.5}/map_query.py +0 -0
  93. {1bcoder-0.1.3 → 1bcoder-0.1.5}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: 1bcoder
3
- Version: 0.1.3
3
+ Version: 0.1.5
4
4
  Summary: AI coding assistant agent for 1B–7B local models (Ollama, LMStudio, llama.cpp). Terminal REPL with file editing, project map, agents, scripts, and parallel multi-model queries.
5
5
  Project-URL: Homepage, https://github.com/szholobetsky/1bcoder
6
6
  Project-URL: Repository, https://github.com/szholobetsky/1bcoder
@@ -15,15 +15,70 @@ Dynamic: license-file
15
15
 
16
16
  # 1bcoder
17
17
 
18
- AI coding assistant agent for 1B–7B local models running locally via [Ollama](https://ollama.com), [LMStudio](https://lmstudio.ai), or [LiteLLM](https://litellm.ai).
18
+ AI coding assistant for small local models (0.5B–4B) running via [Ollama](https://ollama.com), [LMStudio](https://lmstudio.ai), or any OpenAI-compatible backend.
19
19
 
20
20
  ---
21
21
 
22
- **Core idea:** 1B models hallucinate badly when asked to rewrite large blocks of code. 1bcoder works around this by keeping changes small and structured — the model outputs a single-line fix (`LINE N: content`) or a minimal SEARCH/REPLACE block, which the tool then applies with a diff preview before writing to disk.
22
+ ## The problem
23
23
 
24
- Planning and navigation are externalized: plans live in `.txt` files, project structure is indexed into a searchable map so the model never has to hold the whole codebase in its head.
24
+ Small local models are widely available. Most users interact with them through a chat UI and that works well for quick questions. But chat has hard limits: you cannot feed it a 2000-line log file, cannot ask it to run the tests and read the output, cannot have it walk through a large codebase one chunk at a time. For that kind of work you need an agentic system.
25
25
 
26
- **Target:** programmers running `qwen2.5-coder:0.6b` or `llama3.2:1b` on a 4 GB machineoffline, no cloud, no subscription. The tool does the heavy lifting so the model doesn't have to.
26
+ The problem is that every existing agentic system assumes a capable model underneath — typically 8B+ with native tool-calling support. Their system prompts alone consume more tokens than a 1B model's entire context window. Their tool-calling protocols are complex enough that small models hallucinate the format, miss instructions, or loop. So small models are treated as unusable and left out entirely.
27
+
28
+ This is wrong. Small and very small models are genuinely useful — they just require a different kind of tool.
29
+
30
+ ## Privacy and security
31
+
32
+ Every prompt you send to a cloud-based AI assistant leaves your machine. Your code, your architecture decisions, your internal API names, your database schemas, your business logic — all of it travels to a third-party server, is logged, may be used for training, and is subject to the data retention policies of a company you don't control.
33
+
34
+ For personal projects this is an acceptable tradeoff. For professional work it rarely is. Most employment contracts prohibit sending proprietary code to external services. Many industries (finance, healthcare, defense, government) have regulatory requirements that make cloud AI assistance legally problematic or outright forbidden. Even where there is no explicit rule, leaking internal architecture to a vendor is a security risk that most engineering teams would not accept from any other tool.
35
+
36
+ 1bcoder runs entirely on your hardware. The model runs locally. No prompt leaves your machine. No API key, no telemetry, no network connection required. Your code stays where it is — in your editor, on your filesystem, behind your firewall.
37
+
38
+ This is not a niche concern. It is the default requirement for any serious professional environment.
39
+
40
+ ## What different model sizes can actually do
41
+
42
+ | Size | Reliable in 1bcoder |
43
+ |---|---|
44
+ | 0.5b | Explain a 10–20 line function; identify a known technology from a file name; write a standard construct in an unfamiliar language |
45
+ | 1b | Explain a full module; recognize a tech stack from a directory tree; answer questions about error messages and short log excerpts |
46
+ | 1b thinking | Explain whole-file logic; identify design patterns across a module — still unreliable for editing |
47
+ | 2b–4b | Edit files under instruction; write new functions; follow SEARCH/REPLACE format consistently |
48
+
49
+ Every tier is useful. Each requires a different approach to context preparation. 1bcoder provides the tools to do that preparation with surgical precision.
50
+
51
+ ## How 1bcoder works
52
+
53
+ 1bcoder does not depend on the model for navigation or file selection. The programmer controls what goes into context — using a command system to read files, inject logs, run shell commands, and prepare input before asking the model a question. The model's job is a single bounded subtask on pre-prepared input, not autonomous exploration.
54
+
55
+ This is **human-directed** work: the programmer covers what the small model cannot do, and the model handles what it actually does well.
56
+
57
+ Key design decisions:
58
+
59
+ - **Short agent system prompts, at most 5 tools per agent, one function per agent** — `ask`, `edit`, `fill`, `scan`, `compact`. Not universal agents with bloated skill sets.
60
+ - **Tolerant of long and malformed output** — post-processing is automatic; the programmer does not teach the model JSON syntax.
61
+ - **`/parallel`** — send the same context to several models simultaneously and combine results; a 0.5b and a 1b model working together often outperform either alone; designed to coordinate small models running on multiple machines or phones.
62
+ - **`/map`** — project structure index with structural diff; lets the model navigate a codebase without loading it into context.
63
+ - **`/ctx`** — surgical context management: savepoints, selective compaction, named context library, multi-turn rollback. Small models cannot afford wasted tokens.
64
+ - **`/scan`** — reads any large file chunk by chunk and builds a themed summary without overflowing context.
65
+ - **`/proc`** — parameterized command scripts for repeatable preparation workflows.
66
+
67
+ The combination is what matters: commands to build context precisely, agents scoped to one task, and parallel queries to cover what any single small model misses.
68
+
69
+ ## The autonomy tradeoff
70
+
71
+ Most agent system developers aim for full autonomy: the agent reads the task, explores the codebase, writes the code, runs the tests, and ships — without human involvement. Full autonomy is a legitimate goal. It also requires the largest possible models: GPT-4-class or 70B+ locally, with long context, reliable tool-calling, and robust reasoning under uncertainty. Below that threshold, fully autonomous agents fail in ways that are hard to predict and slow to debug.
72
+
73
+ 1bcoder takes the opposite side of this tradeoff deliberately.
74
+
75
+ We accept that the agent will only be partly autonomous. With a 4B model you can run `/agent ask` safely — it explores, reads, and reports, but does not edit. With a 1.5B model the agent loop is unreliable; use it for single bounded tasks with a clear success criterion. With a 0.5B model there is no autonomous loop at all — but the model is still useful for explaining a function, identifying a pattern, or generating a boilerplate construct when you hand it the exact 15 lines it needs.
76
+
77
+ **Partial autonomy is not a failure mode. It is the design.**
78
+
79
+ The programmer stays in the loop — confirming actions, choosing which files to load, deciding when the model is confused and needs a narrower question. This is not a weakness to be engineered away. It is the honest recognition that small models are precise tools, not general reasoners, and that the programmer's judgment is part of the system.
80
+
81
+ The payoff: 1bcoder works offline, on a laptop, on a phone, on hardware you already own. No subscription. No API key. No 30-second round trips. The model that runs on your machine right now — however small — is enough to start.
27
82
 
28
83
  ---
29
84
 
@@ -69,29 +124,34 @@ Tasks that require the model to decide *what to look at* — refactoring across
69
124
  ## Features
70
125
 
71
126
  - Plain terminal REPL — works in any shell, IDE terminal, or SSH session; status line before each prompt shows active model, disk size, quantization, native context limit, and context fill %
72
- - **`/read`** injects files without line numbers (clean text, ideal for `notes.txt` and structured data); **`/readln`** injects with line numbers (use before `/fix` or `/patch` when line references matter)
127
+ - **`/read`** injects files without line numbers (clean text, ideal for `notes.txt` and structured data); **`/readln`** injects with line numbers (use before `/fix` or `/patch` when line references matter); both accept comma- or space-separated file lists — use directly with `{{find_files}}` or `{{map_files}}` captured from `/find` or `/map find`
73
128
  - **Command autocorrection** — typos in command names, file paths, and keywords are detected and fixed automatically before execution, for both human input and agent actions
74
129
  - **`/tree [path]`** — display directory tree of the whole project or any subtree; ask to inject into context (or pass `ctx` to skip the prompt)
75
- - **`/find <pattern>`** — search filenames and file content with regex; supports `-f`/`-c`/`-i`/`--ext` flags; highlights matches, asks to inject results into context
130
+ - **`/find <pattern>`** — search filenames and file content with regex; supports `-f`/`-c`/`-i`/`--ext` flags; highlights matches, asks to inject results into context; sets `{{find_files}}` after every search; **`/find <terms> -r`** ranked BM25 mode returns top-10 files by relevance; hidden directories (`.git`, `.venv`, etc.) excluded automatically
76
131
  - AI proposes a **one-line fix** (`/fix`) or a **SEARCH/REPLACE patch** (`/patch`) — always shows a diff before applying
77
132
  - **Apply AI code blocks directly** with `/edit <file> code` (new/full file) or `/patch <file> code` (SEARCH/REPLACE from reply, no line numbers needed) — preferred for agent mode
78
133
  - **`<think>` tag support** — reasoning blocks shown in terminal by default; `/think hide` suppresses terminal display; `/think include` keeps reasoning in context for chained turns
79
134
  - Run shell commands and inject their output with `/run`
80
135
  - Save AI replies to files with `/save` (code-fence stripping, multiple files, append modes)
81
- - **Session persistence** — `/ctx save` / `/ctx load` dump and restore full conversations; `/ctx compact` summarizes and compresses the context via AI; `/ctx savepoint` marks a position for rollback or selective compaction; `/ctx clear N` drops the last N messages
82
- - **Scripts** — reusable sequences of commands stored as `.txt` files, run step-by-step or fully automated
136
+ - **Session persistence** — `/ctx save` / `/ctx load` dump and restore full conversations; `/ctx list` browses the `.1bcoder/ctx/` project context library; `/ctx compact` summarizes and compresses the context via AI; `/ctx compact N` compacts last N messages in place; `/ctx savepoint` marks a position for rollback or selective compaction; `/ctx clear N` drops the last N messages
137
+ - **Context composer** — `/ctx compose` builds a merged context from multiple saved ctx files with content-level dedup (identical message blocks appear once); workflow: `/proj find` → numbered results → `/ctx compose add N,M` → `/ctx compose run task.ctx` → `/ctx load task.ctx`
138
+ - **Scripts** — reusable sequences of commands stored as `.txt` files; `/script run <file> [key=value ...]` runs all steps automatically; `/script apply` runs step-by-step with Y/n confirmation
83
139
  - **Script from history** — `/script create ctx` captures this session's commands into a reusable script automatically
84
- - **Project map** — scan any codebase into a searchable index (`/map index`), query it (`/map find`), trace call chains (`/map trace`), and diff changes (`/map idiff`) — now includes `ORPHAN_DRIFT` alert (dead code delta) and `GHOST ALERT` (deleted file that other files depended on)
140
+ - **Project map** — scan any codebase into a searchable index (`/map index`), query it (`/map find`), trace call chains (`/map trace`), and diff changes (`/map idiff`) — now includes `ORPHAN_DRIFT` alert (dead code delta) and `GHOST ALERT` (deleted file that other files depended on); `/map find` sets `{{map_files}}` after every hit; hidden directories excluded from indexing
85
141
  - **Ask mode** — `/ask <question>` is an alias for `/agent ask`: a read-only research loop for 4B models that explores the project with tree/find/map tools, never edits files, auto-truncates large results to protect context
86
142
  - **Agent mode** — `/agent <task>` runs an autonomous loop; stops when the model outputs plain text with no ACTION; after the loop a `[s]ummary / [a]ll / [n]one` prompt lets you pull agent results into main context
87
- - **Named agents** — define custom agents in `.1bcoder/agents/<name>.txt` (system prompt, tools, max_turns, aliases, `on_done`); call with `/agent <name> task` or `/<name> task` directly; agent-scoped aliases active only during that run
88
- - **`/plan <goal>`** — planning agent: researches the project, writes a natural-language step-by-step plan to `plan.txt`; run `/agent <task> plan plan.txt` to execute it step by step
143
+ - **Named agents** — define custom agents in `.1bcoder/agents/<name>.txt` (system prompt, tools, max_turns, aliases, `on_done`, `params`, `before`, `gates`); call with `/agent <name> task` or `/<name> task` directly; agent-scoped aliases active only during that run
144
+ - **`/plan <goal>`** — planning agent: researches the project, writes a natural-language step-by-step plan to `plan.txt`; run `/agent <task> file: plan.txt` to execute it step by step
89
145
  - **`/fill`** — fill agent: reads NaN session variables, scans project for `.var` files and config files, sets each value automatically
90
146
  - **Session variables** — `{{name}}` placeholders substituted in any command; save/load from `.var` files for offline reuse without loading files into context
91
- - **Project config** — `/config save` persists session state (host, model, ctx, params, vars, procs) to `.1bcoder/config.yml`; `/config save global` saves to `~/.1bcoder/config.yml`; on startup, the first config with `auto: true` (local global) is applied automatically
147
+ - **Project context** — `/proj set <key>` creates `.1bcoder/projects/<key>/` with `project.txt` (description, keywords, file list); `/proj save`, `/proj find`, `/proj keyword add`, `/proj file add`, `/proj index` (regex-extracts file paths from saved ctx files); active project saved in config and auto-restored on next startup
148
+ - **Project config** — `/config save` persists session state (host, model, ctx, params, vars, procs, active project) to `.1bcoder/config.yml`; `/config save global` saves to `~/.1bcoder/config.yml`; on startup, the first config with `auto: true` (local → global) is applied automatically
92
149
  - **Aliases** — define command shortcuts with `/alias /name = expansion` (supports `{{args}}`); persisted in `aliases.txt`; loaded from global then project directory at startup and survive `/clear`
93
150
  - **Backup/restore** — `/bkup save` rotates existing backups (`file.bkup` → `file.bkup(1)`, `file.bkup(2)`…) so no snapshot is ever overwritten; `/bkup restore` always restores the latest
94
- - **MCP support** connect external tool servers (filesystem, web, git, database, browser…) via the Model Context Protocol
151
+ - **`/tempctx`** — agent-internal context control: `/tempctx N` sets a private token budget, `/tempctx cut` removes oldest messages, `/tempctx clear` resets to system+task, `/tempctx show` prints size; only active inside an agent loop so agents can't touch the main context; also settable via `params = agent_ctx = N` in agent files
152
+ - **Agent proc hooks** — `before =` (runs before every LLM call, injects output as `[context]`) and `gates =` (runs after every reply, `FAIL:` retries the current plan step); enables supervised loops, convention enforcement, and hallucination checks without changing the model
153
+ - **`/scan <file> <theme>`** — named agent that reads any file chunk by chunk, extracts info relevant to a theme, and builds a summary in `.1bcoder/scan_result.txt`; uses `/tempctx cut` between chunks so the agent context never overflows; result is injected into main context at the end
154
+ - **MCP support** — connect external tool servers (filesystem, web, git, database, browser…) via the Model Context Protocol; `/mcp connect <name> <command> [--cwd <dir>]` launches the server subprocess with an optional working directory override
95
155
  - **Parallel queries** — send prompts to multiple models simultaneously with `/parallel`; control context sent (`--ctx`/`--last`/`--no-ctx`) and route replies back into main context (`ctx` output) for sub-agent workflows
96
156
  - **Command hooks** — `/hook before|after <cmd> <script>` runs a script before or after edit/patch/fix/insert; `before` hook cancels the command if the script is missing; `{{file}}` and `{{range}}` injected automatically
97
157
  - Switch model or host at runtime without restarting (`/model gemma3:1b`, `/host openai://localhost:1234`)
@@ -118,6 +178,16 @@ cd 1bcoder
118
178
  pip install -e .
119
179
  ```
120
180
 
181
+ When installed from source with `pip install -e .`, the default data files are not copied automatically. Run the included script once to populate `~/.1bcoder/`:
182
+
183
+ ```bat
184
+ deploy_bcoder_data.bat
185
+ ```
186
+
187
+ This copies everything from `_bcoder_data\` (agents, procs, aliases, profiles, scripts) to `%USERPROFILE%\.1bcoder\`. Re-run after pulling updates to sync new defaults.
188
+
189
+ > **Warning:** This will overwrite any files you have customised in `~/.1bcoder/`. Back up your changes before running.
190
+
121
191
  ### Option 3 — Install directly from GitHub
122
192
 
123
193
  ```bash
@@ -552,8 +622,8 @@ When the loop finishes you are prompted: **`[s]ummary / [a]ll / [n]one`** — ch
552
622
  | `f` | Send feedback to the AI and skip the action (redirect the model mid-loop) |
553
623
  | `q` | Stop the agent |
554
624
 
555
- - **`plan step1, step2, ...`** — optional comma-separated list of items injected as hints one per turn
556
- - **`plan <file.txt>`** — load steps from a `.txt` or `.md` file; numbered/bulleted list items become steps; `### Example` / `### Summary` sections are injected as context before step 1; `max_turns` is raised automatically if the file has more steps than the default limit
625
+ - **`plan: step1, step2, ...`** — optional comma-separated list of items injected as hints one per turn
626
+ - **`file: <steps.txt>`** — load steps from a `.txt` or `.md` file; numbered/bulleted list items become steps; `### Example` / `### Summary` sections are injected as context before step 1; `max_turns` is raised automatically if the file has more steps than the default limit; gate FAIL on a step retries it
557
627
 
558
628
  When the loop finishes you are prompted: **`[s]ummary / [a]ll / [n]one`** — choose how much of the agent's conversation to pull into your main context.
559
629
 
@@ -561,8 +631,8 @@ When the loop finishes you are prompted: **`[s]ummary / [a]ll / [n]one`** — ch
561
631
  /agent find and fix the divide by zero bug in calc.py
562
632
  /agent -t 1 read models.py and explain the User class
563
633
  /agent -y -t 5 refactor utils.py
564
- /agent read file plan models.py, views.py, urls.py
565
- /agent implement the changes plan plan.txt # load steps from plan.txt
634
+ /agent read files plan: models.py, views.py, urls.py
635
+ /agent implement the changes file: plan.txt # load steps from plan.txt
566
636
  ```
567
637
 
568
638
  Configure the default agent in `.1bcoder/agent.txt`:
@@ -582,7 +652,7 @@ tools =
582
652
 
583
653
  ### Named agents
584
654
 
585
- Custom agents are defined in `.1bcoder/agents/<name>.txt` (project-local) or `<install>/.1bcoder/agents/<name>.txt` (global). Local files override global ones. Call them with `/agent <name> task` or directly as `/<name> task`.
655
+ Custom agents are defined in `.1bcoder/agents/<name>.txt` (project-local) or `~/.1bcoder/agents/<name>.txt` (global). Local files override global ones. Call them with `/agent <name> task` or directly as `/<name> task`.
586
656
 
587
657
  **Agent file format:**
588
658
 
@@ -610,18 +680,48 @@ tools =
610
680
  aliases =
611
681
  /search = /map find {{args}}
612
682
  /sql = /run python db.py "{{args}}"
683
+
684
+ params =
685
+ num_predict = 150
686
+ agent_ctx = 4000
687
+ temperature = 0.2
688
+
689
+ before =
690
+ assist /short
691
+
692
+ gates =
693
+ action-required
694
+ pattern-gate "@Query" "use CriteriaBuilder only"
613
695
  ```
614
696
 
615
697
  - **`system =`** — inline multiline system prompt; indented lines continue the block; `{tool_list}` is substituted automatically from the `tools =` list
616
- - **`tools =`** — one tool name per indented line; controls what the agent knows about and what gets shown in its system prompt
698
+ - **`tools =`** — one tool name per indented line; controls what the agent knows about and what gets shown in its system prompt; empty `tools =` line means no tools (pure text agent)
617
699
  - **`aliases =`** — agent-scoped aliases; active only during this agent's run, restored to global state after; `{{args}}` is replaced by everything after the alias name
618
- - **`on_done = <command>`** slash command executed once when the agent finishes naturally (no more ACTIONs); use to save the agent's final reply to a file (e.g. `on_done = /save plan.txt -w`)
700
+ - **`params =`** — model and agent parameters set for this run; `agent_ctx` sets the agent's private context limit (equivalent to `/tempctx N`); `num_predict`, `temperature`, etc. are forwarded to the model
701
+ - **`before =`** — one proc per indented line; runs before every LLM call; all stdout injected as `[context]` into the agent's message list; useful for injecting a hint or parallel sub-query result each turn
702
+ - **`gates =`** — one proc per indented line (with optional args); runs after each reply; if any proc prints `FAIL:`, the current plan step is retried and the FAIL reason is shown to the model as feedback
703
+ - **`on_done = <command>`** — slash command executed once when the agent finishes naturally (no more ACTIONs); use to save the agent's final reply to a file (e.g. `on_done = /ctx compact scan_result`)
619
704
 
620
705
  ```ini
621
706
  # Example: planning agent saves its output automatically
622
707
  on_done = /save plan.txt -w
623
708
  ```
624
709
 
710
+ **Gate procs** — built-in procs designed for use in `gates =`:
711
+
712
+ | Proc | What it checks | FAIL condition |
713
+ |---|---|---|
714
+ | `action-required` | Agent reply contains `ACTION:` or a completion phrase | Neither found — suggests the bare command if one appears anywhere in the reply |
715
+ | `pattern-gate "regexp" "msg"` | Reply matches the given regular expression | Match found — prints `msg` as the FAIL reason |
716
+ | `grounding-check` | Identifiers in reply exist in `map.txt` | Grounding score < 50% (prints warning; does not FAIL by default) |
717
+
718
+ **Before procs** — built-in procs designed for use in `before =`:
719
+
720
+ | Proc | What it does |
721
+ |---|---|
722
+ | `assist /short` | Reads last reply, asks the LLM for a one-sentence next-step hint, injects as `[context]` |
723
+ | `assist /parallel profile <name>` | Same but uses a parallel profile to query the hint model |
724
+
625
725
  Built-in named agents (global install):
626
726
 
627
727
  | Agent | Command | Description |
@@ -630,6 +730,7 @@ Built-in named agents (global install):
630
730
  | `advance` | `/advance <task>` or `/agent advance` | Full toolset for 7B+ models |
631
731
  | `planning` | `/plan <goal>` | Researches project, writes natural-language plan to `plan.txt` |
632
732
  | `fill` | `/fill` | Reads NaN vars, finds `.var` files, sets missing values from project files |
733
+ | `scan` | `/scan <file> <theme>` | Reads large file chunk by chunk, extracts themed info, saves to `.1bcoder/scan_result.txt` |
633
734
 
634
735
  **`/agent advance`** — named agent from `agents/advance.txt`, full toolset for larger models (7B+), includes `run`, `diff`, `map`, `bkup`, and all edit tools. Shortcut: `/advance`:
635
736
 
@@ -638,6 +739,63 @@ Built-in named agents (global install):
638
739
  /advance read and summarise plan models.py, views.py
639
740
  ```
640
741
 
742
+ **`/concepts <topic>`** — alias for `/agent concepts`. Pure brainstorm agent: no tools, no files. Iterates through a list of concept seeds injected via `plan:`, produces a synthesis paragraph per turn. Useful for exploring design ideas, academic framing, or philosophical grounding without polluting the tool context.
743
+
744
+ ```
745
+ /concepts symbol grounding in software engineering
746
+ ```
747
+
748
+ ---
749
+
750
+ ### Agent procs: before and gates
751
+
752
+ `before =` and `gates =` turn an agent into a supervised loop — useful when the model is small or the task requires strict compliance.
753
+
754
+ **`before =`** fires before every LLM call. All stdout is injected as a `[context]` message so the model sees it before generating its reply. Use cases:
755
+ - `assist /short` — ask a second model for a one-sentence hint based on the last reply
756
+ - `assist /parallel profile ten_experts` — poll multiple models for the next search direction
757
+
758
+ **`gates =`** fires after every reply. Each proc receives the reply on stdin. If any prints `FAIL:`, the agent:
759
+ 1. Re-injects the current plan step (so the model sees the hint again)
760
+ 2. Feeds the FAIL reason as user feedback
761
+ 3. Retries the turn (does not advance to the next plan step)
762
+
763
+ This is the key difference from a regular proc: a gate can hold the agent on a step until it produces a compliant reply.
764
+
765
+ **Combined example** — agent that must always emit an ACTION:
766
+
767
+ ```ini
768
+ # agents/strict.txt
769
+ tools =
770
+ read
771
+ patch
772
+ tempctx
773
+
774
+ gates =
775
+ action-required
776
+
777
+ params =
778
+ agent_ctx = 6000
779
+ num_predict = 200
780
+ ```
781
+
782
+ ```
783
+ /agent strict fix the authentication bug file: plan.txt
784
+ ```
785
+
786
+ If the model reasons without acting, `action-required` prints `FAIL:` and the step is retried with the failure reason visible.
787
+
788
+ **Pattern gate** — enforce coding conventions across all turns:
789
+
790
+ ```ini
791
+ gates =
792
+ pattern-gate "from dual" "no FROM DUAL allowed"
793
+ pattern-gate "select \*" "use explicit columns"
794
+ action-required
795
+ ```
796
+
797
+ Multiple gates can be stacked. All run after each reply; a single FAIL from any of them retries the step.
798
+
641
799
  ---
642
800
 
643
801
  ### Aliases
@@ -681,6 +839,7 @@ Lines starting with `[v]` are already done and skipped. Lines starting with `#`
681
839
  | `/script reset` | Unmark all done steps (also happens automatically when a script runs to completion) |
682
840
  | `/script reapply [key=value ...]` | Reset all done steps then apply automatically; prompts for any NaN `{{variables}}` before running |
683
841
  | `/script refresh` | Reload script from disk and show contents |
842
+ | `/script run <file> [key=value ...]` | **Run all steps automatically** — shorthand for `apply -y` |
684
843
  | `/script apply [file] [key=value ...]` | Run steps one by one (Y/n/q per step) |
685
844
  | `/script apply -y [file] [key=value ...]` | Run all pending steps automatically |
686
845
 
@@ -706,7 +865,8 @@ what is wrong in lines {{range}}?
706
865
  ```
707
866
 
708
867
  ```
709
- /script apply fix-fn.txt file=calc.py range=1-4 hint="wrong operator"
868
+ /script run fix-fn.txt file=calc.py range=1-4 hint="wrong operator"
869
+ /script apply fix-fn.txt file=calc.py range=1-4 # same but asks Y/n per step
710
870
  ```
711
871
 
712
872
  Run a script non-interactively from the command line:
@@ -860,7 +1020,119 @@ procs:
860
1020
  - collect-files output.txt
861
1021
  ```
862
1022
 
863
- When `auto: true`, host and model are used at startup to connect; ctx, params, vars, and procs are also restored.
1023
+ When `auto: true`, host and model are used at startup to connect; ctx, params, vars, procs, and active project are also restored.
1024
+
1025
+ ---
1026
+
1027
+ ### Project management (`/proj`)
1028
+
1029
+ Track ctx files and notes per project ticket, feature branch, or work item — stored locally in `.1bcoder/projects/` relative to the working directory. Each project has a human-editable `project.txt` with description, keywords, and file list.
1030
+
1031
+ ```
1032
+ /proj set ABC-123 # activate project (creates .1bcoder/projects/ABC-123/)
1033
+ /proj set role-impl # any valid folder name works
1034
+ /proj status # show active project and project.txt
1035
+ /proj list # all projects, newest first (* = active)
1036
+ ```
1037
+
1038
+ **Save and browse ctx files:**
1039
+ ```
1040
+ /proj save session1.txt # save current ctx to .1bcoder/projects/ABC-123/session1.txt
1041
+ /proj show # list ctx files in active project (newest first)
1042
+ /proj load session1.txt # load ctx file from active project (path resolved automatically)
1043
+ ```
1044
+
1045
+ **Annotate with keywords and files:**
1046
+ ```
1047
+ /proj keyword add ppcon, payment, legacy
1048
+ /proj file add models.py, views.py, finance/amort.py
1049
+ ```
1050
+
1051
+ **Index file paths from saved ctx files** (extracts from `/read`, `/edit`, `/patch`, `/save`, `/insert` command args):
1052
+ ```
1053
+ /proj index
1054
+ ```
1055
+
1056
+ **Search across all projects** in current working directory:
1057
+ ```
1058
+ /proj find payment # fast: search project.txt + ctx filenames (default)
1059
+ /proj find payment -f # same as above (explicit)
1060
+ /proj find payment -c # content: also grep inside ctx files with line numbers
1061
+ ```
1062
+
1063
+ `-f` output (compact):
1064
+ ```
1065
+ ABC-123 — keywords: payment | role-impl — ctx: payment_flow.txt
1066
+ ```
1067
+
1068
+ `-c` output (with content):
1069
+ ```
1070
+ ABC-123 — keywords: payment
1071
+ [project: role-impl]
1072
+ [ctx: user_action_flow.txt]
1073
+ 1021: system highlight current payment in the grid
1074
+ 1122: when user request payment
1075
+ ```
1076
+
1077
+ **Persist active project** — included in `/config save`, auto-restored on next startup:
1078
+ ```
1079
+ /proj set ABC-123
1080
+ /config save
1081
+ # next startup: [proj] ABC-123
1082
+ ```
1083
+
1084
+ **`project.txt` format** (human-editable):
1085
+ ```
1086
+ Description: Fix amortization calculation in legacy Oracle module
1087
+ Keywords: ppcon, payment, legacy
1088
+ Files:
1089
+ models.py
1090
+ finance/amort.py
1091
+ views.py
1092
+ ```
1093
+
1094
+ ---
1095
+
1096
+ ### Context composer (`/ctx compose`)
1097
+
1098
+ Build a merged context from multiple saved ctx files. Identical message blocks appear only once (content-level dedup) — shared tree/read results from different ctx files are merged into one root, then unique branches are appended.
1099
+
1100
+ **Workflow with `/proj find`:**
1101
+ ```
1102
+ /proj find isbn # search projects — results numbered [1], [2], ...
1103
+ /ctx compose add 1,3 # add result #1 and #3 to queue
1104
+ /ctx compose add all # or add all results
1105
+ /ctx compose list # review: filename, size, accumulated total
1106
+ /ctx compose run task.ctx # merge → task.ctx (dedup applied)
1107
+ /ctx load task.ctx # load — LLM wakes up knowing all branches
1108
+ ```
1109
+
1110
+ **Direct compose (no queue):**
1111
+ ```
1112
+ /ctx compose book-html.txt models.txt requirements.txt
1113
+ ```
1114
+ If no output file is given with `run`, the result is merged directly into the current context.
1115
+
1116
+ **Path resolution** — bare filename is resolved automatically:
1117
+ 1. `.1bcoder/ctx/<name>`
1118
+ 2. `.1bcoder/projects/<active_key>/<name>`
1119
+ 3. full path as-is
1120
+
1121
+ **Queue commands:**
1122
+ ```
1123
+ /ctx compose add <file> add file to queue
1124
+ /ctx compose add 1,2,3 add by number from last /proj find
1125
+ /ctx compose add all add all /proj find results
1126
+ /ctx compose list show queue with sizes and running total
1127
+ /ctx compose clear clear queue
1128
+ /ctx compose run [out.txt] merge and write (or load into context)
1129
+ ```
1130
+
1131
+ **`/ctx compact N`** — compact last N messages in place without touching the rest:
1132
+ ```
1133
+ /ctx compact 1 # the LLM wrote 800 tokens — compress that one reply
1134
+ /ctx compact 3 # compress last 3 messages into one block
1135
+ ```
864
1136
 
865
1137
  ---
866
1138
 
@@ -869,12 +1141,14 @@ When `auto: true`, host and model are used at startup to connect; ctx, params, v
869
1141
  Connect external tool servers to give the AI access to filesystems, databases, web pages, and more.
870
1142
 
871
1143
  ```
872
- /mcp connect <name> <command>
1144
+ /mcp connect <name> <command> [--cwd <dir>]
873
1145
  /mcp tools [name]
874
- /mcp call <server/tool> [json_args]
1146
+ /mcp call <server/tool> {json_args}
875
1147
  /mcp disconnect <name>
876
1148
  ```
877
1149
 
1150
+ `--cwd <dir>` sets the working directory for the subprocess — useful when the MCP server needs to find files relative to a specific project root.
1151
+
878
1152
  ```
879
1153
  /mcp connect fs npx -y @modelcontextprotocol/server-filesystem .
880
1154
  /mcp connect web uvx mcp-server-fetch
@@ -883,33 +1157,90 @@ Connect external tool servers to give the AI access to filesystems, databases, w
883
1157
  /mcp disconnect fs
884
1158
  ```
885
1159
 
1160
+ **simargl** — semantic file search via task/commit history (see [simargl](https://github.com/szholobetsky/simargl)):
1161
+
1162
+ ```bash
1163
+ pip install simargl
1164
+ cd C:/Project/my-app
1165
+ simargl index files .
1166
+ ```
1167
+
1168
+ ```
1169
+ # connect once per session (model loads here, ~30-60s first time)
1170
+ /mcp connect simargl simargl-mcp
1171
+
1172
+ # search — instant after connect
1173
+ /mcp call simargl/find {"query": "add author field to book class", "mode": "file"}
1174
+
1175
+ # or use the built-in script
1176
+ /script run simargl-find.txt query="add author field to book class" mode=file
1177
+ ```
1178
+
1179
+ If you used a custom `--project` at index time, pass it at connect:
1180
+ ```
1181
+ /mcp connect simargl simargl-mcp --project-id bookcrossing
1182
+ ```
1183
+
886
1184
  See `/doc MCP` for a full list of ready-to-use servers.
887
1185
 
888
1186
  ---
889
1187
 
890
1188
  ### Parallel queries
891
1189
 
892
- Send a prompt to multiple models at the same time.
1190
+ Send a prompt to multiple models at the same time. No quoting required.
893
1191
 
894
1192
  ```
895
- /parallel ["prompt"] [--ctx|--last|--no-ctx] [profile <name>] [host:port|model|(file or ctx) ...]
1193
+ /parallel [main question] [list: a1, a2, a3] profile: name
1194
+ [ctx: full|last|none] [file: path [-n N]]
1195
+ [collect: compact [profile: name]] [--seq]
896
1196
  ```
897
1197
 
898
- | Flag | Behaviour |
1198
+ **Prompt modes**
1199
+
1200
+ | Mode | Behaviour |
899
1201
  |---|---|
900
- | *(default)* | Full conversation context is sent to every worker |
901
- | `--last` | Only the last user message is sent (saves tokens for small models) |
902
- | `--no-ctx` | No context prompt only (fastest, zero leakage) |
1202
+ | plain text | Same prompt sent to all workers |
1203
+ | `list: a1, a2, a3` | Aspects distributed one per worker; combined with the main question as `{main question}\n\nAspect: {aspect_i}` |
1204
+ | comma-separated prompts | Matched 1:1 to workers (last reused for remaining) |
1205
+
1206
+ Use `list:` when you want to explore a single question from multiple angles simultaneously — each model gets the full question plus one specific aspect to focus on:
1207
+
1208
+ ```
1209
+ /parallel Hegel philosophy list: axiology, epistemology, ethics, logic profile: four-models
1210
+ ```
1211
+
1212
+ **Context modes** (default: `ctx: last`)
1213
+
1214
+ | Keyword | Context sent to workers |
1215
+ |---|---|
1216
+ | `ctx: full` | Full conversation context |
1217
+ | `ctx: last` | Last message only (default) |
1218
+ | `ctx: none` | No context — prompt only |
1219
+
1220
+ **`$` and `~` expansion** — `$` expands to the last AI reply, `~` to your last input:
903
1221
 
904
- Workers write results to a file **or** inject them back into the main context:
1222
+ ```
1223
+ /parallel $ profile: short # ask short model to summarise last reply
1224
+ /parallel ~ list: pros, cons profile: two-models # weigh your last question from two angles
1225
+ ```
1226
+
1227
+ **File chunking** — split a file across workers:
905
1228
 
906
1229
  ```
907
- /parallel "review this for bugs" \
908
- localhost:11434|llama3.2:1b|ans/llm1.txt \
909
- localhost:11435|qwen2.5:1b|ctx
1230
+ /parallel file: bigfile.txt -n auto profile: cluster # -n auto = one chunk per worker
1231
+ /parallel file: notes.md profile: small # === separator used if present
910
1232
  ```
911
1233
 
912
- Using `ctx` as the output target injects the worker's reply into the main conversation the next AI turn will see it.
1234
+ **Collect** compact all worker replies into the context after they finish:
1235
+
1236
+ ```
1237
+ /parallel list: q1, q2 profile: small1 collect: compact
1238
+ /parallel list: q1, q2 profile: small1 collect: compact profile: short
1239
+ ```
1240
+
1241
+ `collect: compact` sets a savepoint automatically before workers run, then calls `/ctx compact savepoint` (optionally with an external model) once all workers finish. The result is available as `$`.
1242
+
1243
+ **Sequential mode** — `--seq` runs workers one after another instead of in parallel.
913
1244
 
914
1245
  **Profiles** — save a set of workers for reuse:
915
1246
 
@@ -919,7 +1250,6 @@ Using `ctx` as the output target injects the worker's reply into the main conver
919
1250
  /parallel profile list # show all profiles (local + global)
920
1251
  /parallel profile show <name> # print raw profile string
921
1252
  /parallel profile add <name> # append current host+model to a profile
922
- /parallel "explain this" profile review
923
1253
  ```
924
1254
 
925
1255
  Profiles stored in `~/.1bcoder/profiles.txt` (global) or `.1bcoder/profiles.txt` (project-local):
@@ -928,7 +1258,7 @@ review: localhost:11434|ministral3:3b|ans/review.txt localhost:11435|cogito:3b|a
928
1258
  fast: localhost:11434|qwen2.5-coder:0.6b|ans/q.txt # quick sanity check
929
1259
  ```
930
1260
 
931
- **Sub-agent profiles** — built-in profiles that return answers directly to the main context (`ctx`):
1261
+ **Sub-agent profiles** — built-in profiles that return answers directly to the main context:
932
1262
 
933
1263
  ```
934
1264
  small: localhost:11434|qwen3:0.6b|ctx
@@ -937,16 +1267,14 @@ thinking: localhost:11434|lfm2.5-thinking:1.2b|ctx
937
1267
  short: localhost:11434|llama3.2:1b|ctx
938
1268
  ```
939
1269
 
940
- These are aliased as `/small`, `/explain`, `/thinking`, `/short` — use them like sub-agents:
1270
+ These are aliased as `/small`, `/explain`, `/thinking`, `/short`:
941
1271
 
942
1272
  ```
943
- /small "what does this function return?" --no-ctx # ask tiny model, no context bleed
944
- /explain "$" # ask gemma to explain last reply
945
- /small ~ # repeat last question to a small model
1273
+ /small what does this function return? # ask tiny model, last message as context
1274
+ /explain $ # ask gemma to explain last reply
1275
+ /short ~ ctx: none # repeat last question with no context
946
1276
  ```
947
1277
 
948
- `~` expands to the last message you typed; `$` expands to the last AI reply — combine them to build sub-agent pipelines without copy-pasting.
949
-
950
1278
  ---
951
1279
 
952
1280
  ### Hooks (`/hook`)
@@ -991,9 +1319,10 @@ Save any useful message as a reusable template and load it later with `{{param}}
991
1319
  ```
992
1320
  /prompt save ConvertJavaToPy # saves last user message as ConvertJavaToPy.txt
993
1321
  /prompt load # numbered list, select by number, fill {{params}} interactively
1322
+ /prompt load 2 # load prompt #2 directly, skipping the selection prompt
994
1323
  ```
995
1324
 
996
- Templates stored in `<install>/.1bcoder/prompts/`. Use `{{keyword}}` placeholders — values are prompted on load.
1325
+ Templates stored in `~/.1bcoder/prompts.txt` (one entry per line: `name: text`). Use `{{keyword}}` placeholders — values are prompted interactively on load.
997
1326
 
998
1327
  ---
999
1328
 
@@ -1007,10 +1336,24 @@ Run a Python script against the last LLM reply. Useful for extracting filenames,
1007
1336
  /proc run <name> -f <file> # run against an external file instead of last reply
1008
1337
  /proc on grounding-check # persistent: run after every reply automatically
1009
1338
  /proc off # stop persistent processor
1339
+ /proc before on assist /short # before-proc: run BEFORE every LLM call, inject output as [context]
1340
+ /proc before off # stop before processor
1341
+ /proc gate on action-required # gate: run after reply; FAIL retries current plan step
1342
+ /proc gate on pattern-gate "regexp" "message" # gate with regexp
1343
+ /proc gate off # stop gate processor
1010
1344
  /proc new my-proc # create a new processor from template
1011
1345
  ```
1012
1346
 
1013
- **Processor protocol:** `stdin` = last LLM reply · `stdout` = result · `key=value` lines = extracted params · `ACTION: /command` = confirmed and executed (run mode only) · `ALERT: message` = warning printed, continues · `BLOCK: reason` = cancels the triggering command (hook mode only) · exit 1 = failure.
1347
+ **Processor protocol:**
1348
+ - `stdin` = last LLM reply
1349
+ - `stdout` = result (injected into context)
1350
+ - `key=value` lines = extracted params
1351
+ - `ACTION: /command` = confirmed and executed (run mode only)
1352
+ - `ALERT: message` = warning printed, continues
1353
+ - `BLOCK: reason` = cancels the triggering command (hook mode only)
1354
+ - `FAIL: reason` = gate mode: retries plan step and feeds reason to model; proc mode: prints warning
1355
+ - exit 1 = show stderr as warning, skip ACTION
1356
+ - `BCODER_WORKDIR` env var is set to the current working directory in all proc subprocesses (use to find `map.txt`, project files, etc.)
1014
1357
 
1015
1358
  Built-in processors in `~/.1bcoder/proc/`:
1016
1359
 
@@ -1019,7 +1362,7 @@ Built-in processors in `~/.1bcoder/proc/`:
1019
1362
  | `extract-files` | Extract filenames, `ACTION: /read` if one found | one-shot |
1020
1363
  | `extract-code` | Extract code blocks; `ACTION: /save <file>` if one block + filename detected | one-shot |
1021
1364
  | `extract-list` | Convert first bullet/numbered list in reply to comma-separated line | one-shot |
1022
- | `grounding-check` | Score identifiers against `map.txt`, warn if <50% | persistent |
1365
+ | `grounding-check` | Score identifiers against `map.txt`, warn if <50% | persistent / gate |
1023
1366
  | `collect-files` | Accumulate filenames to `.1bcoder/collected-files.txt` | persistent |
1024
1367
  | `md` | Render last reply as formatted Markdown in terminal | one-shot |
1025
1368
  | `mdx` | Render last reply as Markdown + LaTeX (KaTeX) + Mermaid diagrams in browser | one-shot |
@@ -1027,6 +1370,9 @@ Built-in processors in `~/.1bcoder/proc/`:
1027
1370
  | `rude_words` | Alert if reply contains profanity (`ua` arg adds Ukrainian list) | persistent |
1028
1371
  | `secret_check` | Alert if reply contains sensitive names (google, anthropic…) | persistent |
1029
1372
  | `sql_readonly_guard` | Alert (proc) or block (hook) on write SQL statements | both |
1373
+ | `action-required` | FAIL if agent reply has no `ACTION:` and no completion phrase | gate |
1374
+ | `pattern-gate` | FAIL if reply matches given regexp (`argv[1]` = pattern, `argv[2]` = message) | gate |
1375
+ | `assist` | Before-proc: reads last reply, asks LLM for one-sentence next-step hint | before |
1030
1376
 
1031
1377
  **Guard usage examples:**
1032
1378
  ```
@@ -1079,7 +1425,7 @@ workers:
1079
1425
  /script apply team-summarize.txt --param keyword=auth --param task="404 on login"
1080
1426
  ```
1081
1427
 
1082
- Built-in team scripts in `<install>/.1bcoder/scripts/`:
1428
+ Built-in team scripts in `~/.1bcoder/scripts/`:
1083
1429
 
1084
1430
  | Script | Worker role |
1085
1431
  |---|---|
@@ -1103,12 +1449,18 @@ Built-in team scripts in `<install>/.1bcoder/scripts/`:
1103
1449
  | `/ctx clear <n>` | Remove last N messages from context |
1104
1450
  | `/ctx cut` | Remove oldest messages until context fits |
1105
1451
  | `/ctx compact` | Ask AI to summarize the conversation, replace context with summary |
1106
- | `/ctx save <file>` | Save full conversation to file |
1107
- | `/ctx load <file>` | Restore a saved conversation |
1452
+ | `/ctx compact <N>` | Summarize last N messages in place, replace with one compact block |
1453
+ | `/ctx save <file>` | Save full conversation to file (global ctx folder) |
1454
+ | `/ctx load <file>` | Restore a saved conversation (bare name resolved from `.1bcoder/ctx/`) |
1455
+ | `/ctx list` | List files in `.1bcoder/ctx/` project context library |
1108
1456
  | `/ctx savepoint set` | Mark current position as a savepoint |
1109
1457
  | `/ctx savepoint rollback` | Remove all messages added since the savepoint |
1110
1458
  | `/ctx savepoint compact` | Summarize messages since savepoint, replace with summary |
1111
1459
  | `/ctx savepoint show` | Show savepoint info and messages added since |
1460
+ | `/tempctx <N>` | Set agent context limit to N tokens for this run (also settable via `params = agent_ctx = N` in agent file) |
1461
+ | `/tempctx show` | Show agent context size — only available inside an agent loop |
1462
+ | `/tempctx cut` | Remove oldest messages from agent context until it fits |
1463
+ | `/tempctx clear` | Reset agent context to system prompt + task only |
1112
1464
  | `/think exclude` | Strip `<think>` blocks from context (default) |
1113
1465
  | `/think include` | Keep `<think>` blocks in context (pass model reasoning to next turn) |
1114
1466
  | `/think show` | Show `<think>` blocks in terminal (default) |