1bcoder 0.1.7__tar.gz → 0.1.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/1bcoder.egg-info/PKG-INFO +22 -6
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/1bcoder.egg-info/SOURCES.txt +8 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/PKG-INFO +22 -6
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/README.md +21 -5
- 1bcoder-0.1.8/_bcoder_data/agents/websearch.txt +28 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/aliases.txt +4 -2
- 1bcoder-0.1.8/_bcoder_data/doc/FLOWS.md +128 -0
- 1bcoder-0.1.8/_bcoder_data/flows/__pycache__/commit_message.cpython-311.pyc +0 -0
- 1bcoder-0.1.8/_bcoder_data/flows/commit_message.py +38 -0
- 1bcoder-0.1.8/_bcoder_data/flows/grounding.py +106 -0
- 1bcoder-0.1.8/_bcoder_data/flows/py_error_trace.py +87 -0
- 1bcoder-0.1.8/_bcoder_data/flows/simargl_files.py +66 -0
- 1bcoder-0.1.8/_bcoder_data/flows/webask.py +59 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/chat.py +298 -96
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/pyproject.toml +1 -1
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/1bcoder.egg-info/dependency_links.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/1bcoder.egg-info/entry_points.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/1bcoder.egg-info/requires.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/1bcoder.egg-info/top_level.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/LICENSE +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/__init__.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/agents/advance.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/agents/ask.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/agents/compact.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/agents/concepts.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/agents/fill.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/agents/planning.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/agents/scan.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/agents/sqlite.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/doc/MCP.md +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/doc/OLLAMA_SERVER_PARAM.md +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/doc/PARAM.md +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/doc/PROC.md +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/doc/TRANSLATE.md +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/map.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/action-required.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/add-save.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/assist.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/collect-files.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/ctx_cut.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/extract-code.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/extract-files.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/extract-list.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/grounding-check.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/md.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/mdx.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/pattern-gate.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/regexp-extract.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/rude_words.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/scan-save.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/secret_check.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/sql_readonly_guard.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/proc/tempctx-cut.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/profiles.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/prompts/analysis.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/prompts/sumarise.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/prompts.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/AddFunction.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/AskProject.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/CheckRequirements.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/DockerMySQL.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/DockerNginx.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/DockerPython.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/DockerStack.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/DuckDuckGoInstant.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/EnvTemplate.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/Explain.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/ExploreProjectStructure.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/GitIgnorePython.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/MySQLDump.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/NewScript.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/PipFreeze.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/PyPI.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/Refactor.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/RunAndFix.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/SQLiteSchema.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/Translate.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/WikiPage.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/WikiSearch.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/auto-bkup.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/edit-control.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/parallel_call.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/personal/content/create-regular-content.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/personal/content/plan.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/personal/test/collect-data-from-test-environment.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/plan.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/remote/create-content-on-remote-server.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/set_ctx.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/simargl-cli_index_files.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/simargl-cli_index_units.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/simargl-cli_search.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/team-map-worker.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/team-search-worker.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/team-summarize.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/team-tree-worker.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/scripts/test.txt +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/_bcoder_data/teams/code-analysis.yaml +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/map_index.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/map_query.py +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/setup.cfg +0 -0
- {1bcoder-0.1.7 → 1bcoder-0.1.8}/tests/test_utils.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: 1bcoder
|
|
3
|
-
Version: 0.1.
|
|
3
|
+
Version: 0.1.8
|
|
4
4
|
Summary: AI coding assistant agent for 1B–7B local models (Ollama, LMStudio, llama.cpp). Terminal REPL with file editing, project map, agents, scripts, and parallel multi-model queries.
|
|
5
5
|
Project-URL: Homepage, https://github.com/szholobetsky/1bcoder
|
|
6
6
|
Project-URL: Repository, https://github.com/szholobetsky/1bcoder
|
|
@@ -371,10 +371,7 @@ Config is saved globally to `~/.1bcoder/translate.json` and applied automaticall
|
|
|
371
371
|
| `/translate on` | Enable translation (uses saved language and mode) |
|
|
372
372
|
| `/translate off` | Disable translation |
|
|
373
373
|
| `/translate status` | Show lang, mode, lm_host, lm_model, enabled |
|
|
374
|
-
| `/translate lang
|
|
375
|
-
| `/translate mode online\|mini\|offline\|lm [profile <name>]` | Switch mode; `profile` loads host+model from profiles.txt |
|
|
376
|
-
| `/translate last` | Retranslate last reply |
|
|
377
|
-
| `/translate last mode offline lang de` | Retranslate with overrides (mode / lang) |
|
|
374
|
+
| `/translate last [mode:<m>] [lang:<code>]` | Retranslate last reply with optional overrides |
|
|
378
375
|
|
|
379
376
|
Common language codes: `uk` Ukrainian, `de` German, `fr` French, `pl` Polish, `es` Spanish, `zh` Chinese. Full list: `/doc translate`
|
|
380
377
|
|
|
@@ -412,7 +409,7 @@ lmtrans: 192.168.0.5:11434|translategemma:4b|ctx
|
|
|
412
409
|
Then configure `/translate` to use it:
|
|
413
410
|
|
|
414
411
|
```
|
|
415
|
-
/translate setup uk lm host
|
|
412
|
+
/translate setup lang:uk mode:lm host:192.168.0.5:11434 model:translategemma:4b
|
|
416
413
|
```
|
|
417
414
|
|
|
418
415
|
---
|
|
@@ -1551,6 +1548,25 @@ Built-in team scripts in `~/.1bcoder/scripts/`:
|
|
|
1551
1548
|
|
|
1552
1549
|
---
|
|
1553
1550
|
|
|
1551
|
+
### Flows (`/flow`)
|
|
1552
|
+
|
|
1553
|
+
Flows are deterministic Python pipelines — they run commands, loop over lists, and pass everything to the LLM in a **temporary context**. Only the final summary lands in your main conversation. Works reliably even with 1B models because the LLM only needs to read, not make tool decisions.
|
|
1554
|
+
|
|
1555
|
+
```
|
|
1556
|
+
/flow list # list all available flows
|
|
1557
|
+
/flow webask what is asyncio -d 5 # web search → fetch top 5 pages → summarize
|
|
1558
|
+
/flow grounding fix MultilineTernary # extract codebase keywords → locate files → summarize
|
|
1559
|
+
/flow simargl_files add user auth # simargl retrieval → read matched files → explain
|
|
1560
|
+
/flow py_error_trace -f error.txt # parse traceback → read code at each location → explain
|
|
1561
|
+
/flow commit_message # git diff → generate commit message
|
|
1562
|
+
```
|
|
1563
|
+
|
|
1564
|
+
Custom flows go in `~/.1bcoder/flows/<name>.py` (global) or `.1bcoder/flows/<name>.py` (project-local).
|
|
1565
|
+
Each flow is a Python file with a single `run(chat, args)` function.
|
|
1566
|
+
Full guide: `/doc flows`
|
|
1567
|
+
|
|
1568
|
+
---
|
|
1569
|
+
|
|
1554
1570
|
### Session controls
|
|
1555
1571
|
|
|
1556
1572
|
| Command | Description |
|
|
@@ -23,11 +23,19 @@ _bcoder_data/agents/fill.txt
|
|
|
23
23
|
_bcoder_data/agents/planning.txt
|
|
24
24
|
_bcoder_data/agents/scan.txt
|
|
25
25
|
_bcoder_data/agents/sqlite.txt
|
|
26
|
+
_bcoder_data/agents/websearch.txt
|
|
27
|
+
_bcoder_data/doc/FLOWS.md
|
|
26
28
|
_bcoder_data/doc/MCP.md
|
|
27
29
|
_bcoder_data/doc/OLLAMA_SERVER_PARAM.md
|
|
28
30
|
_bcoder_data/doc/PARAM.md
|
|
29
31
|
_bcoder_data/doc/PROC.md
|
|
30
32
|
_bcoder_data/doc/TRANSLATE.md
|
|
33
|
+
_bcoder_data/flows/commit_message.py
|
|
34
|
+
_bcoder_data/flows/grounding.py
|
|
35
|
+
_bcoder_data/flows/py_error_trace.py
|
|
36
|
+
_bcoder_data/flows/simargl_files.py
|
|
37
|
+
_bcoder_data/flows/webask.py
|
|
38
|
+
_bcoder_data/flows/__pycache__/commit_message.cpython-311.pyc
|
|
31
39
|
_bcoder_data/proc/action-required.py
|
|
32
40
|
_bcoder_data/proc/add-save.py
|
|
33
41
|
_bcoder_data/proc/assist.py
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: 1bcoder
|
|
3
|
-
Version: 0.1.
|
|
3
|
+
Version: 0.1.8
|
|
4
4
|
Summary: AI coding assistant agent for 1B–7B local models (Ollama, LMStudio, llama.cpp). Terminal REPL with file editing, project map, agents, scripts, and parallel multi-model queries.
|
|
5
5
|
Project-URL: Homepage, https://github.com/szholobetsky/1bcoder
|
|
6
6
|
Project-URL: Repository, https://github.com/szholobetsky/1bcoder
|
|
@@ -371,10 +371,7 @@ Config is saved globally to `~/.1bcoder/translate.json` and applied automaticall
|
|
|
371
371
|
| `/translate on` | Enable translation (uses saved language and mode) |
|
|
372
372
|
| `/translate off` | Disable translation |
|
|
373
373
|
| `/translate status` | Show lang, mode, lm_host, lm_model, enabled |
|
|
374
|
-
| `/translate lang
|
|
375
|
-
| `/translate mode online\|mini\|offline\|lm [profile <name>]` | Switch mode; `profile` loads host+model from profiles.txt |
|
|
376
|
-
| `/translate last` | Retranslate last reply |
|
|
377
|
-
| `/translate last mode offline lang de` | Retranslate with overrides (mode / lang) |
|
|
374
|
+
| `/translate last [mode:<m>] [lang:<code>]` | Retranslate last reply with optional overrides |
|
|
378
375
|
|
|
379
376
|
Common language codes: `uk` Ukrainian, `de` German, `fr` French, `pl` Polish, `es` Spanish, `zh` Chinese. Full list: `/doc translate`
|
|
380
377
|
|
|
@@ -412,7 +409,7 @@ lmtrans: 192.168.0.5:11434|translategemma:4b|ctx
|
|
|
412
409
|
Then configure `/translate` to use it:
|
|
413
410
|
|
|
414
411
|
```
|
|
415
|
-
/translate setup uk lm host
|
|
412
|
+
/translate setup lang:uk mode:lm host:192.168.0.5:11434 model:translategemma:4b
|
|
416
413
|
```
|
|
417
414
|
|
|
418
415
|
---
|
|
@@ -1551,6 +1548,25 @@ Built-in team scripts in `~/.1bcoder/scripts/`:
|
|
|
1551
1548
|
|
|
1552
1549
|
---
|
|
1553
1550
|
|
|
1551
|
+
### Flows (`/flow`)
|
|
1552
|
+
|
|
1553
|
+
Flows are deterministic Python pipelines — they run commands, loop over lists, and pass everything to the LLM in a **temporary context**. Only the final summary lands in your main conversation. Works reliably even with 1B models because the LLM only needs to read, not make tool decisions.
|
|
1554
|
+
|
|
1555
|
+
```
|
|
1556
|
+
/flow list # list all available flows
|
|
1557
|
+
/flow webask what is asyncio -d 5 # web search → fetch top 5 pages → summarize
|
|
1558
|
+
/flow grounding fix MultilineTernary # extract codebase keywords → locate files → summarize
|
|
1559
|
+
/flow simargl_files add user auth # simargl retrieval → read matched files → explain
|
|
1560
|
+
/flow py_error_trace -f error.txt # parse traceback → read code at each location → explain
|
|
1561
|
+
/flow commit_message # git diff → generate commit message
|
|
1562
|
+
```
|
|
1563
|
+
|
|
1564
|
+
Custom flows go in `~/.1bcoder/flows/<name>.py` (global) or `.1bcoder/flows/<name>.py` (project-local).
|
|
1565
|
+
Each flow is a Python file with a single `run(chat, args)` function.
|
|
1566
|
+
Full guide: `/doc flows`
|
|
1567
|
+
|
|
1568
|
+
---
|
|
1569
|
+
|
|
1554
1570
|
### Session controls
|
|
1555
1571
|
|
|
1556
1572
|
| Command | Description |
|
|
@@ -356,10 +356,7 @@ Config is saved globally to `~/.1bcoder/translate.json` and applied automaticall
|
|
|
356
356
|
| `/translate on` | Enable translation (uses saved language and mode) |
|
|
357
357
|
| `/translate off` | Disable translation |
|
|
358
358
|
| `/translate status` | Show lang, mode, lm_host, lm_model, enabled |
|
|
359
|
-
| `/translate lang
|
|
360
|
-
| `/translate mode online\|mini\|offline\|lm [profile <name>]` | Switch mode; `profile` loads host+model from profiles.txt |
|
|
361
|
-
| `/translate last` | Retranslate last reply |
|
|
362
|
-
| `/translate last mode offline lang de` | Retranslate with overrides (mode / lang) |
|
|
359
|
+
| `/translate last [mode:<m>] [lang:<code>]` | Retranslate last reply with optional overrides |
|
|
363
360
|
|
|
364
361
|
Common language codes: `uk` Ukrainian, `de` German, `fr` French, `pl` Polish, `es` Spanish, `zh` Chinese. Full list: `/doc translate`
|
|
365
362
|
|
|
@@ -397,7 +394,7 @@ lmtrans: 192.168.0.5:11434|translategemma:4b|ctx
|
|
|
397
394
|
Then configure `/translate` to use it:
|
|
398
395
|
|
|
399
396
|
```
|
|
400
|
-
/translate setup uk lm host
|
|
397
|
+
/translate setup lang:uk mode:lm host:192.168.0.5:11434 model:translategemma:4b
|
|
401
398
|
```
|
|
402
399
|
|
|
403
400
|
---
|
|
@@ -1536,6 +1533,25 @@ Built-in team scripts in `~/.1bcoder/scripts/`:
|
|
|
1536
1533
|
|
|
1537
1534
|
---
|
|
1538
1535
|
|
|
1536
|
+
### Flows (`/flow`)
|
|
1537
|
+
|
|
1538
|
+
Flows are deterministic Python pipelines — they run commands, loop over lists, and pass everything to the LLM in a **temporary context**. Only the final summary lands in your main conversation. Works reliably even with 1B models because the LLM only needs to read, not make tool decisions.
|
|
1539
|
+
|
|
1540
|
+
```
|
|
1541
|
+
/flow list # list all available flows
|
|
1542
|
+
/flow webask what is asyncio -d 5 # web search → fetch top 5 pages → summarize
|
|
1543
|
+
/flow grounding fix MultilineTernary # extract codebase keywords → locate files → summarize
|
|
1544
|
+
/flow simargl_files add user auth # simargl retrieval → read matched files → explain
|
|
1545
|
+
/flow py_error_trace -f error.txt # parse traceback → read code at each location → explain
|
|
1546
|
+
/flow commit_message # git diff → generate commit message
|
|
1547
|
+
```
|
|
1548
|
+
|
|
1549
|
+
Custom flows go in `~/.1bcoder/flows/<name>.py` (global) or `.1bcoder/flows/<name>.py` (project-local).
|
|
1550
|
+
Each flow is a Python file with a single `run(chat, args)` function.
|
|
1551
|
+
Full guide: `/doc flows`
|
|
1552
|
+
|
|
1553
|
+
---
|
|
1554
|
+
|
|
1539
1555
|
### Session controls
|
|
1540
1556
|
|
|
1541
1557
|
| Command | Description |
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
# Web search + fetch agent for capable models (4B+)
|
|
2
|
+
# Usage: /agent websearch <question>
|
|
3
|
+
description = Web research agent. Searches DuckDuckGo, fetches relevant pages, summarizes findings.
|
|
4
|
+
max_turns = 10
|
|
5
|
+
auto_exec = true
|
|
6
|
+
auto_apply = true
|
|
7
|
+
|
|
8
|
+
system =
|
|
9
|
+
You are a web research assistant. Search the web and fetch pages to answer the question.
|
|
10
|
+
|
|
11
|
+
To call a tool, write ACTION: on its own line followed by the command.
|
|
12
|
+
Wait for [tool result] before calling the next tool.
|
|
13
|
+
One ACTION per turn.
|
|
14
|
+
When done, write a clear summary answer. Do not write any ACTION when done.
|
|
15
|
+
|
|
16
|
+
Strategy:
|
|
17
|
+
1. Start with ACTION: /web search <question>
|
|
18
|
+
2. Review the search results (titles, URLs, snippets)
|
|
19
|
+
3. Pick the most relevant URLs and fetch them with ACTION: /web fetch <url>
|
|
20
|
+
4. Fetch up to 3 pages total — stop earlier if you already have enough to answer
|
|
21
|
+
5. Write a concise summary that directly answers the original question
|
|
22
|
+
|
|
23
|
+
Available tools:
|
|
24
|
+
{tool_list}
|
|
25
|
+
|
|
26
|
+
tools =
|
|
27
|
+
web search
|
|
28
|
+
web fetch
|
|
@@ -7,8 +7,10 @@
|
|
|
7
7
|
/sqlite = /agent sqlite
|
|
8
8
|
/plan = /agent planning
|
|
9
9
|
/fill = /agent fill
|
|
10
|
-
/scan
|
|
11
|
-
/compact
|
|
10
|
+
/scan = /agent scan {{args}}
|
|
11
|
+
/compact = /agent compact {{args}}
|
|
12
|
+
/websearch = /agent websearch {{args}}
|
|
13
|
+
/webask = /flow webask {{args}}
|
|
12
14
|
|
|
13
15
|
/small = /parallel {{args}} profile: small
|
|
14
16
|
/explain = /parallel {{args}} profile: explain
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
# Writing custom flows
|
|
2
|
+
|
|
3
|
+
A **flow** is a Python file in `~/.1bcoder/flows/` (or `.1bcoder/flows/` for project-local).
|
|
4
|
+
Run with `/flow <name> [args]`. List with `/flow list`.
|
|
5
|
+
|
|
6
|
+
## Minimal template
|
|
7
|
+
|
|
8
|
+
```python
|
|
9
|
+
"""One-line description shown by /flow list. Usage: /flow myflow <args>"""
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def run(chat, args: str):
|
|
13
|
+
# 1. validate args
|
|
14
|
+
if not args.strip():
|
|
15
|
+
print("usage: /flow myflow <something>")
|
|
16
|
+
return
|
|
17
|
+
|
|
18
|
+
# 2. collect data
|
|
19
|
+
data = chat._agent_exec("/some command", auto_apply=True)
|
|
20
|
+
|
|
21
|
+
# 3. ask LLM in temp context
|
|
22
|
+
temp_msgs = [{"role": "system", "content": chat._role},
|
|
23
|
+
{"role": "user", "content": f"Your question:\n{data}"}]
|
|
24
|
+
chat._sep("AI")
|
|
25
|
+
reply = chat._stream_chat(temp_msgs)
|
|
26
|
+
|
|
27
|
+
# 4. inject only summary into main context
|
|
28
|
+
if reply:
|
|
29
|
+
chat.last_reply = reply
|
|
30
|
+
chat._last_output = reply
|
|
31
|
+
chat.messages.append({"role": "user", "content": "[myflow: label]"})
|
|
32
|
+
chat.messages.append({"role": "assistant", "content": reply})
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
## The four parts every flow has
|
|
36
|
+
|
|
37
|
+
### 1. Validate args
|
|
38
|
+
Check required input early and print usage if missing.
|
|
39
|
+
|
|
40
|
+
### 2. Collect data
|
|
41
|
+
Run commands, read files, call external tools — build the raw material for the LLM.
|
|
42
|
+
The key rule: **collect as little as needed**. Prefer narrow searches first, fall back to broad ones.
|
|
43
|
+
|
|
44
|
+
```python
|
|
45
|
+
# narrow first, stop when you have something
|
|
46
|
+
result = chat._agent_exec("/map find MyClass -d 2", auto_apply=True)
|
|
47
|
+
if not result or "no matches" in result:
|
|
48
|
+
result = chat._agent_exec("/find MyClass -f", auto_apply=True)
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
### 3. Ask LLM in temporary context
|
|
52
|
+
Always use `temp_msgs` — never append raw collected data to `chat.messages`.
|
|
53
|
+
Raw data stays in temp context and is discarded after the LLM responds.
|
|
54
|
+
|
|
55
|
+
```python
|
|
56
|
+
temp_msgs = [{"role": "system", "content": chat._role},
|
|
57
|
+
{"role": "user", "content": prompt}]
|
|
58
|
+
reply = chat._stream_chat(temp_msgs)
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
### 4. Inject only the summary
|
|
62
|
+
After the LLM replies, add a short label + the reply to main context.
|
|
63
|
+
The raw data (which may be thousands of chars) never pollutes the conversation.
|
|
64
|
+
|
|
65
|
+
```python
|
|
66
|
+
chat.messages.append({"role": "user", "content": "[myflow: label]"})
|
|
67
|
+
chat.messages.append({"role": "assistant", "content": reply})
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
## chat object — what you can use
|
|
71
|
+
|
|
72
|
+
| API | Description |
|
|
73
|
+
|---|---|
|
|
74
|
+
| `chat._agent_exec(cmd, auto_apply=True)` | Run any `/command`, return output as string |
|
|
75
|
+
| `chat._stream_chat(messages)` | Send messages to LLM, stream output, return reply |
|
|
76
|
+
| `chat._sep("AI")` | Print the `─── AI ───` separator before streaming |
|
|
77
|
+
| `chat._role` | Current system persona string |
|
|
78
|
+
| `chat.messages` | Main conversation history (append summary here) |
|
|
79
|
+
| `chat.last_reply` | Last LLM reply — set this after your LLM call |
|
|
80
|
+
| `chat._last_output` | Last output shown to user — set same as last_reply |
|
|
81
|
+
| `chat._vars` | Session variables dict — read/write `{{varname}}` values |
|
|
82
|
+
| `chat._web_ddg_search(term, n=8)` | DuckDuckGo search → list of (title, url, snippet) |
|
|
83
|
+
| `chat._web_strip_html(bytes)` | Strip HTML tags from fetched page bytes |
|
|
84
|
+
|
|
85
|
+
## Parsing args
|
|
86
|
+
|
|
87
|
+
For simple flags use `re.search`:
|
|
88
|
+
|
|
89
|
+
```python
|
|
90
|
+
import re as _re
|
|
91
|
+
|
|
92
|
+
n = 5
|
|
93
|
+
m = _re.search(r"-n\s+(\d+)", args)
|
|
94
|
+
if m:
|
|
95
|
+
n = int(m.group(1))
|
|
96
|
+
args = (args[:m.start()] + args[m.end():]).strip()
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
For file input with `-f`:
|
|
100
|
+
|
|
101
|
+
```python
|
|
102
|
+
m = _re.match(r"-f\s+(\S+)", args.strip())
|
|
103
|
+
if m:
|
|
104
|
+
with open(m.group(1), encoding="utf-8") as f:
|
|
105
|
+
content = f.read()
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
## Prompt tips for small models (1b–3b)
|
|
109
|
+
|
|
110
|
+
- Put the data **before** the question, not after
|
|
111
|
+
- One direct question only — no rule lists
|
|
112
|
+
- Short is better: `"Diff:\n{diff}\n\nWrite a one-line commit message."` beats ten bullet points
|
|
113
|
+
|
|
114
|
+
## File location priority
|
|
115
|
+
|
|
116
|
+
1. `.1bcoder/flows/` — project-local (highest priority, overrides others)
|
|
117
|
+
2. `~/.1bcoder/flows/` — user global
|
|
118
|
+
3. `_bcoder_data/flows/` — built-in defaults
|
|
119
|
+
|
|
120
|
+
## Built-in flows as reference
|
|
121
|
+
|
|
122
|
+
| Flow | Pattern |
|
|
123
|
+
|---|---|
|
|
124
|
+
| `webask` | external API (DDG) → loop fetch → temp LLM |
|
|
125
|
+
| `grounding` | 1bcoder command → parse list → progressive loop search → temp LLM |
|
|
126
|
+
| `simargl_files` | external tool via `/run` → parse list → loop `/read` → temp LLM |
|
|
127
|
+
| `py_error_trace` | parse input text → loop file read at line → temp LLM |
|
|
128
|
+
| `commit_message` | subprocess (git) → temp LLM |
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"""Generate a git commit message from staged or unstaged changes. Usage: /flow commit_message"""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def run(chat, args: str):
    """Generate a one-line git commit message from the current diff.

    Prefers the staged diff; falls back to the unstaged diff against HEAD.
    The raw diff is sent to the LLM in a temporary context; only the short
    label and the generated message are appended to the main conversation.
    """
    import subprocess as _sp

    def _git(cmd: list[str]) -> str:
        # Best-effort git call: any failure (missing git, timeout, ...)
        # becomes an inline note instead of an exception.
        try:
            proc = _sp.run(["git"] + cmd, capture_output=True, text=True, timeout=15)
            return proc.stdout.strip()
        except Exception as e:
            return f"(git error: {e})"

    # prefer staged diff; fall back to unstaged
    diff_source = "staged changes"
    diff = _git(["diff", "--staged"])
    if not diff:
        diff_source = "unstaged changes"
        diff = _git(["diff", "HEAD"])
    if not diff:
        print("[commit_message_generator] no changes found — run 'git add' to stage files first")
        return

    print(f"[commit_message_generator] generating from {diff_source} ({len(diff)} chars)")

    # cap the diff at 6000 chars so small-model contexts are not blown out
    prompt = (
        f"Diff ({diff_source}):\n{diff[:6000]}\n\n"
        f"Write a one-line git commit message describing what changed and why."
    )
    temp_msgs = [{"role": "system", "content": chat._role},
                 {"role": "user", "content": prompt}]
    chat._sep("AI")
    reply = chat._stream_chat(temp_msgs)
    if not reply:
        return
    # inject only the summary into the main context
    chat.last_reply = reply
    chat._last_output = reply
    chat.messages.append({"role": "user", "content": f"[commit_message: {diff_source}]"})
    chat.messages.append({"role": "assistant", "content": reply})
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
"""Symbol grounding: extract codebase identifiers from task text, locate each progressively, summarize. Usage: /flow grounding <text>"""
|
|
2
|
+
import re as _re
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def _parse_keywords(output: str) -> list[str]:
|
|
6
|
+
"""Extract keyword list from /map keyword extract -c output (CSV on one line)."""
|
|
7
|
+
keywords = []
|
|
8
|
+
for token in _re.split(r"[,\n]+", output):
|
|
9
|
+
token = token.strip()
|
|
10
|
+
token = _re.sub(r"\(\d+\)$", "", token).strip() # strip count suffix like "Name(12)"
|
|
11
|
+
if token and len(token) < 60 and " " not in token and not token.startswith("("):
|
|
12
|
+
keywords.append(token)
|
|
13
|
+
return keywords[:15]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _has_result(output: str) -> bool:
|
|
17
|
+
"""Return True if the output contains a real match (not just a 'no matches' message)."""
|
|
18
|
+
if not output or output == "(no output)":
|
|
19
|
+
return False
|
|
20
|
+
no_match_patterns = ["no matches", "no match", "not found", "nothing found"]
|
|
21
|
+
low = output.lower()
|
|
22
|
+
return not any(p in low for p in no_match_patterns)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _search_keyword(chat, kw: str) -> str:
    """
    Progressive search for one keyword — stop as soon as we get a result:
    1. /map find {kw} -d 2   — kw in filename, definitions only (no links)
    2. /map find \\{kw} -d 2 — kw as identifier inside any block, definitions only
    3. /find {kw} -f         — grep fallback by filename
    4. /find {kw} -c         — grep fallback by content (last resort, output may be large)

    Returns the first non-empty result tagged with its match kind, or "" when
    no step produced a hit.
    """
    # step 1: keyword in filename
    out = chat._agent_exec(f"/map find {kw} -d 2", auto_apply=True).strip()
    if _has_result(out):
        return f"[filename match]\n{out}"

    # step 2: keyword as identifier inside blocks
    out = chat._agent_exec(f"/map find \\{kw} -d 2", auto_apply=True).strip()
    if _has_result(out):
        return f"[identifier match]\n{out}"

    # step 3: grep by filename
    out = chat._agent_exec(f"/find {kw} -f", auto_apply=True).strip()
    if _has_result(out):
        return f"[filename grep]\n{out}"

    # step 4: grep by content — last resort, may be large
    out = chat._agent_exec(f"/find {kw} -c", auto_apply=True).strip()
    if _has_result(out):
        return f"[content grep]\n{out}"

    return ""
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def run(chat, args: str):
    """Ground a task description in the codebase: extract identifiers, locate each, summarize."""
    if not args.strip():
        print("usage: /flow grounding <text or phrase>")
        return

    print(f"[grounding] extracting keywords from: {args}")
    raw = chat._agent_exec(f"/map keyword extract {args} -c", auto_apply=True)
    kws = _parse_keywords(raw)
    mode = "exact"

    if not kws:
        # exact extraction came up empty — retry with fuzzy matching
        print("[grounding] no exact keywords — trying fuzzy match...")
        raw = chat._agent_exec(f"/map keyword extract {args} -f -c", auto_apply=True)
        kws = _parse_keywords(raw)
        mode = "fuzzy"

    if not kws:
        print("[grounding] no codebase keywords found — try /map keyword index first")
        print(f"[grounding] raw: {repr(raw[:200])}")
        return

    print(f"[grounding] found {len(kws)} keyword(s) [{mode}]: {', '.join(kws)}")

    # search each keyword progressively; keep only keywords that produced hits
    parts = []
    for kw in kws:
        hit = _search_keyword(chat, kw)
        if not hit:
            print(f"[grounding] no hits for: {kw}")
            continue
        parts.append(f"### {kw}\n{hit}")

    if not parts:
        print("[grounding] no hits found for any keyword")
        return

    # ask the LLM in a temporary context — raw search output never enters main history
    prompt = (
        f"Task description: {args}\n\n"
        f"Below are keyword search results from the codebase.\n"
        f"Based on these results, list the specific files where the implementation "
        f"of this task most likely resides. For each file explain in one sentence why.\n\n"
        + "\n\n".join(parts)
    )
    temp_msgs = [{"role": "system", "content": chat._role},
                 {"role": "user", "content": prompt}]
    chat._sep("AI")
    reply = chat._stream_chat(temp_msgs)
    if reply:
        # inject only the summary into the main context
        chat.last_reply = reply
        chat._last_output = reply
        chat.messages.append({"role": "user", "content": f"[grounding: {args}]"})
        chat.messages.append({"role": "assistant", "content": reply})
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
"""Analyse a Python traceback: extract file:line locations, read code context, explain the error. Usage: /flow py_error_trace [-f <file>]"""
|
|
2
|
+
import re as _re
|
|
3
|
+
import os as _os
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _extract_locations(traceback: str) -> list[tuple[str, int]]:
|
|
7
|
+
"""Extract (filepath, lineno) pairs from a Python traceback."""
|
|
8
|
+
locations = []
|
|
9
|
+
seen = set()
|
|
10
|
+
for m in _re.finditer(r'File "([^"]+)", line (\d+)', traceback):
|
|
11
|
+
path, line = m.group(1), int(m.group(2))
|
|
12
|
+
# skip stdlib and site-packages
|
|
13
|
+
norm = path.replace("\\", "/")
|
|
14
|
+
if any(x in norm for x in ("/lib/", "site-packages", "<frozen", "<string")):
|
|
15
|
+
continue
|
|
16
|
+
key = (norm, line)
|
|
17
|
+
if key not in seen:
|
|
18
|
+
seen.add(key)
|
|
19
|
+
locations.append((path, line))
|
|
20
|
+
return locations
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _read_context(path: str, lineno: int, context: int = 10) -> str:
|
|
24
|
+
"""Read lines around lineno from file."""
|
|
25
|
+
try:
|
|
26
|
+
with open(path, encoding="utf-8", errors="replace") as f:
|
|
27
|
+
lines = f.readlines()
|
|
28
|
+
start = max(0, lineno - context - 1)
|
|
29
|
+
end = min(len(lines), lineno + context)
|
|
30
|
+
numbered = []
|
|
31
|
+
for i, l in enumerate(lines[start:end], start=start+1):
|
|
32
|
+
marker = ">>>" if i == lineno else " "
|
|
33
|
+
numbered.append(f"{marker} {i:4d} | {l.rstrip()}")
|
|
34
|
+
return "\n".join(numbered)
|
|
35
|
+
except OSError:
|
|
36
|
+
return f"(could not read {path})"
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def run(chat, args: str):
    """Entry point: gather a traceback, pull code context for each frame, ask the model to explain.

    Traceback source priority: `-f <file>` reads from disk; non-empty args
    are used verbatim; otherwise the last /run output (`chat._last_output`)
    is reused. The model's reply is appended to the conversation history.
    """
    stripped = args.strip()
    file_flag = _re.match(r"-f\s+(\S+)", stripped)
    if file_flag:
        source_path = file_flag.group(1)
        try:
            with open(source_path, encoding="utf-8", errors="replace") as fh:
                traceback_text = fh.read()
        except OSError as exc:
            print(f"[py_error_trace] cannot read file: {exc}")
            return
        print(f"[py_error_trace] reading traceback from {source_path}")
    elif stripped:
        traceback_text = args
    else:
        traceback_text = chat._last_output
        if not traceback_text:
            print("usage: /flow py_error_trace [-f <file>] (or run a command first with /run)")
            return
        print("[py_error_trace] using last /run output as traceback")

    frames = _extract_locations(traceback_text)
    if not frames:
        print("[py_error_trace] no file:line references found in traceback")
        return

    print(f"[py_error_trace] found {len(frames)} location(s)")

    parts = [f"## Traceback\n```\n{traceback_text.strip()}\n```"]
    for src, num in frames:
        print(f"[py_error_trace] reading {src}:{num}")
        parts.append(f"## {src} (line {num})\n```python\n{_read_context(src, num)}\n```")

    question = (
        "Analyse this Python error traceback and the relevant code locations.\n"
        "Explain: what caused the error, which line is the root cause, and what needs to be fixed.\n\n"
        + "\n\n".join(parts)
    )
    conversation = [
        {"role": "system", "content": chat._role},
        {"role": "user", "content": question},
    ]
    chat._sep("AI")
    answer = chat._stream_chat(conversation)
    if answer:
        chat.last_reply = answer
        chat._last_output = answer
        chat.messages.append({"role": "user", "content": "[py_error_trace]"})
        chat.messages.append({"role": "assistant", "content": answer})
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
"""Run simargl task retrieval, read top N matched files, summarize relevance. Usage: /flow simargl_files <task description> [-n N]"""
|
|
2
|
+
import re as _re
|
|
3
|
+
import os as _os
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _parse_file_paths(output: str) -> list[str]:
|
|
7
|
+
"""Extract file paths from simargl output lines like '0.82 src/foo/bar.py'."""
|
|
8
|
+
paths = []
|
|
9
|
+
for line in output.splitlines():
|
|
10
|
+
line = line.strip()
|
|
11
|
+
# match lines: optional score + path with extension
|
|
12
|
+
m = _re.match(r"(?:[\d.]+\s+)?(\S+\.\w+)$", line)
|
|
13
|
+
if m:
|
|
14
|
+
paths.append(m.group(1))
|
|
15
|
+
return paths
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def run(chat, args: str):
    """Entry point: ask simargl for task-relevant files, read them, have the model plan changes.

    `-n N` (anywhere in args) caps how many matched files are read;
    the remainder of args is the task description. The model's reply is
    appended to the conversation history.
    """
    limit = 5
    flag = _re.search(r"-n\s+(\d+)", args)
    if flag:
        limit = int(flag.group(1))
        # Strip the flag so only the task text remains.
        args = (args[:flag.start()] + args[flag.end():]).strip()
    task = args.strip()
    if not task:
        print('usage: /flow simargl_files <task description> [-n N]')
        return

    print(f"[simargl_files] querying simargl for: {task}")
    raw = chat._agent_exec(f'/run simargl search --mode task --sort rank "{task}"', auto_apply=True)
    candidates = _parse_file_paths(raw)

    if not candidates:
        print("[simargl_files] no file paths found in simargl output")
        return

    candidates = candidates[:limit]
    print(f"[simargl_files] reading {len(candidates)} file(s): {', '.join(candidates)}")

    bodies = []
    for rel_path in candidates:
        text = chat._agent_exec(f"/read {rel_path}", auto_apply=True).strip()
        if text:
            bodies.append(f"### {rel_path}\n{text}")
        else:
            print(f"[simargl_files] could not read {rel_path}")

    if not bodies:
        print("[simargl_files] no file content retrieved")
        return

    prompt = (
        f"Given the following task description:\n{task}\n\n"
        "And the following relevant source files:\n\n" + "\n\n".join(bodies) + "\n\n"
        "Explain what changes would be needed and in which files."
    )
    history = [
        {"role": "system", "content": chat._role},
        {"role": "user", "content": prompt},
    ]
    chat._sep("AI")
    answer = chat._stream_chat(history)
    if answer:
        chat.last_reply = answer
        chat._last_output = answer
        chat.messages.append({"role": "user", "content": f"[simargl_files: {task}]"})
        chat.messages.append({"role": "assistant", "content": answer})
|