# vimlm 0.0.4.tar.gz → 0.0.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vimlm-0.0.4/vimlm.egg-info → vimlm-0.0.5}/PKG-INFO +43 -17
- {vimlm-0.0.4 → vimlm-0.0.5}/README.md +40 -15
- {vimlm-0.0.4 → vimlm-0.0.5}/setup.py +1 -1
- {vimlm-0.0.4 → vimlm-0.0.5/vimlm.egg-info}/PKG-INFO +43 -17
- vimlm-0.0.5/vimlm.egg-info/requires.txt +3 -0
- {vimlm-0.0.4 → vimlm-0.0.5}/vimlm.py +33 -28
- vimlm-0.0.4/vimlm.egg-info/requires.txt +0 -2
- {vimlm-0.0.4 → vimlm-0.0.5}/LICENSE +0 -0
- {vimlm-0.0.4 → vimlm-0.0.5}/setup.cfg +0 -0
- {vimlm-0.0.4 → vimlm-0.0.5}/vimlm.egg-info/SOURCES.txt +0 -0
- {vimlm-0.0.4 → vimlm-0.0.5}/vimlm.egg-info/dependency_links.txt +0 -0
- {vimlm-0.0.4 → vimlm-0.0.5}/vimlm.egg-info/entry_points.txt +0 -0
- {vimlm-0.0.4 → vimlm-0.0.5}/vimlm.egg-info/top_level.txt +0 -0
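
In short: 0.0.5 makes the backing model configurable. `vimlm.py` gains an `LLM_MODEL` setting (default `mlx-community/DeepSeek-R1-Distill-Qwen-7B-4bit`) loaded from `~/vimlm/cfg.json`, a new `mlx_lm_utils` dependency for running arbitrary MLX models (with `nanollama` kept as the fallback backend), startup reordered so config-load failures can be logged, and a chat API that now returns a dict with `text` and `benchmark` keys. `README.md` and both `PKG-INFO` copies document the new model setup, and `setup.py` carries a one-line change (presumably the version bump).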
## {vimlm-0.0.4/vimlm.egg-info → vimlm-0.0.5}/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: vimlm
-Version: 0.0.4
+Version: 0.0.5
 Summary: VimLM - LLM-powered Vim assistant
 Home-page: https://github.com/JosefAlbers/vimlm
 Author: Josef Albers
@@ -8,7 +8,8 @@ Author-email: albersj66@gmail.com
 Requires-Python: >=3.12.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: nanollama==0.0.
+Requires-Dist: nanollama==0.0.5b0
+Requires-Dist: mlx_lm_utils==0.0.1a0
 Requires-Dist: watchfiles==1.0.4
 Dynamic: author
 Dynamic: author-email
@@ -19,7 +20,7 @@ Dynamic: requires-dist
 Dynamic: requires-python
 Dynamic: summary
 
-# VimLM -
+# VimLM - Local LLM-Powered Coding Assistant for Vim
 
 
 
@@ -27,11 +28,20 @@ An LLM-powered coding companion for Vim, inspired by GitHub Copilot/Cursor. Inte
 
 ## Features
 
-- **
-- **
-- **Context-
--
--
+- **Model Agnostic** - Use any MLX-compatible model via config file
+- **Vim-Native UX** - Ctrl-l/Ctrl-r keybindings and split-window responses
+- **Deep Context** - Understands code context from:
+  - Current file
+  - Visual selections
+  - Referenced files (`!@#$` syntax)
+  - Project directory structure
+- **Conversational Coding** - Iterative refinement with follow-up queries
+- **Air-Gapped Security** - 100% offline - no APIs, no tracking, no data leaks
+
+## Requirements
+
+- Apple M-series chip (M1/M2/M3/M4)
+- Python 3.12.8
 
 ## Installation
 
@@ -39,15 +49,9 @@ An LLM-powered coding companion for Vim, inspired by GitHub Copilot/Cursor. Inte
 pip install vimlm
 ```
 
-##
+## Quick Start
 
-1.
-
-```zsh
-vimlm
-```
-
-or
+1. Launch with default model (DeepSeek-R1-Distill-Qwen-7B-4bit):
 
 ```zsh
 vimlm your_file.js
@@ -58,7 +62,7 @@ vimlm your_file.js
   - Example prompt: "Regex for removing html tags in item.content"
 
 3. **From Visual Mode**:
-  - Select
+  - Select code → `Ctrl-l`: Send selection + file context
   - Example prompt: "Convert this to async/await syntax"
 
 4. **Add Context**: Use `!@#$` to include additional files/folders:
@@ -71,6 +75,28 @@ vimlm your_file.js
   - `Ctrl-r`: Continue thread
   - Example follow-up: "In Manifest V3"
 
+## Advanced Configuration
+
+### Custom Model Setup
+
+1. **Browse models**: [MLX Community Models on Hugging Face](https://huggingface.co/mlx-community)
+
+2. **Edit config file**:
+
+```json
+{
+  "LLM_MODEL": "/path/to/your/mlx_model"
+}
+```
+
+3. **Save to**:
+
+```
+~/vimlm/cfg.json
+```
+
+4. **Restart VimLM**
+
 ## Key Bindings
 
 | Binding | Mode | Action |
````
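The "Custom Model Setup" steps in the new long description show only the `LLM_MODEL` key, but the config loader in the `vimlm.py` diff below also reads `DEBUG`, `NUM_TOKEN`, and `SEP_CMD`. A minimal sketch (not shipped with the package) that writes a complete `~/vimlm/cfg.json` using the 0.0.5 defaults:

```python
# Illustrative only: write ~/vimlm/cfg.json with every key vimlm.py 0.0.5 reads.
# Key names and default values are taken from the vimlm.py diff in this release.
import json
import os

cfg = {
    "DEBUG": True,                                                   # default in vimlm.py
    "LLM_MODEL": "mlx-community/DeepSeek-R1-Distill-Qwen-7B-4bit",   # any MLX repo or local path
    "NUM_TOKEN": 2000,                                               # total generation token budget
    "SEP_CMD": "!@#$",                                               # file-inclusion separator
}

os.makedirs(os.path.expanduser("~/vimlm"), exist_ok=True)
with open(os.path.expanduser("~/vimlm/cfg.json"), "w") as f:
    json.dump(cfg, f, indent=2)
```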
## {vimlm-0.0.4 → vimlm-0.0.5}/README.md

````diff
@@ -1,4 +1,4 @@
-# VimLM -
+# VimLM - Local LLM-Powered Coding Assistant for Vim
 
 
 
@@ -6,11 +6,20 @@ An LLM-powered coding companion for Vim, inspired by GitHub Copilot/Cursor. Inte
 
 ## Features
 
-- **
-- **
-- **Context-
--
--
+- **Model Agnostic** - Use any MLX-compatible model via config file
+- **Vim-Native UX** - Ctrl-l/Ctrl-r keybindings and split-window responses
+- **Deep Context** - Understands code context from:
+  - Current file
+  - Visual selections
+  - Referenced files (`!@#$` syntax)
+  - Project directory structure
+- **Conversational Coding** - Iterative refinement with follow-up queries
+- **Air-Gapped Security** - 100% offline - no APIs, no tracking, no data leaks
+
+## Requirements
+
+- Apple M-series chip (M1/M2/M3/M4)
+- Python 3.12.8
 
 ## Installation
 
@@ -18,15 +27,9 @@ An LLM-powered coding companion for Vim, inspired by GitHub Copilot/Cursor. Inte
 pip install vimlm
 ```
 
-##
+## Quick Start
 
-1.
-
-```zsh
-vimlm
-```
-
-or
+1. Launch with default model (DeepSeek-R1-Distill-Qwen-7B-4bit):
 
 ```zsh
 vimlm your_file.js
@@ -37,7 +40,7 @@ vimlm your_file.js
   - Example prompt: "Regex for removing html tags in item.content"
 
 3. **From Visual Mode**:
-  - Select
+  - Select code → `Ctrl-l`: Send selection + file context
   - Example prompt: "Convert this to async/await syntax"
 
 4. **Add Context**: Use `!@#$` to include additional files/folders:
@@ -50,6 +53,28 @@ vimlm your_file.js
   - `Ctrl-r`: Continue thread
   - Example follow-up: "In Manifest V3"
 
+## Advanced Configuration
+
+### Custom Model Setup
+
+1. **Browse models**: [MLX Community Models on Hugging Face](https://huggingface.co/mlx-community)
+
+2. **Edit config file**:
+
+```json
+{
+  "LLM_MODEL": "/path/to/your/mlx_model"
+}
+```
+
+3. **Save to**:
+
+```
+~/vimlm/cfg.json
+```
+
+4. **Restart VimLM**
+
 ## Key Bindings
 
 | Binding | Mode | Action |
````
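Worth noting for the "Edit config file" step: in the new startup code (see the `vimlm.py` diff below), `LLM_MODEL` defaults to the DeepSeek MLX model, and only an explicit `"LLM_MODEL": null` in `cfg.json` restores the 0.0.4 nanollama backend. A standalone sketch of that selection logic, assembled from the diff:

```python
# Standalone sketch of 0.0.5's backend selection; the branch mirrors the
# vimlm.py diff below. LLM_MODEL comes from ~/vimlm/cfg.json at startup.
LLM_MODEL = "mlx-community/DeepSeek-R1-Distill-Qwen-7B-4bit"  # 0.0.5 default

if LLM_MODEL is None:                            # "LLM_MODEL": null in cfg.json
    from nanollama32 import Chat
    chat = Chat(variant='uncn_llama_32_3b_it')   # bundled fallback model
else:                                            # any MLX-compatible repo or local path
    from mlx_lm_utils import Chat
    chat = Chat(model_path=LLM_MODEL)
```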
## {vimlm-0.0.4 → vimlm-0.0.5/vimlm.egg-info}/PKG-INFO

The diff for this second copy of `PKG-INFO` is identical, hunk for hunk, to the `PKG-INFO` diff shown above.
## {vimlm-0.0.4 → vimlm-0.0.5}/vimlm.py

````diff
@@ -17,7 +17,6 @@ import subprocess
 import json
 import os
 from watchfiles import awatch
-from nanollama32 import Chat
 import shutil
 import time
 from itertools import accumulate
@@ -27,6 +26,7 @@ from pathlib import Path
 from string import Template
 
 DEBUG = True
+LLM_MODEL = "mlx-community/DeepSeek-R1-Distill-Qwen-7B-4bit"
 NUM_TOKEN = 2000
 SEP_CMD = '!@#$'
 VIMLM_DIR = os.path.expanduser("~/vimlm")
@@ -41,20 +41,6 @@ LOG_PATH = os.path.join(VIMLM_DIR, LOG_FILE)
 LTM_PATH = os.path.join(VIMLM_DIR, LTM_FILE)
 OUT_PATH = os.path.join(WATCH_DIR, OUT_FILE)
 
-if os.path.exists(WATCH_DIR):
-    shutil.rmtree(WATCH_DIR)
-os.makedirs(WATCH_DIR)
-
-try:
-    with open(CFG_PATH, "r") as f:
-        cfg = cfg.load(f)
-    DEBUG = config.get("DEBUG", DEBUG)
-    NUM_TOKEN = config.get("NUM_TOKEN", NUM_TOKEN)
-    SEP_CMD = config.get("SEP_CMD", SEP_CMD)
-except:
-    with open(CFG_PATH, 'w') as f:
-        json.dump(dict(DEBUG=DEBUG, NUM_TOKEN=NUM_TOKEN, SEP_CMD=SEP_CMD), f, indent=2)
-
 def toout(s, key='tovim'):
     with open(OUT_PATH, 'w', encoding='utf-8') as f:
         f.write(s)
@@ -63,18 +49,40 @@ def toout(s, key='tovim'):
 def tolog(log, key='debug'):
     if not DEBUG and key == 'debug':
         return
-
+    try:
         with open(LOG_PATH, "r", encoding="utf-8") as log_f:
             logs = json.load(log_f)
-
+    except:
         logs = []
     logs.append(dict(key=key, log=log, timestamp=time.ctime()))
     with open(LOG_PATH, "w", encoding="utf-8") as log_f:
         json.dump(logs, log_f, indent=2)
 
+if os.path.exists(WATCH_DIR):
+    shutil.rmtree(WATCH_DIR)
+os.makedirs(WATCH_DIR)
+
+try:
+    with open(CFG_PATH, "r") as f:
+        config = json.load(f)
+    DEBUG = config.get("DEBUG", DEBUG)
+    LLM_MODEL = config.get("LLM_MODEL", LLM_MODEL)
+    NUM_TOKEN = config.get("NUM_TOKEN", NUM_TOKEN)
+    SEP_CMD = config.get("SEP_CMD", SEP_CMD)
+except Exception as e:
+    tolog(str(e))
+    with open(CFG_PATH, 'w') as f:
+        json.dump(dict(DEBUG=DEBUG, LLM_MODEL=LLM_MODEL, NUM_TOKEN=NUM_TOKEN, SEP_CMD=SEP_CMD), f, indent=2)
+
 toout('Loading LLM...')
-
-
+if LLM_MODEL is None:
+    from nanollama32 import Chat
+    chat = Chat(variant='uncn_llama_32_3b_it')
+    toout('LLM is ready')
+else:
+    from mlx_lm_utils import Chat
+    chat = Chat(model_path=LLM_MODEL)
+    toout(f'{LLM_MODEL.split('/')[-1]} is ready')
 
 def is_binary(file_path):
     try:
@@ -142,9 +150,6 @@ def retrieve(src_path, max_len=2000, get_len=len):
             continue
     return result
 
-def get_ntok(s):
-    return len(chat.tokenizer.encode(s)[0])
-
 def ingest(src):
     def load_cache(cache_path=LTM_PATH):
         if os.path.exists(cache_path):
@@ -161,7 +166,7 @@ def ingest(src):
     toout('Ingesting...')
     format_ingest = '{volat}{incoming}\n\n---\n\nPlease provide a succint bullet point summary for above:'
     format_volat = 'Here is a summary of part 1 of **{k}**:\n\n---\n\n{newsum}\n\n---\n\nHere is the next part:\n\n---\n\n'
-    dict_doc = retrieve(src, get_len=get_ntok)
+    dict_doc = retrieve(src, get_len=chat.get_ntok)
     dict_sum = {}
     cache = load_cache()
     for k, v in dict_doc.items():
@@ -178,14 +183,14 @@ def ingest(src):
             accum = ''
             for s in list_str:
                 chat.reset()
-                newsum = chat(format_ingest.format(volat=volat, incoming=s.strip()), max_new=max_new_sum, verbose=False, stream=None)[
+                newsum = chat(format_ingest.format(volat=volat, incoming=s.strip()), max_new=max_new_sum, verbose=False, stream=None)['text']
                 accum += newsum + ' ...\n'
                 volat = format_volat.format(k=k, newsum=newsum)
         else:
             accum = list_str[0]
         chat.reset()
         toout('')
-        chat_summary = chat(format_ingest.format(volat=f'**{k}**:\n', incoming=accum), max_new=int(NUM_TOKEN/4), verbose=False, stream=OUT_PATH)[
+        chat_summary = chat(format_ingest.format(volat=f'**{k}**:\n', incoming=accum), max_new=int(NUM_TOKEN/4), verbose=False, stream=OUT_PATH)['text']
         dict_sum[k] = dict(timestamp=v_stamp, summary=chat_summary)
         dump_cache(dict_sum)
     result = ''
@@ -253,9 +258,9 @@ async def process_files(data):
     prompt = str_template.format(**data)
     tolog(prompt, 'tollm')
     toout('')
-    response = chat(prompt, max_new=NUM_TOKEN - get_ntok(prompt), verbose=False, stream=OUT_PATH)
-    toout(response[
-    tolog(response[
+    response = chat(prompt, max_new=NUM_TOKEN - chat.get_ntok(prompt), verbose=False, stream=OUT_PATH)
+    toout(response['text'])
+    tolog(response['benchmark'])
 
 VIMLMSCRIPT = Template(r"""
 let s:watched_dir = expand('$WATCH_DIR')
````
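The remaining hunks change the chat call contract: token counting moves from the module-level `get_ntok()` onto the backend object as `chat.get_ntok()`, and `chat(...)` now returns a dict with `'text'` and `'benchmark'` keys instead of an indexable result. A minimal usage sketch with names taken from the diff (the prompt string is illustrative):

```python
# Minimal usage sketch of the 0.0.5 chat contract (names from the diff above).
from mlx_lm_utils import Chat

NUM_TOKEN = 2000  # total token budget, as in vimlm.py / cfg.json
chat = Chat(model_path="mlx-community/DeepSeek-R1-Distill-Qwen-7B-4bit")

prompt = "Regex for removing html tags in item.content"   # illustrative prompt
max_new = NUM_TOKEN - chat.get_ntok(prompt)                # leave room for the prompt

response = chat(prompt, max_new=max_new, verbose=False, stream=None)
print(response['text'])       # the generated reply (streamed to OUT_PATH in vimlm)
print(response['benchmark'])  # performance metadata, logged via tolog() in vimlm
```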
The remaining six files (LICENSE, setup.cfg, and the egg-info support files) are unchanged.