code-lm 0.3.0__tar.gz → 0.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {code_lm-0.3.0/src/code_lm.egg-info → code_lm-0.3.1}/PKG-INFO +20 -7
  2. {code_lm-0.3.0 → code_lm-0.3.1}/README.md +19 -6
  3. {code_lm-0.3.0 → code_lm-0.3.1}/pyproject.toml +1 -1
  4. {code_lm-0.3.0 → code_lm-0.3.1/src/code_lm.egg-info}/PKG-INFO +20 -7
  5. {code_lm-0.3.0 → code_lm-0.3.1}/src/code_lm.egg-info/SOURCES.txt +1 -1
  6. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/__init__.py +1 -1
  7. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/main.py +98 -3
  8. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/models/openrouter.py +96 -2
  9. code_lm-0.3.1/src/lm_code/session.py +97 -0
  10. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/utils.py +1 -3
  11. code_lm-0.3.0/src/lm_code/models/gemini.py +0 -43
  12. {code_lm-0.3.0 → code_lm-0.3.1}/MANIFEST.in +0 -0
  13. {code_lm-0.3.0 → code_lm-0.3.1}/setup.cfg +0 -0
  14. {code_lm-0.3.0 → code_lm-0.3.1}/setup.py +0 -0
  15. {code_lm-0.3.0 → code_lm-0.3.1}/src/code_lm.egg-info/dependency_links.txt +0 -0
  16. {code_lm-0.3.0 → code_lm-0.3.1}/src/code_lm.egg-info/entry_points.txt +0 -0
  17. {code_lm-0.3.0 → code_lm-0.3.1}/src/code_lm.egg-info/requires.txt +0 -0
  18. {code_lm-0.3.0 → code_lm-0.3.1}/src/code_lm.egg-info/top_level.txt +0 -0
  19. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/config.py +0 -0
  20. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/models/__init__.py +0 -0
  21. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/tools/__init__.py +0 -0
  22. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/tools/base.py +0 -0
  23. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/tools/directory_tools.py +0 -0
  24. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/tools/file_tools.py +0 -0
  25. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/tools/quality_tools.py +0 -0
  26. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/tools/summarizer_tool.py +0 -0
  27. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/tools/system_tools.py +0 -0
  28. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/tools/task_complete_tool.py +0 -0
  29. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/tools/test_runner.py +0 -0
  30. {code_lm-0.3.0 → code_lm-0.3.1}/src/lm_code/tools/tree_tool.py +0 -0
```diff
--- code_lm-0.3.0/src/code_lm.egg-info/PKG-INFO
+++ code_lm-0.3.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: code-lm
-Version: 0.3.0
+Version: 0.3.1
 Summary: An AI coding assistant using various LLM models.
 Home-page: https://github.com/Panagiotis897/lm-code
 Author: Panagiotis897
@@ -47,6 +47,12 @@ LM Code is a powerful AI coding assistant for your terminal supporting 17 free m
   - Test running: `pytest` and similar tools.
 - **Customizable Configurations**:
   - Easily set default models and API keys.
+- **Session Persistence**:
+  - Conversation history is saved per project directory.
+  - Resume previous sessions when you restart in the same directory.
+- **Mid-Session Commands**:
+  - `/compact` — Summarize conversation history to stay within context limits.
+  - `/model` — Switch models mid-session without restarting.
 
 ---
 
@@ -134,6 +140,9 @@ During an interactive session:
 
 - **`/exit`**: Exit the session.
 - **`/help`**: Display help information.
+- **`/compact`**: Summarize the conversation history to reduce token usage. Useful when the session gets long and you want to free up context space.
+- **`/model`**: List all available models.
+- **`/model <model_id>`**: Switch to a different model mid-session without losing conversation history.
 
 ---
 
@@ -153,10 +162,16 @@ LM Code is under active development. Contributions, feature requests, and feedba
 
 ### Changelog
 
+#### v0.3.1
+- Added `/compact` command — summarizes conversation history via LLM to stay within context limits.
+- Added `/model` command — list available models or switch models mid-session without restarting.
+- Added session persistence — conversation history is auto-saved per project directory and can be resumed on restart.
+- Updated help text to include new interactive commands.
+
 #### v0.3.0
 - Updated default model to NVIDIA Nemotron 3 Super 120B.
 - Added 17 free models from OpenRouter (previously 6).
-- Fixed `ModuleNotFoundError: No module named 'gemini_cli'` from stale entry point.
+- Fixed `ModuleNotFoundError: No module named 'gemini_cli'` from stale entry point after package rename.
 - Fixed `UnicodeDecodeError` on Windows (cp1253) for all subprocess commands.
 - Fixed API URL (`/chat/completions` was missing) causing HTML response errors.
 - Improved API error handling for empty/invalid responses.
@@ -168,20 +183,18 @@ LM Code is under active development. Contributions, feature requests, and feedba
 #### v0.2.5
 - Added more models to the model list.
 - Fixed crucial bugs from previous versions.
-- Removed Gemini models.
+- Removed legacy Gemini module.
 - Updated models to latest versions.
 
 #### v0.1.0
-- Rebranded from Gemini to LM Code.
+- Rebranded to LM Code.
 - Integrated OpenRouter as the default provider.
 - Added multi-model support.
-- Overhauled CLI commands (`gemini` -> `lmcode`).
+- Overhauled CLI commands.
 
 ---
 
 ## Future Plans
-
-- Pricing with appropriate rate limits.
 - Non-free model support.
 - MCP Server integration.
 - Additional providers.
```
```diff
--- code_lm-0.3.0/README.md
+++ code_lm-0.3.1/README.md
@@ -19,6 +19,12 @@ LM Code is a powerful AI coding assistant for your terminal supporting 17 free m
   - Test running: `pytest` and similar tools.
 - **Customizable Configurations**:
   - Easily set default models and API keys.
+- **Session Persistence**:
+  - Conversation history is saved per project directory.
+  - Resume previous sessions when you restart in the same directory.
+- **Mid-Session Commands**:
+  - `/compact` — Summarize conversation history to stay within context limits.
+  - `/model` — Switch models mid-session without restarting.
 
 ---
 
@@ -106,6 +112,9 @@ During an interactive session:
 
 - **`/exit`**: Exit the session.
 - **`/help`**: Display help information.
+- **`/compact`**: Summarize the conversation history to reduce token usage. Useful when the session gets long and you want to free up context space.
+- **`/model`**: List all available models.
+- **`/model <model_id>`**: Switch to a different model mid-session without losing conversation history.
 
 ---
 
@@ -125,10 +134,16 @@ LM Code is under active development. Contributions, feature requests, and feedba
 
 ### Changelog
 
+#### v0.3.1
+- Added `/compact` command — summarizes conversation history via LLM to stay within context limits.
+- Added `/model` command — list available models or switch models mid-session without restarting.
+- Added session persistence — conversation history is auto-saved per project directory and can be resumed on restart.
+- Updated help text to include new interactive commands.
+
 #### v0.3.0
 - Updated default model to NVIDIA Nemotron 3 Super 120B.
 - Added 17 free models from OpenRouter (previously 6).
-- Fixed `ModuleNotFoundError: No module named 'gemini_cli'` from stale entry point.
+- Fixed `ModuleNotFoundError: No module named 'gemini_cli'` from stale entry point after package rename.
 - Fixed `UnicodeDecodeError` on Windows (cp1253) for all subprocess commands.
 - Fixed API URL (`/chat/completions` was missing) causing HTML response errors.
 - Improved API error handling for empty/invalid responses.
@@ -140,20 +155,18 @@ LM Code is under active development. Contributions, feature requests, and feedba
 #### v0.2.5
 - Added more models to the model list.
 - Fixed crucial bugs from previous versions.
-- Removed Gemini models.
+- Removed legacy Gemini module.
 - Updated models to latest versions.
 
 #### v0.1.0
-- Rebranded from Gemini to LM Code.
+- Rebranded to LM Code.
 - Integrated OpenRouter as the default provider.
 - Added multi-model support.
-- Overhauled CLI commands (`gemini` -> `lmcode`).
+- Overhauled CLI commands.
 
 ---
 
 ## Future Plans
-
-- Pricing with appropriate rate limits.
 - Non-free model support.
 - MCP Server integration.
 - Additional providers.
```
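For orientation, the two new interactive commands map onto methods added to `OpenRouterModel` in this release (`switch_model` and `compact_history`; see the `openrouter.py` diff below). A minimal sketch of driving them directly, assuming an already-initialized model; the model IDs here are placeholders, not real OpenRouter identifiers:

```python
# Minimal sketch, not part of the package: exercises the new methods directly.
from rich.console import Console
from lm_code.models.openrouter import OpenRouterModel

console = Console()
model = OpenRouterModel(
    api_key="sk-or-...", console=console, model_name="example/model-a"  # placeholder ID
)

# /model <id> ends up here: swap models while keeping chat_history intact.
model.switch_model("example/model-b")

# /compact ends up here: chat_history is replaced by a one-message summary.
print(model.compact_history())  # e.g. "Compacted 12 messages down to a summary."
```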
```diff
--- code_lm-0.3.0/pyproject.toml
+++ code_lm-0.3.1/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "code-lm"
-version = "0.3.0"
+version = "0.3.1"
 authors = [
     { name="Panagiotis897", email="orion256business@gmail.com" }
 ]
```
`{code_lm-0.3.0 → code_lm-0.3.1/src/code_lm.egg-info}/PKG-INFO`: identical to the PKG-INFO diff above (the same metadata file ships at the package root and under `src/code_lm.egg-info`).
```diff
--- code_lm-0.3.0/src/code_lm.egg-info/SOURCES.txt
+++ code_lm-0.3.1/src/code_lm.egg-info/SOURCES.txt
@@ -11,9 +11,9 @@ src/code_lm.egg-info/top_level.txt
 src/lm_code/__init__.py
 src/lm_code/config.py
 src/lm_code/main.py
+src/lm_code/session.py
 src/lm_code/utils.py
 src/lm_code/models/__init__.py
-src/lm_code/models/gemini.py
 src/lm_code/models/openrouter.py
 src/lm_code/tools/__init__.py
 src/lm_code/tools/base.py
```
```diff
--- code_lm-0.3.0/src/lm_code/__init__.py
+++ code_lm-0.3.1/src/lm_code/__init__.py
@@ -2,4 +2,4 @@
 LM CLI - A command-line interface for interacting with various LLM models.
 """
 
-__version__ = "0.1.70"
+__version__ = "0.3.1"
```
```diff
--- code_lm-0.3.0/src/lm_code/main.py
+++ code_lm-0.3.1/src/lm_code/main.py
@@ -13,11 +13,13 @@ from pathlib import Path
 import yaml
 import logging
 import time
+import questionary
 
 from .models.openrouter import OpenRouterModel, list_available_models
 from .config import Config
 from .utils import count_tokens
 from .tools import AVAILABLE_TOOLS
+from .session import SessionManager
 
 # Setup console and config
 console = Console(
@@ -259,11 +261,51 @@ def start_interactive_session(model_name: str, console: Console):
         )
         return
 
+    # --- Session persistence setup ---
+    session_mgr = SessionManager()
+    project_dir = str(Path.cwd())
+    saved_session = session_mgr.load_session(project_dir)
+
+    if saved_session:
+        saved_model = saved_session.get("model_name", "?")
+        saved_time = time.strftime(
+            "%Y-%m-%d %H:%M",
+            time.localtime(saved_session.get("timestamp", 0)),
+        )
+        msg_count = len(saved_session.get("chat_history", []))
+        console.print(
+            f"\n[yellow]Found saved session[/yellow] (model: {saved_model}, "
+            f"saved: {saved_time}, {msg_count} messages)"
+        )
+        try:
+            resume = questionary.confirm(
+                "Resume previous session?",
+                default=True,
+                auto_enter=False,
+            ).ask()
+        except (KeyboardInterrupt, EOFError):
+            resume = None
+
+        if resume is None:
+            console.print("\n[yellow]Exiting.[/yellow]")
+            return
+    else:
+        resume = False
+    # ---
+
     try:
         console.print(f"\nInitializing model [bold]{model_name}[/bold]...")
         # Pass the console object to OpenRouterModel constructor
         model = OpenRouterModel(api_key=api_key, console=console, model_name=model_name)
-        console.print("[green]Model initialized successfully.[/green]\n")
+
+        # Restore session history if resuming
+        if resume and saved_session:
+            model.chat_history = saved_session.get("chat_history", model.chat_history)
+            console.print(
+                f"[green]Session restored ({len(model.chat_history)} messages).[/green]\n"
+            )
+        else:
+            console.print("[green]Model initialized successfully.[/green]\n")
 
     except Exception as e:
         console.print(
@@ -277,6 +319,7 @@ def start_interactive_session(model_name: str, console: Console):
 
     # --- Session Start Message ---
     console.print("Type '/help' for commands, '/exit' or Ctrl+C to quit.")
+    console.print(f"[dim]Current model: {model.current_model_name}[/dim]")
 
     while True:
         try:
@@ -287,6 +330,9 @@
             elif user_input.lower() == "/help":
                 show_help()
                 continue
+            elif user_input.lower().startswith("/model"):
+                _handle_model_command(user_input, model, console, SUPPORTED_MODELS)
+                continue
 
             # Display initial "thinking" status - generate handles intermediate ones
             response_text = model.generate(user_input)
@@ -299,8 +345,17 @@
                 log.warning("generate() returned None unexpectedly.")
                 continue
 
-            console.print("[bold green]Assistant:[/bold green]")
-            console.print(Markdown(response_text), highlight=True)
+            # Show compact result without markdown rendering
+            if user_input.lower().strip() == "/compact":
+                console.print(f"[cyan]{response_text}[/cyan]")
+            else:
+                console.print("[bold green]Assistant:[/bold green]")
+                console.print(Markdown(response_text), highlight=True)
+
+            # Auto-save session after each turn
+            session_mgr.save_session(
+                project_dir, model.chat_history, model.current_model_name
+            )
 
         except KeyboardInterrupt:
             console.print("\n[yellow]Session interrupted. Exiting.[/yellow]")
@@ -311,6 +366,43 @@
             )
             log.error("Error during interactive loop", exc_info=True)
 
+    # Save session on exit
+    session_mgr.save_session(project_dir, model.chat_history, model.current_model_name)
+    console.print("[dim]Session saved.[/dim]")
+
+
+def _handle_model_command(
+    user_input: str, model, console: Console, supported_models: list
+):
+    """Handle /model command to switch models mid-session."""
+    parts = user_input.strip().split(None, 1)
+    if len(parts) < 2:
+        # No model specified — show available models
+        console.print("[cyan]Available models:[/cyan]")
+        for m in supported_models:
+            current = (
+                " [bold green](current)[/bold green]"
+                if m["id"] == model.current_model_name
+                else ""
+            )
+            console.print(f"  [bold]{m['id']}[/bold]{current} — {m['description']}")
+        console.print("\nUsage: /model <model_id>")
+        return
+
+    target = parts[1].strip()
+
+    # Validate the model is in supported list
+    valid_ids = {m["id"] for m in supported_models}
+    if target not in valid_ids:
+        console.print(f"[red]Unknown model:[/red] {target}")
+        console.print("Use [bold]/model[/bold] (no argument) to list available models.")
+        return
+
+    old_model = model.current_model_name
+    model.switch_model(target)
+    console.print(f"[green]Switched model:[/green] {old_model} → [bold]{target}[/bold]")
+    console.print(f"[dim]Current model: {model.current_model_name}[/dim]")
+
 
 
 def show_help():
@@ -329,6 +421,9 @@ def show_help():
 [cyan]Interactive Commands:[/cyan]
   /exit
   /help
+  /compact          Summarize conversation history to save tokens
+  /model            List available models
+  /model <name>     Switch to a different model mid-session
 
 [cyan]CLI Commands:[/cyan]
   lmcode setup KEY
```
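The auto-save calls above delegate to `SessionManager.save_session` (see `session.py` below). Based on that payload, a saved session file under `~/.config/lm-code/sessions/<hash>.json` would look roughly like the following; the concrete values are invented for illustration:

```python
# Illustrative contents of a saved session file, mirroring the
# save_session() payload in session.py. All values are made up.
example_session = {
    "project_dir": "/home/user/projects/demo",  # resolved absolute path
    "model_name": "example/model-a",            # placeholder model ID
    "timestamp": 1718000000.0,                  # time.time() at save
    "chat_history": [
        {"role": "system", "content": "..."},
        {"role": "user", "content": "User request: add tests"},
        {"role": "assistant", "content": "Added tests in tests/test_demo.py"},
    ],
}
```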
```diff
--- code_lm-0.3.0/src/lm_code/models/openrouter.py
+++ code_lm-0.3.1/src/lm_code/models/openrouter.py
@@ -188,6 +188,99 @@ class OpenRouterModel:
         )
         return tools
 
+    # --- Model switching ---
+    def switch_model(self, model_name: str) -> None:
+        """Switch to a different model without resetting conversation history."""
+        self.current_model_name = model_name
+        log.info(f"Switched model to: {model_name}")
+
+    # --- Compact history via summarization ---
+    def compact_history(self) -> str:
+        """Summarize the conversation history to reduce token usage."""
+        if len(self.chat_history) <= 2:
+            return "Nothing to compact."
+
+        self.console.print("[yellow]Compacting conversation history...[/yellow]")
+
+        # Extract user-facing conversation (skip system message at index 0)
+        conversation_text = ""
+        for msg in self.chat_history[1:]:
+            role = msg.get("role", "?")
+            content = msg.get("content", "")
+            if role == "user":
+                # Strip the orientation context prefix for cleaner summary
+                lines = content.split("\n")
+                user_lines = []
+                capturing = False
+                for line in lines:
+                    if line.startswith("User request:"):
+                        capturing = True
+                    if capturing:
+                        user_lines.append(line)
+                if user_lines:
+                    conversation_text += f"User: {' '.join(user_lines)}\n"
+                else:
+                    conversation_text += f"User: {content[:500]}\n"
+            elif role == "assistant":
+                conversation_text += f"Assistant: {content[:1000]}\n"
+            elif role == "tool":
+                name = msg.get("name", "tool")
+                conversation_text += f"Tool({name}): {content[:500]}\n"
+
+        if not conversation_text.strip():
+            return "Nothing to compact."
+
+        summarization_prompt = (
+            "You are a summarizer. Summarize the following conversation between a user and an AI coding assistant. "
+            "Preserve all key details: what was discussed, what files were viewed/edited, "
+            "what decisions were made, and what the current state of work is. "
+            "Be concise but complete. This summary will replace the conversation history.\n\n"
+            f"Conversation:\n{conversation_text}\n\n"
+            "Provide a concise summary:"
+        )
+
+        try:
+            payload = {
+                "model": self.current_model_name,
+                "messages": [
+                    {
+                        "role": "system",
+                        "content": "You are a helpful summarizer. Return only the summary, no preamble.",
+                    },
+                    {"role": "user", "content": summarization_prompt},
+                ],
+                "temperature": 0.3,
+                "max_tokens": 1000,
+            }
+
+            with self.console.status("[yellow]Generating summary...", spinner="dots"):
+                response = requests.post(
+                    self.base_url, headers=self.headers, json=payload
+                )
+                response.raise_for_status()
+                data = response.json()
+
+            summary = data["choices"][0]["message"]["content"]
+            original_count = len(self.chat_history)
+
+            # Keep system message, replace everything else with summary
+            self.chat_history = [
+                self.chat_history[0],  # system message
+                {
+                    "role": "assistant",
+                    "content": f"[Conversation summary]\n{summary}",
+                },
+            ]
+
+            log.info(
+                f"Compacted history from {original_count} to {len(self.chat_history)} messages"
+            )
+            return f"Compacted {original_count} messages down to a summary."
+
+        except Exception as e:
+            log.error(f"Compaction failed: {e}", exc_info=True)
+            return f"Compaction failed: {e}"
+
     # --- Native Function Calling Agent Loop ---
     def generate(self, prompt: str) -> str | None:
         logging.info(
@@ -196,10 +289,11 @@
         original_user_prompt = prompt
         if prompt.startswith("/"):
             command = prompt.split()[0].lower()
-            # Handle commands like /compact here eventually
             if command in ["/exit", "/help"]:
                 logging.info(f"Handled command: {command}")
-                return None  # Or return specific help text
+                return None
+            if command == "/compact":
+                return self.compact_history()
 
         # === Step 1: Mandatory Orientation ===
         orientation_context = ""
```
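To make the compaction behavior concrete: `compact_history` keeps only the system message and folds everything else into a single assistant message. A rough before/after sketch of `chat_history`, with message contents invented for illustration:

```python
# Shape of chat_history around compact_history(); messages are invented.
before = [
    {"role": "system", "content": "You are a coding assistant..."},
    {"role": "user", "content": "User request: refactor utils.py"},
    {"role": "assistant", "content": "Done. I split count_tokens out..."},
    {"role": "tool", "name": "edit", "content": "File written."},
    # ... many more turns ...
]

after = [
    {"role": "system", "content": "You are a coding assistant..."},  # preserved
    {"role": "assistant", "content": "[Conversation summary]\nThe user asked to refactor utils.py; ..."},
]
```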
```diff
--- /dev/null
+++ code_lm-0.3.1/src/lm_code/session.py
@@ -0,0 +1,97 @@
+"""
+Session persistence for LM Code.
+Saves and loads conversation history per project directory.
+"""
+
+import hashlib
+import json
+import logging
+import os
+import time
+from pathlib import Path
+from typing import Optional, Dict, List, Any
+
+log = logging.getLogger(__name__)
+
+SESSIONS_DIR = Path.home() / ".config" / "lm-code" / "sessions"
+
+
+def _project_hash(project_dir: str) -> str:
+    """Create a stable hash from the absolute project directory path."""
+    abs_path = str(Path(project_dir).resolve())
+    return hashlib.sha256(abs_path.encode()).hexdigest()[:16]
+
+
+class SessionManager:
+    """Manages per-project session persistence."""
+
+    def __init__(self):
+        SESSIONS_DIR.mkdir(parents=True, exist_ok=True)
+
+    def _session_path(self, project_dir: str) -> Path:
+        h = _project_hash(project_dir)
+        return SESSIONS_DIR / f"{h}.json"
+
+    def save_session(
+        self,
+        project_dir: str,
+        chat_history: List[Dict[str, Any]],
+        model_name: str,
+    ) -> None:
+        """Save chat history and metadata for a project directory."""
+        data = {
+            "project_dir": str(Path(project_dir).resolve()),
+            "model_name": model_name,
+            "timestamp": time.time(),
+            "chat_history": chat_history,
+        }
+        path = self._session_path(project_dir)
+        try:
+            with open(path, "w", encoding="utf-8") as f:
+                json.dump(data, f, ensure_ascii=False, indent=2)
+            log.debug(f"Session saved to {path}")
+        except Exception as e:
+            log.error(f"Failed to save session: {e}")
+
+    def load_session(self, project_dir: str) -> Optional[Dict[str, Any]]:
+        """Load a saved session for a project directory, or None."""
+        path = self._session_path(project_dir)
+        if not path.exists():
+            return None
+        try:
+            with open(path, "r", encoding="utf-8") as f:
+                data = json.load(f)
+            log.info(f"Session loaded from {path}")
+            return data
+        except Exception as e:
+            log.error(f"Failed to load session: {e}")
+            return None
+
+    def delete_session(self, project_dir: str) -> bool:
+        """Delete a saved session. Returns True if one was deleted."""
+        path = self._session_path(project_dir)
+        if path.exists():
+            path.unlink()
+            log.info(f"Session deleted: {path}")
+            return True
+        return False
+
+    def list_sessions(self) -> List[Dict[str, Any]]:
+        """List all saved sessions with metadata."""
+        sessions = []
+        for p in SESSIONS_DIR.glob("*.json"):
+            try:
+                with open(p, "r", encoding="utf-8") as f:
+                    data = json.load(f)
+                sessions.append(
+                    {
+                        "project_dir": data.get("project_dir", "?"),
+                        "model_name": data.get("model_name", "?"),
+                        "timestamp": data.get("timestamp", 0),
+                        "message_count": len(data.get("chat_history", [])),
+                    }
+                )
+            except Exception:
+                continue
+        sessions.sort(key=lambda s: s["timestamp"], reverse=True)
+        return sessions
```
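The `SessionManager` API is self-contained, so it can be exercised outside the CLI. A minimal usage sketch, assuming the package is installed and using a throwaway history:

```python
from pathlib import Path
from lm_code.session import SessionManager

mgr = SessionManager()                 # ensures ~/.config/lm-code/sessions/ exists
project = str(Path.cwd())

# Save a toy history for this directory; files are keyed by a SHA-256 path hash.
mgr.save_session(project, [{"role": "system", "content": "..."}], "example/model-a")

saved = mgr.load_session(project)      # dict or None
if saved:
    print(saved["model_name"], len(saved["chat_history"]))

for s in mgr.list_sessions():          # newest first
    print(s["project_dir"], s["message_count"])

mgr.delete_session(project)            # True if a session file was removed
```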
```diff
--- code_lm-0.3.0/src/lm_code/utils.py
+++ code_lm-0.3.1/src/lm_code/utils.py
@@ -9,9 +9,7 @@ import json
 def count_tokens(text):
     """
     Count the number of tokens in a text string.
-
-    This is a rough estimate for Gemini 2.5 Pro, using GPT-4 tokenizer as a proxy.
-    For production, you'd want to use model-specific token counting.
+    Uses GPT-4 tokenizer as a proxy.
     """
     try:
         encoding = tiktoken.encoding_for_model("gpt-4")
```
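The diff only shows the top of `count_tokens`; for context, the GPT-4-tokenizer-as-proxy approach it describes amounts to something like the sketch below. The character-based fallback is an assumption about the unshown `except` branch, not code from the package:

```python
import tiktoken

def count_tokens_sketch(text: str) -> int:
    """Rough token count using the GPT-4 tokenizer as a proxy."""
    try:
        encoding = tiktoken.encoding_for_model("gpt-4")
        return len(encoding.encode(text))
    except Exception:
        # Assumed fallback: roughly 4 characters per token on average.
        return len(text) // 4
```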
```diff
--- code_lm-0.3.0/src/lm_code/models/gemini.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""
-Gemini model integration for the CLI tool.
-"""
-
-import logging
-import time
-from rich.console import Console
-from rich.panel import Panel
-import questionary
-
-from ..utils import count_tokens
-from ..tools import get_tool, AVAILABLE_TOOLS
-
-# Setup logging (basic config, consider moving to main.py)
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s')
-log = logging.getLogger(__name__)
-
-MAX_AGENT_ITERATIONS = 10
-CONTEXT_TRUNCATION_THRESHOLD_TOKENS = 800000  # Example token limit
-
-
-class GeminiModel:
-    """Interface for Gemini models using native function calling agentic loop."""
-
-    def __init__(self, console: Console):
-        """Initialize the Gemini model interface."""
-        self.console = console
-
-        # --- Tool Definition ---
-        self.function_declarations = None  # Tools have been removed
-        # ---
-
-        # --- System Prompt (Native Functions & Planning) ---
-        self.system_instruction = "Initialize system prompt."
-        # ---
-
-        # --- Initialize Persistent History ---
-        self.chat_history = [
-            {'role': 'user', 'parts': [self.system_instruction]},
-            {'role': 'model', 'parts': ["Okay, I'm ready. Provide the directory context and your request."]}
-        ]
-        log.info("Initialized persistent chat history.")
-        # ---
```