npcsh 1.0.19__py3-none-any.whl → 1.0.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcsh/_state.py CHANGED
@@ -2198,10 +2198,10 @@ def execute_command(
  active_model = npc_model or state.chat_model
  active_provider = npc_provider or state.chat_provider
  if state.current_mode == 'agent':
- print('# of parsed commands: ', len(commands))
- print('Commands:' '\n'.join(commands))
+ #print('# of parsed commands: ', len(commands))
+ #print('Commands:' '\n'.join(commands))
  for i, cmd_segment in enumerate(commands):
- render_markdown(f'- executing command {i+1}/{len(commands)}')
+ render_markdown(f'- Executing command {i+1}/{len(commands)}')
  is_last_command = (i == len(commands) - 1)
  stream_this_segment = state.stream_output and not is_last_command
  try:
@@ -2353,8 +2353,9 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
  ctx_path = os.path.join(team_dir, "team.ctx")
  folder_context = input("Enter a short description for this project/team (optional): ").strip()
  team_ctx_data = {
- "forenpc": default_forenpc_name, "model": forenpc_model,
- "provider": forenpc_provider, "api_key": None, "api_url": None,
+ "forenpc": default_forenpc_name,
+ "model": forenpc_model,
+ "provider": forenpc_provider,
  "context": folder_context if folder_context else None
  }
  use_jinxs = input("Use global jinxs folder (g) or copy to this project (c)? [g/c, default: g]: ").strip().lower()
npcsh/corca.py CHANGED
@@ -163,97 +163,131 @@ class MCPClientNPC:
  pass
  self.session = None

- def execute_command_corca(command: str, state: ShellState, command_history) -> Tuple[ShellState, Any]:
- mcp_tools = []
-
- if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
- mcp_tools = state.mcp_client.available_tools_llm
- else:
- cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
-
- active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
-
- # Get the initial response with tools available but don't auto-process
- response_dict = get_llm_response(
- prompt=command,
- model=active_npc.model or state.chat_model,
- provider=active_npc.provider or state.chat_provider,
- npc=state.npc,
- messages=state.messages,
- tools=mcp_tools,
- auto_process_tool_calls=False,
- stream=state.stream_output
- )
-
- # Process the streaming response to extract tool calls
- stream_response = response_dict.get('response')
- messages = response_dict.get('messages', state.messages)
-
- # Collect the streamed content and extract tool calls
+
+ def process_mcp_stream(stream_response, active_npc):
+ """Process streaming response and extract content + tool calls for both Ollama and OpenAI providers"""
  collected_content = ""
  tool_calls = []
- current_tool_call = None

- print("DEBUG: Processing stream response...")
+ interrupted = False

- if hasattr(stream_response, '__iter__'):
- # Process the stream to extract content and tool calls
- for chunk in stream_response:
- print(f"DEBUG: Chunk type: {type(chunk)}")
-
- if hasattr(chunk, 'choices') and chunk.choices:
- delta = chunk.choices[0].delta
-
- if hasattr(delta, 'content') and delta.content:
- collected_content += delta.content
- print(delta.content, end='', flush=True)
-
- if hasattr(delta, 'tool_calls') and delta.tool_calls:
- for tool_call_delta in delta.tool_calls:
- print(f"DEBUG: Tool call delta: {tool_call_delta}")
+ # Save cursor position at the start
+ sys.stdout.write('\033[s') # Save cursor position
+ sys.stdout.flush()
+ try:
+ for chunk in stream_response:
+ if hasattr(active_npc, 'provider') and active_npc.provider == "ollama" and 'gpt-oss' not in active_npc.model:
+ if hasattr(chunk, 'message') and hasattr(chunk.message, 'tool_calls') and chunk.message.tool_calls:
+ for tool_call in chunk.message.tool_calls:
+ tool_call_data = {
+ 'id': getattr(tool_call, 'id', ''),
+ 'type': 'function',
+ 'function': {
+ 'name': getattr(tool_call.function, 'name', '') if hasattr(tool_call, 'function') else '',
+ 'arguments': getattr(tool_call.function, 'arguments', {}) if hasattr(tool_call, 'function') else {}
+ }
+ }

- if hasattr(tool_call_delta, 'index'):
- idx = tool_call_delta.index
-
- # Initialize tool call if needed
- while len(tool_calls) <= idx:
- tool_calls.append({
- 'id': '',
- 'type': 'function',
- 'function': {
- 'name': '',
- 'arguments': ''
- }
- })
-
- # Update tool call data
- if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
- tool_calls[idx]['id'] = tool_call_delta.id
-
- if hasattr(tool_call_delta, 'function'):
- if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
- tool_calls[idx]['function']['name'] = tool_call_delta.function.name
+ if isinstance(tool_call_data['function']['arguments'], str):
+ try:
+ tool_call_data['function']['arguments'] = json.loads(tool_call_data['function']['arguments'])
+ except json.JSONDecodeError:
+ tool_call_data['function']['arguments'] = {'raw': tool_call_data['function']['arguments']}
+
+ tool_calls.append(tool_call_data)
+
+ if hasattr(chunk, 'message') and hasattr(chunk.message, 'content') and chunk.message.content:
+ collected_content += chunk.message.content
+ print(chunk.message.content, end='', flush=True)
+
+ # Handle OpenAI-style responses (including gpt-oss)
+ else:
+ if hasattr(chunk, 'choices') and chunk.choices:
+ delta = chunk.choices[0].delta
+
+ if hasattr(delta, 'content') and delta.content:
+ collected_content += delta.content
+ print(delta.content, end='', flush=True)
+
+ if hasattr(delta, 'tool_calls') and delta.tool_calls:
+ for tool_call_delta in delta.tool_calls:
+ if hasattr(tool_call_delta, 'index'):
+ idx = tool_call_delta.index

- if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
- tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
-
- print(f"\nDEBUG: Final collected_content: {collected_content}")
- print(f"DEBUG: Final tool_calls: {tool_calls}")
-
- # Update messages with the assistant response
- state.messages = messages
- if collected_content or tool_calls:
- assistant_message = {"role": "assistant", "content": collected_content}
- if tool_calls:
- assistant_message["tool_calls"] = tool_calls
- state.messages.append(assistant_message)
+ while len(tool_calls) <= idx:
+ tool_calls.append({
+ 'id': '',
+ 'type': 'function',
+ 'function': {'name': '', 'arguments': ''}
+ })
+
+ if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
+ tool_calls[idx]['id'] = tool_call_delta.id
+
+ if hasattr(tool_call_delta, 'function'):
+ if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
+ tool_calls[idx]['function']['name'] = tool_call_delta.function.name
+
+ if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
+ tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
+ except KeyboardInterrupt:
+ interrupted = True
+ print('\n⚠️ Stream interrupted by user')
+ if interrupted:
+ collected_content += "\n\n[⚠️ Response interrupted by user]"
+ # Always restore cursor position and clear everything after it
+ sys.stdout.write('\033[u') # Restore cursor position
+ sys.stdout.write('\033[J') # Clear from cursor down
+ sys.stdout.flush()

- return state, {
- "output": collected_content,
- "tool_calls": tool_calls,
- "messages": state.messages
- }
-
+ # Now render the markdown at the restored position
+ render_markdown(collected_content)
+ print('\n')
+ return collected_content, tool_calls
+
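The escape sequences in `process_mcp_stream` ('\033[s', '\033[u', '\033[J') are standard VT100 controls: save the cursor position, restore it, and clear from the cursor to the end of the screen. That lets the shell stream raw tokens for responsiveness, then overwrite them with a rendered version. A minimal standalone sketch of the same trick, not taken from the package; it assumes a terminal that honors VT100 escapes and that the streamed text has not scrolled the saved position off-screen:

```python
import sys
import time

# Save the cursor position before streaming raw text.
sys.stdout.write('\033[s')
sys.stdout.flush()

# Simulate a token stream printed as it arrives.
for token in ["Streaming ", "raw ", "tokens..."]:
    sys.stdout.write(token)
    sys.stdout.flush()
    time.sleep(0.2)

# Jump back to the saved position, clear everything below it,
# and print the "final" rendered form in its place.
sys.stdout.write('\033[u')   # restore saved cursor position
sys.stdout.write('\033[J')   # clear from cursor to end of screen
sys.stdout.flush()
print("**Streaming raw tokens...** (re-rendered)")
```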
+ def execute_command_corca(command: str, state: ShellState, command_history) -> Tuple[ShellState, Any]:
+ mcp_tools = []
+
+ if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
+ mcp_tools = state.mcp_client.available_tools_llm
+ else:
+ cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
+
+ active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
+
+ response_dict = get_llm_response(
+ prompt=command,
+ model=active_npc.model or state.chat_model,
+ provider=active_npc.provider or state.chat_provider,
+ npc=state.npc,
+ messages=state.messages,
+ tools=mcp_tools,
+ auto_process_tool_calls=False,
+ stream=state.stream_output
+ )
+
+ stream_response = response_dict.get('response')
+ messages = response_dict.get('messages', state.messages)
+
+ print("DEBUG: Processing stream response...")
+ collected_content, tool_calls = process_mcp_stream(stream_response, active_npc)
+
+ print(f"\nDEBUG: Final collected_content: {collected_content}")
+ print(f"DEBUG: Final tool_calls: {tool_calls}")
+
+ state.messages = messages
+ if collected_content or tool_calls:
+ assistant_message = {"role": "assistant", "content": collected_content}
+ if tool_calls:
+ assistant_message["tool_calls"] = tool_calls
+ state.messages.append(assistant_message)
+
+ return state, {
+ "output": collected_content,
+ "tool_calls": tool_calls,
+ "messages": state.messages
+ }
+
  def print_corca_welcome_message():
  turq = "\033[38;2;64;224;208m"
  chrome = "\033[38;2;211;211;211m"
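For readers new to streamed tool calling: OpenAI-style chunks deliver each tool call as a series of deltas keyed by `index`, with `function.arguments` arriving as JSON fragments that only parse once concatenated. That is the accumulation pattern the hunk above implements against SDK objects; here is a minimal sketch using plain dicts in place of SDK chunk objects (illustrative only, not the package's code):

```python
import json

def accumulate_tool_calls(chunks):
    """Accumulate OpenAI-style streamed tool-call deltas into whole calls.

    Each chunk's delta may carry a tool_calls list whose entries have an
    `index`, an optional `id`, and partial function name/argument strings.
    """
    tool_calls = []
    for chunk in chunks:
        for delta_call in (chunk["choices"][0]["delta"].get("tool_calls") or []):
            idx = delta_call["index"]
            # Grow the list so the delta's index is addressable.
            while len(tool_calls) <= idx:
                tool_calls.append({"id": "", "function": {"name": "", "arguments": ""}})
            if delta_call.get("id"):
                tool_calls[idx]["id"] = delta_call["id"]
            fn = delta_call.get("function") or {}
            tool_calls[idx]["function"]["name"] += fn.get("name") or ""
            tool_calls[idx]["function"]["arguments"] += fn.get("arguments") or ""
    # Arguments arrive as JSON fragments; they only parse once complete.
    for call in tool_calls:
        call["function"]["arguments"] = json.loads(call["function"]["arguments"])
    return tool_calls

# The arguments string is split mid-key across two chunks on purpose.
chunks = [
    {"choices": [{"delta": {"tool_calls": [{"index": 0, "id": "call_1",
        "function": {"name": "get_weather", "arguments": '{"ci'}}]}}]},
    {"choices": [{"delta": {"tool_calls": [{"index": 0,
        "function": {"arguments": 'ty": "Paris"}'}}]}}]},
]
print(accumulate_tool_calls(chunks))
# [{'id': 'call_1', 'function': {'name': 'get_weather', 'arguments': {'city': 'Paris'}}}]
```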
@@ -313,11 +347,13 @@ def process_corca_result(
  tool_responses = []
  for tool_call in tool_calls:
  tool_name = tool_call['function']['name']
- tool_args_str = tool_call['function']['arguments']
+ tool_args = tool_call['function']['arguments']
  tool_call_id = tool_call['id']

  try:
- tool_args = json.loads(tool_args_str) if tool_args_str.strip() else {}
+ if isinstance(tool_args, str):
+ tool_args = json.loads(tool_args) if tool_args.strip() else {}
+
  except json.JSONDecodeError:
  tool_args = {}

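The new `isinstance` guard exists because the two provider paths hand back different shapes: the Ollama branch yields `arguments` as a dict, while OpenAI-style streams yield a JSON string that may be empty or malformed. One way to fold both branches into a single helper (a sketch, not part of npcsh):

```python
import json

def coerce_tool_args(raw):
    """Normalize tool-call arguments that may arrive as a dict (Ollama)
    or as a JSON string, possibly empty or malformed (OpenAI-style)."""
    if isinstance(raw, dict):
        return raw
    if isinstance(raw, str) and raw.strip():
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            return {"raw": raw}  # keep the unparseable text for debugging
    return {}

print(coerce_tool_args('{"path": "a.txt"}'))  # {'path': 'a.txt'}
print(coerce_tool_args({"path": "a.txt"}))    # already a dict, passed through
print(coerce_tool_args(""))                   # {}
```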
@@ -389,45 +425,10 @@ def process_corca_result(
  follow_up_tool_calls = []

  if result_state.stream_output:
- collected_content = ""
- follow_up_tool_calls = []
-
  if hasattr(follow_up_content, '__iter__'):
- for chunk in follow_up_content:
- if hasattr(chunk, 'choices') and chunk.choices:
- delta = chunk.choices[0].delta
-
- if hasattr(delta, 'content') and delta.content:
- collected_content += delta.content
- print(delta.content, end='', flush=True)
-
- if hasattr(delta, 'tool_calls') and delta.tool_calls:
- for tool_call_delta in delta.tool_calls:
- if hasattr(tool_call_delta, 'index'):
- idx = tool_call_delta.index
-
- while len(follow_up_tool_calls) <= idx:
- follow_up_tool_calls.append({
- 'id': '',
- 'type': 'function',
- 'function': {
- 'name': '',
- 'arguments': ''
- }
- })
-
- if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
- follow_up_tool_calls[idx]['id'] = tool_call_delta.id
-
- if hasattr(tool_call_delta, 'function'):
- if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
- follow_up_tool_calls[idx]['function']['name'] = tool_call_delta.function.name
-
- if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
- follow_up_tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
+ collected_content, follow_up_tool_calls = process_mcp_stream(follow_up_content, active_npc)
  else:
- collected_content = str(follow_up_content)
-
+ collected_content = str(follow_up_content)
  follow_up_content = collected_content
  else:
  if follow_up_messages:
@@ -452,11 +453,11 @@ def process_corca_result(
  print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))
  for tool_call in follow_up_tool_calls:
  tool_name = tool_call['function']['name']
- tool_args_str = tool_call['function']['arguments']
+ tool_args = tool_call['function']['arguments']
  tool_call_id = tool_call['id']

  try:
- tool_args = json.loads(tool_args_str) if tool_args_str.strip() else {}
+ tool_args = json.loads(tool_args) if tool_args.strip() else {}
  except json.JSONDecodeError:
  tool_args = {}

@@ -648,7 +649,7 @@ def enter_corca_mode(command: str,
  if state.npc:
  prompt_npc_name = state.npc.name

- prompt_str = f"{colored(os.path.basename(state.current_path), 'blue')}:corca:{prompt_npc_name}🦌> "
+ prompt_str = f"{colored(os.path.basename(state.current_path), 'blue')}:{prompt_npc_name}🦌> "
  prompt = readline_safe_prompt(prompt_str)

  user_input = get_multiline_input(prompt).strip()
@@ -680,14 +681,31 @@ def enter_corca_mode(command: str,

  render_markdown("\n# Exiting Corca Mode")
  return {"output": "", "messages": state.messages}
-
  def main():
  parser = argparse.ArgumentParser(description="Corca - An MCP-powered npcsh shell.")
  parser.add_argument("--mcp-server-path", type=str, help="Path to an MCP server script to connect to.")
  args = parser.parse_args()

  command_history, team, default_npc = setup_shell()
-
+
+ # Override default_npc with corca priority
+ project_team_path = os.path.abspath('./npc_team/')
+ global_team_path = os.path.expanduser('~/.npcsh/npc_team/')
+
+ project_corca_path = os.path.join(project_team_path, "corca.npc")
+ global_corca_path = os.path.join(global_team_path, "corca.npc")
+
+ if os.path.exists(project_corca_path):
+ default_npc = NPC(file=project_corca_path,
+ db_conn=command_history.engine)
+ elif os.path.exists(global_corca_path):
+ default_npc = NPC(file=global_corca_path,
+ db_conn=command_history.engine)
+ print('Team Default: ', team.provider, team.model)
+ if default_npc.model is None:
+ default_npc.model = team.model
+ if default_npc.provider is None:
+ default_npc.provider = team.provider
  from npcsh._state import initial_state
  initial_shell_state = initial_state
  initial_shell_state.team = team
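The lookup order above prefers a project-local `./npc_team/corca.npc` over the global `~/.npcsh/npc_team/corca.npc`, then backfills model and provider from the team defaults. A compact sketch of the same precedence chain (illustrative helper, not in the package):

```python
import os

def resolve_npc_file(name="corca.npc"):
    """Mirror the lookup order above: a project-local npc_team/ file wins
    over the global ~/.npcsh/npc_team/ copy; None if neither exists."""
    candidates = [
        os.path.abspath(os.path.join("./npc_team/", name)),
        os.path.expanduser(os.path.join("~/.npcsh/npc_team/", name)),
    ]
    for path in candidates:
        if os.path.exists(path):
            return path
    return None

print(resolve_npc_file())  # path to the winning corca.npc, or None
```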
@@ -704,6 +722,5 @@ def main():
  }

  enter_corca_mode(**kwargs)
-
  if __name__ == "__main__":
  main()
npcsh/guac.py CHANGED
@@ -9,6 +9,7 @@ import argparse
  import importlib.metadata
  import matplotlib.pyplot as plt

+ import logging
  plt.ioff()

  import platform
@@ -28,7 +29,7 @@ import sys
  from npcpy.memory.command_history import CommandHistory, start_new_conversation
  from npcpy.npc_compiler import Team, NPC
  from npcpy.llm_funcs import get_llm_response
- from npcpy.npc_sysenv import render_markdown,print_and_process_stream_with_markdown
+ from npcpy.npc_sysenv import render_markdown,print_and_process_stream


  from npcsh._state import (
@@ -434,28 +435,15 @@ def _handle_guac_refresh(state: ShellState, project_name: str, src_dir: Path):
  print(f"Error during /refresh: {e}")
  traceback.print_exc()
  def setup_guac_mode(config_dir=None, plots_dir=None, npc_team_dir=None, lang='python', default_mode_choice=None):
- base_dir = Path.cwd()
-
- if config_dir is None:
- config_dir = base_dir / ".guac"
- else:
- config_dir = Path(config_dir)
-
- if plots_dir is None:
- plots_dir = base_dir / "plots"
- else:
- plots_dir = Path(plots_dir)
-
+ base_dir = Path.cwd()
  if npc_team_dir is None:
  npc_team_dir = base_dir / "npc_team"
  else:
  npc_team_dir = Path(npc_team_dir)
-
- for p in [config_dir, plots_dir, npc_team_dir]:
- p.mkdir(parents=True, exist_ok=True)
-
+ npc_team_dir.mkdir(parents=True, exist_ok=True)
  # Setup Guac workspace
  workspace_dirs = _get_workspace_dirs(npc_team_dir)
+
  _ensure_workspace_dirs(workspace_dirs)

  # Rest of existing setup_guac_mode code...
@@ -483,14 +471,8 @@ def setup_guac_mode(config_dir=None, plots_dir=None, npc_team_dir=None, lang='py
  package_root = str(base_dir)
  package_name = "project"

- project_name = existing_ctx.get("GUAC_PROJECT_NAME")
  project_description = existing_ctx.get("GUAC_PROJECT_DESCRIPTION")
-
- if project_name is None:
- try:
- project_name = input("Enter the project name: ").strip() or "unknown_project"
- except EOFError:
- project_name = "unknown_project"
+
  if project_description is None:
  try:
  project_description = input("Enter a short description of the project: ").strip() or "No description provided."
@@ -500,35 +482,68 @@ def setup_guac_mode(config_dir=None, plots_dir=None, npc_team_dir=None, lang='py
  updated_ctx = {**existing_ctx}
  updated_ctx.update({
  "GUAC_TEAM_NAME": "guac_team",
- "GUAC_DESCRIPTION": f"A team of NPCs specialized in {lang} analysis for project {project_name}",
+ "GUAC_DESCRIPTION": f"A team of NPCs specialized in {lang} analysis for project {package_name}",
  "GUAC_FORENPC": "guac",
- "GUAC_PROJECT_NAME": project_name,
  "GUAC_PROJECT_DESCRIPTION": project_description,
  "GUAC_LANG": lang,
  "GUAC_PACKAGE_ROOT": package_root,
  "GUAC_PACKAGE_NAME": package_name,
  "GUAC_WORKSPACE_PATHS": {k: str(v) for k, v in workspace_dirs.items()},
  })
-
+
+ pkg_root_path = Path(package_root)
+ try:
+ pkg_root_path.mkdir(parents=True, exist_ok=True)
+ package_dir = pkg_root_path / package_name
+
+ if not package_dir.exists():
+ package_dir.mkdir(parents=True, exist_ok=True)
+ (package_dir / "__init__.py").write_text("# package initialized by setup_guac_mode\n")
+ logging.info("Created minimal package directory at %s", package_dir)
+ except Exception as e:
+ logging.warning("Could not ensure package root/dir: %s", e)
  with open(team_ctx_path, "w") as f:
  yaml.dump(updated_ctx, f, default_flow_style=False)
  print("Updated team.ctx with GUAC-specific information.")

+
+
+ setup_py_path = pkg_root_path / "setup.py"
+
+
+ try:
+ if not setup_py_path.exists():
+ setup_content = f'''
+ from setuptools import setup, find_packages
+ setup(
+ name="{package_name}",
+ version="{existing_ctx.get("GUAC_PACKAGE_VERSION", "0.0.0")}",
+ description="{project_description.replace('"', '\\"')}",
+ packages=find_packages(),
+ include_package_data=True,
+ install_requires=[],
+ )
+ '''
+ setup_py_path.write_text(setup_content)
+ logging.info("Created minimal setup.py at %s", setup_py_path)
+ except Exception as e:
+ logging.warning("Could not write setup.py: %s", e)
+
  default_mode_val = default_mode_choice or "agent"
  setup_npc_team(npc_team_dir, lang)

+
+
  print(f"\nGuac mode configured for package: {package_name} at {package_root}")
  print(f"Workspace created at: {workspace_dirs['workspace']}")

  return {
  "language": lang,
  "package_root": Path(package_root),
- "config_path": config_dir / "config.json",
  "plots_dir": plots_dir,
  "npc_team_dir": npc_team_dir,
  "config_dir": config_dir,
  "default_mode": default_mode_val,
- "project_name": project_name,
  "project_description": project_description,
  "package_name": package_name
  }
@@ -944,33 +959,11 @@ def _handle_file_drop(input_text: str, npc_team_dir: Path) -> Tuple[str, List[st

  if not file_paths:

- return input_text, processed_files
+ return input_text, processed_files, file_paths

  modified_input = input_text
- for file_path in file_paths:
- expanded_path = Path(file_path.replace('~', str(Path.home()))).resolve()
-
- if expanded_path.exists() and expanded_path.is_file():
- workspace_dirs = _get_workspace_dirs(npc_team_dir)
- _ensure_workspace_dirs(workspace_dirs)
-
- ext = expanded_path.suffix[1:].upper() if expanded_path.suffix else "OTHERS"
- category = EXTENSION_MAP.get(ext, "data_inputs")
- target_dir = workspace_dirs.get(category, workspace_dirs["data_inputs"])
-
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- new_filename = f"{timestamp}_{expanded_path.name}"
- target_path = target_dir / new_filename
-
- try:
- shutil.copy2(expanded_path, target_path)
- processed_files.append(str(target_path))
- modified_input = modified_input.replace(file_path, str(target_path))
- print(f"📁 Copied {expanded_path.name} to workspace: {target_path}")
- except Exception as e:
- print(f"[ERROR] Failed to copy file: {e}")
-
- return modified_input, processed_files
+
+ return modified_input, processed_files, file_paths


  def _capture_plot_state(session_id: str, db_path: str, npc_team_dir: Path):
@@ -1125,19 +1118,22 @@ def _save_matplotlib_figures(npc_team_dir: Path) -> List[str]:

  return saved_figures

+ import sys
+ from io import StringIO
+ from contextlib import redirect_stdout, redirect_stderr

- def _run_agentic_mode(command: str,
- state: ShellState,
- locals_dict: Dict[str, Any],
+ def _run_agentic_mode(command: str,
+ state: ShellState,
+ locals_dict: Dict[str, Any],
  npc_team_dir: Path) -> Tuple[ShellState, Any]:
  """Run agentic mode with continuous iteration based on progress"""
- max_iterations = 10 # Higher maximum as a safety limit
+ max_iterations = 3 # low maximum as a safety limit
  iteration = 0
  full_output = []
  current_command = command
  consecutive_failures = 0
- max_consecutive_failures = 2
-
+ max_consecutive_failures = 3
+
  # Build context of existing variables
  existing_vars_context = "EXISTING VARIABLES IN ENVIRONMENT:\n"
  for var_name, var_value in locals_dict.items():
@@ -1150,52 +1146,121 @@ def _run_agentic_mode(command: str,
  existing_vars_context += f"- {var_name} ({var_type}): {var_repr}\n"
  except:
  existing_vars_context += f"- {var_name} ({type(var_value).__name__}): <unrepresentable>\n"
-
+ previous_code = ''
+ next_step = ''
+ steps = []
  while iteration < max_iterations and consecutive_failures < max_consecutive_failures:
  iteration += 1
  print(f"\n🔄 Agentic iteration {iteration}")
-
+
  prompt = f"""
- USER REQUEST: {current_command}
-
+ USER REQUEST: {current_command} {next_step}
+
+ Here is the existing variable context:
+
+ ```
  {existing_vars_context}
+ ```
+ PREVIOUS ATTEMPTS: ```{full_output[-1] if full_output else 'None'}```
+
+ DO NOT SIMPLY COPY A PREVIOUS ATTEMPT.
+
+ Your goal is to generate Python code that BUILDS ON EXISTING VARIABLES to accomplish this task: {current_command}, with this next step planned: `{next_step} `
+

- PREVIOUS ATTEMPTS: {full_output[-1] if full_output else 'None'}
+ You will notice in the local envs that there are functions for reading, editing, and loading files.
+ You should use these to your advantage as they will help you to clearly understand the user's system best.
+
+ Here are all the previous steps: {steps}

- Generate Python code that BUILDS ON EXISTING VARIABLES to accomplish this task.
  DO NOT redefine variables that already exist unless absolutely necessary.
  Use the existing variables and add/modify as needed.
- Be sure to generate logs and information that oncne executed provide us with enough information to keep moving forward.
- log variables and behaviors so we can pinpoint fixes clearly rather than getting stufck in nonsensical problematic loops.
-
+ Be sure to generate logs and information that once executed provide us with enough information to keep moving forward.
+ log variables and behaviors so we can pinpoint fixes clearly rather than getting stuck in nonsensical problematic loops.

  Provide ONLY executable Python code without any explanations or markdown formatting.
- Focus on incremental changes rather than rewriting everything. Do not re-write any functions that are currently within the existing vars contxt or which appear to have no need to be changed.
+ Focus on incremental changes rather than rewriting everything. Do not re-write any functions that are currently within the existing vars context or which appear to have no need to be changed.

  Do not include any leading ```python. Begin directly with the code.
+ Do not write your code to include a __main__ check or portion unless the user asks.
+ These should be functional components and building blocks that you and the user will take and build a great
+ library of modules. Keep things straightforward and do not do unnecessary exception handling unless requested.
+ Failing fast in research is important and so it is necessary to
+ No try except blocks unless requested.
+ Determine and log information in a way that helps us move forward rather than by exception handling.
+ Do not simply generate code that resembles the previous code.
+ While this code may one day be `production` code with such error handling,
+ at the moment, we are simply in the business of experimentation.
+ Do not use the python `input()` function. if you have a question, ask directly by typing <request_for_input> request </request_for_input>
+
+ users may ask you to edit code directly. do this by loading the code in and evaluating it. once it is evaluated, you may attempt to write changes to it.
+
+ Always evaluate before attempting to fix. Read first. Gather information. Look at files. This will not be your final code, this is just part of
+ an ongoing workflow.
+
+
+ If a user is asking for help debugging, it's better to figure out what is wrong by attempting to run it yourself, and if they do not prefer that,
+ then it's best to use static parsing methods and arguments based on deduction rather than attempting to just fix everything over and over.
+
+ Do not over-complicate the code.
+
+ Do not include any '__name__'=='__main__' block.
  """
+ #

- llm_response = get_llm_response(prompt,
- npc=state.npc,
- stream=True)
-
+
+ llm_response = get_llm_response(prompt,
+ npc=state.npc,
+ stream=True,
+ messages=state.messages)

- generated_code = print_and_process_stream_with_markdown(llm_response.get('response'),
- state.npc.model,
- state.npc.provider,
- show=True)
+ generated_code = print_and_process_stream(llm_response.get('response'),
+ state.npc.model,
+ state.npc.provider
+ )

+
+ state.messages.append({'role':'user', 'content':current_command })
+ state.messages.append({'role':'assistant', 'content': generated_code})
+
+ if '<request_for_input>' in generated_code:
+ generated_code = generated_code.split('>')[1].split('<')[0]
+ user_feedback = input("\n🤔 Agent requests feedback (press Enter to continue or type your input): ").strip()
+
+ current_command = f"{current_command} - User feedback: {user_feedback}"
+ max_iterations += int(max_iterations/2)
+
+ continue
+
  if generated_code.startswith('```python'):
  generated_code = generated_code[len('```python'):].strip()
  if generated_code.endswith('```'):
  generated_code = generated_code[:-3].strip()

- print(f"\n# Generated Code (Iteration {iteration}):\n---\n{generated_code}\n---\n")
-
+ #print(f"\n# Generated Code (Iteration {iteration}):\n---\n{generated_code}\n---\n")
+
  try:
- state, exec_output = execute_python_code(generated_code, state, locals_dict)
- full_output.append(f"Iteration {iteration}:\nCode:\n{generated_code}\nOutput:\n{exec_output}")
-
+ # Capture stdout/stderr during execution
+ stdout_capture = StringIO()
+ stderr_capture = StringIO()
+
+ with redirect_stdout(stdout_capture), redirect_stderr(stderr_capture):
+ state, exec_output = execute_python_code(generated_code,
+ state,
+ locals_dict)
+
+ captured_stdout = stdout_capture.getvalue()
+ captured_stderr = stderr_capture.getvalue()
+ print(exec_output)
+
+ if captured_stdout:
+ print("\n📤 Captured stdout:\n", captured_stdout)
+ if captured_stderr:
+ print("\n❌ Captured stderr:\n", captured_stderr)
+
+ combined_output = f"{exec_output}\nstdout:\n{captured_stdout}\nstderr:\n{captured_stderr}"
+ full_output.append(f"Iteration {iteration}:\nCode:\n{generated_code}\nOutput:\n{combined_output}")
+
  # Update the context with new variables
  new_vars = []
  for var_name, var_value in locals_dict.items():
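`redirect_stdout` and `redirect_stderr` swap `sys.stdout`/`sys.stderr` for in-memory buffers, so whatever the generated code prints can be replayed to the model in the next prompt. A minimal sketch of the pattern; note it only captures Python-level writes, not output from subprocesses or C extensions that write to file descriptor 1 directly:

```python
from io import StringIO
from contextlib import redirect_stdout, redirect_stderr

def run_captured(code, env):
    """Exec a code string while capturing its stdout/stderr, the same
    shape of capture the loop above wraps around execute_python_code."""
    out, err = StringIO(), StringIO()
    with redirect_stdout(out), redirect_stderr(err):
        exec(code, env)
    return out.getvalue(), err.getvalue()

stdout_text, stderr_text = run_captured("print('hello from generated code')", {})
print("captured:", stdout_text.strip())  # captured: hello from generated code
```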
@@ -1203,66 +1268,82 @@ def _run_agentic_mode(command: str,
  var_name not in existing_vars_context and
  var_name not in ['In', 'Out', 'exit', 'quit', 'get_ipython']):
  new_vars.append(var_name)
-
+
  if new_vars:
  existing_vars_context += f"\nNEW VARIABLES CREATED: {', '.join(new_vars)}\n"
-
+
  analysis_prompt = f"""
- CODE EXECUTION RESULTS: {exec_output}
+ CODE EXECUTION RESULTS: {combined_output}

  EXISTING VARIABLES: {existing_vars_context}
+
+ EXECUTED_CODE: {generated_code}
+
+ PREVIOUS_CODE: {previous_code}

+ PREVIOUS ATTEMPTS: ```{full_output[-3:] if full_output else 'None'}```
+
+ Here are the steps so far: {steps}
+
  ANALYSIS:
- - Is there MEANINGFUL PROGRESS? Return 'progress' if making good progress
+ - Is there MEANINGFUL PROGRESS? Return 'progress' if making good progress. If the previous code and current executed code are essentially accomplishing the same thing, that is not progress. If the steps have been too similar or not improved, then consider it a problem.
  - Is there a PROBLEM? Return 'problem' if stuck or error occurred
-
- - Return ONLY one of these words followed by a brief explanation.
+ - Is there an ambiguity that should be resolved? Return 'question'.
+ - Is the analysis complete enough to get feedback from the user? If it's pretty much done, return 'complete'
+ - Return ONLY one of these words followed by a brief explanation to take the next step forward.
  """
-
  analysis_response = get_llm_response(analysis_prompt,
- model=state.chat_model,
- provider=state.chat_provider,
- npc=state.npc,
+ npc=state.npc,
  stream=False)
-
+
  analysis = analysis_response.get("response", "").strip().lower()
- print(f"\n# Analysis:\n{analysis}")
-
+ next_step = analysis[8:]
+ state.messages.append({'role':'assistant', 'content':f'''- Is there MEANINGFUL PROGRESS? Is there a PROBLEM? Is there an ambiguity that should be resolved?
+ Indeed: {analysis} '''})
+ print(f"\n# Analysis:\n{analysis}")
+
+ previous_code = generated_code
+
  if analysis.startswith('complete'):
  print("✅ Task completed successfully!")
  break
+ if analysis.startswith('question'):
+ print('Please help answer')
+ break
  elif analysis.startswith('progress'):
  consecutive_failures = 0 # Reset failure counter on progress
- print("➡️ Making progress, continuing to next iteration...")
- # Continue to next iteration
+ print("➡️ Making progress, continuing to next iteration...")
+ current_command = next_step
  elif analysis.startswith('problem'):
- consecutive_failures += 1
- print(f"⚠️ Problem detected ({consecutive_failures}/{max_consecutive_failures} consecutive failures)")
-
- user_feedback = input("\n🤔 Agent requests feedback (press Enter to continue or type your response): ").strip()
- if user_feedback:
- current_command = f"{current_command} - User feedback: {user_feedback}"
- elif consecutive_failures >= max_consecutive_failures:
- print("❌ Too many consecutive failures, stopping iteration")
- break
+
+ print(f"⚠️ Problem detected ({consecutive_failures}/{max_consecutive_failures} consecutive failures)")
+
+ current_command = f"{current_command} - PROBLEM in addressing it: {analysis}"
+ max_iterations += int(max_iterations/2)
+ continue
  else:
  # Default behavior for unexpected responses
  consecutive_failures += 1
  print(f"❓ Unexpected analysis response, counting as failure ({consecutive_failures}/{max_consecutive_failures})")
-
+ if consecutive_failures >= max_consecutive_failures:
+ print("❌ Too many consecutive failures, stopping iteration.")
+ break
+ except KeyboardInterrupt as e:
+ user_input = input('User input: ')
+ current_command = current_command+user_input
  except Exception as e:
  error_msg = f"Error in iteration {iteration}: {str(e)}"
  print(error_msg)
  full_output.append(error_msg)
  consecutive_failures += 1
  current_command = f"{current_command} - Error: {str(e)}"
-
+
+
  if consecutive_failures >= max_consecutive_failures:
- print("❌ Too many consecutive errors, stopping iteration")
+ print("❌ Too many consecutive errors, stopping iteration.")
  break
-
- return state, "# Agentic execution completed\n" + '\n'.join(full_output)

+ return state, "# Agentic execution completed\n" + '\n'.join(full_output)

  def print_guac_bowl():
  bowl_art = """
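A note on `next_step = analysis[8:]`: that slice assumes an eight-character verdict keyword, which fits 'progress', 'question', and 'complete' but is one character off for 'problem'. A hedged sketch of an explicit keyword split that avoids the fixed offset (an alternative reading, not what the package ships):

```python
def split_verdict(analysis: str):
    """Split an analysis reply like 'progress: now refactor the loader'
    into (verdict, next_step) by matching the keyword explicitly rather
    than slicing a fixed number of characters."""
    text = analysis.strip().lower()
    for verdict in ("progress", "problem", "question", "complete"):
        if text.startswith(verdict):
            return verdict, text[len(verdict):].lstrip(" :-")
    return "unknown", text

print(split_verdict("progress: extend the parser to cover csv input"))
# ('progress', 'extend the parser to cover csv input')
```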
@@ -1353,10 +1434,10 @@ def execute_guac_command(command: str, state: ShellState, locals_dict: Dict[str,
  return state, f"Error loading file: {e}"

  # Handle file drops in text (multiple files or files with other text)
- processed_command, processed_files = _handle_file_drop(stripped_command, npc_team_dir)
+ processed_command, processed_files, file_paths = _handle_file_drop(stripped_command, npc_team_dir)
  if processed_files:
  print(f"📁 Processed {len(processed_files)} files")
- stripped_command = processed_command
+ stripped_command = processed_command + 'Here are the files associated with the request'

  # Handle /refresh command
  if stripped_command == "/refresh":
@@ -1416,7 +1497,6 @@ def execute_guac_command(command: str, state: ShellState, locals_dict: Dict[str,
  {locals_context_string}
  Begin directly with the code
  """
-
  llm_response = get_llm_response(prompt_cmd,
  model=state.chat_model,
  provider=state.chat_provider,
@@ -1485,12 +1565,196 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack

  except Exception as e:
  print(f"Warning: Could not load package {package_name}: {e}", file=sys.stderr)
+
+ from npcpy.data.load import load_file_contents
+
+ def read_file(file_path, max_lines=10000, encoding='utf-8'):
+ """
+ Read and print file contents up to max_lines.
+ Uses npcpy.data.load for specialized file types, falls back to text reading.
+ Returns the content as a string for further processing.
+ """
+ path = Path(file_path).expanduser().resolve()

+ if not path.exists():
+ print(f"File not found: {path}")
+ return None
+
+ if not path.is_file():
+ print(f"Not a file: {path}")
+ return None
+
+ try:
+ # Try using npcpy's load_file_contents first for specialized formats
+ file_ext = path.suffix.upper().lstrip('.')
+ if file_ext in ['PDF', 'DOCX', 'PPTX', 'HTML', 'HTM', 'CSV', 'XLS', 'XLSX', 'JSON']:
+ chunks = load_file_contents(str(path), chunk_size=10000) # Large chunk to get full content
+ if chunks and not chunks[0].startswith("Error") and not chunks[0].startswith("Unsupported"):
+ content = '\n'.join(chunks)
+ lines = content.split('\n')
+
+ if len(lines) > max_lines:
+ lines = lines[:max_lines]
+ print(f"File truncated at {max_lines} lines. Use windowed reading for larger files.")
+
+ print(f"Reading {path.name} ({len(lines)} lines, {len(content)} chars)")
+ print("=" * 60)
+
+ for i, line in enumerate(lines, 1):
+ print(f"{i:4d} | {line}")
+
+ print("=" * 60)
+ print(f"End of {path.name}")
+ return content
+
+ # Fall back to regular text reading
+ with open(path, 'r', encoding=encoding) as f:
+ lines = []
+ for i, line in enumerate(f, 1):
+ if i > max_lines:
+ print(f"File truncated at {max_lines} lines. Use windowed reading for larger files.")
+ break
+ lines.append(line.rstrip('\n\r'))
+
+ content = '\n'.join(lines)
+
+ print(f"Reading {path.name} ({len(lines)} lines, {len(content)} chars)")
+ print("=" * 60)
+
+ for i, line in enumerate(lines, 1):
+ print(f"{i:4d} | {line}")
+
+ print("=" * 60)
+ print(f"End of {path.name}")
+
+ return content
+
+ except UnicodeDecodeError:
+ try:
+ with open(path, 'rb') as f:
+ data = f.read(min(1024, max_lines * 80))
+ print(f"Binary file {path.name} ({len(data)} bytes)")
+ print("=" * 60)
+ print(data.hex()[:1000] + ("..." if len(data) > 500 else ""))
+ print("=" * 60)
+ return data
+ except Exception as e:
+ print(f"Error reading file: {e}")
+ return None
+ except Exception as e:
+ print(f"Error reading file: {e}")
+ return None
+
+ def edit_file(file_path, content=None, line_number=None, new_line=None, insert_at=None, append=False, backup=True):
+ """
+ Edit file contents in various ways:
+ - edit_file(path, content="new content") - replace entire file
+ - edit_file(path, line_number=5, new_line="new text") - replace specific line
+ - edit_file(path, insert_at=5, new_line="inserted text") - insert at line
+ - edit_file(path, append=True, content="appended") - append to file
+ """
+ path = Path(file_path).expanduser().resolve()
+
+ # Create parent directories if needed
+ path.parent.mkdir(parents=True, exist_ok=True)
+
+ # Backup original if it exists
+ if backup and path.exists():
+ backup_path = path.with_suffix(path.suffix + '.backup')
+ import shutil
+ shutil.copy2(path, backup_path)
+ print(f"Backup saved: {backup_path.name}")
+
+ try:
+ # Read existing content if file exists
+ existing_lines = []
+ if path.exists():
+ with open(path, 'r', encoding='utf-8') as f:
+ existing_lines = [line.rstrip('\n\r') for line in f]
+
+ if content is not None:
+ if append:
+ with open(path, 'a', encoding='utf-8') as f:
+ f.write('\n' + content if existing_lines else content)
+ print(f"Appended to {path.name}")
+ else:
+ with open(path, 'w', encoding='utf-8') as f:
+ f.write(content)
+ print(f"Wrote {path.name} ({len(content)} chars)")
+
+ elif line_number is not None and new_line is not None:
+ if line_number < 1:
+ print("Line numbers start at 1")
+ return False
+
+ while len(existing_lines) < line_number:
+ existing_lines.append("")
+
+ if line_number <= len(existing_lines):
+ old_line = existing_lines[line_number - 1] if line_number <= len(existing_lines) else ""
+ existing_lines[line_number - 1] = new_line
+
+ with open(path, 'w', encoding='utf-8') as f:
+ f.write('\n'.join(existing_lines))
+
+ print(f"Line {line_number} in {path.name}:")
+ print(f" - OLD: {old_line}")
+ print(f" + NEW: {new_line}")
+ else:
+ print(f"File only has {len(existing_lines)} lines")
+ return False
+
+ elif insert_at is not None and new_line is not None:
+ if insert_at < 1:
+ insert_at = 1
+
+ existing_lines.insert(insert_at - 1, new_line)
+
+ with open(path, 'w', encoding='utf-8') as f:
+ f.write('\n'.join(existing_lines))
+
+ print(f"Inserted at line {insert_at} in {path.name}: {new_line}")
+
+ else:
+ print("Must specify either 'content', or 'line_number + new_line', or 'insert_at + new_line'")
+ return False
+
+ return True
+
+ except Exception as e:
+ print(f"Error editing file: {e}")
+ return False
+
+ def load_file(file_path):
+ """
+ Simple wrapper around npcpy's load_file_contents for direct data loading.
+ Returns the loaded data in appropriate format.
+ """
+ path = Path(file_path).expanduser().resolve()
+
+ if not path.exists():
+ print(f"File not found: {path}")
+ return None
+
+ chunks = load_file_contents(str(path))
+ if chunks and not chunks[0].startswith("Error") and not chunks[0].startswith("Unsupported"):
+ content = '\n'.join(chunks)
+ print(f"Loaded {path.name} using npcpy loader")
+ return content
+ else:
+ print(f"Could not load {path.name}: {chunks[0] if chunks else 'Unknown error'}")
+ return None
+
  core_imports = {
  'pd': pd, 'np': np, 'plt': plt, 'datetime': datetime,
  'Path': Path, 'os': os, 'sys': sys, 'json': json,
- 'yaml': yaml, 're': re, 'traceback': traceback
+ 'yaml': yaml, 're': re, 'traceback': traceback,
+ 'edit_file': edit_file,
+ 'read_file':read_file,
+ 'load_file':load_file,
  }
+
+
  locals_dict.update(core_imports)
  locals_dict.update({f"guac_{k}": v for k, v in workspace_dirs.items()})

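Because `read_file`, `edit_file`, and `load_file` are injected into `core_imports`, they are available by name inside the guac REPL, both to the user and to generated code. A usage sketch as it might be typed at the prompt, with hypothetical file names:

```python
# Inside the guac REPL these helpers are pre-loaded into the session:
content = read_file("notes.txt", max_lines=50)   # prints numbered lines, returns the text
edit_file("notes.txt", line_number=3, new_line="updated third line")
edit_file("notes.txt", append=True, content="one more line")
data = load_file("results.csv")                  # npcpy loader for CSV/PDF/DOCX/...
```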
@@ -1532,17 +1796,24 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack

  process_result(user_input, state, result, state.command_history)

- except (KeyboardInterrupt, EOFError):
+ except EOFError:
  print("\nExiting Guac Mode...")
+ try:
+ readline.write_history_file(READLINE_HISTORY_FILE)
+ except:
+ pass
  if _guac_monitor_stop_event:
  _guac_monitor_stop_event.set()
  if _guac_monitor_thread:
  _guac_monitor_thread.join(timeout=1.0)
  break
-
- break
  except SystemExit as e:
+ try:
+ readline.write_history_file(READLINE_HISTORY_FILE)
+ except:
+ pass
  print(f"\n{e}")
+
  if _guac_monitor_stop_event:
  _guac_monitor_stop_event.set()
  if _guac_monitor_thread:
@@ -1552,6 +1823,10 @@ def run_guac_repl(state: ShellState, project_name: str, package_root: Path, pack
  except Exception:
  print("An unexpected error occurred in the REPL:")
  traceback.print_exc()
+ try:
+ readline.write_history_file(READLINE_HISTORY_FILE)
+ except:
+ pass

  if _guac_monitor_stop_event:
  _guac_monitor_stop_event.set()
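Each exit path above repeats the same `readline.write_history_file` call. An equivalent pattern registers the save once with `atexit`, which also covers exits these handlers miss. A sketch assuming a POSIX readline (Windows needs `pyreadline3`), with a stand-in history path since the module's `READLINE_HISTORY_FILE` constant is not shown in this diff:

```python
import atexit
import os
import readline

HISTFILE = os.path.expanduser("~/.guac_history")  # assumed path, stand-in
                                                  # for READLINE_HISTORY_FILE

# Load any previous history, then guarantee a save on every exit path
# (EOF, SystemExit, crashes) with one registration instead of one write
# call per exception handler.
if os.path.exists(HISTFILE):
    readline.read_history_file(HISTFILE)
atexit.register(readline.write_history_file, HISTFILE)
```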
npcsh/npcsh.py CHANGED
@@ -207,7 +207,7 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
  state,
  output,
  command_history,
- already_printed=False)
+ )

  except KeyboardInterrupt:
  if is_windows:
npcsh-1.0.19.dist-info/METADATA → npcsh-1.0.21.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcsh
- Version: 1.0.19
+ Version: 1.0.21
  Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
  Home-page: https://github.com/NPC-Worldwide/npcsh
  Author: Christopher Agostino
npcsh-1.0.19.dist-info/RECORD → npcsh-1.0.21.dist-info/RECORD CHANGED
@@ -1,21 +1,21 @@
  npcsh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- npcsh/_state.py,sha256=VROSuNgTX2ex8erM1DbIl9_N3_c_bYlrctI0rVm38x4,90187
+ npcsh/_state.py,sha256=ynUucPJ81gQTh10dCq9CZJTqmUx31fXSipISd8A3gSU,90177
  npcsh/alicanto.py,sha256=-muGqd0O2m8xcFBctEavSEizWbQmzuPSdcT-3YqYBhY,45043
- npcsh/corca.py,sha256=2KoYoG1hwbUmMI2YlQ1RrrfkW5bXAYz2EIPRNMMTt1g,31569
- npcsh/guac.py,sha256=7DoRzEY4HrHGDzxehLJUBfM7C6rbNLP71Mnn28D6Ylo,65141
+ npcsh/corca.py,sha256=FcZ-tyGkj2oo6q8vvpOlYnYbmc_34Yo7VEMZATpJNdE,32094
+ npcsh/guac.py,sha256=9VnVi8w1lOm8bfL7cStsBIZmqE_TNGKo1dG78b6K0Ew,79410
  npcsh/mcp_helpers.py,sha256=Ktd2yXuBnLL2P7OMalgGLj84PXJSzaucjqmJVvWx6HA,12723
  npcsh/mcp_server.py,sha256=htQBSN6y3g3zVCy2ADsdBuQT9PhqaOCSQG_RS9IinWI,5853
  npcsh/npc.py,sha256=elJ494nl_pv8iDrDvBFAlmFvBdci0_CZBjZwbOjVj0c,9113
- npcsh/npcsh.py,sha256=A9_-CPUQgQgljOBAOmu_neOCxuxRPo7l-_Mwk56H8DM,8304
+ npcsh/npcsh.py,sha256=Z_Cz3mgEJeIiY91Z-c4YDKY2tlv23RngM4YK2Gqf3Vc,8283
  npcsh/plonk.py,sha256=7w7J2bht5QXOyV2UK045nAPDmrSrTGLX-sh56KQ3-k0,14653
  npcsh/pti.py,sha256=UciiiH2Kz4ERQFy0-FX6BQEU2VxYQEUril-_Cvj76Y0,7853
  npcsh/routes.py,sha256=BOa3w8xGKa4MqUv3BzIrbGiqU3bZ1YpWKyy55hB6A_0,46637
  npcsh/spool.py,sha256=r0-oXkrNDbxQ5ZNHr1vSJcdmVj3jdWbz7qYcEDoSrj8,10354
  npcsh/wander.py,sha256=BiN6eYyFnEsFzo8MFLRkdZ8xS9sTKkQpjiCcy9chMcc,23225
  npcsh/yap.py,sha256=ipkY3uMDw8gNrPSZ9qJFWVQ_fXtLmQ2oz_6_WZt2hew,21097
- npcsh-1.0.19.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
- npcsh-1.0.19.dist-info/METADATA,sha256=yxiTw_Xtw-v0YxAwuMmgXbxKhsvUuBDqimyvyeKvPak,23040
- npcsh-1.0.19.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- npcsh-1.0.19.dist-info/entry_points.txt,sha256=S5yIuGm8ZXQ4siHYgN5gs0J7bxgobSEULXf8L5HaW5o,206
- npcsh-1.0.19.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
- npcsh-1.0.19.dist-info/RECORD,,
+ npcsh-1.0.21.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
+ npcsh-1.0.21.dist-info/METADATA,sha256=dg5OIv4CiSl0NXrv_Yl9vSTofdC0--3Fb8Rr9Ou2Hek,23040
+ npcsh-1.0.21.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ npcsh-1.0.21.dist-info/entry_points.txt,sha256=S5yIuGm8ZXQ4siHYgN5gs0J7bxgobSEULXf8L5HaW5o,206
+ npcsh-1.0.21.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
+ npcsh-1.0.21.dist-info/RECORD,,