npcsh-1.0.16-py3-none-any.whl → npcsh-1.0.17-py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages' contents as they appear in their respective public registries.
npcsh/pti.py CHANGED
@@ -1,234 +1,214 @@
1
+ import os
2
+ import sys
3
+ import shlex
4
+ import argparse
5
+ from typing import Dict, List, Any, Optional
6
+
7
+ from termcolor import colored
8
+
9
+ from npcpy.memory.command_history import CommandHistory, save_conversation_message
10
+ from npcpy.npc_sysenv import (
11
+ render_markdown
12
+ )
13
+ from npcpy.llm_funcs import get_llm_response
14
+ from npcpy.npc_compiler import NPC
15
+ from npcpy.data.load import load_file_contents
16
+
17
+ from npcsh._state import (
18
+ ShellState,
19
+ setup_shell,
20
+ get_multiline_input,
21
+ readline_safe_prompt,
22
+ get_npc_path
23
+ )
24
+
25
+ ice = "🧊"
26
+ bear = "🐻‍❄️"
27
+ def print_pti_welcome_message():
28
+
29
+ print(f"""
30
+ Welcome to PTI Mode!
1
31
 
2
- # pti
3
- import json
4
- from typing import Dict, List, Optional, Any, Generator
5
- import os
6
- from npcpy.memory.command_history import CommandHistory, save_attachment_to_message, start_new_conversation,save_conversation_message
7
- from npcpy.npc_sysenv import (NPCSH_REASONING_MODEL,
8
- NPCSH_REASONING_PROVIDER,
9
- NPCSH_CHAT_MODEL,
10
- NPCSH_CHAT_PROVIDER,
11
- NPCSH_API_URL,
12
- NPCSH_STREAM_OUTPUT,print_and_process_stream_with_markdown)
13
- from npcpy.llm_funcs import get_llm_response, handle_request_input
14
-
15
- from npcpy.npc_compiler import NPC
16
- from npcpy.data.load import load_csv, load_pdf
17
- from npcpy.data.text import rag_search
18
-
19
-
20
-
21
-
22
-
23
-
24
- def enter_reasoning_human_in_the_loop(
25
- user_input=None,
26
- messages: List[Dict[str, str]] = None,
27
- reasoning_model: str = NPCSH_REASONING_MODEL,
28
- reasoning_provider: str = NPCSH_REASONING_PROVIDER,
29
- files : List = None,
30
- npc: Any = None,
31
- conversation_id : str= False,
32
- answer_only: bool = False,
33
- context=None,
34
- ) :
35
- """
36
- Stream responses while checking for think tokens and handling human input when needed.
37
-
38
- Args:
39
- messages: List of conversation messages
40
- model: LLM model to use
41
- provider: Model provider
42
- npc: NPC instance if applicable
43
-
44
- """
45
- # Get the initial stream
46
- loaded_content = {} # New dictionary to hold loaded content
47
-
48
- # Create conversation ID if not provided
49
- if not conversation_id:
50
- conversation_id = start_new_conversation()
51
-
52
- command_history = CommandHistory()
53
- # Load specified files if any
54
- if files:
55
- for file in files:
56
- extension = os.path.splitext(file)[1].lower()
57
- try:
58
- if extension == ".pdf":
59
- content = load_pdf(file)["texts"].iloc[0]
60
- elif extension == ".csv":
61
- content = load_csv(file)
62
- else:
63
- print(f"Unsupported file type: {file}")
64
- continue
65
- loaded_content[file] = content
66
- print(f"Loaded content from: {file}")
67
- except Exception as e:
68
- print(f"Error loading {file}: {str(e)}")
32
+ {ice}{ice}{ice} {ice}{ice}{ice} {bear}
33
+ {ice} {ice} {ice} {bear}
34
+ {ice}{ice}{ice} {ice} {bear}
35
+ {ice} {ice} {bear}
36
+ {ice} {ice} {bear}
37
+
38
+ Pardon-The-Interruption for human-in-the-loop reasoning.
39
+ Type 'exit' or 'quit' to return to the main shell.
40
+ """)
41
+
42
+ def enter_pti_mode(command: str, **kwargs):
43
+ state: ShellState = kwargs.get('shell_state')
44
+ command_history: CommandHistory = kwargs.get('command_history')
69
45
 
46
+ if not state or not command_history:
47
+ return {"output": "Error: PTI mode requires shell state and history.", "messages": kwargs.get('messages', [])}
70
48
 
49
+ all_command_parts = shlex.split(command)
50
+ parsed_args_list = all_command_parts[1:]
51
+
52
+ parser = argparse.ArgumentParser(prog="/pti", description="Enter PTI mode for human-in-the-loop reasoning.")
53
+ parser.add_argument('initial_prompt', nargs='*', help="Initial prompt to start the session.")
54
+ parser.add_argument("-f", "--files", nargs="*", default=[], help="Files to load into context.")
55
+
71
56
  try:
72
- while True:
57
+ args = parser.parse_args(parsed_args_list)
58
+ except SystemExit:
59
+ return {"output": "Invalid arguments for /pti. Usage: /pti [initial prompt] [-f file1 file2 ...]", "messages": state.messages}
73
60
 
74
- if loaded_content:
75
- context_content = ""
76
- for filename, content in loaded_content.items():
77
- retrieved_docs = rag_search(
78
- user_input,
79
- content,
80
- )
81
- if retrieved_docs:
82
- context_content += (
83
- f"\n\nLoaded content from: {filename}\n{content}\n\n"
84
- )
85
- if len(context_content) > 0:
86
- user_input += f"""
87
- Here is the loaded content that may be relevant to your query:
88
- {context_content}
89
- Please reference it explicitly in your response and use it for answering.
90
- """
91
- if answer_only:
92
- response = get_llm_response(
93
- user_input,
94
- model = reasoning_model,
95
- provider=reasoning_provider,
96
- messages=messages,
97
- stream=True,
98
- )
99
- assistant_reply, messages = response['response'], response['messages']
100
- assistant_reply = print_and_process_stream_with_markdown(assistant_reply, reasoning_model, reasoning_provider)
101
- messages.append({'role':'assistant', 'content':assistant_reply})
102
- return enter_reasoning_human_in_the_loop(user_input = None,
103
- messages=messages,
104
- reasoning_model=reasoning_model,
105
- reasoning_provider=reasoning_provider, answer_only=False)
106
- else:
107
- message= "Think first though and use <think> tags in your chain of thought. Once finished, either answer plainly or write a request for input by beginning with the <request_for_input> tag. and close it with a </request_for_input>"
108
- if user_input is None:
109
- user_input = input('🐻‍❄️>')
61
+ print_pti_welcome_message()
62
+
63
+ frederic_path = get_npc_path("frederic", command_history.db_path)
64
+ state.npc = NPC(file=frederic_path)
65
+ print(colored("Defaulting to NPC: frederic", "cyan"))
66
+ state.npc = NPC(name="frederic")
67
+
68
+ pti_messages = list(state.messages)
69
+ loaded_content = {}
70
+
71
+ if args.files:
72
+ for file_path in args.files:
73
+ try:
74
+ content_chunks = load_file_contents(file_path)
75
+ loaded_content[file_path] = "\n".join(content_chunks)
76
+ print(colored(f"Successfully loaded content from: {file_path}", "green"))
77
+ except Exception as e:
78
+ print(colored(f"Error loading {file_path}: {e}", "red"))
79
+
80
+ user_input = " ".join(args.initial_prompt)
81
+
82
+ while True:
83
+ try:
84
+ if not user_input:
85
+ npc_name = state.npc.name if state.npc and isinstance(state.npc, NPC) else "frederic"
86
+ model_name = state.reasoning_model
110
87
 
111
- message_id = save_conversation_message(
112
- command_history,
113
- conversation_id,
114
- "user",
115
- user_input,
116
- wd=os.getcwd(),
117
- model=reasoning_model,
118
- provider=reasoning_provider,
119
- npc=npc.name if npc else None,
120
-
121
- )
122
- response = get_llm_response(
123
- user_input+message,
124
- model = reasoning_model,
125
- provider=reasoning_provider,
126
- messages=messages,
88
+ prompt_str = f"{colored(os.path.basename(state.current_path), 'blue')}:{npc_name}:{model_name}{bear}> "
89
+ prompt = readline_safe_prompt(prompt_str)
90
+ user_input = get_multiline_input(prompt).strip()
91
+
92
+ if user_input.lower() in ["exit", "quit", "done"]:
93
+ break
94
+
95
+ if not user_input:
96
+ continue
97
+
98
+ prompt_for_llm = user_input
99
+ if loaded_content:
100
+ context_str = "\n".join([f"--- Content from {fname} ---\n{content}" for fname, content in loaded_content.items()])
101
+ prompt_for_llm += f"\n\nUse the following context to inform your answer:\n{context_str}"
102
+
103
+ prompt_for_llm += "\n\nThink step-by-step using <think> tags. When you need more information from me, enclose your question in <request_for_input> tags."
104
+
105
+ save_conversation_message(
106
+ command_history,
107
+ state.conversation_id,
108
+ "user",
109
+ user_input,
110
+ wd=state.current_path,
111
+ model=state.reasoning_model,
112
+ provider=state.reasoning_provider,
113
+ npc=state.npc.name if isinstance(state.npc, NPC) else None,
114
+ )
115
+ pti_messages.append({"role": "user", "content": user_input})
116
+
117
+ try:
118
+ response_dict = get_llm_response(
119
+ prompt=prompt_for_llm,
120
+ model=state.reasoning_model,
121
+ provider=state.reasoning_provider,
122
+ messages=pti_messages,
127
123
  stream=True,
124
+ npc=state.npc
128
125
  )
129
-
130
- assistant_reply, messages = response['response'], response['messages']
131
- thoughts = []
126
+ stream = response_dict.get('response')
127
+
132
128
  response_chunks = []
133
- in_think_block = False # the thinking chain generated after reasoning
129
+ request_found = False
134
130
 
135
- thinking = False # the reasoning content
131
+ for chunk in stream:
132
+ chunk_content = ""
133
+ if state.reasoning_provider == "ollama":
134
+ chunk_content = chunk.get("message", {}).get("content", "")
135
+ else:
136
+ chunk_content = "".join(
137
+ choice.delta.content
138
+ for choice in chunk.choices
139
+ if choice.delta.content is not None
140
+ )
141
+
142
+ print(chunk_content, end='')
143
+ sys.stdout.flush()
144
+ response_chunks.append(chunk_content)
145
+
146
+ combined_text = "".join(response_chunks)
147
+ if "</request_for_input>" in combined_text:
148
+ request_found = True
149
+ break
150
+
151
+ full_response_text = "".join(response_chunks)
152
+
153
+ save_conversation_message(
154
+ command_history,
155
+ state.conversation_id,
156
+ "assistant",
157
+ full_response_text,
158
+ wd=state.current_path,
159
+ model=state.reasoning_model,
160
+ provider=state.reasoning_provider,
161
+ npc=state.npc.name if isinstance(state.npc, NPC) else None,
162
+ )
163
+ pti_messages.append({"role": "assistant", "content": full_response_text})
136
164
 
165
+ print()
166
+ user_input = None
167
+ continue
168
+
169
+ except KeyboardInterrupt:
170
+ print(colored("\n\n--- Stream Interrupted ---", "yellow"))
171
+ interrupt_text = input('🐻‍❄️> ').strip()
172
+ if interrupt_text:
173
+ user_input = interrupt_text
174
+ else:
175
+ user_input = None
176
+ continue
137
177
 
138
- for chunk in assistant_reply:
139
- if thinking:
140
- if not in_think_block:
141
- in_think_block = True
142
- try:
143
-
144
- if reasoning_provider == "ollama":
145
- chunk_content = chunk.get("message", {}).get("content", "")
146
- else:
147
- chunk_content = ''
148
- reasoning_content = ''
149
- for c in chunk.choices:
150
- if hasattr(c.delta, "reasoning_content"):
151
-
152
- reasoning_content += c.delta.reasoning_content
153
-
154
- if reasoning_content:
155
- thinking = True
156
- chunk_content = reasoning_content
157
- chunk_content += "".join(
158
- choice.delta.content
159
- for choice in chunk.choices
160
- if choice.delta.content is not None
161
- )
162
- response_chunks.append(chunk_content)
163
- print(chunk_content, end='')
164
- combined_text = "".join(response_chunks)
165
-
166
- if in_think_block:
167
- if '</thinking>' in combined_text:
168
- in_think_block = False
169
- thoughts.append(chunk_content)
170
-
171
- if "</request_for_input>" in combined_text:
172
- # Process the LLM's input request
173
- request_text = "".join(thoughts)
174
-
175
- print("\nPlease provide the requested information: ")
176
-
177
- user_input = input('🐻‍❄️>')
178
-
179
- messages.append({"role": "assistant", "content": request_text})
180
-
181
- print("\n[Continuing with provided information...]\n")
182
- return enter_reasoning_human_in_the_loop( user_input = user_input,
183
- messages=messages,
184
- reasoning_model=reasoning_model,
185
- reasoning_provider=reasoning_provider,
186
- npc=npc,
187
- answer_only=True)
188
-
189
-
190
- except KeyboardInterrupt:
191
- user_interrupt = input("\n[Stream interrupted by user]\n Enter your additional input: ")
192
-
193
-
194
- # Add the interruption to messages and restart stream
195
- messages.append(
196
- {"role": "user", "content": f"[INTERRUPT] {user_interrupt}"}
197
- )
198
- print(f"\n[Continuing with added context...]\n")
199
-
200
- except KeyboardInterrupt:
201
- user_interrupt = input("\n[Stream interrupted by user]\n 🔴🔴🔴🔴\nEnter your additional input: ")
202
-
178
+ except KeyboardInterrupt:
179
+ print()
180
+ continue
181
+ except EOFError:
182
+ print("\nExiting PTI Mode.")
183
+ break
203
184
 
204
- # Add the interruption to messages and restart stream
205
- messages.append(
206
- {"role": "user", "content": f"[INTERRUPT] {user_interrupt}"}
207
- )
208
- print(f"\n[Continuing with added context...]\n")
209
-
210
- return {'messages':messages, }
211
-
185
+ render_markdown("\n# Exiting PTI Mode")
186
+ return {"output": "", "messages": pti_messages}
212
187
 
213
188
  def main():
214
- # Example usage
215
- import argparse
216
- parser = argparse.ArgumentParser(description="Enter PTI mode for chatting with an LLM")
217
- parser.add_argument("--npc", default='~/.npcsh/npc_team/frederic.npc', help="Path to NPC File")
218
- parser.add_argument("--model", default=NPCSH_REASONING_MODEL, help="Model to use")
219
- parser.add_argument("--provider", default=NPCSH_REASONING_PROVIDER, help="Provider to use")
220
- parser.add_argument("--files", nargs="*", help="Files to load into context")
189
+ parser = argparse.ArgumentParser(description="PTI - Pardon-The-Interruption human-in-the-loop shell.")
190
+ parser.add_argument('initial_prompt', nargs='*', help="Initial prompt to start the session.")
191
+ parser.add_argument("-f", "--files", nargs="*", default=[], help="Files to load into context.")
221
192
  args = parser.parse_args()
193
+
194
+ command_history, team, default_npc = setup_shell()
195
+
196
+ from npcsh._state import initial_state
197
+ initial_shell_state = initial_state
198
+ initial_shell_state.team = team
199
+ initial_shell_state.npc = default_npc
222
200
 
223
- npc = NPC(file=args.npc)
224
- enter_reasoning_human_in_the_loop(
225
- messages = [],
226
- npc=npc,
227
- reasoning_model=args.model,
228
- reasoning_provider=args.provider,
229
- files=args.files,
230
- )
201
+ fake_command_str = "/pti " + " ".join(args.initial_prompt)
202
+ if args.files:
203
+ fake_command_str += " --files " + " ".join(args.files)
204
+
205
+ kwargs = {
206
+ 'command': fake_command_str,
207
+ 'shell_state': initial_shell_state,
208
+ 'command_history': command_history
209
+ }
210
+
211
+ enter_pti_mode(**kwargs)
231
212
 
232
213
  if __name__ == "__main__":
233
- main()
234
-
214
+ main()
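
For orientation, a minimal sketch (not part of the package) of driving the new PTI entry point programmatically, mirroring the main() added above. It assumes setup_shell() and initial_state behave as shown in this diff; the prompt text and file name are placeholders.

    from npcsh._state import initial_state, setup_shell
    from npcsh.pti import enter_pti_mode

    # Build the same shell context that main() constructs in the new pti.py.
    command_history, team, default_npc = setup_shell()
    state = initial_state
    state.team = team
    state.npc = default_npc

    # enter_pti_mode parses the command string with shlex + argparse, so it
    # takes the same shape as the in-shell invocation: /pti <prompt> [--files ...]
    result = enter_pti_mode(
        command="/pti summarize the loaded notes --files notes.txt",  # placeholder prompt/file
        shell_state=state,
        command_history=command_history,
    )
    # result is a dict carrying "output" and the accumulated "messages" list.
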
npcsh/routes.py CHANGED
@@ -3,6 +3,10 @@
3
3
  from typing import Callable, Dict, Any, List, Optional, Union
4
4
  import functools
5
5
  import os
6
+ import subprocess
7
+ import sys
8
+ from pathlib import Path
9
+
6
10
  import traceback
7
11
  import shlex
8
12
  import time
@@ -36,6 +40,7 @@ from npcpy.memory.knowledge_graph import kg_sleep_process, kg_dream_process
36
40
  from npcsh._state import (
37
41
  NPCSH_VISION_MODEL,
38
42
  NPCSH_VISION_PROVIDER,
43
+ set_npcsh_config_value,
39
44
  NPCSH_API_URL,
40
45
  NPCSH_CHAT_MODEL,
41
46
  NPCSH_CHAT_PROVIDER,
@@ -53,15 +58,20 @@ from npcsh._state import (
53
58
  normalize_and_expand_flags,
54
59
  get_argument_help
55
60
  )
61
+ from npcsh.corca import enter_corca_mode
56
62
  from npcsh.guac import enter_guac_mode
57
63
  from npcsh.plonk import execute_plonk_command, format_plonk_summary
58
64
  from npcsh.alicanto import alicanto
65
+ from npcsh.pti import enter_pti_mode
59
66
  from npcsh.spool import enter_spool_mode
60
67
  from npcsh.wander import enter_wander_mode
61
68
  from npcsh.yap import enter_yap_mode
62
69
 
63
70
 
64
71
 
72
+ NPC_STUDIO_DIR = Path.home() / ".npcsh" / "npc-studio"
73
+
74
+
65
75
  class CommandRouter:
66
76
  def __init__(self):
67
77
  self.routes = {}
@@ -220,6 +230,10 @@ def compile_handler(command: str, **kwargs):
220
230
 
221
231
 
222
232
 
233
+ @router.route("corca", "Enter the Corca MCP-powered agentic shell. Usage: /corca [--mcp-server-path path]")
234
+ def corca_handler(command: str, **kwargs):
235
+ return enter_corca_mode(command=command, **kwargs)
236
+
223
237
  @router.route("flush", "Flush the last N messages")
224
238
  def flush_handler(command: str, **kwargs):
225
239
  messages = safe_get(kwargs, "messages", [])
@@ -277,7 +291,8 @@ def guac_handler(command, **kwargs):
277
291
  team = Team(npc_team_dir, db_conn=db_conn)
278
292
 
279
293
 
280
- enter_guac_mode(npc=npc,
294
+ enter_guac_mode(workspace_dirs,
295
+ npc=npc,
281
296
  team=team,
282
297
  config_dir=config_dir,
283
298
  plots_dir=plots_dir,
@@ -368,9 +383,77 @@ def init_handler(command: str, **kwargs):
368
383
  output = f"Error initializing project: {e}"
369
384
  return {"output": output, "messages": messages}
370
385
 
386
+ def ensure_repo():
387
+ """Clone or update the npc-studio repo."""
388
+ if not NPC_STUDIO_DIR.exists():
389
+ os.makedirs(NPC_STUDIO_DIR.parent, exist_ok=True)
390
+ subprocess.check_call([
391
+ "git", "clone",
392
+ "https://github.com/npc-worldwide/npc-studio.git",
393
+ str(NPC_STUDIO_DIR)
394
+ ])
395
+ else:
396
+ subprocess.check_call(
397
+ ["git", "pull"],
398
+ cwd=NPC_STUDIO_DIR
399
+ )
371
400
 
401
+ def install_dependencies():
402
+ """Install npm and pip dependencies."""
403
+ # Install frontend deps
404
+ subprocess.check_call(["npm", "install"], cwd=NPC_STUDIO_DIR)
372
405
 
406
+ # Install backend deps
407
+ req_file = NPC_STUDIO_DIR / "requirements.txt"
408
+ if req_file.exists():
409
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", str(req_file)])
410
+ def launch_npc_studio(path_to_open: str = None):
411
+ """
412
+ Launch the NPC Studio backend + frontend.
413
+ Returns PIDs for processes.
414
+ """
415
+ ensure_repo()
416
+ install_dependencies()
417
+
418
+ # Start backend (Flask server)
419
+ backend = subprocess.Popen(
420
+ [sys.executable, "npc_studio_serve.py"],
421
+ cwd=NPC_STUDIO_DIR,
422
+ shell = False
423
+ )
424
+
425
+ # Start server (Electron)
426
+ dev_server = subprocess.Popen(
427
+ ["npm", "run", "dev"],
428
+ cwd=NPC_STUDIO_DIR,
429
+ shell=False
430
+ )
431
+
432
+ # Start frontend (Electron)
433
+ frontend = subprocess.Popen(
434
+ ["npm", "start"],
435
+ cwd=NPC_STUDIO_DIR,
436
+ shell=False
437
+ )
438
+
439
+ return backend, dev_server, frontend
440
+ # ========== Router handler ==========
441
+ @router.route("npc-studio", "Start npc studio")
442
+ def npc_studio_handler(command: str, **kwargs):
443
+ messages = kwargs.get("messages", [])
444
+ user_command = " ".join(command.split()[1:])
373
445
 
446
+ try:
447
+ backend, electron, frontend = launch_npc_studio(user_command or None)
448
+ return {
449
+ "output": f"NPC Studio started!\nBackend PID={backend.pid}, Electron PID={electron.pid} Frontend PID={frontend.pid}",
450
+ "messages": messages
451
+ }
452
+ except Exception as e:
453
+ return {
454
+ "output": f"Failed to start NPC Studio: {e}",
455
+ "messages": messages
456
+ }
374
457
  @router.route("ots", "Take screenshot and analyze with vision model")
375
458
  def ots_handler(command: str, **kwargs):
376
459
  command_parts = command.split()
@@ -438,6 +521,8 @@ def ots_handler(command: str, **kwargs):
438
521
  return {"output": f"Error during /ots command: {e}", "messages": messages}
439
522
 
440
523
 
524
+
525
+
441
526
  @router.route("plan", "Execute a plan command")
442
527
  def plan_handler(command: str, **kwargs):
443
528
  messages = safe_get(kwargs, "messages", [])
@@ -452,9 +537,9 @@ def plan_handler(command: str, **kwargs):
452
537
  # traceback.print_exc()
453
538
  # return {"output": f"Error executing plan: {e}", "messages": messages}
454
539
 
455
- @router.route("pti", "Use pardon-the-interruption mode to interact with the LLM")
540
+ @router.route("pti", "Enter Pardon-The-Interruption mode for human-in-the-loop reasoning.")
456
541
  def pti_handler(command: str, **kwargs):
457
- return
542
+ return enter_pti_mode(command=command, **kwargs)
458
543
 
459
544
  @router.route("plonk", "Use vision model to interact with GUI. Usage: /plonk <task description>")
460
545
  def plonk_handler(command: str, **kwargs):
@@ -825,29 +910,18 @@ def sleep_handler(command: str, **kwargs):
825
910
  @router.route("spool", "Enter interactive chat (spool) mode")
826
911
  def spool_handler(command: str, **kwargs):
827
912
  try:
828
- # Handle NPC loading if npc is passed as a string (name)
829
913
  npc = safe_get(kwargs, 'npc')
830
914
  team = safe_get(kwargs, 'team')
831
915
 
832
- # If npc is a string, try to load it from the team
833
916
  if isinstance(npc, str) and team:
834
917
  npc_name = npc
835
918
  if npc_name in team.npcs:
836
919
  npc = team.npcs[npc_name]
837
920
  else:
838
921
  return {"output": f"Error: NPC '{npc_name}' not found in team. Available NPCs: {', '.join(team.npcs.keys())}", "messages": safe_get(kwargs, "messages", [])}
839
-
840
- return enter_spool_mode(
841
- model=safe_get(kwargs, 'model', NPCSH_CHAT_MODEL),
842
- provider=safe_get(kwargs, 'provider', NPCSH_CHAT_PROVIDER),
843
- npc=npc,
844
- team=team,
845
- messages=safe_get(kwargs, 'messages'),
846
- conversation_id=safe_get(kwargs, 'conversation_id'),
847
- stream=safe_get(kwargs, 'stream', NPCSH_STREAM_OUTPUT),
848
- attachments=safe_get(kwargs, 'attachments'),
849
- rag_similarity_threshold = safe_get(kwargs, 'rag_similarity_threshold', 0.3),
850
- )
922
+ kwargs['npc'] = npc
923
+ return enter_spool_mode(
924
+ **kwargs)
851
925
  except Exception as e:
852
926
  traceback.print_exc()
853
927
  return {"output": f"Error entering spool mode: {e}", "messages": safe_get(kwargs, "messages", [])}
@@ -907,13 +981,15 @@ def vixynt_handler(command: str, **kwargs):
907
981
  width = safe_get(kwargs, 'width', 1024)
908
982
  output_file = safe_get(kwargs, 'output_file')
909
983
  attachments = safe_get(kwargs, 'attachments')
984
+ if isinstance(attachments, str):
985
+ attachments = attachments.split(',')
986
+
910
987
  messages = safe_get(kwargs, 'messages', [])
911
988
 
912
989
  user_prompt = " ".join(safe_get(kwargs, 'positional_args', []))
913
990
 
914
991
  if not user_prompt:
915
992
  return {"output": "Usage: /vixynt <prompt> [--output_file path] [--attachments path]", "messages": messages}
916
-
917
993
  try:
918
994
  image = gen_image(
919
995
  prompt=user_prompt,