npcpy 1.3.9__py3-none-any.whl → 1.3.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcpy/serve.py CHANGED
@@ -42,7 +42,8 @@ class SilentUndefined(Undefined):
42
42
  return ""
43
43
 
44
44
  # Import ShellState and helper functions from npcsh
45
- from npcsh._state import ShellState
45
+ from npcsh._state import ShellState, initialize_base_npcs_if_needed
46
+ from npcsh.config import NPCSH_DB_PATH
46
47
 
47
48
 
48
49
  from npcpy.memory.knowledge_graph import load_kg_from_db
@@ -814,19 +815,44 @@ def _get_jinx_files_recursively(directory):
814
815
  @app.route("/api/jinxs/available", methods=["GET"])
815
816
  def get_available_jinxs():
816
817
  try:
818
+ import yaml
817
819
  current_path = request.args.get('currentPath')
818
820
  jinx_names = set()
819
821
 
822
+ def get_jinx_name_from_file(filepath):
823
+ """Read jinx_name from file, fallback to filename."""
824
+ try:
825
+ with open(filepath, 'r') as f:
826
+ data = yaml.safe_load(f)
827
+ if data and 'jinx_name' in data:
828
+ return data['jinx_name']
829
+ except:
830
+ pass
831
+ return os.path.basename(filepath)[:-5]
832
+
833
+ # 1. Project jinxs
820
834
  if current_path:
821
835
  team_jinxs_dir = os.path.join(current_path, 'npc_team', 'jinxs')
822
836
  jinx_paths = _get_jinx_files_recursively(team_jinxs_dir)
823
837
  for path in jinx_paths:
824
- jinx_names.add(os.path.basename(path)[:-5])
838
+ jinx_names.add(get_jinx_name_from_file(path))
825
839
 
840
+ # 2. Global user jinxs (~/.npcsh)
826
841
  global_jinxs_dir = os.path.expanduser('~/.npcsh/npc_team/jinxs')
827
842
  jinx_paths = _get_jinx_files_recursively(global_jinxs_dir)
828
843
  for path in jinx_paths:
829
- jinx_names.add(os.path.basename(path)[:-5])
844
+ jinx_names.add(get_jinx_name_from_file(path))
845
+
846
+ # 3. Package built-in jinxs (from npcsh package)
847
+ try:
848
+ import npcsh
849
+ package_dir = os.path.dirname(npcsh.__file__)
850
+ package_jinxs_dir = os.path.join(package_dir, 'npc_team', 'jinxs')
851
+ jinx_paths = _get_jinx_files_recursively(package_jinxs_dir)
852
+ for path in jinx_paths:
853
+ jinx_names.add(get_jinx_name_from_file(path))
854
+ except Exception as pkg_err:
855
+ print(f"Could not load package jinxs: {pkg_err}")
830
856
 
831
857
  return jsonify({'jinxs': sorted(list(jinx_names)), 'error': None})
832
858
  except Exception as e:
@@ -1870,14 +1896,8 @@ def get_jinxs_global():
1870
1896
  with open(jinx_path, 'r') as f:
1871
1897
  raw_data = yaml.safe_load(f)
1872
1898
 
1873
- inputs = []
1874
- for inp in raw_data.get("inputs", []):
1875
- if isinstance(inp, str):
1876
- inputs.append(inp)
1877
- elif isinstance(inp, dict):
1878
- inputs.append(list(inp.keys())[0])
1879
- else:
1880
- inputs.append(str(inp))
1899
+ # Preserve full input definitions including defaults
1900
+ inputs = raw_data.get("inputs", [])
1881
1901
 
1882
1902
  rel_path = os.path.relpath(jinx_path, global_jinx_directory)
1883
1903
  path_without_ext = rel_path[:-5]
@@ -1912,14 +1932,8 @@ def get_jinxs_project():
1912
1932
  with open(jinx_path, 'r') as f:
1913
1933
  raw_data = yaml.safe_load(f)
1914
1934
 
1915
- inputs = []
1916
- for inp in raw_data.get("inputs", []):
1917
- if isinstance(inp, str):
1918
- inputs.append(inp)
1919
- elif isinstance(inp, dict):
1920
- inputs.append(list(inp.keys())[0])
1921
- else:
1922
- inputs.append(str(inp))
1935
+ # Preserve full input definitions including defaults
1936
+ inputs = raw_data.get("inputs", [])
1923
1937
 
1924
1938
  rel_path = os.path.relpath(jinx_path, project_dir)
1925
1939
  path_without_ext = rel_path[:-5]
@@ -2354,39 +2368,97 @@ def init_project_team():
2354
2368
 
2355
2369
  @app.route("/api/npcsh/check", methods=["GET"])
2356
2370
  def check_npcsh_folder():
2357
- """Check if ~/.npcsh folder exists and has a valid npc_team."""
2371
+ """Check if npcsh has been initialized by looking for actual npc_team content."""
2358
2372
  try:
2359
2373
  npcsh_path = os.path.expanduser("~/.npcsh")
2360
2374
  npc_team_path = os.path.join(npcsh_path, "npc_team")
2361
-
2362
- exists = os.path.exists(npcsh_path)
2363
- has_npc_team = os.path.exists(npc_team_path)
2364
- has_forenpc = os.path.exists(os.path.join(npc_team_path, "forenpc.npc")) if has_npc_team else False
2365
-
2375
+ # Check if npc_team exists and has .npc files (actual initialization)
2376
+ initialized = os.path.isdir(npc_team_path) and any(
2377
+ f.endswith('.npc') for f in os.listdir(npc_team_path)
2378
+ ) if os.path.exists(npc_team_path) else False
2366
2379
  return jsonify({
2367
- "exists": exists,
2368
- "has_npc_team": has_npc_team,
2369
- "has_forenpc": has_forenpc,
2380
+ "initialized": initialized,
2370
2381
  "path": npcsh_path,
2371
2382
  "error": None
2372
2383
  })
2373
2384
  except Exception as e:
2374
- print(f"Error checking npcsh folder: {e}")
2385
+ print(f"Error checking npcsh: {e}")
2375
2386
  return jsonify({"error": str(e)}), 500
2376
2387
 
2388
+ @app.route("/api/npcsh/package-contents", methods=["GET"])
2389
+ def get_package_contents():
2390
+ """Get NPCs and jinxs available in the npcsh package for installation."""
2391
+ try:
2392
+ from npcsh._state import get_package_dir
2393
+ package_dir = get_package_dir()
2394
+ package_npc_team_dir = os.path.join(package_dir, "npc_team")
2395
+
2396
+ npcs = []
2397
+ jinxs = []
2398
+
2399
+ if os.path.exists(package_npc_team_dir):
2400
+ # Get NPCs
2401
+ for f in os.listdir(package_npc_team_dir):
2402
+ if f.endswith('.npc'):
2403
+ npc_path = os.path.join(package_npc_team_dir, f)
2404
+ try:
2405
+ with open(npc_path, 'r') as file:
2406
+ npc_data = yaml.safe_load(file) or {}
2407
+ npcs.append({
2408
+ "name": npc_data.get("name", f[:-4]),
2409
+ "primary_directive": npc_data.get("primary_directive", ""),
2410
+ "model": npc_data.get("model", ""),
2411
+ "provider": npc_data.get("provider", ""),
2412
+ })
2413
+ except Exception as e:
2414
+ print(f"Error reading NPC {f}: {e}")
2415
+
2416
+ # Get jinxs recursively
2417
+ jinxs_dir = os.path.join(package_npc_team_dir, "jinxs")
2418
+ if os.path.exists(jinxs_dir):
2419
+ for root, dirs, files in os.walk(jinxs_dir):
2420
+ for f in files:
2421
+ if f.endswith('.jinx'):
2422
+ jinx_path = os.path.join(root, f)
2423
+ rel_path = os.path.relpath(jinx_path, jinxs_dir)
2424
+ try:
2425
+ with open(jinx_path, 'r') as file:
2426
+ jinx_data = yaml.safe_load(file) or {}
2427
+ jinxs.append({
2428
+ "name": f[:-5],
2429
+ "path": rel_path[:-5],
2430
+ "description": jinx_data.get("description", ""),
2431
+ })
2432
+ except Exception as e:
2433
+ print(f"Error reading jinx {f}: {e}")
2434
+
2435
+ return jsonify({
2436
+ "npcs": npcs,
2437
+ "jinxs": jinxs,
2438
+ "package_dir": package_dir,
2439
+ "error": None
2440
+ })
2441
+ except Exception as e:
2442
+ print(f"Error getting package contents: {e}")
2443
+ import traceback
2444
+ traceback.print_exc()
2445
+ return jsonify({"error": str(e), "npcs": [], "jinxs": []}), 500
2446
+
2447
+
2377
2448
  @app.route("/api/npcsh/init", methods=["POST"])
2378
2449
  def init_npcsh_folder():
2379
- """Initialize the ~/.npcsh folder with a default npc_team."""
2450
+ """Initialize npcsh with config and default npc_team."""
2380
2451
  try:
2381
- npcsh_path = os.path.expanduser("~/.npcsh")
2382
- result = initialize_npc_project(directory=npcsh_path)
2452
+ db_path = os.path.expanduser(NPCSH_DB_PATH)
2453
+ os.makedirs(os.path.dirname(db_path), exist_ok=True)
2454
+ initialize_base_npcs_if_needed(db_path)
2383
2455
  return jsonify({
2384
- "message": result,
2385
- "path": npcsh_path,
2456
+ "message": "npcsh initialized",
2457
+ "path": os.path.expanduser("~/.npcsh"),
2386
2458
  "error": None
2387
2459
  })
2388
2460
  except Exception as e:
2389
- print(f"Error initializing npcsh folder: {e}")
2461
+ print(f"Error initializing npcsh: {e}")
2390
2462
  return jsonify({"error": str(e)}), 500
2391
2463
 
2392
2464
  @app.route("/api/context/websites", methods=["GET"])
@@ -3417,7 +3489,13 @@ def stream():
3417
3489
  npc_name = data.get("npc", None)
3418
3490
  npc_source = data.get("npcSource", "global")
3419
3491
  current_path = data.get("currentPath")
3420
- is_resend = data.get("isResend", False) # ADD THIS LINE
3492
+ is_resend = data.get("isResend", False)
3493
+ parent_message_id = data.get("parentMessageId", None)
3494
+ # Accept frontend-generated message IDs to maintain parent-child relationships after reload
3495
+ frontend_user_message_id = data.get("userMessageId", None)
3496
+ frontend_assistant_message_id = data.get("assistantMessageId", None)
3497
+ # For sub-branches: the parent of the user message (points to an assistant message)
3498
+ user_parent_message_id = data.get("userParentMessageId", None)
3421
3499
 
3422
3500
  if current_path:
3423
3501
  loaded_vars = load_project_env(current_path)
@@ -3513,55 +3591,63 @@ def stream():
3513
3591
 
3514
3592
 
3515
3593
  attachments = data.get("attachments", [])
3594
+ print(f"[DEBUG] Received attachments: {attachments}")
3516
3595
  command_history = CommandHistory(app.config.get('DB_PATH'))
3517
- images = []
3596
+ images = []
3518
3597
  attachments_for_db = []
3519
3598
  attachment_paths_for_llm = []
3520
3599
 
3521
- message_id = generate_message_id()
3600
+ # Use frontend-provided ID if available, otherwise generate new one
3601
+ message_id = frontend_user_message_id if frontend_user_message_id else generate_message_id()
3522
3602
  if attachments:
3523
- attachment_dir = os.path.expanduser(f"~/.npcsh/attachments/{conversation_id+message_id}/")
3524
- os.makedirs(attachment_dir, exist_ok=True)
3603
+ print(f"[DEBUG] Processing {len(attachments)} attachments")
3525
3604
 
3526
3605
  for attachment in attachments:
3527
3606
  try:
3528
3607
  file_name = attachment["name"]
3529
-
3530
3608
  extension = file_name.split(".")[-1].upper() if "." in file_name else ""
3531
3609
  extension_mapped = extension_map.get(extension, "others")
3532
-
3533
- save_path = os.path.join(attachment_dir, file_name)
3534
3610
 
3535
- if "data" in attachment and attachment["data"]:
3536
- decoded_data = base64.b64decode(attachment["data"])
3537
- with open(save_path, "wb") as f:
3538
- f.write(decoded_data)
3539
-
3540
- elif "path" in attachment and attachment["path"]:
3541
- shutil.copy(attachment["path"], save_path)
3542
-
3543
- else:
3611
+ file_path = None
3612
+ file_content_bytes = None
3613
+
3614
+ # Use original path directly if available
3615
+ if "path" in attachment and attachment["path"]:
3616
+ file_path = attachment["path"]
3617
+ if os.path.exists(file_path):
3618
+ with open(file_path, "rb") as f:
3619
+ file_content_bytes = f.read()
3620
+
3621
+ # Fall back to base64 data if no path
3622
+ elif "data" in attachment and attachment["data"]:
3623
+ file_content_bytes = base64.b64decode(attachment["data"])
3624
+ # Save to temp file for LLM processing
3625
+ import tempfile
3626
+ temp_dir = tempfile.mkdtemp()
3627
+ file_path = os.path.join(temp_dir, file_name)
3628
+ with open(file_path, "wb") as f:
3629
+ f.write(file_content_bytes)
3630
+
3631
+ if not file_path:
3544
3632
  continue
3545
3633
 
3546
- attachment_paths_for_llm.append(save_path)
3634
+ attachment_paths_for_llm.append(file_path)
3547
3635
 
3548
3636
  if extension_mapped == "images":
3549
- images.append(save_path)
3550
-
3551
- with open(save_path, "rb") as f:
3552
- file_content_bytes = f.read()
3637
+ images.append(file_path)
3553
3638
 
3554
3639
  attachments_for_db.append({
3555
3640
  "name": file_name,
3556
- "path": save_path,
3641
+ "path": file_path,
3557
3642
  "type": extension_mapped,
3558
3643
  "data": file_content_bytes,
3559
- "size": os.path.getsize(save_path)
3644
+ "size": len(file_content_bytes) if file_content_bytes else 0
3560
3645
  })
3561
3646
 
3562
3647
  except Exception as e:
3563
3648
  print(f"Error processing attachment {attachment.get('name', 'N/A')}: {e}")
3564
3649
  traceback.print_exc()
3650
+ print(f"[DEBUG] After processing - images: {images}, attachment_paths_for_llm: {attachment_paths_for_llm}")
3565
3651
  messages = fetch_messages_for_conversation(conversation_id)
3566
3652
  if len(messages) == 0 and npc_object is not None:
3567
3653
  messages = [{'role': 'system',
@@ -3603,16 +3689,17 @@ def stream():
3603
3689
  api_url = None
3604
3690
 
3605
3691
  if exe_mode == 'chat':
3692
+ print(f"[DEBUG] Calling get_llm_response with images={images}, attachments={attachment_paths_for_llm}")
3606
3693
  stream_response = get_llm_response(
3607
- commandstr,
3608
- messages=messages,
3609
- images=images,
3694
+ commandstr,
3695
+ messages=messages,
3696
+ images=images,
3610
3697
  model=model,
3611
- provider=provider,
3612
- npc=npc_object,
3698
+ provider=provider,
3699
+ npc=npc_object,
3613
3700
  api_url = api_url,
3614
3701
  team=team_object,
3615
- stream=True,
3702
+ stream=True,
3616
3703
  attachments=attachment_paths_for_llm,
3617
3704
  auto_process_tool_calls=True,
3618
3705
  **tool_args
@@ -3924,25 +4011,27 @@ def stream():
3924
4011
  user_message_filled += txt
3925
4012
 
3926
4013
  # Only save user message if it's NOT a resend
3927
- if not is_resend: # ADD THIS CONDITION
4014
+ if not is_resend:
3928
4015
  save_conversation_message(
3929
- command_history,
3930
- conversation_id,
3931
- "user",
3932
- user_message_filled if len(user_message_filled) > 0 else commandstr,
3933
- wd=current_path,
3934
- model=model,
3935
- provider=provider,
4016
+ command_history,
4017
+ conversation_id,
4018
+ "user",
4019
+ user_message_filled if len(user_message_filled) > 0 else commandstr,
4020
+ wd=current_path,
4021
+ model=model,
4022
+ provider=provider,
3936
4023
  npc=npc_name,
3937
- team=team,
3938
- attachments=attachments_for_db,
4024
+ team=team,
4025
+ attachments=attachments_for_db,
3939
4026
  message_id=message_id,
4027
+ parent_message_id=user_parent_message_id, # For sub-branches: points to assistant message
3940
4028
  )
3941
4029
 
3942
4030
 
3943
4031
 
3944
4032
 
3945
- message_id = generate_message_id()
4033
+ # Use frontend-provided assistant message ID if available
4034
+ message_id = frontend_assistant_message_id if frontend_assistant_message_id else generate_message_id()
3946
4035
 
3947
4036
  def event_stream(current_stream_id):
3948
4037
  complete_response = []
@@ -4209,6 +4298,7 @@ def stream():
4209
4298
  reasoning_content=''.join(complete_reasoning) if complete_reasoning else None,
4210
4299
  tool_calls=accumulated_tool_calls if accumulated_tool_calls else None,
4211
4300
  tool_results=tool_results_for_db if tool_results_for_db else None,
4301
+ parent_message_id=parent_message_id,
4212
4302
  )
4213
4303
 
4214
4304
  # Start background tasks for memory extraction and context compression
@@ -4388,6 +4478,7 @@ def get_conversation_messages(conversation_id):
4388
4478
  ch.reasoning_content,
4389
4479
  ch.tool_calls,
4390
4480
  ch.tool_results,
4481
+ ch.parent_message_id,
4391
4482
  GROUP_CONCAT(ma.id) as attachment_ids,
4392
4483
  ROW_NUMBER() OVER (
4393
4484
  PARTITION BY ch.role, strftime('%s', ch.timestamp)
@@ -4431,9 +4522,10 @@ def get_conversation_messages(conversation_id):
4431
4522
  "reasoningContent": msg[11] if len(msg) > 11 else None,
4432
4523
  "toolCalls": parse_json_field(msg[12]) if len(msg) > 12 else None,
4433
4524
  "toolResults": parse_json_field(msg[13]) if len(msg) > 13 else None,
4525
+ "parentMessageId": msg[14] if len(msg) > 14 else None,
4434
4526
  "attachments": (
4435
4527
  get_message_attachments(msg[1])
4436
- if len(msg) > 1 and msg[14] # attachment_ids is at index 14
4528
+ if len(msg) > 1 and msg[15] # attachment_ids is now at index 15
4437
4529
  else []
4438
4530
  ),
4439
4531
  }
@@ -4448,6 +4540,157 @@ def get_conversation_messages(conversation_id):
4448
4540
  return jsonify({"error": str(e), "messages": []}), 500
4449
4541
 
4450
4542
 
4543
+ # ==================== CONVERSATION BRANCHES ====================
4544
+
4545
+ @app.route("/api/conversation/<conversation_id>/branches", methods=["GET"])
4546
+ def get_conversation_branches(conversation_id):
4547
+ """Get all branches for a conversation."""
4548
+ try:
4549
+ engine = get_db_connection()
4550
+ with engine.connect() as conn:
4551
+ query = text("""
4552
+ SELECT id, name, parent_branch_id, branch_from_message_id, created_at, metadata
4553
+ FROM conversation_branches
4554
+ WHERE conversation_id = :conversation_id
4555
+ ORDER BY created_at ASC
4556
+ """)
4557
+ result = conn.execute(query, {"conversation_id": conversation_id})
4558
+ branches = result.fetchall()
4559
+
4560
+ return jsonify({
4561
+ "branches": [
4562
+ {
4563
+ "id": b[0],
4564
+ "name": b[1],
4565
+ "parentBranchId": b[2],
4566
+ "branchFromMessageId": b[3],
4567
+ "createdAt": b[4],
4568
+ "metadata": json.loads(b[5]) if b[5] else None
4569
+ }
4570
+ for b in branches
4571
+ ],
4572
+ "error": None
4573
+ })
4574
+ except Exception as e:
4575
+ print(f"Error getting branches: {e}")
4576
+ return jsonify({"branches": [], "error": str(e)}), 500
4577
+
4578
+
4579
+ @app.route("/api/conversation/<conversation_id>/branches", methods=["POST"])
4580
+ def create_conversation_branch(conversation_id):
4581
+ """Create a new branch for a conversation."""
4582
+ try:
4583
+ data = request.get_json()
4584
+ branch_id = data.get("id") or generate_message_id()
4585
+ name = data.get("name", f"Branch {branch_id[:8]}")
4586
+ parent_branch_id = data.get("parentBranchId", "main")
4587
+ branch_from_message_id = data.get("branchFromMessageId")
4588
+ created_at = data.get("createdAt") or datetime.now().isoformat()
4589
+ metadata = json.dumps(data.get("metadata")) if data.get("metadata") else None
4590
+
4591
+ engine = get_db_connection()
4592
+ with engine.connect() as conn:
4593
+ query = text("""
4594
+ INSERT INTO conversation_branches
4595
+ (id, conversation_id, name, parent_branch_id, branch_from_message_id, created_at, metadata)
4596
+ VALUES (:id, :conversation_id, :name, :parent_branch_id, :branch_from_message_id, :created_at, :metadata)
4597
+ """)
4598
+ conn.execute(query, {
4599
+ "id": branch_id,
4600
+ "conversation_id": conversation_id,
4601
+ "name": name,
4602
+ "parent_branch_id": parent_branch_id,
4603
+ "branch_from_message_id": branch_from_message_id,
4604
+ "created_at": created_at,
4605
+ "metadata": metadata
4606
+ })
4607
+ conn.commit()
4608
+
4609
+ return jsonify({"success": True, "branchId": branch_id})
4610
+ except Exception as e:
4611
+ print(f"Error creating branch: {e}")
4612
+ return jsonify({"success": False, "error": str(e)}), 500
4613
+
4614
+
4615
+ @app.route("/api/conversation/<conversation_id>/branches/<branch_id>", methods=["DELETE"])
4616
+ def delete_conversation_branch(conversation_id, branch_id):
4617
+ """Delete a branch."""
4618
+ try:
4619
+ engine = get_db_connection()
4620
+ with engine.connect() as conn:
4621
+ # Delete branch metadata
4622
+ query = text("DELETE FROM conversation_branches WHERE id = :branch_id AND conversation_id = :conversation_id")
4623
+ conn.execute(query, {"branch_id": branch_id, "conversation_id": conversation_id})
4624
+
4625
+ # Optionally delete messages on this branch (or leave them orphaned)
4626
+ # For now, we leave them - they just won't be displayed
4627
+ conn.commit()
4628
+
4629
+ return jsonify({"success": True})
4630
+ except Exception as e:
4631
+ print(f"Error deleting branch: {e}")
4632
+ return jsonify({"success": False, "error": str(e)}), 500
4633
+
4634
+
4635
+ @app.route("/api/conversation/<conversation_id>/messages/branch/<branch_id>", methods=["GET"])
4636
+ def get_branch_messages(conversation_id, branch_id):
4637
+ """Get messages for a specific branch."""
4638
+ try:
4639
+ engine = get_db_connection()
4640
+ with engine.connect() as conn:
4641
+ # For 'main' branch, get messages with NULL or 'main' branch_id
4642
+ if branch_id == 'main':
4643
+ query = text("""
4644
+ SELECT message_id, timestamp, role, content, model, provider, npc, reasoning_content, tool_calls, tool_results
4645
+ FROM conversation_history
4646
+ WHERE conversation_id = :conversation_id
4647
+ AND (branch_id IS NULL OR branch_id = 'main')
4648
+ ORDER BY timestamp ASC, id ASC
4649
+ """)
4650
+ else:
4651
+ query = text("""
4652
+ SELECT message_id, timestamp, role, content, model, provider, npc, reasoning_content, tool_calls, tool_results
4653
+ FROM conversation_history
4654
+ WHERE conversation_id = :conversation_id
4655
+ AND branch_id = :branch_id
4656
+ ORDER BY timestamp ASC, id ASC
4657
+ """)
4658
+
4659
+ result = conn.execute(query, {"conversation_id": conversation_id, "branch_id": branch_id})
4660
+ messages = result.fetchall()
4661
+
4662
+ def parse_json_field(value):
4663
+ if not value:
4664
+ return None
4665
+ try:
4666
+ return json.loads(value)
4667
+ except:
4668
+ return None
4669
+
4670
+ return jsonify({
4671
+ "messages": [
4672
+ {
4673
+ "message_id": m[0],
4674
+ "timestamp": m[1],
4675
+ "role": m[2],
4676
+ "content": m[3],
4677
+ "model": m[4],
4678
+ "provider": m[5],
4679
+ "npc": m[6],
4680
+ "reasoningContent": m[7],
4681
+ "toolCalls": parse_json_field(m[8]),
4682
+ "toolResults": parse_json_field(m[9])
4683
+ }
4684
+ for m in messages
4685
+ ],
4686
+ "error": None
4687
+ })
4688
+ except Exception as e:
4689
+ print(f"Error getting branch messages: {e}")
4690
+ return jsonify({"messages": [], "error": str(e)}), 500
4691
+
4692
+
4693
+ # ==================== END CONVERSATION BRANCHES ====================
4451
4694
 
4452
4695
  @app.after_request
4453
4696
  def after_request(response):
@@ -4940,6 +5183,140 @@ def download_hf_model():
4940
5183
  return jsonify({'error': str(e)}), 500
4941
5184
 
4942
5185
 
5186
+ @app.route('/api/models/hf/search', methods=['GET'])
5187
+ def search_hf_models():
5188
+ """Search HuggingFace for GGUF models."""
5189
+ query = request.args.get('q', '')
5190
+ limit = int(request.args.get('limit', 20))
5191
+
5192
+ if not query:
5193
+ return jsonify({'models': [], 'error': 'No search query provided'})
5194
+
5195
+ try:
5196
+ from huggingface_hub import HfApi
5197
+
5198
+ api = HfApi()
5199
+ # Search for models with GGUF in name or tags
5200
+ models = api.list_models(
5201
+ search=query,
5202
+ filter="gguf",
5203
+ limit=limit,
5204
+ sort="downloads",
5205
+ direction=-1
5206
+ )
5207
+
5208
+ results = []
5209
+ for model in models:
5210
+ results.append({
5211
+ 'id': model.id,
5212
+ 'author': model.author,
5213
+ 'downloads': model.downloads,
5214
+ 'likes': model.likes,
5215
+ 'tags': model.tags[:10] if model.tags else [],
5216
+ 'last_modified': model.last_modified.isoformat() if model.last_modified else None,
5217
+ })
5218
+
5219
+ return jsonify({'models': results, 'error': None})
5220
+ except ImportError:
5221
+ return jsonify({'error': 'huggingface_hub not installed. Run: pip install huggingface_hub'}), 500
5222
+ except Exception as e:
5223
+ print(f"Error searching HF models: {e}")
5224
+ return jsonify({'error': str(e)}), 500
5225
+
5226
+
5227
+ @app.route('/api/models/hf/files', methods=['GET'])
5228
+ def list_hf_model_files():
5229
+ """List GGUF files in a HuggingFace repository."""
5230
+ repo_id = request.args.get('repo_id', '')
5231
+
5232
+ if not repo_id:
5233
+ return jsonify({'files': [], 'error': 'No repo_id provided'})
5234
+
5235
+ try:
5236
+ from huggingface_hub import list_repo_files, repo_info
5237
+
5238
+ # Get repo info
5239
+ info = repo_info(repo_id)
5240
+
5241
+ # List all files
5242
+ all_files = list_repo_files(repo_id)
5243
+
5244
+ # Filter for GGUF files and get their sizes
5245
+ gguf_files = []
5246
+ for f in all_files:
5247
+ if f.endswith('.gguf'):
5248
+ # Try to get file size from siblings
5249
+ size = None
5250
+ for sibling in info.siblings or []:
5251
+ if sibling.rfilename == f:
5252
+ size = sibling.size
5253
+ break
5254
+
5255
+ # Parse quantization from filename
5256
+ quant = 'unknown'
5257
+ for q in ['Q2_K', 'Q3_K_S', 'Q3_K_M', 'Q3_K_L', 'Q4_0', 'Q4_1', 'Q4_K_S', 'Q4_K_M', 'Q5_0', 'Q5_1', 'Q5_K_S', 'Q5_K_M', 'Q6_K', 'Q8_0', 'F16', 'F32', 'IQ1', 'IQ2', 'IQ3', 'IQ4']:
5258
+ if q.lower() in f.lower() or q in f:
5259
+ quant = q
5260
+ break
5261
+
5262
+ gguf_files.append({
5263
+ 'filename': f,
5264
+ 'size': size,
5265
+ 'size_gb': round(size / (1024**3), 2) if size else None,
5266
+ 'quantization': quant,
5267
+ })
5268
+
5269
+ # Sort by quantization quality (Q4_K_M is usually best balance)
5270
+ quant_order = {'Q4_K_M': 0, 'Q4_K_S': 1, 'Q5_K_M': 2, 'Q5_K_S': 3, 'Q3_K_M': 4, 'Q6_K': 5, 'Q8_0': 6}
5271
+ gguf_files.sort(key=lambda x: quant_order.get(x['quantization'], 99))
5272
+
5273
+ return jsonify({
5274
+ 'repo_id': repo_id,
5275
+ 'files': gguf_files,
5276
+ 'total_files': len(all_files),
5277
+ 'gguf_count': len(gguf_files),
5278
+ 'error': None
5279
+ })
5280
+ except ImportError:
5281
+ return jsonify({'error': 'huggingface_hub not installed. Run: pip install huggingface_hub'}), 500
5282
+ except Exception as e:
5283
+ print(f"Error listing HF files: {e}")
5284
+ return jsonify({'error': str(e)}), 500
5285
+
5286
+
5287
+ @app.route('/api/models/hf/download_file', methods=['POST'])
5288
+ def download_hf_file():
5289
+ """Download a specific file from a HuggingFace repository."""
5290
+ data = request.json
5291
+ repo_id = data.get('repo_id', '')
5292
+ filename = data.get('filename', '')
5293
+ target_dir = data.get('target_dir', '~/.npcsh/models/gguf')
5294
+
5295
+ if not repo_id or not filename:
5296
+ return jsonify({'error': 'repo_id and filename are required'}), 400
5297
+
5298
+ target_dir = os.path.expanduser(target_dir)
5299
+ os.makedirs(target_dir, exist_ok=True)
5300
+
5301
+ try:
5302
+ from huggingface_hub import hf_hub_download
5303
+
5304
+ print(f"Downloading {filename} from {repo_id} to {target_dir}")
5305
+ path = hf_hub_download(
5306
+ repo_id=repo_id,
5307
+ filename=filename,
5308
+ local_dir=target_dir,
5309
+ local_dir_use_symlinks=False
5310
+ )
5311
+
5312
+ return jsonify({'path': path, 'error': None})
5313
+ except ImportError:
5314
+ return jsonify({'error': 'huggingface_hub not installed. Run: pip install huggingface_hub'}), 500
5315
+ except Exception as e:
5316
+ print(f"Error downloading HF file: {e}")
5317
+ return jsonify({'error': str(e)}), 500
5318
+
5319
+
4943
5320
  # ============== Local Model Provider Status ==============
4944
5321
  @app.route('/api/models/local/scan', methods=['GET'])
4945
5322
  def scan_local_models():
@@ -5003,6 +5380,213 @@ def get_local_model_status():
5003
5380
  return jsonify({'status': 'unknown', 'running': False, 'error': f'Unknown provider: {provider}'})
5004
5381
 
5005
5382
 
5383
+ # ============== Audio / Voice ==============
5384
+ @app.route('/api/audio/tts', methods=['POST'])
5385
+ def text_to_speech_endpoint():
5386
+ """Convert text to speech and return audio file."""
5387
+ try:
5388
+ import base64
5389
+ from npcpy.gen.audio_gen import (
5390
+ text_to_speech, get_available_engines,
5391
+ pcm16_to_wav, KOKORO_VOICES
5392
+ )
5393
+
5394
+ data = request.json or {}
5395
+ text = data.get('text', '')
5396
+ engine = data.get('engine', 'kokoro') # kokoro, elevenlabs, openai, gemini, gtts
5397
+ voice = data.get('voice', 'af_heart')
5398
+
5399
+ if not text:
5400
+ return jsonify({'success': False, 'error': 'No text provided'}), 400
5401
+
5402
+ # Check engine availability
5403
+ engines = get_available_engines()
5404
+ if engine not in engines:
5405
+ return jsonify({'success': False, 'error': f'Unknown engine: {engine}'}), 400
5406
+
5407
+ if not engines[engine]['available']:
5408
+ # Try fallback to kokoro or gtts
5409
+ if engines.get('kokoro', {}).get('available'):
5410
+ engine = 'kokoro'
5411
+ elif engines.get('gtts', {}).get('available'):
5412
+ engine = 'gtts'
5413
+ voice = 'en'
5414
+ else:
5415
+ return jsonify({
5416
+ 'success': False,
5417
+ 'error': f'{engine} not available. Install: {engines[engine].get("install", engines[engine].get("requires", ""))}'
5418
+ }), 400
5419
+
5420
+ # Generate audio
5421
+ audio_bytes = text_to_speech(text, engine=engine, voice=voice)
5422
+
5423
+ # Determine format
5424
+ if engine in ['kokoro']:
5425
+ audio_format = 'wav'
5426
+ elif engine in ['elevenlabs', 'gtts']:
5427
+ audio_format = 'mp3'
5428
+ elif engine in ['openai', 'gemini']:
5429
+ # These return PCM16, convert to WAV
5430
+ audio_bytes = pcm16_to_wav(audio_bytes, sample_rate=24000)
5431
+ audio_format = 'wav'
5432
+ else:
5433
+ audio_format = 'wav'
5434
+
5435
+ audio_data = base64.b64encode(audio_bytes).decode('utf-8')
5436
+
5437
+ return jsonify({
5438
+ 'success': True,
5439
+ 'audio': audio_data,
5440
+ 'format': audio_format,
5441
+ 'engine': engine,
5442
+ 'voice': voice
5443
+ })
5444
+
5445
+ except ImportError as e:
5446
+ return jsonify({'success': False, 'error': f'TTS dependency not installed: {e}'}), 500
5447
+ except Exception as e:
5448
+ print(f"TTS error: {e}")
5449
+ traceback.print_exc()
5450
+ return jsonify({'success': False, 'error': str(e)}), 500
5451
+
5452
+
5453
+ @app.route('/api/audio/stt', methods=['POST'])
5454
+ def speech_to_text_endpoint():
5455
+ """Convert speech audio to text using various STT engines."""
5456
+ try:
5457
+ import tempfile
5458
+ import base64
5459
+ from npcpy.data.audio import speech_to_text, get_available_stt_engines
5460
+
5461
+ data = request.json or {}
5462
+ audio_data = data.get('audio') # Base64 encoded audio
5463
+ audio_format = data.get('format', 'webm') # webm, wav, mp3
5464
+ language = data.get('language') # None for auto-detect
5465
+ engine = data.get('engine', 'whisper') # whisper, openai, gemini, elevenlabs, groq
5466
+ model_size = data.get('model', 'base') # For whisper: tiny, base, small, medium, large
5467
+
5468
+ if not audio_data:
5469
+ return jsonify({'success': False, 'error': 'No audio data provided'}), 400
5470
+
5471
+ # Decode base64 audio
5472
+ audio_bytes = base64.b64decode(audio_data)
5473
+
5474
+ # Convert to wav if needed
5475
+ wav_bytes = audio_bytes
5476
+ if audio_format != 'wav':
5477
+ with tempfile.NamedTemporaryFile(suffix=f'.{audio_format}', delete=False) as f:
5478
+ f.write(audio_bytes)
5479
+ temp_path = f.name
5480
+
5481
+ wav_path = temp_path.replace(f'.{audio_format}', '.wav')
5482
+ converted = False
5483
+
5484
+ # Try ffmpeg first
5485
+ try:
5486
+ subprocess.run([
5487
+ 'ffmpeg', '-y', '-i', temp_path,
5488
+ '-acodec', 'pcm_s16le', '-ac', '1', '-ar', '16000',
5489
+ wav_path
5490
+ ], check=True, capture_output=True)
5491
+ with open(wav_path, 'rb') as f:
5492
+ wav_bytes = f.read()
5493
+ converted = True
5494
+ os.unlink(wav_path)
5495
+ except FileNotFoundError:
5496
+ pass
5497
+ except subprocess.CalledProcessError:
5498
+ pass
5499
+
5500
+ # Try pydub as fallback
5501
+ if not converted:
5502
+ try:
5503
+ from pydub import AudioSegment
5504
+ audio = AudioSegment.from_file(temp_path, format=audio_format)
5505
+ audio = audio.set_frame_rate(16000).set_channels(1)
5506
+ import io
5507
+ wav_buffer = io.BytesIO()
5508
+ audio.export(wav_buffer, format='wav')
5509
+ wav_bytes = wav_buffer.getvalue()
5510
+ converted = True
5511
+ except ImportError:
5512
+ pass
5513
+ except Exception as e:
5514
+ print(f"pydub conversion failed: {e}")
5515
+
5516
+ os.unlink(temp_path)
5517
+
5518
+ if not converted:
5519
+ return jsonify({
5520
+ 'success': False,
5521
+ 'error': 'Audio conversion failed. Install ffmpeg: sudo apt-get install ffmpeg'
5522
+ }), 500
5523
+
5524
+ # Use the unified speech_to_text function
5525
+ result = speech_to_text(
5526
+ wav_bytes,
5527
+ engine=engine,
5528
+ language=language,
5529
+ model_size=model_size
5530
+ )
5531
+
5532
+ return jsonify({
5533
+ 'success': True,
5534
+ 'text': result.get('text', ''),
5535
+ 'language': result.get('language', language or 'en'),
5536
+ 'segments': result.get('segments', [])
5537
+ })
5538
+
5539
+ except Exception as e:
5540
+ print(f"STT error: {e}")
5541
+ traceback.print_exc()
5542
+ return jsonify({'success': False, 'error': str(e)}), 500
5543
+
5544
+
5545
+ @app.route('/api/audio/stt/engines', methods=['GET'])
5546
+ def get_stt_engines_endpoint():
5547
+ """Get available STT engines."""
5548
+ try:
5549
+ from npcpy.data.audio import get_available_stt_engines
5550
+ engines = get_available_stt_engines()
5551
+ return jsonify({'success': True, 'engines': engines})
5552
+ except Exception as e:
5553
+ print(f"Error getting STT engines: {e}")
5554
+ return jsonify({'success': False, 'error': str(e)}), 500
5555
+
5556
+
5557
+ @app.route('/api/audio/voices', methods=['GET'])
5558
+ def get_available_voices_endpoint():
5559
+ """Get available TTS voices/engines."""
5560
+ try:
5561
+ from npcpy.gen.audio_gen import get_available_engines, get_available_voices
5562
+
5563
+ engines_info = get_available_engines()
5564
+ result = {}
5565
+
5566
+ for engine_id, info in engines_info.items():
5567
+ voices = get_available_voices(engine_id) if info['available'] else []
5568
+ result[engine_id] = {
5569
+ 'name': info['name'],
5570
+ 'type': info.get('type', 'unknown'),
5571
+ 'available': info['available'],
5572
+ 'description': info.get('description', ''),
5573
+ 'default': engine_id == 'kokoro',
5574
+ 'voices': voices
5575
+ }
5576
+ if not info['available']:
5577
+ if 'install' in info:
5578
+ result[engine_id]['install'] = info['install']
5579
+ if 'requires' in info:
5580
+ result[engine_id]['requires'] = info['requires']
5581
+
5582
+ return jsonify({'success': True, 'engines': result})
5583
+
5584
+ except Exception as e:
5585
+ print(f"Error getting voices: {e}")
5586
+ traceback.print_exc()
5587
+ return jsonify({'success': False, 'error': str(e)}), 500
5588
+
5589
+
5006
5590
  # ============== Activity Tracking ==============
5007
5591
  @app.route('/api/activity/track', methods=['POST'])
5008
5592
  def track_activity():
@@ -5018,6 +5602,56 @@ def track_activity():
5018
5602
  return jsonify({'success': False, 'error': str(e)}), 500
5019
5603
 
5020
5604
 
5605
# ============== Studio Action Results ==============
# Storage for pending action results that agents are waiting for.
# NOTE(review): entries are only removed when retrieved; results that are
# never fetched accumulate for the lifetime of the process — confirm this
# is acceptable or add an eviction policy.
_studio_action_results = {}

@app.route('/api/studio/action_result', methods=['POST'])
def studio_action_result():
    """
    Receive action results from the frontend after executing studio.* tool calls.
    This allows the agent to continue with the result of UI actions.

    Expects JSON with 'streamId', 'toolId', and optional 'result'; stores the
    result under the composite key "<streamId>_<toolId>".
    """
    try:
        payload = request.json or {}
        stream_id = payload.get('streamId')
        tool_id = payload.get('toolId')
        action_result = payload.get('result', {})

        if stream_id and tool_id:
            key = f"{stream_id}_{tool_id}"
            _studio_action_results[key] = action_result

            print(f"[Studio] Received action result for {key}: {action_result.get('success', False)}")
            return jsonify({'success': True, 'stored': key})

        # Both identifiers are required to build the lookup key.
        return jsonify({'success': False, 'error': 'Missing streamId or toolId'}), 400
    except Exception as e:
        print(f"Error storing studio action result: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
5635
@app.route('/api/studio/action_result/<stream_id>/<tool_id>', methods=['GET'])
def get_studio_action_result(stream_id, tool_id):
    """
    Retrieve a pending action result for the agent to continue.

    Returns HTTP 202 with {'pending': True} while no result has been posted
    for this (stream_id, tool_id) pair; otherwise returns the stored result
    exactly once — it is removed from the pending store on read.
    """
    try:
        key = f"{stream_id}_{tool_id}"
        # Atomic get-and-remove: the previous get() followed by `del` was
        # race-prone under a threaded server (two readers could both see the
        # value and one would then 500 on the missing key), and a stored
        # literal None would never be deleted. pop() fixes both while keeping
        # the one-time-use contract.
        result = _studio_action_results.pop(key, None)

        if result is None:
            return jsonify({'success': False, 'pending': True}), 202

        return jsonify({'success': True, 'result': result})
    except Exception as e:
        print(f"Error retrieving studio action result: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
5655
  def start_flask_server(
5022
5656
  port=5337,
5023
5657
  cors_origins=None,
@@ -5071,8 +5705,22 @@ if __name__ == "__main__":
5071
5705
 
5072
5706
SETTINGS_FILE = Path(os.path.expanduser("~/.npcshrc"))

# Use standard npcsh paths
db_path = os.path.expanduser("~/.npcsh/npcsh_history.db")
user_npc_directory = os.path.expanduser("~/.npcsh/npc_team")

# Ensure directories exist before anything tries to open the DB.
os.makedirs(os.path.dirname(db_path), exist_ok=True)
os.makedirs(user_npc_directory, exist_ok=True)

# Initialize base NPCs if needed (creates the ~/.npcsh structure). The
# server can still run with a partial setup, so failures are warnings only.
try:
    initialize_base_npcs_if_needed(db_path)
    print("[SERVE] Base NPCs initialized")  # fixed: was an f-string with no placeholders
except Exception as e:
    print(f"[SERVE] Warning: Failed to initialize base NPCs: {e}")

# Get port from environment or use default. A malformed INCOGNIDE_PORT
# previously crashed startup with ValueError; fall back to 5337 instead.
try:
    port = int(os.environ.get('INCOGNIDE_PORT', 5337))
except ValueError:
    print(f"[SERVE] Warning: invalid INCOGNIDE_PORT={os.environ.get('INCOGNIDE_PORT')!r}; using 5337")
    port = 5337

start_flask_server(db_path=db_path, user_npc_directory=user_npc_directory, port=port)