parishad 0.1.7-py3-none-any.whl → 0.1.8-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
parishad/cli/code.py CHANGED
@@ -16,6 +16,8 @@ import sys
16
16
  import subprocess
17
17
  import socket
18
18
  import shutil
19
+ import threading
20
+ import queue
19
21
  from pathlib import Path
20
22
  from typing import Optional, Dict, List, Tuple
21
23
 
@@ -2187,6 +2189,40 @@ class ParishadApp(App):
2187
2189
  """Message to open setup screen from worker thread."""
2188
2190
  pass
2189
2191
 
2192
+ # Custom messages for non-blocking thread worker communication
2193
+ class LogMessage(Message):
2194
+ """Non-blocking log message from worker thread."""
2195
+ def __init__(self, text: str) -> None:
2196
+ self.text = text
2197
+ super().__init__()
2198
+
2199
+ class SabhaResultReady(Message):
2200
+ """Non-blocking message when Sabha result is ready."""
2201
+ def __init__(self, trace) -> None:
2202
+ self.trace = trace
2203
+ super().__init__()
2204
+
2205
+ class SabhaError(Message):
2206
+ """Non-blocking message when Sabha encounters an error."""
2207
+ def __init__(self, error: Exception, traceback_str: str) -> None:
2208
+ self.error = error
2209
+ self.traceback_str = traceback_str
2210
+ super().__init__()
2211
+
2212
+ class CouncilReady(Message):
2213
+ """Non-blocking message when council initialization completes."""
2214
+ def __init__(self, success: bool, profile: str = "", error_msg: str = "") -> None:
2215
+ self.success = success
2216
+ self.profile = profile
2217
+ self.error_msg = error_msg
2218
+ super().__init__()
2219
+
2220
+ class WorkerComplete(Message):
2221
+ """Non-blocking message when any worker completes."""
2222
+ def __init__(self, worker_type: str) -> None:
2223
+ self.worker_type = worker_type
2224
+ super().__init__()
2225
+
2190
2226
  CSS = CSS
2191
2227
  SCREENS = {"setup": SetupScreen}
2192
2228
  BINDINGS = [
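
Aside, illustrating the pattern above (not part of the package diff): the nested `Message` subclasses follow Textual's custom-message convention, in which a message defined inside `ParishadApp` is dispatched to a handler named `on_parishad_app_<message_name>` — the same convention the handlers later in this diff rely on. A minimal standalone sketch of that round trip, assuming a recent Textual release where `post_message` may be called from a thread worker (as the ParishadApp code below does); the app, widget id, and sleep are invented for the demo:

```python
import time

from textual.app import App, ComposeResult
from textual.message import Message
from textual.widgets import RichLog


class SketchApp(App):
    """Toy app showing the nested-Message + thread-worker pattern."""

    class LogMessage(Message):
        """Carries a line of text from a background thread to the UI."""
        def __init__(self, text: str) -> None:
            self.text = text
            super().__init__()

    def compose(self) -> ComposeResult:
        yield RichLog(id="log")

    def on_mount(self) -> None:
        # thread=True runs the callable in a real OS thread, so blocking work
        # (model loading, inference, ...) does not stall the event loop.
        self.run_worker(self._blocking_job, thread=True, exclusive=True)

    def _blocking_job(self) -> None:
        time.sleep(1.0)  # stand-in for the blocking work
        # Assumption: post_message is safe to call from the worker thread,
        # which is what the ParishadApp code in this diff relies on.
        self.post_message(self.LogMessage("background job finished"))

    # Handler name is derived from the defining class: SketchApp.LogMessage
    # -> on_sketch_app_log_message (ParishadApp uses the same convention).
    def on_sketch_app_log_message(self, message: LogMessage) -> None:
        self.query_one("#log", RichLog).write(message.text)


if __name__ == "__main__":
    SketchApp().run()
```

Running the sketch writes "background job finished" into the log widget about a second after launch, without the UI ever freezing.
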
@@ -2204,6 +2240,12 @@ class ParishadApp(App):
2204
2240
  self._initializing = False # Prevent concurrent initialization
2205
2241
  self._processing_query = False # Prevent concurrent query processing
2206
2242
 
2243
+ # CRITICAL FOR WINDOWS: Thread-safe result queue for native threading
2244
+ # This bypasses Textual's worker system which causes freezes on Windows
2245
+ self._result_queue = queue.Queue()
2246
+ self._worker_thread = None
2247
+ self._subprocess = None # For subprocess-based inference
2248
+
2207
2249
  # Load config from disk
2208
2250
  self.config = load_parishad_config()
2209
2251
 
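
Aside on the `_result_queue` comment above (editor's sketch, not package code): the pattern is a plain producer/consumer handoff — a native thread does the blocking work and puts its result on a thread-safe `queue.Queue`, while the UI side polls with `get_nowait()` from a timer so it never blocks. A stdlib-only sketch with a `sleep` standing in for model loading:

```python
import queue
import threading
import time

result_queue: "queue.Queue[tuple[str, object]]" = queue.Queue()

def blocking_worker() -> None:
    """Stand-in for council.run(): blocks, then hands its result to the queue."""
    time.sleep(1.0)
    result_queue.put(("success", "trace-placeholder"))

def poll_once() -> bool:
    """One poll tick; the TUI arms this via set_timer(0.1, ...) instead of a loop."""
    try:
        kind, payload = result_queue.get_nowait()  # non-blocking check
    except queue.Empty:
        return False  # nothing yet, re-arm the timer
    print(f"{kind}: {payload}")  # in the TUI this is where the UI gets updated
    return True

threading.Thread(target=blocking_worker, daemon=True).start()
while not poll_once():
    time.sleep(0.1)
```
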
@@ -2306,8 +2348,9 @@ class ParishadApp(App):
2306
2348
  if self._initializing:
2307
2349
  return
2308
2350
 
2309
- # Run model loading asynchronously to avoid freezing UI
2310
- self.run_worker(self._async_initialize_council(), exclusive=True)
2351
+ # Run model loading in a thread worker to avoid freezing UI on Windows
2352
+ # CRITICAL: Using thread=True ensures blocking model loading doesn't freeze the TUI
2353
+ self.run_worker(self._initialize_council_thread_worker, thread=True, exclusive=True)
2311
2354
 
2312
2355
  async def _async_initialize_council(self) -> None:
2313
2356
  """Async worker to initialize Sabha council without blocking UI."""
@@ -2442,6 +2485,117 @@ class ParishadApp(App):
2442
2485
  finally:
2443
2486
  self._initializing = False
2444
2487
 
2488
+ def _initialize_council_thread_worker(self) -> None:
2489
+ """
2490
+ Initialize Sabha council in a dedicated thread worker.
2491
+
2492
+ CRITICAL FOR WINDOWS: This method runs in a real OS thread (not an asyncio executor)
2493
+ which prevents the TUI from freezing during blocking model loading.
2494
+
2495
+ UI updates are sent via non-blocking post_message to prevent deadlock.
2496
+ """
2497
+ if self._initializing:
2498
+ self.post_message(self.LogMessage("[yellow]Already initializing...[/yellow]\n"))
2499
+ return
2500
+
2501
+ self._initializing = True
2502
+
2503
+ try:
2504
+ from parishad.orchestrator.engine import Parishad
2505
+ from parishad.config.user_config import load_user_config
2506
+
2507
+ self.post_message(self.LogMessage("[cyan]🔄 Initializing Sabha council...[/cyan]\n"))
2508
+
2509
+ # Load user config for profile (same as CLI run does)
2510
+ user_cfg = load_user_config()
2511
+ profile = user_cfg.default_profile
2512
+ mode = user_cfg.default_mode
2513
+
2514
+ self.post_message(self.LogMessage(f"[dim] • Profile: {profile}[/dim]\n"))
2515
+ self.post_message(self.LogMessage(f"[dim] • Mode: {mode}[/dim]\n"))
2516
+
2517
+ # Get pipeline config from Sabha selection
2518
+ if self.config:
2519
+ config_name = self.config.get_pipeline_config()
2520
+ self.post_message(self.LogMessage(f"[dim] • Pipeline: {config_name}[/dim]\n"))
2521
+ else:
2522
+ config_name = "core" # Default fallback
2523
+ self.post_message(self.LogMessage(f"[dim] • Pipeline: {config_name} (default)[/dim]\n"))
2524
+
2525
+ self.post_message(self.LogMessage(f"[yellow]⏳ Loading models (this may take 30-60 seconds)...[/yellow]\n"))
2526
+ self.post_message(self.LogMessage(f"[dim] • Creating Parishad engine...[/dim]\n"))
2527
+
2528
+ # Build user_forced_config from model_map
2529
+ user_forced_config = {}
2530
+ if self.config and self.config.model_map:
2531
+ # Initialize manager to resolve paths
2532
+ from parishad.models.downloader import ModelManager
2533
+ model_manager = ModelManager()
2534
+
2535
+ msg_backend = self.config.backend or "ollama"
2536
+
2537
+ for slot, model_id in self.config.model_map.items():
2538
+ # Default to current config backend
2539
+ current_backend = msg_backend
2540
+ model_file = None
2541
+
2542
+ # Check if it's a known model to resolve backend/path
2543
+ model_info = model_manager.registry.get(model_id)
2544
+ if model_info:
2545
+ # Handle Enum comparison correctly
2546
+ source = model_info.source.value if hasattr(model_info.source, "value") else str(model_info.source)
2547
+
2548
+ if source == "local":
2549
+ current_backend = "llama_cpp"
2550
+ model_file = str(model_info.path)
2551
+ elif source == "ollama":
2552
+ current_backend = "ollama"
2553
+ elif source == "mlx":
2554
+ current_backend = "mlx"
2555
+ else:
2556
+ # Fallback heuristics if not in registry
2557
+ if model_id.startswith("local:"):
2558
+ current_backend = "llama_cpp"
2559
+ elif model_id.startswith("ollama:") or ":" in model_id:
2560
+ current_backend = "ollama"
2561
+
2562
+ user_forced_config[slot] = {
2563
+ "model_id": model_id,
2564
+ "backend_type": current_backend
2565
+ }
2566
+ if model_file:
2567
+ user_forced_config[slot]["model_file"] = model_file
2568
+
2569
+ # Create the Parishad engine (blocking call in this thread)
2570
+ self.council = Parishad(
2571
+ config=config_name,
2572
+ model_config_path=None, # Let engine use profiles + models.yaml
2573
+ profile=profile,
2574
+ pipeline_config_path=None,
2575
+ trace_dir=None,
2576
+ mock=False,
2577
+ stub=False,
2578
+ mode=mode,
2579
+ user_forced_config=user_forced_config or None,
2580
+ no_retry=False,
2581
+ )
2582
+
2583
+ if self.council:
2584
+ self.post_message(self.CouncilReady(success=True, profile=profile))
2585
+ else:
2586
+ self.post_message(self.CouncilReady(success=False, error_msg="Council initialization returned None"))
2587
+
2588
+ except Exception as e:
2589
+ import traceback
2590
+ tb = traceback.format_exc()
2591
+ self.post_message(self.CouncilReady(
2592
+ success=False,
2593
+ error_msg=f"{type(e).__name__}: {e}\n{tb}"
2594
+ ))
2595
+ self.council = None
2596
+ finally:
2597
+ self._initializing = False
2598
+
2445
2599
  # DEPRECATED: TUI now uses same engine setup as CLI 'parishad run'
2446
2600
  # This method is no longer called
2447
2601
  # def _create_model_config_from_tui(self):
@@ -2626,8 +2780,145 @@ class ParishadApp(App):
2626
2780
  self.log_message("[yellow]⚠ Already processing a query, please wait...[/yellow]")
2627
2781
  return
2628
2782
 
2629
- # Run Sabha execution asynchronously to prevent UI freezing
2630
- self.run_worker(self._async_run_sabha(final_prompt, progress), exclusive=True)
2783
+ # CRITICAL FOR WINDOWS: The GIL blocks Textual's event loop during llama-cpp inference
2784
+ # even when using threads. We use subprocess.Popen to spawn a SEPARATE Python process.
2785
+ # This is the only way to keep the TUI responsive on Windows with llama-cpp-python.
2786
+ # See: docs/TUI_FREEZE_WINDOWS.md for full technical explanation.
2787
+ self._processing_query = True
2788
+
2789
+ # Save query to temp file for subprocess to read
2790
+ query_file = Path.home() / ".parishad" / "temp_query.txt"
2791
+ result_file = Path.home() / ".parishad" / "temp_result.json"
2792
+ status_file = Path.home() / ".parishad" / "temp_status.txt"
2793
+
2794
+ # Clean up old files
2795
+ for f in [result_file, status_file]:
2796
+ if f.exists():
2797
+ f.unlink()
2798
+
2799
+ query_file.write_text(final_prompt, encoding="utf-8")
2800
+
2801
+ # Get the Python executable path
2802
+ python_exe = sys.executable
2803
+
2804
+ # Build inline script that runs inference and saves result
2805
+ inline_script = f'''
2806
+ import sys
2807
+ import json
2808
+ from pathlib import Path
2809
+
2810
+ query_file = Path(r"{query_file}")
2811
+ result_file = Path(r"{result_file}")
2812
+ status_file = Path(r"{status_file}")
2813
+
2814
+ try:
2815
+ status_file.write_text("starting", encoding="utf-8")
2816
+
2817
+ query = query_file.read_text(encoding="utf-8")
2818
+
2819
+ status_file.write_text("loading", encoding="utf-8")
2820
+
2821
+ # Import and run inference
2822
+ from parishad.orchestrator.engine import Parishad
2823
+ from parishad.config.user_config import load_user_config
2824
+
2825
+ user_cfg = load_user_config()
2826
+
2827
+ council = Parishad(
2828
+ config="core",
2829
+ profile=user_cfg.default_profile,
2830
+ mode=user_cfg.default_mode,
2831
+ )
2832
+
2833
+ status_file.write_text("running", encoding="utf-8")
2834
+
2835
+ trace = council.run(query)
2836
+
2837
+ status_file.write_text("complete", encoding="utf-8")
2838
+
2839
+ # Save result as JSON
2840
+ result = {{
2841
+ "success": True,
2842
+ "roles": len(trace.roles),
2843
+ "tokens": trace.total_tokens,
2844
+ "final_answer": trace.final_answer.final_answer if trace.final_answer else None,
2845
+ "error": trace.error,
2846
+ }}
2847
+ result_file.write_text(json.dumps(result, indent=2), encoding="utf-8")
2848
+
2849
+ except Exception as e:
2850
+ import traceback
2851
+ result = {{
2852
+ "success": False,
2853
+ "error": str(e),
2854
+ "traceback": traceback.format_exc()
2855
+ }}
2856
+ result_file.write_text(json.dumps(result, indent=2), encoding="utf-8")
2857
+ status_file.write_text("error", encoding="utf-8")
2858
+ '''
2859
+
2860
+ # Write script to temp file
2861
+ script_file = Path.home() / ".parishad" / "temp_inference_script.py"
2862
+ script_file.write_text(inline_script, encoding="utf-8")
2863
+
2864
+ # Launch subprocess - runs in a COMPLETELY SEPARATE PROCESS (no GIL sharing!)
2865
+ # Hide the console window on Windows (STARTUPINFO with SW_HIDE) to avoid a popup
2866
+ startupinfo = None
2867
+ if sys.platform == "win32":
2868
+ startupinfo = subprocess.STARTUPINFO()
2869
+ startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
2870
+ startupinfo.wShowWindow = 0 # SW_HIDE
2871
+
2872
+ self._subprocess = subprocess.Popen(
2873
+ [python_exe, str(script_file)],
2874
+ stdout=subprocess.PIPE,
2875
+ stderr=subprocess.PIPE,
2876
+ startupinfo=startupinfo,
2877
+ cwd=str(self.cwd),
2878
+ )
2879
+
2880
+ # Poll for result file
2881
+ def poll_subprocess_result():
2882
+ # Check if result file exists (inference complete)
2883
+ if result_file.exists():
2884
+ try:
2885
+ result = json.loads(result_file.read_text(encoding="utf-8"))
2886
+
2887
+ if result.get("success"):
2888
+ # Display the result
2889
+ self.log_message(f"\n[dim]━━━ Sabha Activity ({result.get('roles')} roles, {result.get('tokens')} tokens) ━━━[/dim]")
2890
+
2891
+ if result.get("final_answer"):
2892
+ self.log_message(f"\n[bold]👑 Raja's Answer:[/bold]\n{result['final_answer']}\n")
2893
+ elif result.get("error"):
2894
+ self.log_message(f"\n[red]Error: {result['error']}[/red]")
2895
+ else:
2896
+ self.log_message("\n[green]Query completed successfully![/green]")
2897
+ else:
2898
+ self.log_message(f"\n[red]Error: {result.get('error')}[/red]\n[dim]{result.get('traceback', '')[:500]}...[/dim]")
2899
+
2900
+ # Cleanup temp files
2901
+ for f in [result_file, status_file, script_file]:
2902
+ try:
2903
+ f.unlink()
2904
+ except:
2905
+ pass
2906
+
2907
+ self._processing_query = False
2908
+ try:
2909
+ self.query_one("#prompt-input", Input).focus()
2910
+ except:
2911
+ pass
2912
+
2913
+ except Exception:
2914
+ self._processing_query = False
2915
+ else:
2916
+ # Keep polling until result is ready
2917
+ if self._processing_query:
2918
+ self.set_timer(0.5, poll_subprocess_result)
2919
+
2920
+ # Start polling for result
2921
+ self.set_timer(0.5, poll_subprocess_result)
2631
2922
 
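
Editor's sketch (not from the package) of the subprocess-plus-result-file handshake used above: the heavy inference runs in a completely separate Python process, so the parent's event loop never contends with it for the GIL, and the parent simply polls for a result file. The paths, child script, and echo "inference" below are invented for the demo; the real code re-arms `set_timer(0.5, ...)` instead of looping:

```python
import json
import subprocess
import sys
import tempfile
import time
from pathlib import Path

workdir = Path(tempfile.mkdtemp())
query_file = workdir / "query.txt"
result_file = workdir / "result.json"
query_file.write_text("what is 2 + 2?", encoding="utf-8")

# Child script written to disk, analogous to temp_inference_script.py above,
# except the "inference" is a trivial echo so the sketch runs anywhere.
child_script = workdir / "job.py"
child_script.write_text(
    "import json, sys\n"
    "from pathlib import Path\n"
    "workdir = Path(sys.argv[1])\n"
    "query = (workdir / 'query.txt').read_text(encoding='utf-8')\n"
    "result = {'success': True, 'final_answer': 'echo: ' + query}\n"
    "(workdir / 'result.json').write_text(json.dumps(result), encoding='utf-8')\n",
    encoding="utf-8",
)

# Separate interpreter process: no GIL sharing with the parent.
proc = subprocess.Popen([sys.executable, str(child_script), str(workdir)])

while not result_file.exists():  # the TUI polls on a timer instead of a loop
    time.sleep(0.5)

print(json.loads(result_file.read_text(encoding="utf-8")))
proc.wait()
```
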
2632
2923
  async def _async_run_sabha(self, query: str, progress: RoleProgressBar) -> None:
2633
2924
  """Execute Sabha council asynchronously to prevent UI freezing."""
@@ -2740,6 +3031,315 @@ class ParishadApp(App):
2740
3031
  except:
2741
3032
  pass
2742
3033
 
3034
+ def _native_sabha_worker(self, query: str) -> None:
3035
+ """
3036
+ Native Python thread worker for Sabha execution.
3037
+
3038
+ CRITICAL FOR WINDOWS: This uses a regular Python thread and a thread-safe
3039
+ queue instead of Textual's worker system which causes freezes on Windows.
3040
+ """
3041
+ debug_log(">>> WORKER THREAD STARTED <<<")
3042
+ debug_log(f"Worker thread ID: {threading.current_thread().ident}")
3043
+ debug_log(f"Query to process: {query[:100]}...")
3044
+
3045
+ try:
3046
+ # Run the blocking inference in this native thread
3047
+ debug_log("Calling self.council.run()... (this will block)")
3048
+ debug_log("=== INFERENCE START ===")
3049
+
3050
+ trace = self.council.run(query)
3051
+
3052
+ debug_log("=== INFERENCE COMPLETE ===")
3053
+ debug_log(f"Trace received: {trace is not None}")
3054
+ if trace:
3055
+ debug_log(f"Trace roles: {len(trace.roles)}, tokens: {trace.total_tokens}")
3056
+
3057
+ # Put result in queue (thread-safe, non-blocking)
3058
+ debug_log("Putting result in queue...")
3059
+ self._result_queue.put(("success", trace))
3060
+ debug_log("Result queued successfully!")
3061
+
3062
+ except Exception as e:
3063
+ import traceback
3064
+ tb = traceback.format_exc()
3065
+ debug_log(f"!!! WORKER ERROR: {type(e).__name__}: {e}")
3066
+ debug_log(f"Traceback: {tb[:500]}")
3067
+ # Put error in queue
3068
+ self._result_queue.put(("error", (e, tb)))
3069
+ debug_log("Error queued.")
3070
+
3071
+ debug_log(">>> WORKER THREAD EXITING <<<")
3072
+
3073
+ def _poll_result_queue(self) -> None:
3074
+ """
3075
+ Timer callback to poll the result queue for Sabha results.
3076
+
3077
+ This is called by a Textual timer and runs on the main event loop thread,
3078
+ so it's safe to update the UI directly.
3079
+ """
3080
+ debug_log("POLL: Timer fired, checking queue...")
3081
+
3082
+ try:
3083
+ # Non-blocking check for results
3084
+ result_type, result_data = self._result_queue.get_nowait()
3085
+
3086
+ debug_log(f"POLL: Got result from queue! Type: {result_type}")
3087
+
3088
+ # Process the result on the main thread (safe for UI updates)
3089
+ if result_type == "success":
3090
+ debug_log("POLL: Processing success result, calling _display_sabha_result_direct...")
3091
+ self._display_sabha_result_direct(result_data)
3092
+ debug_log("POLL: Display complete!")
3093
+ else:
3094
+ debug_log("POLL: Processing error result...")
3095
+ error, tb = result_data
3096
+ self.log_message(f"\n[red]Error ({type(error).__name__}): {error}[/red]\n[dim]{tb[:500]}...[/dim]")
3097
+
3098
+ # Clean up
3099
+ debug_log("POLL: Cleaning up, setting _processing_query = False")
3100
+ self._processing_query = False
3101
+ try:
3102
+ self.query_one("#prompt-input", Input).focus()
3103
+ debug_log("POLL: Input refocused!")
3104
+ except Exception:
3105
+ debug_log("POLL: Could not refocus input")
3106
+
3107
+ debug_log("=== QUERY EXECUTION COMPLETE ===")
3108
+
3109
+ except queue.Empty:
3110
+ # No result yet, keep polling
3111
+ if self._processing_query:
3112
+ # Don't log every tick to avoid log spam, just every 10th
3113
+ self.set_timer(0.1, self._poll_result_queue)
3114
+
3115
+ def _display_sabha_result_direct(self, trace) -> None:
3116
+ """Display Sabha result directly (called from main thread via poll timer)."""
3117
+ # Update progress bar based on trace
3118
+ try:
3119
+ progress = self.query_one("#role-progress", RoleProgressBar)
3120
+ for role_output in trace.roles:
3121
+ role_name = role_output.role.lower()
3122
+ progress.mark_complete(role_name)
3123
+ except Exception:
3124
+ pass # Progress bar update is non-critical
3125
+
3126
+ # Display role activity summary (collapsible style)
3127
+ self.log_message(f"\n[dim]━━━ Sabha Activity ({len(trace.roles)} roles, {trace.total_tokens} tokens) ━━━[/dim]")
3128
+
3129
+ for role_output in trace.roles:
3130
+ role_name = role_output.role.lower()
3131
+ info = ROLE_INFO.get(role_name, {"emoji": "❓", "name": role_name.title()})
3132
+ status_icon = "[green]✓[/green]" if role_output.status == "success" else "[red]✗[/red]"
3133
+
3134
+ # Brief summary of what the role did
3135
+ summary = ""
3136
+ if role_name == "darbari" and role_output.core_output:
3137
+ task_type = role_output.core_output.get("task_type", "unknown")
3138
+ summary = f"→ Task: {task_type}"
3139
+ elif role_name == "majumdar" and role_output.core_output:
3140
+ steps = role_output.core_output.get("steps", [])
3141
+ summary = f"→ {len(steps)} step plan"
3142
+ elif role_name == "prerak" and role_output.core_output:
3143
+ flags = role_output.core_output.get("flags", [])
3144
+ if not flags:
3145
+ summary = "→ No issues"
3146
+ else:
3147
+ summary = f"→ {len(flags)} issue(s)"
3148
+ elif role_name == "raja" and role_output.core_output:
3149
+ conf = role_output.core_output.get("confidence", 0)
3150
+ summary = f"→ Confidence: {int(conf*100)}%"
3151
+
3152
+ # Show model used
3153
+ model_str = ""
3154
+ if role_output.metadata and role_output.metadata.model_id:
3155
+ mid = role_output.metadata.model_id
3156
+ if "/" in mid:
3157
+ mid = mid.split("/")[-1]
3158
+ if mid.endswith(".gguf"):
3159
+ mid = mid[:-5]
3160
+ model_str = f"[dim]({mid})[/dim]"
3161
+
3162
+ if role_output.status == "error":
3163
+ err_msg = role_output.error or "Unknown error"
3164
+ summary = f"[red]{err_msg}[/red]"
3165
+
3166
+ self.log_message(f" {info['emoji']} {info['name']} {model_str}: {status_icon} {summary}")
3167
+
3168
+ self.log_message(f"[dim]━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[/dim]\n")
3169
+
3170
+ # Check for file generation
3171
+ for role_output in trace.roles:
3172
+ if role_output.core_output and role_output.core_output.get("target_file"):
3173
+ fpath = role_output.core_output.get("target_file")
3174
+ self.log_message(f"\n[bold blue]📁 File Generated:[/bold blue] {fpath}")
3175
+
3176
+ # Display the final answer from Raja
3177
+ if trace.final_answer:
3178
+ answer = trace.final_answer.final_answer
3179
+ self.log_message(f"\n[bold]👑 Raja's Answer:[/bold]\n{answer}\n")
3180
+ elif trace.error:
3181
+ self.log_message(f"\n[red]Error: {trace.error}[/red]")
3182
+ else:
3183
+ file_gen = any(r.core_output and r.core_output.get("target_file") for r in trace.roles)
3184
+ if not file_gen:
3185
+ self.log_message("\n[yellow]No answer generated[/yellow]")
3186
+
3187
+ def _run_sabha_thread_worker(self, query: str) -> None:
3188
+ """
3189
+ Execute Sabha council in a dedicated thread worker.
3190
+
3191
+ CRITICAL FOR WINDOWS: This method runs in a real OS thread (not an asyncio executor)
3192
+ which prevents the TUI from freezing during blocking llama-cpp inference.
3193
+
3194
+ UI updates are sent via non-blocking post_message to prevent deadlock.
3195
+ """
3196
+ if self._processing_query:
3197
+ return
3198
+
3199
+ self._processing_query = True
3200
+
3201
+ try:
3202
+ # Run the blocking inference in this thread
3203
+ # This won't freeze the UI because it's a real thread worker
3204
+ trace = self.council.run(query)
3205
+
3206
+ # Send non-blocking message with result (won't deadlock!)
3207
+ self.post_message(self.SabhaResultReady(trace))
3208
+
3209
+ except Exception as e:
3210
+ import traceback
3211
+ tb = traceback.format_exc()
3212
+ # Send non-blocking error message
3213
+ self.post_message(self.SabhaError(e, tb))
3214
+ finally:
3215
+ self._processing_query = False
3216
+ # Send non-blocking completion message
3217
+ self.post_message(self.WorkerComplete("sabha"))
3218
+
3219
+ def on_parishad_app_log_message(self, message: LogMessage) -> None:
3220
+ """Handle non-blocking log messages from worker threads."""
3221
+ self.log_message(message.text)
3222
+
3223
+ def on_parishad_app_sabha_result_ready(self, message: SabhaResultReady) -> None:
3224
+ """Handle Sabha result from worker thread (non-blocking)."""
3225
+ trace = message.trace
3226
+
3227
+ # Update progress bar based on trace
3228
+ try:
3229
+ progress = self.query_one("#role-progress", RoleProgressBar)
3230
+ for role_output in trace.roles:
3231
+ role_name = role_output.role.lower()
3232
+ progress.mark_complete(role_name)
3233
+ except Exception:
3234
+ pass # Progress bar update is non-critical
3235
+
3236
+ # Display role activity summary (collapsible style)
3237
+ self.log_message(f"\n[dim]━━━ Sabha Activity ({len(trace.roles)} roles, {trace.total_tokens} tokens) ━━━[/dim]")
3238
+
3239
+ for role_output in trace.roles:
3240
+ role_name = role_output.role.lower()
3241
+ info = ROLE_INFO.get(role_name, {"emoji": "❓", "name": role_name.title()})
3242
+ status_icon = "[green]✓[/green]" if role_output.status == "success" else "[red]✗[/red]"
3243
+
3244
+ # Brief summary of what the role did
3245
+ summary = ""
3246
+ if role_name == "darbari" and role_output.core_output:
3247
+ task_type = role_output.core_output.get("task_type", "unknown")
3248
+ summary = f"→ Task: {task_type}"
3249
+ elif role_name == "majumdar" and role_output.core_output:
3250
+ steps = role_output.core_output.get("steps", [])
3251
+ summary = f"→ {len(steps)} step plan"
3252
+ elif role_name == "prerak" and role_output.core_output:
3253
+ flags = role_output.core_output.get("flags", [])
3254
+ if not flags:
3255
+ summary = "→ No issues"
3256
+ else:
3257
+ summary = f"→ {len(flags)} issue(s)"
3258
+ elif role_name == "raja" and role_output.core_output:
3259
+ conf = role_output.core_output.get("confidence", 0)
3260
+ summary = f"→ Confidence: {int(conf*100)}%"
3261
+
3262
+ # Show model used
3263
+ model_str = ""
3264
+ if role_output.metadata and role_output.metadata.model_id:
3265
+ mid = role_output.metadata.model_id
3266
+ # Strip path
3267
+ if "/" in mid:
3268
+ mid = mid.split("/")[-1]
3269
+ # Strip extension (optional but cleaner)
3270
+ if mid.endswith(".gguf"):
3271
+ mid = mid[:-5]
3272
+ model_str = f"[dim]({mid})[/dim]"
3273
+
3274
+ if role_output.status == "error":
3275
+ err_msg = role_output.error or "Unknown error"
3276
+ # Show full error
3277
+ summary = f"[red]{err_msg}[/red]"
3278
+
3279
+ self.log_message(f" {info['emoji']} {info['name']} {model_str}: {status_icon} {summary}")
3280
+
3281
+ self.log_message(f"[dim]━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[/dim]\n")
3282
+
3283
+ # Check for silent file generation (common source of confusion)
3284
+ for role_output in trace.roles:
3285
+ if role_output.core_output and role_output.core_output.get("target_file"):
3286
+ fpath = role_output.core_output.get("target_file")
3287
+ self.log_message(f"\n[bold blue]📁 File Generated:[/bold blue] {fpath}")
3288
+
3289
+ # Display the final answer from Raja
3290
+ if trace.final_answer:
3291
+ answer = trace.final_answer.final_answer
3292
+ self.log_message(f"\n[bold]👑 Raja's Answer:[/bold]\n{answer}\n")
3293
+ elif trace.error:
3294
+ self.log_message(f"\n[red]Error: {trace.error}[/red]")
3295
+ else:
3296
+ # Check if we generated a file but no text answer
3297
+ file_gen = any(r.core_output and r.core_output.get("target_file") for r in trace.roles)
3298
+ if not file_gen:
3299
+ self.log_message("\n[yellow]No answer generated[/yellow]")
3300
+
3301
+ def on_parishad_app_sabha_error(self, message: SabhaError) -> None:
3302
+ """Handle Sabha error from worker thread (non-blocking)."""
3303
+ self.log_message(f"\n[red]Error ({type(message.error).__name__}): {message.error}[/red]\n[dim]{message.traceback_str[:500]}...[/dim]")
3304
+
3305
+ def on_parishad_app_worker_complete(self, message: WorkerComplete) -> None:
3306
+ """Handle worker completion - refocus input."""
3307
+ try:
3308
+ self.query_one("#prompt-input", Input).focus()
3309
+ except Exception:
3310
+ pass
3311
+
3312
+ def on_parishad_app_council_ready(self, message: CouncilReady) -> None:
3313
+ """Handle council initialization completion (non-blocking)."""
3314
+ if message.success:
3315
+ self.log_message(
3316
+ f"[green]✅ Sabha council ready![/green]\n"
3317
+ f"[dim]Models loaded from profile '{message.profile}'[/dim]\n"
3318
+ f"[dim]You can now start asking questions.[/dim]\n"
3319
+ )
3320
+ else:
3321
+ self.log_message(
3322
+ f"[red]✗ Error loading Sabha council:[/red]\n"
3323
+ f"[dim]{message.error_msg}[/dim]\n"
3324
+ )
3325
+
3326
+ # Keep these for backward compatibility but they are no longer used for thread workers
3327
+ def _display_sabha_result(self, trace) -> None:
3328
+ """Display Sabha result on main thread (DEPRECATED - use message handlers now)."""
3329
+ # Delegate to message handler
3330
+ self.on_parishad_app_sabha_result_ready(self.SabhaResultReady(trace))
3331
+
3332
+ def _display_sabha_error(self, error: Exception, tb: str) -> None:
3333
+ """Display Sabha error on main thread (DEPRECATED - use message handlers now)."""
3334
+ self.on_parishad_app_sabha_error(self.SabhaError(error, tb))
3335
+
3336
+ def _refocus_input(self) -> None:
3337
+ """Refocus input widget after query completion (DEPRECATED - use message handlers now)."""
3338
+ try:
3339
+ self.query_one("#prompt-input", Input).focus()
3340
+ except Exception:
3341
+ pass
3342
+
2743
3343
  def handle_command(self, parsed: ParsedInput) -> None:
2744
3344
  """Handle slash commands with ParsedInput."""
2745
3345
  cmd = parsed.command_name
parishad/orchestrator/engine.py CHANGED
@@ -656,23 +656,47 @@ class ParishadEngine:
656
656
  content = output.core_output.get("content")
657
657
 
658
658
  if target_file and content:
659
- try:
660
- # Use FS tool to write
661
- logger.info(f"Writing file {target_file} via Sainik")
662
-
663
- # Simple content write
664
- result = self.fs_tool.run("write", path=target_file, content=content)
665
-
666
- if not result.success:
667
- logger.error(f"Failed to write file {target_file}: {result.error}")
668
- output.error = f"File write failed: {result.error}"
669
- # Optionally mark partial success?
670
- else:
671
- logger.info(f"Successfully wrote {target_file}")
659
+ # Smart filter: Only write files if user explicitly requested it
660
+ # Check if the query contains file-related keywords
661
+ query_lower = ctx.user_query.lower()
662
+ file_keywords = [
663
+ 'create', 'write', 'save', 'generate', 'make', 'update', 'modify',
664
+ 'file', 'script', '.py', '.txt', '.md', '.json', '.yaml', '.yml',
665
+ 'to file', 'in file', 'save to', 'write to'
666
+ ]
667
+
668
+ # Check if any file keyword is in the query
669
+ should_write_file = any(keyword in query_lower for keyword in file_keywords)
670
+
671
+ # Also check for specific file path mentions (e.g., "src/main.py")
672
+ import re
673
+ file_path_pattern = r'\b[\w/\\]+\.\w+\b'
674
+ if re.search(file_path_pattern, ctx.user_query):
675
+ should_write_file = True
676
+
677
+ if should_write_file:
678
+ try:
679
+ # Use FS tool to write
680
+ logger.info(f"Writing file {target_file} via Sainik")
672
681
 
673
- except Exception as e:
674
- logger.error(f"Error handling file write for {target_file}: {e}")
675
- output.error = f"File write exception: {str(e)}"
682
+ # Simple content write
683
+ result = self.fs_tool.run("write", path=target_file, content=content)
684
+
685
+ if not result.success:
686
+ logger.error(f"Failed to write file {target_file}: {result.error}")
687
+ output.error = f"File write failed: {result.error}"
688
+ # Optionally mark partial success?
689
+ else:
690
+ logger.info(f"Successfully wrote {target_file}")
691
+
692
+ except Exception as e:
693
+ logger.error(f"Error handling file write for {target_file}: {e}")
694
+ output.error = f"File write exception: {str(e)}"
695
+ else:
696
+ logger.info(f"Skipping file write for {target_file} - no file creation keyword detected in query")
697
+ # Clear the target_file from output to prevent confusion
698
+ if "target_file" in output.core_output:
699
+ output.core_output["target_file"] = None
676
700
 
677
701
  # Phase 13: General Tool Execution (Agentic)
678
702
  if role_name == "sainik" and output.status == "success":
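
Editor's illustration of the heuristic above, runnable on its own: a file is written only when the query contains a file-related keyword or something that looks like a path. The keyword list and regex mirror the diff; the sample queries are made up.

```python
import re

FILE_KEYWORDS = [
    'create', 'write', 'save', 'generate', 'make', 'update', 'modify',
    'file', 'script', '.py', '.txt', '.md', '.json', '.yaml', '.yml',
    'to file', 'in file', 'save to', 'write to',
]
FILE_PATH_PATTERN = r'\b[\w/\\]+\.\w+\b'  # e.g. matches "src/main.py"

def should_write_file(user_query: str) -> bool:
    """True if the query asks for a file, per the keyword/path heuristic above."""
    query_lower = user_query.lower()
    if any(keyword in query_lower for keyword in FILE_KEYWORDS):
        return True
    return re.search(FILE_PATH_PATTERN, user_query) is not None

print(should_write_file("what is 2+2"))                    # False -> no file written
print(should_write_file("save this to config.json"))       # True
print(should_write_file("put the parser in src/main.py"))  # True
```
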
@@ -884,6 +908,9 @@ class ParishadEngine:
884
908
  if self.trace_dir:
885
909
  self._save_trace(trace)
886
910
 
911
+ # Save final answer to output.json in workspace root
912
+ self._save_output_json(trace)
913
+
887
914
  logger.info(
888
915
  f"Parishad run complete: {ctx.query_id} "
889
916
  f"(tokens: {ctx.tokens_used}/{budget}, success: {success})"
@@ -965,6 +992,31 @@ class ParishadEngine:
965
992
  f.write(trace.to_json())
966
993
 
967
994
  logger.debug(f"Trace saved: {filepath}")
995
+
996
+ def _save_output_json(self, trace: Trace) -> None:
997
+ """Save final answer to output.json in workspace root."""
998
+ try:
999
+ import json
1000
+ from pathlib import Path
1001
+
1002
+ # Get the workspace root (current working directory)
1003
+ output_path = Path.cwd() / "output.json"
1004
+
1005
+ # Extract the final answer text
1006
+ if trace.final_answer:
1007
+ output_content = trace.final_answer.final_answer
1008
+ else:
1009
+ output_content = "No answer generated"
1010
+
1011
+ # Write the output to output.json
1012
+ with open(output_path, "w", encoding="utf-8") as f:
1013
+ json.dump(output_content, f, indent=2, ensure_ascii=False)
1014
+
1015
+ logger.debug(f"Output saved to: {output_path}")
1016
+
1017
+ except Exception as e:
1018
+ logger.warning(f"Failed to save output.json: {e}")
1019
+
968
1020
 
969
1021
 
970
1022
  class Parishad:
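
Worth noting about `_save_output_json` above: `json.dump` is called on a plain string, so `output.json` ends up containing a single JSON string literal rather than an object. A small standalone demonstration (editor's example, not package code):

```python
import json
from pathlib import Path

answer = "The sum of 2 and 2 is 4."
output_path = Path("output.json")

with open(output_path, "w", encoding="utf-8") as f:
    json.dump(answer, f, indent=2, ensure_ascii=False)

print(output_path.read_text(encoding="utf-8"))
# -> "The sum of 2 and 2 is 4."   (a quoted JSON string, not a JSON object)
```
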
parishad/roles/sainik.py CHANGED
@@ -52,8 +52,12 @@ You must ALWAYS respond with a valid JSON object in the following format:
52
52
  Guidelines:
53
53
  - If writing code, put the COMPLETE runnable code in "content".
54
54
  - If writing text, put the clear explanation in "content".
55
- - If the user asked to change/create a file, you MUST specify "target_file".
56
- - IMPORTANT: Do NOT create files unless explicitly asked! For general questions/explanations, keep "target_file": null.
55
+ - **CRITICAL: ONLY set "target_file" if the user EXPLICITLY asks to create/save/write a file with a specific filename.**
56
+ - Examples where you SHOULD set target_file: "create a script called math_utils.py", "save this to config.json", "write code in src/main.py"
57
+ - Examples where you should NOT set target_file: "what is 2+2", "write code to add numbers", "how do I calculate X", "solve this math problem", "explain Y"
58
+ - For math problems, explanations, and general questions: Put the answer in "content" and set "target_file": null
59
+ - NEVER write to docs/ directory unless explicitly instructed to do so by the user.
60
+ - NEVER create files just because you're writing code - only if the user wants to SAVE it to a file.
57
61
  - "target_file" should be relative to the current directory (e.g., "src/main.py").
58
62
  - If "target_file" is a text/markdown/json file (not executable code), put the RAW content in "content". DO NOT write a Python script to create it.
59
63
  - If you need to Use a tool, add it to `tool_calls`. Available tools will be listed in the prompt.
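
Editor's illustration of the target_file rule above, written as Python dicts: two hypothetical Sainik responses, one for a general coding question (no file) and one for an explicit save request. Only the fields visible in this diff (`content`, `target_file`, `tool_calls`) are shown; the full response schema defined earlier in the prompt may include more.

```python
# Hypothetical Sainik responses; field names are taken from the prompt text
# shown in this diff, nothing else is implied about the full schema.
answer_only = {  # e.g. "write code to add numbers" -> answer goes in content only
    "content": "def add(a, b):\n    return a + b",
    "target_file": None,
    "tool_calls": [],
}

explicit_save = {  # e.g. "create a script called math_utils.py"
    "content": "def add(a, b):\n    return a + b",
    "target_file": "math_utils.py",
    "tool_calls": [],
}
```
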
parishad-0.1.7.dist-info/METADATA → parishad-0.1.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: parishad
3
- Version: 0.1.7
3
+ Version: 0.1.8
4
4
  Summary: A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness
5
5
  Project-URL: Homepage, https://github.com/parishad-council/parishad
6
6
  Project-URL: Documentation, https://github.com/parishad-council/parishad#readme
parishad-0.1.7.dist-info/RECORD → parishad-0.1.8.dist-info/RECORD CHANGED
@@ -5,7 +5,7 @@ parishad/checker/deterministic.py,sha256=reXH8aew_pBGn85MRMQcHRzi6fWU_JaGniimYUq
5
5
  parishad/checker/ensemble.py,sha256=ozJwmQFNY4ojcYB3I0o5-3xUk43tdDqHm-jGVgO6Bhg,17506
6
6
  parishad/checker/retrieval.py,sha256=jQ5Zf38Y1rcBrhmAOyNs4sTHFwF98Iqugsw1rGLHPks,16974
7
7
  parishad/cli/__init__.py,sha256=iI_jctu7iGt7SZMfdkAQ75eFORLjy4soWC9DMzV8WaI,67
8
- parishad/cli/code.py,sha256=0pRqoEtHeVILihiycYUXQ2v1-7TmVmXtKze-YvUE9m8,119493
8
+ parishad/cli/code.py,sha256=IPlIfA6u59gnZv4JkuKIP0ihGbebE6i40vqwNZpBVKU,146215
9
9
  parishad/cli/main.py,sha256=qTegCLubN38iYivhrvdJFz-akwyWAKEOegq9XioTeuY,38513
10
10
  parishad/cli/prarambh.py,sha256=4vvRzJHWC6ECPdL4IKv5laZDy4v3ZYDq4vjqu3Hll30,4296
11
11
  parishad/cli/sthapana.py,sha256=Sxk-TA-WCKW2CSPBQPhhEgaON60HKVDUaMorFjc-BNQ,13534
@@ -35,7 +35,7 @@ parishad/models/backends/openai_api.py,sha256=9CmBvhQEcPX0R1RtQnHWVcTdH9wMT6Pu1a
35
35
  parishad/models/backends/transformers_hf.py,sha256=z-nw18yY62BorAawrand59GgD_0dmnYqibQGcP6sadQ,6619
36
36
  parishad/orchestrator/__init__.py,sha256=a52VEdW_98XwD3FseRu_9_sKagFs6z5PtXS_o4j2fbs,413
37
37
  parishad/orchestrator/config_loader.py,sha256=sc8cXhgaUg9b5KCUXQLtcAI-dzfOubyWDaIY0TZORoM,6975
38
- parishad/orchestrator/engine.py,sha256=WAKcHqG20tRqt-Hg62iR-TOswmgc0Y0OclSuSlZe0lA,45644
38
+ parishad/orchestrator/engine.py,sha256=F-2UjoU74sczqtEyakV97_r9yKLXOCp0Am9uhP2-KFc,48136
39
39
  parishad/orchestrator/exceptions.py,sha256=jLNkoKvhuKny2upGv04L7Dj4SAi0umVRC0RAukX5BBM,391
40
40
  parishad/roles/__init__.py,sha256=OF8Zb-yNP9nX0gS9gnYkt0QoG_KyHYwtw_OiWplFnxc,1171
41
41
  parishad/roles/base.py,sha256=u5nipc3S6Bv86eAYbntYkFj3AszzwmZ74BVukMdBMjo,24866
@@ -46,7 +46,7 @@ parishad/roles/pantapradhan.py,sha256=Hko47GQdbb561EHhFpWXrIIHw4bCHz8vu8sUtQmlic
46
46
  parishad/roles/prerak.py,sha256=cpXPqibt1CT_Sz1q5F_m9mZKZVAGixPsIYtg3Vnwmo8,11879
47
47
  parishad/roles/raja.py,sha256=iliR3v4YHnwSPPU1_GZUkNiTKgcoZ9EoiWJng_pfdBU,12096
48
48
  parishad/roles/sacheev.py,sha256=terhWEEB_Zhti2Wb3LAK2aOYk8sFE72Q9cy7CfTor6I,6594
49
- parishad/roles/sainik.py,sha256=DdQ4GNTvVkEE950i7UaKLbzcX9J6IB-iFCxdxHHRIVo,15523
49
+ parishad/roles/sainik.py,sha256=GsKoMuWo9khO6aeJWZwJZ3-JSoF1eoXmP-G4S38TFF4,16043
50
50
  parishad/roles/sar_senapati.py,sha256=XT7pPTYoJorusay1_v2Sk5-GgZX2auwRmJegLOvJGWA,5413
51
51
  parishad/roles/vidushak.py,sha256=f6lmtZrIbAO97RMG2NQYSm14JbqguBA7ukOT-n6i0cM,2018
52
52
  parishad/tools/__init__.py,sha256=Ni_RlR4AnOFIBivbM-sfpRdZI0Tpu9MXkbs3n4Ngx8I,279
@@ -62,8 +62,8 @@ parishad/utils/logging.py,sha256=TT16YB6cOhBPyo9a6dZRww3SjStrZKihdtq9hwqBXJo,214
62
62
  parishad/utils/scanner.py,sha256=8wxaNgH3i_T4AdyBuLr9L4KcQ_AORguA6xvnOIyem8k,5841
63
63
  parishad/utils/text.py,sha256=S_3Ox4T3C87XfyXdR2b3JMatpCiOozaqPUbVic7OIFM,1617
64
64
  parishad/utils/tracing.py,sha256=x35BmMO9M83dVCy73kYqkOFE4zKMrUFe_RuV8qIWJaM,4304
65
- parishad-0.1.7.dist-info/METADATA,sha256=s-3tEYV7_E91nMqPpibGm7waSC0y8JuUvt-wIQ7sq0A,11081
66
- parishad-0.1.7.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
67
- parishad-0.1.7.dist-info/entry_points.txt,sha256=cCF4Bg5sLxlLMJhnOnWNua3XYzAGlL5ri-55y0fWPek,51
68
- parishad-0.1.7.dist-info/licenses/LICENSE,sha256=Xow-fDHX9pzrvBkPHImvQa-Uc1g6BDbz9IE4jPfB6D0,1073
69
- parishad-0.1.7.dist-info/RECORD,,
65
+ parishad-0.1.8.dist-info/METADATA,sha256=iDRypzVnC6fXg3ay9dx-GbCANkLqbCqvfRQcciBVgtg,11081
66
+ parishad-0.1.8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
67
+ parishad-0.1.8.dist-info/entry_points.txt,sha256=cCF4Bg5sLxlLMJhnOnWNua3XYzAGlL5ri-55y0fWPek,51
68
+ parishad-0.1.8.dist-info/licenses/LICENSE,sha256=Xow-fDHX9pzrvBkPHImvQa-Uc1g6BDbz9IE4jPfB6D0,1073
69
+ parishad-0.1.8.dist-info/RECORD,,