parishad-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. parishad/__init__.py +70 -0
  2. parishad/__main__.py +10 -0
  3. parishad/checker/__init__.py +25 -0
  4. parishad/checker/deterministic.py +644 -0
  5. parishad/checker/ensemble.py +496 -0
  6. parishad/checker/retrieval.py +546 -0
  7. parishad/cli/__init__.py +6 -0
  8. parishad/cli/code.py +3254 -0
  9. parishad/cli/main.py +1158 -0
  10. parishad/cli/prarambh.py +99 -0
  11. parishad/cli/sthapana.py +368 -0
  12. parishad/config/modes.py +139 -0
  13. parishad/config/pipeline.core.yaml +128 -0
  14. parishad/config/pipeline.extended.yaml +172 -0
  15. parishad/config/pipeline.fast.yaml +89 -0
  16. parishad/config/user_config.py +115 -0
  17. parishad/data/catalog.py +118 -0
  18. parishad/data/models.json +108 -0
  19. parishad/memory/__init__.py +79 -0
  20. parishad/models/__init__.py +181 -0
  21. parishad/models/backends/__init__.py +247 -0
  22. parishad/models/backends/base.py +211 -0
  23. parishad/models/backends/huggingface.py +318 -0
  24. parishad/models/backends/llama_cpp.py +239 -0
  25. parishad/models/backends/mlx_lm.py +141 -0
  26. parishad/models/backends/ollama.py +253 -0
  27. parishad/models/backends/openai_api.py +193 -0
  28. parishad/models/backends/transformers_hf.py +198 -0
  29. parishad/models/costs.py +385 -0
  30. parishad/models/downloader.py +1557 -0
  31. parishad/models/optimizations.py +871 -0
  32. parishad/models/profiles.py +610 -0
  33. parishad/models/reliability.py +876 -0
  34. parishad/models/runner.py +651 -0
  35. parishad/models/tokenization.py +287 -0
  36. parishad/orchestrator/__init__.py +24 -0
  37. parishad/orchestrator/config_loader.py +210 -0
  38. parishad/orchestrator/engine.py +1113 -0
  39. parishad/orchestrator/exceptions.py +14 -0
  40. parishad/roles/__init__.py +71 -0
  41. parishad/roles/base.py +712 -0
  42. parishad/roles/dandadhyaksha.py +163 -0
  43. parishad/roles/darbari.py +246 -0
  44. parishad/roles/majumdar.py +274 -0
  45. parishad/roles/pantapradhan.py +150 -0
  46. parishad/roles/prerak.py +357 -0
  47. parishad/roles/raja.py +345 -0
  48. parishad/roles/sacheev.py +203 -0
  49. parishad/roles/sainik.py +427 -0
  50. parishad/roles/sar_senapati.py +164 -0
  51. parishad/roles/vidushak.py +69 -0
  52. parishad/tools/__init__.py +7 -0
  53. parishad/tools/base.py +57 -0
  54. parishad/tools/fs.py +110 -0
  55. parishad/tools/perception.py +96 -0
  56. parishad/tools/retrieval.py +74 -0
  57. parishad/tools/shell.py +103 -0
  58. parishad/utils/__init__.py +7 -0
  59. parishad/utils/hardware.py +122 -0
  60. parishad/utils/logging.py +79 -0
  61. parishad/utils/scanner.py +164 -0
  62. parishad/utils/text.py +61 -0
  63. parishad/utils/tracing.py +133 -0
  64. parishad-0.1.0.dist-info/METADATA +256 -0
  65. parishad-0.1.0.dist-info/RECORD +68 -0
  66. parishad-0.1.0.dist-info/WHEEL +4 -0
  67. parishad-0.1.0.dist-info/entry_points.txt +2 -0
  68. parishad-0.1.0.dist-info/licenses/LICENSE +21 -0
parishad/roles/sainik.py
@@ -0,0 +1,427 @@
+ """
+ Sainik (Implementor) role for the Parishad council.
+ Executes plans and generates solutions (Code/Text).
+ Combines functionality of Worker, WorkerCode, and WorkerText.
+ """
+
+ import logging
+ from typing import Any, Optional
+
+ from .base import (
+     Role,
+     RoleInput,
+     RoleOutput,
+     Slot,
+     Candidate,
+ )
+
+ # Module logger; parse_output() below logs empty model output and parse failures.
+ logger = logging.getLogger(__name__)
+
+
+ WORKER_SYSTEM_PROMPT = """You are Sainik, the Implementor in the Parishad council. Your job is to execute the plan created by Majumdar/Sar-Senapati and generate high-quality solutions.
+
+ Your responsibilities:
+ 1. Follow the plan step by step
+ 2. Generate accurate, complete solutions
+ 3. Track your reasoning process
+ 4. Identify potential issues or uncertainties
+ 5. Produce output in the expected format
+
+ You must ALWAYS respond with a valid JSON object in the following format:
+ ```json
+ {
+   "content": "Your complete solution/answer/code here",
+   "content_type": "code|text|numeric|mixed",
+   "language": "python",
+   "reasoning_trace": [
+     "Step 1: I analyzed...",
+     "Step 2: I implemented..."
+   ],
+   "confidence": 0.85,
+   "warnings": ["Potential issue: ..."],
+   "imports": ["required_imports"],
+   "key_points": ["key takeaway 1"],
+   "target_file": "path/to/output_file.ext",
+   "tool_calls": [
+     {
+       "tool": "tool_name",
+       "action": "action_name",
+       "args": { "arg1": "value" }
+     }
+   ]
+ }
+ ```
+
+ Guidelines:
+ - If writing code, put the COMPLETE runnable code in "content".
+ - If writing text, put the clear explanation in "content".
+ - If the user asked to change/create a file, you MUST specify "target_file".
+ - "target_file" should be relative to the current directory (e.g., "src/main.py").
+ - If "target_file" is a text/markdown/json file (not executable code), put the RAW content in "content". DO NOT write a Python script to create it.
+ - If you need to use a tool, add it to `tool_calls`. Available tools will be listed in the prompt.
+ - Be honest about confidence."""
+
+
+ WORKER_USER_TEMPLATE = """Execute the following plan and generate a solution.
+
+ ORIGINAL QUERY:
+ {user_query}
+
+ TASK SPECIFICATION:
+ {task_spec}
+
+ EXECUTION PLAN:
+ {plan}
+
+ {retry_context}
+
+ Follow the plan and generate a complete solution. Respond with ONLY a valid JSON object."""
+
+
+ WORKER_CODE_EMPHASIS = """
+ IMPORTANT: This is a CODE task. Your "content" field must contain complete, runnable code.
+ - Include all necessary imports at the top
+ - Write a complete solution that can be executed
+ - Add brief comments for complex logic
+ - The code should be ready to run without modifications
+ - Set "content_type" to "code"
+ - Set "language" to the programming language used"""
+
+
+ WORKER_MATH_EMPHASIS = """
+ IMPORTANT: This is a MATH task. Show your work clearly.
+ - Include step-by-step calculations in reasoning_trace
+ - Double-check your arithmetic
+ - State your final answer clearly in the content field
+ - If the answer is numeric, ensure it is exact"""
+
+
+ WORKER_TEXT_EMPHASIS = """
+ IMPORTANT: This is a TEXT/EXPLANATION task.
+ - Write clear, well-structured explanations
+ - Organize content logically
+ - Be concise but complete
+ - Set "content_type" to "text"
+ - If editing a file, provide the NEW FULL CONTENT of the file, not a description of changes."""
+
+
+ class Sainik(Role):
+     """
+     Sainik (Implementor) executes the plan and generates candidate solutions.
+     Handles both Text and Code generation based on task type.
+
+     - Slot: MID (7-13B)
+     - Purpose: Main content generation following Planner's steps
+     - Output: Candidate with content, reasoning trace, confidence
+     """
+
+     name = "sainik"
+     default_slot = Slot.MID
+
+
+     def __init__(self, model_runner: Any, tools: Optional[list[Any]] = None, **kwargs):
+         super().__init__(
+             model_runner=model_runner,
+             slot=kwargs.get("slot", Slot.MID),
+             max_tokens=kwargs.get("max_tokens", 2048),
+             temperature=kwargs.get("temperature", 0.6)
+         )
+         self.tools = tools or []
+
+     @property
+     def system_prompt(self) -> str:
+         return WORKER_SYSTEM_PROMPT
+
+     def format_input(self, role_input: RoleInput) -> str:
+         task_spec_str = self._format_task_spec(role_input.task_spec)
+         plan_str = self._format_plan(role_input.plan)
+         retry_context = self._format_retry_context(role_input.context)
+
+         # Phase 13: Tool Integration - Inject File Context & Tool Descriptions
+         file_context = ""
+         tool_descriptions = ""
+
+         if self.tools:
+             tool_descriptions = "\n\nAVAILABLE TOOLS:\n"
+             for tool in self.tools:
+                 tool_descriptions += f"- {tool.name}: {tool.description or 'No description'}\n"
+
+                 if tool.name == "file_system":
+                     try:
+                         # List files in current directory to give context
+                         result = tool.run("list", path=".")
+                         if result.success:
+                             file_context += f"\n\nCURRENT DIRECTORY CONTEXT:\n{result.data}\n"
+                     except Exception as e:
+                         file_context += f"\n\nError accessing file system: {str(e)}\n"
+
+
+         # Add task-specific emphasis
+         task_type = ""
+         if role_input.task_spec:
+             task_type = role_input.task_spec.get("task_type", "")
+
+         prompt = WORKER_USER_TEMPLATE.format(
+             user_query=role_input.user_query,
+             task_spec=task_spec_str,
+             plan=plan_str,
+             retry_context=retry_context + file_context + tool_descriptions  # Append file context and tool details
+         )
+
+         if task_type == "code":
+             prompt += WORKER_CODE_EMPHASIS
+         elif task_type == "math":
+             prompt += WORKER_MATH_EMPHASIS
+         else:
+             prompt += WORKER_TEXT_EMPHASIS
+
+         return prompt
+
+     def _format_task_spec(self, task_spec: Optional[dict]) -> str:
+         """Format task spec for inclusion in prompt."""
+         if not task_spec:
+             return "No task specification provided."
+
+         lines = [
+             f"Problem: {task_spec.get('problem', 'Not specified')}",
+             f"Task Type: {task_spec.get('task_type', 'Unknown')}",
+             f"Output Format: {task_spec.get('output_format', 'text')}",
+         ]
+
+         constraints = task_spec.get('constraints', [])
+         if constraints:
+             lines.append(f"Constraints: {', '.join(constraints)}")
+
+         return "\n".join(lines)
+
+     def _format_plan(self, plan: Optional[dict]) -> str:
+         """Format plan for inclusion in prompt."""
+         if not plan:
+             return "No plan provided. Complete the task directly."
+
+         lines = []
+
+         if plan.get("suggested_approach"):
+             lines.append(f"Approach: {plan['suggested_approach']}")
+
+         steps = plan.get("steps", [])
+         for step in steps:
+             step_id = step.get("id", "?")
+             desc = step.get("description", "")
+             lines.append(f"Step {step_id}: {desc}")
+
+             if step.get("rationale"):
+                 lines.append(f" Rationale: {step['rationale']}")
+
+         expected_output = plan.get("expected_output_type", "")
+         if expected_output:
+             lines.append(f"\nExpected Output Type: {expected_output}")
+
+         instructions = plan.get("worker_instructions", "")
+         if instructions:
+             lines.append(f"\nInstructions: {instructions}")
+
+         return "\n".join(lines)
+
+     def _format_retry_context(self, context: dict) -> str:
+         """Format retry context if this is a retry attempt."""
+         if not context.get("is_retry"):
+             return ""
+
+         lines = ["\n--- RETRY CONTEXT ---"]
+
+         previous_output = context.get("previous_output", "")
+         if previous_output:
+             lines.append(f"Your previous output:\n{previous_output[:500]}...")
+
+         checker_feedback = context.get("checker_feedback", {})
+         if checker_feedback:
+             flags = checker_feedback.get("flags", [])
+             if flags:
+                 lines.append("\nIssues identified by Challenger:")
+                 for flag in flags[:5]:  # Limit to 5 flags
+                     lines.append(f"- [{flag.get('severity', 'unknown')}] {flag.get('detail', '')}")
+
+             edits = checker_feedback.get("suggested_edits", [])
+             if edits:
+                 lines.append("\nSuggested fixes:")
+                 for edit in edits[:3]:
+                     lines.append(f"- {edit}")
+
+         lines.append("\nPlease fix the issues and regenerate your solution.")
+         lines.append("--- END RETRY CONTEXT ---\n")
+
+         return "\n".join(lines)
+
+     def parse_output(self, raw_output: str) -> dict[str, Any]:
+         """
+         Parse LLM output into Candidate dict with robust fallback.
+
+         Strategy:
+         1. If empty output, return empty but valid dict
+         2. Try to parse as JSON with expected schema
+         3. Fall back to extracting text answer from raw output
+         4. NEVER fail completely - always return valid dict
+         """
+         raw = raw_output.strip()
+
+         # Handle empty output
+         if not raw:
+             logger.warning("Sainik received empty output from model")
+             return {
+                 "content": "",
+                 "content_type": "text",
+                 "confidence": 0.0,
+                 "reasoning_trace": [],
+                 "warnings": ["Model returned empty output"],
+                 "parse_status": "empty"
+             }
+
+         # Try strict JSON parsing first
+         try:
+             data = self._extract_json(raw)
+
+             # Check if we got valid structured output
+             content = data.get("content", "")
+             if not content and "raw_output" in data:
+                 # _extract_json returned fallback, try text extraction
+                 content = self._extract_text_answer(data["raw_output"])
+                 parse_status = "fallback_text"
+                 warnings = data.get("warnings", [])
+                 warnings.append("Model output did not follow JSON format; extracted text answer")
+             else:
+                 # Valid JSON with content field
+                 parse_status = "json_ok"
+                 warnings = data.get("warnings", [])
+
+             # Infer content type if not provided
+             content_type = data.get("content_type", "text")
+             if not data.get("content_type"):
+                 content_type = self._infer_content_type(content)
+
+             # Normalize confidence
+             confidence = data.get("confidence", 0.5)
+             if isinstance(confidence, str):
+                 try:
+                     confidence = float(confidence)
+                 except ValueError:
+                     confidence = 0.5
+             confidence = max(0.0, min(1.0, confidence))
+
+             # Lower confidence for fallback parsing
+             if parse_status == "fallback_text":
+                 confidence = min(confidence, 0.6)
+
+             return {
+                 "content": content,
+                 "content_type": content_type,
+                 "language": data.get("language"),
+                 "reasoning_trace": data.get("reasoning_trace", []),
+                 "confidence": confidence,
+                 "warnings": warnings,
+                 "imports": data.get("imports", []),
+                 "key_points": data.get("key_points", []),
+                 "target_file": data.get("target_file"),
+                 "tool_calls": data.get("tool_calls", []),
+                 "parse_status": parse_status,
+                 "raw_output": raw
+             }
+
+         except Exception as e:
+             # Catastrophic parse error - use pure text extraction
+             logger.exception("Sainik.parse_output: unexpected error during parsing")
+             text_answer = self._extract_text_answer(raw)
+
+             return {
+                 "content": text_answer,
+                 "content_type": "text",
+                 "confidence": 0.5,
+                 "reasoning_trace": [],
+                 "warnings": [f"Parse error: {str(e)}; using raw text"],
+                 "parse_status": "error_fallback",
+                 "raw_output": raw
+             }
+
+     def _extract_text_answer(self, raw: str) -> str:
+         """
+         Extract answer from raw text using heuristics when JSON parsing fails.
+
+         Heuristics (in order):
+         1. Look for "Answer:" or "Result:" prefix patterns
+         2. Look for last code block if present
+         3. Take last non-empty line
+         4. Return full text if nothing else works
+         """
+         import re
+
+         if not raw:
+             return ""
+
+         # Try to find explicit answer markers
+         answer_patterns = [
+             r'(?i)(?:final\s*)?answer\s*[:=]\s*(.+)',
+             r'(?i)result\s*[:=]\s*(.+)',
+             r'(?i)solution\s*[:=]\s*(.+)',
+             r'(?i)output\s*[:=]\s*(.+)'
+         ]
+
+         for pattern in answer_patterns:
+             match = re.search(pattern, raw, re.MULTILINE)
+             if match:
+                 answer = match.group(1).strip()
+                 if answer:
+                     return answer
+
+         # Try to extract code block if present (might be the answer)
+         code_block_pattern = r'```(?:\w+)?\s*\n([\s\S]*?)\n```'
+         code_blocks = re.findall(code_block_pattern, raw)
+         if code_blocks:
+             # Use last code block
+             last_block = code_blocks[-1].strip()
+             if last_block:
+                 return last_block
+
+         # Try last non-empty line (common pattern: model explains, then gives answer)
+         lines = [line.strip() for line in raw.split('\n') if line.strip()]
+         if lines:
+             last_line = lines[-1]
+             # Prefer last line if it's short (likely a direct answer)
+             if len(last_line) < 200:
+                 return last_line
+
+         # Give up and return first 500 chars of raw text
+         return raw[:500] if len(raw) > 500 else raw
+
+     def _infer_content_type(self, content: str) -> str:
+         """Infer content type from content."""
+         # Check for code indicators
+         code_indicators = [
+             "def ", "class ", "import ", "from ", "return ",  # Python
+             "function ", "const ", "let ", "var ",  # JavaScript
+             "public ", "private ", "void ", "int ",  # Java/C++
+         ]
+
+         for indicator in code_indicators:
+             if indicator in content:
+                 return "code"
+
+         # Check for numeric answer
+         content_stripped = content.strip()
+         try:
+             float(content_stripped)
+             return "numeric"
+         except ValueError:
+             pass
+
+         return "text"
+
+     def create_candidate(self, role_input: RoleInput) -> Candidate:
+         """Execute Sainik and return a Candidate object."""
+         output = self(role_input)
+
+         if output.status == "error":
+             return Candidate(
+                 content="Error generating solution",
+                 content_type="text",
+                 confidence=0.0,
+                 warnings=[f"Implementor error: {output.error}"]
+             )
+
+         return Candidate.from_dict(output.core_output)
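
Read together, WORKER_SYSTEM_PROMPT and `parse_output` define a loose contract: the model is asked for a strict JSON envelope, and the parser falls back to text-extraction heuristics when that envelope is missing. The sketch below parses an invented reply that follows the schema, using plain `json` because the base `Role._extract_json` helper is not included in this hunk; the reply body and field values are illustrative only.

```python
import json

# Invented reply body following WORKER_SYSTEM_PROMPT's schema (code fence already stripped).
example_reply = """
{
  "content": "def add(a, b):\\n    return a + b",
  "content_type": "code",
  "language": "python",
  "reasoning_trace": ["Step 1: read the plan", "Step 2: implement add()"],
  "confidence": 0.9,
  "warnings": [],
  "imports": [],
  "key_points": ["pure function, no I/O"],
  "target_file": "src/math_utils.py",
  "tool_calls": []
}
"""

candidate = json.loads(example_reply)

# These are the keys Sainik.parse_output reads before building its Candidate dict.
print(candidate["content_type"], candidate["target_file"], candidate["confidence"])
```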
parishad/roles/sar_senapati.py
@@ -0,0 +1,164 @@
+ """
+ SarSenapati (Executor/PlannerExec) role for the Parishad council.
+ Converts high-level strategic plans into concrete, executable steps.
+ """
+
+ from typing import Any, Optional
+
+ from .base import (
+     Role,
+     RoleInput,
+     RoleOutput,
+     Slot,
+ )
+
+
+ PLANNER_EXEC_SYSTEM_PROMPT = """You are Sar-Senapati, the Executor in the Parishad council. Your job is to convert high-level strategic plans into concrete, executable steps.
+
+ Your responsibilities:
+ 1. Take the high-level plan and make it actionable
+ 2. Create detailed, step-by-step instructions
+ 3. Specify exact operations for each step
+ 4. Identify dependencies between steps
+ 5. Add verification checkpoints
+
+ You must ALWAYS respond with a valid JSON object in the following format:
+ ```json
+ {
+   "steps": [
+     {
+       "id": 1,
+       "description": "Concrete action to take",
+       "rationale": "Why this step is needed",
+       "expected_output": "What this step produces",
+       "depends_on": [],
+       "verification": "How to verify this step succeeded"
+     }
+   ],
+   "checkpoints": [1, 3],
+   "expected_output_type": "python_function|explanation|numeric_answer|structured_data",
+   "worker_instructions": "Specific guidance for the Implementor"
+ }
+ ```
+
+ Be specific and concrete. Every step should be directly actionable by the Implementor."""
+
+
+ PLANNER_EXEC_USER_TEMPLATE = """Convert the high-level plan into executable steps.
+
+ ORIGINAL QUERY:
+ {user_query}
+
+ TASK SPECIFICATION:
+ {task_spec}
+
+ HIGH-LEVEL PLAN:
+ {plan_high}
+
+ Create detailed, actionable steps. Respond with ONLY a valid JSON object."""
+
+
+ class SarSenapati(Role):
+     """
+     SarSenapati (Executor) converts high-level plans into executable steps.
+
+     - Slot: MID (7-13B)
+     - Purpose: Create detailed execution plan from strategy
+     - Output: Concrete steps with dependencies and checkpoints
+     """
+
+     name = "sar_senapati"
+     default_slot = Slot.MID
+
+     def __init__(self, model_runner: Any, **kwargs):
+         super().__init__(
+             model_runner=model_runner,
+             slot=kwargs.get("slot", Slot.MID),
+             max_tokens=kwargs.get("max_tokens", 1024),
+             temperature=kwargs.get("temperature", 0.4)
+         )
+
+     @property
+     def system_prompt(self) -> str:
+         return PLANNER_EXEC_SYSTEM_PROMPT
+
+     def format_input(self, role_input: RoleInput) -> str:
+         task_spec_str = self._format_task_spec(role_input.task_spec)
+         plan_high_str = self._format_plan_high(role_input.context.get("plan_high"))
+
+         return PLANNER_EXEC_USER_TEMPLATE.format(
+             user_query=role_input.user_query,
+             task_spec=task_spec_str,
+             plan_high=plan_high_str
+         )
+
+     def _format_task_spec(self, task_spec: Optional[dict]) -> str:
+         """Format task spec for inclusion in prompt."""
+         if not task_spec:
+             return "No task specification provided."
+
+         lines = [
+             f"Problem: {task_spec.get('problem', 'Not specified')}",
+             f"Constraints: {', '.join(task_spec.get('constraints', []))}",
+             f"Output Format: {task_spec.get('output_format', 'text')}",
+         ]
+         return "\n".join(lines)
+
+     def _format_plan_high(self, plan_high: Optional[dict]) -> str:
+         """Format high-level plan for inclusion in prompt."""
+         if not plan_high:
+             return "No high-level plan provided."
+
+         lines = [
+             f"Goal: {plan_high.get('goal', 'Not specified')}",
+             f"Approach: {plan_high.get('approach', 'Not specified')}",
+             f"Complexity: {plan_high.get('complexity', 'unknown')}",
+         ]
+
+         phases = plan_high.get("phases", [])
+         if phases:
+             lines.append("\nPhases:")
+             for phase in phases:
+                 lines.append(f" {phase.get('id', '?')}. {phase.get('name', 'Unnamed')}: {phase.get('description', '')}")
+
+         return "\n".join(lines)
+
+     def parse_output(self, raw_output: str) -> dict[str, Any]:
+         """Parse LLM output into execution plan dict."""
+         import json
+         import re
+
+         # Try to extract JSON from the response
+         json_match = re.search(r'\{[\s\S]*\}', raw_output)
+         if json_match:
+             try:
+                 data = json.loads(json_match.group())
+             except json.JSONDecodeError:
+                 data = {}
+         else:
+             data = {}
+
+         # Normalize steps
+         steps = []
+         for step in data.get("steps", []):
+             steps.append({
+                 "id": step.get("id", len(steps) + 1),
+                 "description": step.get("description", ""),
+                 "rationale": step.get("rationale", ""),
+                 "expected_output": step.get("expected_output", ""),
+                 "depends_on": step.get("depends_on", []),
+                 "verification": step.get("verification", "")
+             })
+
+         return {
+             "plan_exec": {
+                 "steps": steps,
+                 "checkpoints": data.get("checkpoints", []),
+                 "expected_output_type": data.get("expected_output_type", "text"),
+                 "worker_instructions": data.get("worker_instructions", "")
+             },
+             # Compatible return
+             "steps": steps,
+             "checkpoints": data.get("checkpoints", []),
+             "expected_output_type": data.get("expected_output_type", "text")
+         }
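
For reference, the normalization that `parse_output` performs can be walked through standalone: pull the first `{...}` blob out of the reply, then coerce each step into the fixed schema that downstream roles (for example Sainik's `_format_plan`) expect. The reply text below is invented for illustration; the extraction and normalization logic mirrors the method above.

```python
import json
import re

# Invented planner reply: prose wrapping a JSON plan, as the prompt permits.
raw_reply = """Here is the plan:
{
  "steps": [
    {"description": "Parse the CSV", "rationale": "need rows"},
    {"id": 2, "description": "Aggregate totals"}
  ],
  "checkpoints": [2],
  "expected_output_type": "python_function"
}"""

# Same extraction strategy as SarSenapati.parse_output: first "{" to last "}".
match = re.search(r'\{[\s\S]*\}', raw_reply)
data = json.loads(match.group()) if match else {}

# Coerce steps into the fixed schema; missing ids are filled positionally.
steps = []
for step in data.get("steps", []):
    steps.append({
        "id": step.get("id", len(steps) + 1),
        "description": step.get("description", ""),
        "rationale": step.get("rationale", ""),
        "expected_output": step.get("expected_output", ""),
        "depends_on": step.get("depends_on", []),
        "verification": step.get("verification", ""),
    })

print([s["id"] for s in steps])  # [1, 2]
```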
parishad/roles/vidushak.py
@@ -0,0 +1,69 @@
+ """
+ Vidushak (Lateral Thinker/Jester) role for the Parishad council.
+ Challenges the plan with creative alternatives and "out of the box" thinking.
+ """
+
+ from .base import (
+     Role,
+     RoleInput,
+     Slot,
+ )
+
+ VIDUSHAK_SYSTEM_PROMPT = """You are Vidushak, the Royal Jester and Lateral Thinker of the Parishad.
+ Your job is NOT to be funny, but to challenge assumptions and offer creative, unconventional alternatives to the proposed plan.
+
+ Think "Outside the Box". Identify what everyone else missed because they were too focused on logic.
+
+ Your output must be a valid JSON object:
+ ```json
+ {
+   "creative_challenge": "A fundamental challenge to the plan's assumptions",
+   "alternative_idea": "A completely different way to solve the problem",
+   "blind_spots": ["What is the council ignoring?"],
+   "confidence": 0.8
+ }
+ ```
+
+ If the plan is boring or standard, suggest something clever.
+ If the plan is too complex, suggest a simple hack.
+ """
+
+ VIDUSHAK_USER_TEMPLATE = """Review the plan and offer a creative challenge.
+
+ USER QUERY:
+ {user_query}
+
+ PLAN:
+ {plan}
+
+ Be the devil's advocate. Respond in JSON."""
+
+ class Vidushak(Role):
+     """
+     Vidushak (Lateral Thinker) - Challenges the status quo.
+     """
+
+     name = "vidushak"
+     default_slot = Slot.MID
+
+     @property
+     def system_prompt(self) -> str:
+         return VIDUSHAK_SYSTEM_PROMPT
+
+     def format_input(self, role_input: RoleInput) -> str:
+         # Helper to format dicts
+         def fmt(d): return str(d) if d else "None"
+
+         return VIDUSHAK_USER_TEMPLATE.format(
+             user_query=role_input.user_query,
+             plan=fmt(role_input.plan)
+         )
+
+     def parse_output(self, raw_output: str) -> dict:
+         data = self._extract_json(raw_output)
+         return {
+             "creative_challenge": data.get("creative_challenge", ""),
+             "alternative_idea": data.get("alternative_idea", ""),
+             "blind_spots": data.get("blind_spots", []),
+             "confidence": data.get("confidence", 0.5)
+         }
parishad/tools/__init__.py
@@ -0,0 +1,7 @@
+ from .base import BaseTool, ToolResult
+ from .perception import PerceptionTool
+ from .fs import FileSystemTool
+ from .shell import ShellTool
+ from .retrieval import RetrievalTool
+
+ __all__ = ["BaseTool", "ToolResult", "PerceptionTool", "FileSystemTool", "ShellTool", "RetrievalTool"]
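
`Sainik.format_input` relies only on a small duck-typed surface of these tools: a `name`, a `description`, and a `run(action, **kwargs)` call whose result exposes `.success` and `.data`. The real `BaseTool` and `ToolResult` live in `parishad.tools.base` and their constructors are not shown in this diff, so the stand-ins below are a hedged sketch of that interface rather than the package's actual classes.

```python
from dataclasses import dataclass
from typing import Any


@dataclass
class FakeResult:
    # The two attributes Sainik reads from a tool result.
    success: bool
    data: Any


class FakeFileSystemTool:
    # The attributes and method Sainik.format_input touches.
    name = "file_system"
    description = "List files in the workspace"

    def run(self, action: str, **kwargs) -> FakeResult:
        if action == "list":
            return FakeResult(success=True, data=["README.md", "src/main.py"])
        return FakeResult(success=False, data=None)


# A Sainik constructed with tools=[FakeFileSystemTool()] would inject this
# listing into its prompt as CURRENT DIRECTORY CONTEXT.
result = FakeFileSystemTool().run("list", path=".")
print(result.success, result.data)
```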