emdash_core-0.1.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (187)
  1. emdash_core/__init__.py +3 -0
  2. emdash_core/agent/__init__.py +37 -0
  3. emdash_core/agent/agents.py +225 -0
  4. emdash_core/agent/code_reviewer.py +476 -0
  5. emdash_core/agent/compaction.py +143 -0
  6. emdash_core/agent/context_manager.py +140 -0
  7. emdash_core/agent/events.py +338 -0
  8. emdash_core/agent/handlers.py +224 -0
  9. emdash_core/agent/inprocess_subagent.py +377 -0
  10. emdash_core/agent/mcp/__init__.py +50 -0
  11. emdash_core/agent/mcp/client.py +346 -0
  12. emdash_core/agent/mcp/config.py +302 -0
  13. emdash_core/agent/mcp/manager.py +496 -0
  14. emdash_core/agent/mcp/tool_factory.py +213 -0
  15. emdash_core/agent/prompts/__init__.py +38 -0
  16. emdash_core/agent/prompts/main_agent.py +104 -0
  17. emdash_core/agent/prompts/subagents.py +131 -0
  18. emdash_core/agent/prompts/workflow.py +136 -0
  19. emdash_core/agent/providers/__init__.py +34 -0
  20. emdash_core/agent/providers/base.py +143 -0
  21. emdash_core/agent/providers/factory.py +80 -0
  22. emdash_core/agent/providers/models.py +220 -0
  23. emdash_core/agent/providers/openai_provider.py +463 -0
  24. emdash_core/agent/providers/transformers_provider.py +217 -0
  25. emdash_core/agent/research/__init__.py +81 -0
  26. emdash_core/agent/research/agent.py +143 -0
  27. emdash_core/agent/research/controller.py +254 -0
  28. emdash_core/agent/research/critic.py +428 -0
  29. emdash_core/agent/research/macros.py +469 -0
  30. emdash_core/agent/research/planner.py +449 -0
  31. emdash_core/agent/research/researcher.py +436 -0
  32. emdash_core/agent/research/state.py +523 -0
  33. emdash_core/agent/research/synthesizer.py +594 -0
  34. emdash_core/agent/reviewer_profile.py +475 -0
  35. emdash_core/agent/rules.py +123 -0
  36. emdash_core/agent/runner.py +601 -0
  37. emdash_core/agent/session.py +262 -0
  38. emdash_core/agent/spec_schema.py +66 -0
  39. emdash_core/agent/specification.py +479 -0
  40. emdash_core/agent/subagent.py +397 -0
  41. emdash_core/agent/subagent_prompts.py +13 -0
  42. emdash_core/agent/toolkit.py +482 -0
  43. emdash_core/agent/toolkits/__init__.py +64 -0
  44. emdash_core/agent/toolkits/base.py +96 -0
  45. emdash_core/agent/toolkits/explore.py +47 -0
  46. emdash_core/agent/toolkits/plan.py +55 -0
  47. emdash_core/agent/tools/__init__.py +141 -0
  48. emdash_core/agent/tools/analytics.py +436 -0
  49. emdash_core/agent/tools/base.py +131 -0
  50. emdash_core/agent/tools/coding.py +484 -0
  51. emdash_core/agent/tools/github_mcp.py +592 -0
  52. emdash_core/agent/tools/history.py +13 -0
  53. emdash_core/agent/tools/modes.py +153 -0
  54. emdash_core/agent/tools/plan.py +206 -0
  55. emdash_core/agent/tools/plan_write.py +135 -0
  56. emdash_core/agent/tools/search.py +412 -0
  57. emdash_core/agent/tools/spec.py +341 -0
  58. emdash_core/agent/tools/task.py +262 -0
  59. emdash_core/agent/tools/task_output.py +204 -0
  60. emdash_core/agent/tools/tasks.py +454 -0
  61. emdash_core/agent/tools/traversal.py +588 -0
  62. emdash_core/agent/tools/web.py +179 -0
  63. emdash_core/analytics/__init__.py +5 -0
  64. emdash_core/analytics/engine.py +1286 -0
  65. emdash_core/api/__init__.py +5 -0
  66. emdash_core/api/agent.py +308 -0
  67. emdash_core/api/agents.py +154 -0
  68. emdash_core/api/analyze.py +264 -0
  69. emdash_core/api/auth.py +173 -0
  70. emdash_core/api/context.py +77 -0
  71. emdash_core/api/db.py +121 -0
  72. emdash_core/api/embed.py +131 -0
  73. emdash_core/api/feature.py +143 -0
  74. emdash_core/api/health.py +93 -0
  75. emdash_core/api/index.py +162 -0
  76. emdash_core/api/plan.py +110 -0
  77. emdash_core/api/projectmd.py +210 -0
  78. emdash_core/api/query.py +320 -0
  79. emdash_core/api/research.py +122 -0
  80. emdash_core/api/review.py +161 -0
  81. emdash_core/api/router.py +76 -0
  82. emdash_core/api/rules.py +116 -0
  83. emdash_core/api/search.py +119 -0
  84. emdash_core/api/spec.py +99 -0
  85. emdash_core/api/swarm.py +223 -0
  86. emdash_core/api/tasks.py +109 -0
  87. emdash_core/api/team.py +120 -0
  88. emdash_core/auth/__init__.py +17 -0
  89. emdash_core/auth/github.py +389 -0
  90. emdash_core/config.py +74 -0
  91. emdash_core/context/__init__.py +52 -0
  92. emdash_core/context/models.py +50 -0
  93. emdash_core/context/providers/__init__.py +11 -0
  94. emdash_core/context/providers/base.py +74 -0
  95. emdash_core/context/providers/explored_areas.py +183 -0
  96. emdash_core/context/providers/touched_areas.py +360 -0
  97. emdash_core/context/registry.py +73 -0
  98. emdash_core/context/reranker.py +199 -0
  99. emdash_core/context/service.py +260 -0
  100. emdash_core/context/session.py +352 -0
  101. emdash_core/core/__init__.py +104 -0
  102. emdash_core/core/config.py +454 -0
  103. emdash_core/core/exceptions.py +55 -0
  104. emdash_core/core/models.py +265 -0
  105. emdash_core/core/review_config.py +57 -0
  106. emdash_core/db/__init__.py +67 -0
  107. emdash_core/db/auth.py +134 -0
  108. emdash_core/db/models.py +91 -0
  109. emdash_core/db/provider.py +222 -0
  110. emdash_core/db/providers/__init__.py +5 -0
  111. emdash_core/db/providers/supabase.py +452 -0
  112. emdash_core/embeddings/__init__.py +24 -0
  113. emdash_core/embeddings/indexer.py +534 -0
  114. emdash_core/embeddings/models.py +192 -0
  115. emdash_core/embeddings/providers/__init__.py +7 -0
  116. emdash_core/embeddings/providers/base.py +112 -0
  117. emdash_core/embeddings/providers/fireworks.py +141 -0
  118. emdash_core/embeddings/providers/openai.py +104 -0
  119. emdash_core/embeddings/registry.py +146 -0
  120. emdash_core/embeddings/service.py +215 -0
  121. emdash_core/graph/__init__.py +26 -0
  122. emdash_core/graph/builder.py +134 -0
  123. emdash_core/graph/connection.py +692 -0
  124. emdash_core/graph/schema.py +416 -0
  125. emdash_core/graph/writer.py +667 -0
  126. emdash_core/ingestion/__init__.py +7 -0
  127. emdash_core/ingestion/change_detector.py +150 -0
  128. emdash_core/ingestion/git/__init__.py +5 -0
  129. emdash_core/ingestion/git/commit_analyzer.py +196 -0
  130. emdash_core/ingestion/github/__init__.py +6 -0
  131. emdash_core/ingestion/github/pr_fetcher.py +296 -0
  132. emdash_core/ingestion/github/task_extractor.py +100 -0
  133. emdash_core/ingestion/orchestrator.py +540 -0
  134. emdash_core/ingestion/parsers/__init__.py +10 -0
  135. emdash_core/ingestion/parsers/base_parser.py +66 -0
  136. emdash_core/ingestion/parsers/call_graph_builder.py +121 -0
  137. emdash_core/ingestion/parsers/class_extractor.py +154 -0
  138. emdash_core/ingestion/parsers/function_extractor.py +202 -0
  139. emdash_core/ingestion/parsers/import_analyzer.py +119 -0
  140. emdash_core/ingestion/parsers/python_parser.py +123 -0
  141. emdash_core/ingestion/parsers/registry.py +72 -0
  142. emdash_core/ingestion/parsers/ts_ast_parser.js +313 -0
  143. emdash_core/ingestion/parsers/typescript_parser.py +278 -0
  144. emdash_core/ingestion/repository.py +346 -0
  145. emdash_core/models/__init__.py +38 -0
  146. emdash_core/models/agent.py +68 -0
  147. emdash_core/models/index.py +77 -0
  148. emdash_core/models/query.py +113 -0
  149. emdash_core/planning/__init__.py +7 -0
  150. emdash_core/planning/agent_api.py +413 -0
  151. emdash_core/planning/context_builder.py +265 -0
  152. emdash_core/planning/feature_context.py +232 -0
  153. emdash_core/planning/feature_expander.py +646 -0
  154. emdash_core/planning/llm_explainer.py +198 -0
  155. emdash_core/planning/similarity.py +509 -0
  156. emdash_core/planning/team_focus.py +821 -0
  157. emdash_core/server.py +153 -0
  158. emdash_core/sse/__init__.py +5 -0
  159. emdash_core/sse/stream.py +196 -0
  160. emdash_core/swarm/__init__.py +17 -0
  161. emdash_core/swarm/merge_agent.py +383 -0
  162. emdash_core/swarm/session_manager.py +274 -0
  163. emdash_core/swarm/swarm_runner.py +226 -0
  164. emdash_core/swarm/task_definition.py +137 -0
  165. emdash_core/swarm/worker_spawner.py +319 -0
  166. emdash_core/swarm/worktree_manager.py +278 -0
  167. emdash_core/templates/__init__.py +10 -0
  168. emdash_core/templates/defaults/agent-builder.md.template +82 -0
  169. emdash_core/templates/defaults/focus.md.template +115 -0
  170. emdash_core/templates/defaults/pr-review-enhanced.md.template +309 -0
  171. emdash_core/templates/defaults/pr-review.md.template +80 -0
  172. emdash_core/templates/defaults/project.md.template +85 -0
  173. emdash_core/templates/defaults/research_critic.md.template +112 -0
  174. emdash_core/templates/defaults/research_planner.md.template +85 -0
  175. emdash_core/templates/defaults/research_synthesizer.md.template +128 -0
  176. emdash_core/templates/defaults/reviewer.md.template +81 -0
  177. emdash_core/templates/defaults/spec.md.template +41 -0
  178. emdash_core/templates/defaults/tasks.md.template +78 -0
  179. emdash_core/templates/loader.py +296 -0
  180. emdash_core/utils/__init__.py +45 -0
  181. emdash_core/utils/git.py +84 -0
  182. emdash_core/utils/image.py +502 -0
  183. emdash_core/utils/logger.py +51 -0
  184. emdash_core-0.1.7.dist-info/METADATA +35 -0
  185. emdash_core-0.1.7.dist-info/RECORD +187 -0
  186. emdash_core-0.1.7.dist-info/WHEEL +4 -0
  187. emdash_core-0.1.7.dist-info/entry_points.txt +3 -0
emdash_core/agent/providers/transformers_provider.py
@@ -0,0 +1,217 @@
+ """Local Transformers-based LLM provider.
+
+ Uses HuggingFace transformers to run models locally without API calls.
+ Requires: pip install transformers torch accelerate
+ """
+
+ import logging
+ from typing import Any, Optional
+
+ from .base import LLMProvider, LLMResponse, ToolCall
+
+ log = logging.getLogger(__name__)
+
+
+ # Model specifications: model_id -> (context_limit, description)
+ LOCAL_MODELS = {
+     "microsoft/Phi-3-mini-4k-instruct": (4096, "Phi-3 Mini 4K - 3.8B params, good for summaries"),
+     "microsoft/Phi-3-mini-128k-instruct": (131072, "Phi-3 Mini 128K - 3.8B params, long context"),
+     "TinyLlama/TinyLlama-1.1B-Chat-v1.0": (2048, "TinyLlama - 1.1B params, very fast"),
+     "Qwen/Qwen2.5-1.5B-Instruct": (32768, "Qwen 2.5 1.5B - fast and capable"),
+     "Qwen/Qwen2.5-3B-Instruct": (32768, "Qwen 2.5 3B - balanced speed/quality"),
+ }
+
+ DEFAULT_LOCAL_MODEL = "Qwen/Qwen2.5-1.5B-Instruct"
+
+
+ class TransformersProvider(LLMProvider):
+     """
+     Local LLM provider using HuggingFace Transformers.
+
+     Runs models locally - no API key or network required.
+     Models are lazy-loaded on first use to avoid startup overhead.
+
+     Usage:
+         provider = TransformersProvider("Qwen/Qwen2.5-1.5B-Instruct")
+         response = provider.chat([{"role": "user", "content": "Hello"}])
+         print(response.content)
+
+     Or via factory:
+         provider = get_provider("local:Qwen/Qwen2.5-1.5B-Instruct")
+     """
+
+     def __init__(self, model: str = DEFAULT_LOCAL_MODEL):
+         """Initialize with a model name.
+
+         Args:
+             model: HuggingFace model ID (e.g., "Qwen/Qwen2.5-1.5B-Instruct")
+         """
+         # Strip local: prefix if present
+         if model.startswith("local:"):
+             model = model[6:]
+         elif model.startswith("transformers:"):
+             model = model[13:]
+
+         super().__init__(model)
+         self._pipeline = None
+         self._tokenizer = None
+
+     @property
+     def is_available(self) -> bool:
+         """Check if transformers is installed."""
+         try:
+             import transformers
+             import torch
+             return True
+         except ImportError:
+             return False
+
+     def _get_pipeline(self):
+         """Lazy-load the text generation pipeline."""
+         if self._pipeline is None:
+             if not self.is_available:
+                 raise ImportError(
+                     "transformers and torch are required for local LLM. "
+                     "Install with: pip install transformers torch accelerate"
+                 )
+
+             from transformers import pipeline
+             import torch
+
+             log.info(f"Loading local model: {self.model}")
+
+             # Determine device and dtype
+             if torch.cuda.is_available():
+                 device_map = "auto"
+                 torch_dtype = torch.float16
+                 log.info("Using CUDA GPU")
+             elif torch.backends.mps.is_available():
+                 device_map = "mps"
+                 torch_dtype = torch.float16
+                 log.info("Using Apple MPS")
+             else:
+                 device_map = "cpu"
+                 torch_dtype = torch.float32
+                 log.info("Using CPU (slower)")
+
+             self._pipeline = pipeline(
+                 "text-generation",
+                 model=self.model,
+                 device_map=device_map,
+                 torch_dtype=torch_dtype,
+                 trust_remote_code=True,
+             )
+             log.info(f"Model loaded: {self.model}")
+
+         return self._pipeline
+
+     def chat(
+         self,
+         messages: list[dict],
+         tools: Optional[list[dict]] = None,
+         system: Optional[str] = None,
+         reasoning: bool = False,
+     ) -> LLMResponse:
+         """Generate response using local model.
+
+         Args:
+             messages: List of message dicts with 'role' and 'content'
+             tools: Tool schemas (not supported for local models)
+             system: Optional system prompt
+             reasoning: Enable reasoning mode (not supported)
+
+         Returns:
+             LLMResponse with generated content
+         """
+         if tools:
+             log.warning("Tool calling not supported with local transformers models")
+
+         pipe = self._get_pipeline()
+
+         # Build messages list with system prompt
+         chat_messages = []
+         if system:
+             chat_messages.append({"role": "system", "content": system})
+         chat_messages.extend(messages)
+
+         # Use the pipeline's chat template
+         try:
+             result = pipe(
+                 chat_messages,
+                 max_new_tokens=512,
+                 do_sample=True,
+                 temperature=0.7,
+                 top_p=0.9,
+                 return_full_text=False,
+             )
+
+             content = result[0]["generated_text"]
+
+             # Handle case where result is a list of messages
+             if isinstance(content, list) and len(content) > 0:
+                 content = content[-1].get("content", "")
+
+             return LLMResponse(content=content.strip(), raw=result)
+
+         except Exception as e:
+             log.error(f"Generation failed: {e}")
+             # Fallback to simple prompt format
+             return self._fallback_generate(chat_messages)
+
+     def _fallback_generate(self, messages: list[dict]) -> LLMResponse:
+         """Fallback generation using simple prompt format."""
+         pipe = self._get_pipeline()
+
+         # Build a simple prompt
+         parts = []
+         for msg in messages:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+             if role == "system":
+                 parts.append(f"System: {content}")
+             elif role == "user":
+                 parts.append(f"User: {content}")
+             elif role == "assistant":
+                 parts.append(f"Assistant: {content}")
+         parts.append("Assistant:")
+
+         prompt = "\n".join(parts)
+
+         result = pipe(
+             prompt,
+             max_new_tokens=512,
+             do_sample=True,
+             temperature=0.7,
+             top_p=0.9,
+             return_full_text=False,
+         )
+
+         content = result[0]["generated_text"]
+         return LLMResponse(content=content.strip(), raw=result)
+
+     def get_context_limit(self) -> int:
+         """Get the context window size for this model."""
+         if self.model in LOCAL_MODELS:
+             return LOCAL_MODELS[self.model][0]
+         # Default conservative limit
+         return 2048
+
+     def format_tool_result(self, tool_call_id: str, result: str) -> dict:
+         """Format a tool result message (not supported)."""
+         return {"role": "tool", "content": result, "tool_call_id": tool_call_id}
+
+     def format_assistant_message(self, response: LLMResponse) -> dict:
+         """Format an assistant response for message history."""
+         return {"role": "assistant", "content": response.content or ""}
+
+     @classmethod
+     def list_models(cls) -> list[dict]:
+         """List available local models."""
+         return [
+             {
+                 "id": model_id,
+                 "context_limit": spec[0],
+                 "description": spec[1],
+             }
+             for model_id, spec in LOCAL_MODELS.items()
+         ]
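A minimal usage sketch for the provider above (illustrative, not shipped in the package; the prompt text and print statements are assumptions):

    from emdash_core.agent.providers.transformers_provider import TransformersProvider

    provider = TransformersProvider("Qwen/Qwen2.5-1.5B-Instruct")
    if provider.is_available:
        # Runs fully locally; tool schemas would be ignored with a warning.
        response = provider.chat(
            [{"role": "user", "content": "Explain lazy loading in one sentence."}],
            system="Answer briefly.",
        )
        print(response.content)
    else:
        print("Missing extras: pip install transformers torch accelerate")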
emdash_core/agent/research/__init__.py
@@ -0,0 +1,81 @@
+ """Deep Research Agent module.
+
+ This module provides a multi-agent research system for deep code exploration.
+
+ Components:
+ - PlannerAgent: Creates research plans with prioritized questions
+ - ResearcherAgent: Executes tool macros and collects evidence
+ - CriticAgent: Evaluates research quality and team value adherence
+ - SynthesizerAgent: Generates final reports
+ - ResearchController: Orchestrates the research loop
+ - DeepResearchAgent: Main entry point
+
+ Team Values Enforced:
+ - V1: Truth over fluency - prefer "unknown" over guesses
+ - V2: Evidence-first - all claims must be backed by tool outputs
+ - V3: Reviewer-first - output includes review checklists
+ - V4: Cost awareness - minimize tool calls, use budget limits
+ - V5: Actionable outcomes - end with concrete tasks
+ - V6: Team alignment - use team vocabulary
+ """
+
+ from .state import (
+     EvidenceItem,
+     Claim,
+     Gap,
+     ResearchQuestion,
+     ResearchPlan,
+     FollowUpQuestion,
+     Contradiction,
+     ValuesViolation,
+     CritiqueScores,
+     Critique,
+     IterationResult,
+     ResearchState,
+ )
+ from .planner import PlannerAgent
+ from .researcher import ResearcherAgent
+ from .critic import CriticAgent
+ from .synthesizer import SynthesizerAgent
+ from .controller import ResearchController
+ from .agent import DeepResearchAgent, research
+ from .macros import (
+     ToolMacro,
+     MacroStep,
+     MacroExecutor,
+     get_macro,
+     list_macros,
+     suggest_macros,
+ )
+
+ __all__ = [
+     # State
+     "EvidenceItem",
+     "Claim",
+     "Gap",
+     "ResearchQuestion",
+     "ResearchPlan",
+     "FollowUpQuestion",
+     "Contradiction",
+     "ValuesViolation",
+     "CritiqueScores",
+     "Critique",
+     "IterationResult",
+     "ResearchState",
+     # Agents
+     "PlannerAgent",
+     "ResearcherAgent",
+     "CriticAgent",
+     "SynthesizerAgent",
+     "ResearchController",
+     "DeepResearchAgent",
+     # Convenience
+     "research",
+     # Macros
+     "ToolMacro",
+     "MacroStep",
+     "MacroExecutor",
+     "get_macro",
+     "list_macros",
+     "suggest_macros",
+ ]
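Given these exports, a quick end-to-end call goes through the research() convenience function defined in agent.py below; a sketch (assuming an indexed repository and a configured provider; the goal text is illustrative):

    from emdash_core.agent.research import research

    report = research(
        "How does authentication work?",
        model_tier="fast",
        max_iterations=2,
    )
    print(report)  # final report as markdown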
emdash_core/agent/research/agent.py
@@ -0,0 +1,143 @@
+ """Deep Research Agent - main entry point.
+
+ Provides a simple interface for deep code research using the
+ multi-agent research system.
+ """
+
+ from typing import Optional
+
+ from rich.console import Console
+
+ from ..events import AgentEventEmitter, NullEmitter
+ from .controller import ResearchController
+ from .state import ResearchState
+
+
+ class DeepResearchAgent:
+     """High-level interface for deep code research.
+
+     Wraps the ResearchController with a simpler API.
+
+     Example:
+         agent = DeepResearchAgent()
+         report = agent.research("How does authentication work?")
+         print(report)
+     """
+
+     def __init__(
+         self,
+         model_tier: str = "fast",
+         verbose: bool = True,
+         emitter: Optional[AgentEventEmitter] = None,
+     ):
+         """Initialize the deep research agent.
+
+         Args:
+             model_tier: Model tier (fast, standard, powerful)
+             verbose: Whether to print progress
+             emitter: Event emitter for unified output
+         """
+         self.model_tier = model_tier
+         self.verbose = verbose
+         self.emitter = emitter or NullEmitter(agent_name="DeepResearchAgent")
+         self.console = Console()
+
+         self.controller = ResearchController(
+             model_tier=model_tier,
+             verbose=verbose,
+             emitter=self.emitter,
+         )
+
+         self._last_state: Optional[ResearchState] = None
+
+     def research(
+         self,
+         goal: str,
+         context: str = "",
+         max_iterations: int = 3,
+         budgets: Optional[dict] = None,
+     ) -> str:
+         """Conduct deep research on a topic.
+
+         Args:
+             goal: Research goal/question
+             context: Additional context
+             max_iterations: Maximum research iterations
+             budgets: Resource budgets
+
+         Returns:
+             Final research report as markdown
+         """
+         report, state = self.controller.research(
+             goal=goal,
+             context=context,
+             max_iterations=max_iterations,
+             budgets=budgets,
+         )
+
+         self._last_state = state
+         return report
+
+     def get_last_state(self) -> Optional[ResearchState]:
+         """Get the state from the last research run.
+
+         Returns:
+             ResearchState or None if no research has been run
+         """
+         return self._last_state
+
+     def get_summary(self) -> str:
+         """Get a summary of the last research run.
+
+         Returns:
+             Summary string
+         """
+         if not self._last_state:
+             return "No research has been conducted yet."
+
+         state = self._last_state
+
+         lines = [
+             f"Research: {state.plan.goal}",
+             f"Iterations: {state.iteration}",
+             f"Evidence: {len(state.get_all_evidence())}",
+             f"Claims: {len(state.get_all_claims())}",
+             f"Gaps: {len(state.get_all_gaps())}",
+             f"Status: {'Approved' if state.is_approved() else 'In Progress'}",
+             f"Budget used: {state.budget_used_percent():.0f}%",
+         ]
+
+         return "\n".join(lines)
+
+
+ def research(
+     goal: str,
+     context: str = "",
+     model_tier: str = "fast",
+     max_iterations: int = 3,
+     verbose: bool = True,
+ ) -> str:
+     """Convenience function for quick research.
+
+     Args:
+         goal: Research goal/question
+         context: Additional context
+         model_tier: Model tier to use
+         max_iterations: Maximum iterations
+         verbose: Whether to print progress
+
+     Returns:
+         Research report as markdown
+
+     Example:
+         report = research("How does the payment system work?")
+     """
+     agent = DeepResearchAgent(
+         model_tier=model_tier,
+         verbose=verbose,
+     )
+     return agent.research(
+         goal=goal,
+         context=context,
+         max_iterations=max_iterations,
+     )
1
+ """Research controller for orchestrating the research loop.
2
+
3
+ The Controller manages the research cycle:
4
+ 1. Plan -> questions
5
+ 2. Research -> evidence, claims
6
+ 3. Critique -> decision
7
+ 4. If not approved, back to 2 or adjust plan
8
+ 5. Synthesize final report
9
+ """
10
+
11
+ from typing import Optional
12
+
13
+ from rich.console import Console
14
+
15
+ from ..events import AgentEventEmitter, NullEmitter
16
+ from .state import (
17
+ ResearchPlan,
18
+ ResearchState,
19
+ IterationResult,
20
+ )
21
+ from .planner import PlannerAgent
22
+ from .researcher import ResearcherAgent
23
+ from .critic import CriticAgent
24
+ from .synthesizer import SynthesizerAgent
25
+
26
+
27
+ # Model tiers for escalation
28
+ MODEL_TIERS = {
29
+ "fast": "gpt-4o-mini",
30
+ "standard": "gpt-4o",
31
+ "powerful": "gpt-4o",
32
+ }
33
+
34
+
35
+ class ResearchController:
36
+ """Orchestrates the multi-agent research loop.
37
+
38
+ The Controller coordinates:
39
+ - PlannerAgent: Creates research plan
40
+ - ResearcherAgent: Executes tools, collects evidence
41
+ - CriticAgent: Evaluates progress
42
+ - SynthesizerAgent: Writes final report
43
+
44
+ It handles model tier escalation and budget management.
45
+ """
46
+
47
+ def __init__(
48
+ self,
49
+ model_tier: str = "fast",
50
+ verbose: bool = True,
51
+ emitter: Optional[AgentEventEmitter] = None,
52
+ ):
53
+ """Initialize the research controller.
54
+
55
+ Args:
56
+ model_tier: Initial model tier (fast, standard, powerful)
57
+ verbose: Whether to print progress
58
+ emitter: Event emitter for unified output
59
+ """
60
+ self.model_tier = model_tier
61
+ self.verbose = verbose
62
+ self.console = Console()
63
+ self.emitter = emitter or NullEmitter(agent_name="ResearchController")
64
+
65
+ # Initialize agents with current model tier
66
+ model = MODEL_TIERS.get(model_tier, "gpt-4o-mini")
67
+ self.planner = PlannerAgent(model=model, verbose=verbose)
68
+ self.researcher = ResearcherAgent(model=model, verbose=verbose, emitter=self.emitter)
69
+ self.critic = CriticAgent(model=model, verbose=verbose)
70
+ self.synthesizer = SynthesizerAgent(model=model, verbose=verbose)
71
+
72
+ def research(
73
+ self,
74
+ goal: str,
75
+ context: str = "",
76
+ max_iterations: int = 3,
77
+ budgets: Optional[dict] = None,
78
+ ) -> tuple[str, ResearchState]:
79
+ """Execute the full research loop.
80
+
81
+ Args:
82
+ goal: Research goal
83
+ context: Additional context
84
+ max_iterations: Maximum iterations
85
+ budgets: Resource budgets
86
+
87
+ Returns:
88
+ Tuple of (final_report, research_state)
89
+ """
90
+ if self.verbose:
91
+ self.console.print(f"\n[bold cyan]Starting research: {goal}[/bold cyan]\n")
92
+
93
+ # Phase 1: Planning
94
+ plan = self.planner.create_plan(
95
+ goal=goal,
96
+ context=context,
97
+ max_iterations=max_iterations,
98
+ budgets=budgets,
99
+ )
100
+
101
+ # Initialize state
102
+ state = ResearchState(plan=plan)
103
+
104
+ # Phase 2: Research Loop
105
+ while state.iteration < plan.max_iterations:
106
+ if self.verbose:
107
+ self.console.print(f"\n[cyan]--- Iteration {state.iteration + 1} ---[/cyan]")
108
+
109
+ # Get questions for this iteration
110
+ questions = self._get_questions_for_iteration(state)
111
+ if not questions:
112
+ if self.verbose:
113
+ self.console.print("[yellow]No more questions to investigate[/yellow]")
114
+ break
115
+
116
+ # Run research
117
+ evidence, updated_context = self.researcher.run_macros(
118
+ questions=questions,
119
+ context=state.context,
120
+ budget=state.remaining_budget,
121
+ )
122
+
123
+ # Update context
124
+ state.context.update(updated_context)
125
+
126
+ # Update budget
127
+ state.remaining_budget["tool_calls"] = max(
128
+ 0,
129
+ state.remaining_budget.get("tool_calls", 0) - len(evidence)
130
+ )
131
+
132
+ # Generate claims
133
+ all_prior_claims = state.get_all_claims()
134
+ claims = self.researcher.propose_claims(
135
+ goal=plan.goal,
136
+ questions=questions,
137
+ evidence=evidence,
138
+ prior_claims=all_prior_claims,
139
+ )
140
+
141
+ # Identify gaps
142
+ gaps = self.researcher.identify_gaps(plan, claims, evidence)
143
+
144
+ # Critique
145
+ critique = self.critic.evaluate(
146
+ plan=plan,
147
+ evidence=evidence,
148
+ claims=claims,
149
+ gaps=gaps,
150
+ iteration=state.iteration,
151
+ budget_used_percent=state.budget_used_percent(),
152
+ )
153
+
154
+ # Record iteration
155
+ result = IterationResult(
156
+ iteration=state.iteration,
157
+ evidence=evidence,
158
+ claims=claims,
159
+ gaps=gaps,
160
+ critique=critique,
161
+ model_tier=self.model_tier,
162
+ )
163
+ state.history.append(result)
164
+ state.iteration += 1
165
+
166
+ # Check decision
167
+ if critique.decision == "APPROVE":
168
+ if self.verbose:
169
+ self.console.print("[green]Research APPROVED[/green]")
170
+ break
171
+
172
+ elif critique.decision == "ESCALATE":
173
+ if self._escalate_model():
174
+ if self.verbose:
175
+ self.console.print(f"[yellow]Escalated to {self.model_tier}[/yellow]")
176
+ else:
177
+ if self.verbose:
178
+ self.console.print("[yellow]Cannot escalate further[/yellow]")
179
+ break
180
+
181
+ elif critique.decision == "REJECT":
182
+ if self.verbose:
183
+ self.console.print("[red]Research REJECTED[/red]")
184
+ for issue in critique.must_fix:
185
+ self.console.print(f" - {issue}")
186
+ # Try to fix in next iteration
187
+
188
+ # Add follow-up questions to queue
189
+ for followup in critique.follow_up_questions:
190
+ if followup.qid and followup.qid not in state.answered_questions:
191
+ if followup.qid not in state.question_queue:
192
+ state.question_queue.insert(0, followup.qid)
193
+
194
+ # Phase 3: Synthesis
195
+ report = self.synthesizer.write(plan, state.history)
196
+
197
+ return report, state
198
+
199
+ def _get_questions_for_iteration(
200
+ self,
201
+ state: ResearchState,
202
+ ) -> list:
203
+ """Get questions to investigate in this iteration.
204
+
205
+ Prioritizes P0, then P1, then follow-ups from Critic.
206
+
207
+ Args:
208
+ state: Current research state
209
+
210
+ Returns:
211
+ List of ResearchQuestion objects
212
+ """
213
+ questions = []
214
+
215
+ # Take from queue
216
+ qids_to_process = state.question_queue[:3] # Up to 3 at a time
217
+
218
+ for qid in qids_to_process:
219
+ question = state.get_question_by_qid(qid)
220
+ if question and qid not in state.answered_questions:
221
+ questions.append(question)
222
+ state.answered_questions.add(qid)
223
+
224
+ # Remove processed from queue
225
+ for qid in qids_to_process:
226
+ if qid in state.question_queue:
227
+ state.question_queue.remove(qid)
228
+
229
+ return questions
230
+
231
+ def _escalate_model(self) -> bool:
232
+ """Escalate to a more powerful model tier.
233
+
234
+ Returns:
235
+ True if escalation was possible
236
+ """
237
+ tiers = ["fast", "standard", "powerful"]
238
+ current_idx = tiers.index(self.model_tier) if self.model_tier in tiers else 0
239
+
240
+ if current_idx >= len(tiers) - 1:
241
+ return False
242
+
243
+ self.model_tier = tiers[current_idx + 1]
244
+ model = MODEL_TIERS[self.model_tier]
245
+
246
+ # Reinitialize agents with new model
247
+ self.planner = PlannerAgent(model=model, verbose=self.verbose)
248
+ self.researcher = ResearcherAgent(
249
+ model=model, verbose=self.verbose, emitter=self.emitter
250
+ )
251
+ self.critic = CriticAgent(model=model, verbose=self.verbose)
252
+ self.synthesizer = SynthesizerAgent(model=model, verbose=self.verbose)
253
+
254
+ return True
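Driving the loop directly returns both the report and the state; a sketch (the budgets dict shape is an assumption inferred from the remaining_budget["tool_calls"] accounting above):

    from emdash_core.agent.research import ResearchController

    controller = ResearchController(model_tier="fast")
    report, state = controller.research(
        goal="How is the research budget enforced?",
        max_iterations=3,
        budgets={"tool_calls": 30},  # assumed key; mirrors remaining_budget["tool_calls"]
    )
    print(state.budget_used_percent())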