superqode 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (288)
  1. superqode/__init__.py +33 -0
  2. superqode/acp/__init__.py +23 -0
  3. superqode/acp/client.py +913 -0
  4. superqode/acp/permission_screen.py +457 -0
  5. superqode/acp/types.py +480 -0
  6. superqode/acp_discovery.py +856 -0
  7. superqode/agent/__init__.py +22 -0
  8. superqode/agent/edit_strategies.py +334 -0
  9. superqode/agent/loop.py +892 -0
  10. superqode/agent/qe_report_templates.py +39 -0
  11. superqode/agent/system_prompts.py +353 -0
  12. superqode/agent_output.py +721 -0
  13. superqode/agent_stream.py +953 -0
  14. superqode/agents/__init__.py +59 -0
  15. superqode/agents/acp_registry.py +305 -0
  16. superqode/agents/client.py +249 -0
  17. superqode/agents/data/augmentcode.com.toml +51 -0
  18. superqode/agents/data/cagent.dev.toml +51 -0
  19. superqode/agents/data/claude.com.toml +60 -0
  20. superqode/agents/data/codeassistant.dev.toml +51 -0
  21. superqode/agents/data/codex.openai.com.toml +57 -0
  22. superqode/agents/data/fastagent.ai.toml +66 -0
  23. superqode/agents/data/geminicli.com.toml +77 -0
  24. superqode/agents/data/goose.block.xyz.toml +54 -0
  25. superqode/agents/data/junie.jetbrains.com.toml +56 -0
  26. superqode/agents/data/kimi.moonshot.cn.toml +57 -0
  27. superqode/agents/data/llmlingagent.dev.toml +51 -0
  28. superqode/agents/data/molt.bot.toml +49 -0
  29. superqode/agents/data/opencode.ai.toml +60 -0
  30. superqode/agents/data/stakpak.dev.toml +51 -0
  31. superqode/agents/data/vtcode.dev.toml +51 -0
  32. superqode/agents/discovery.py +266 -0
  33. superqode/agents/messaging.py +160 -0
  34. superqode/agents/persona.py +166 -0
  35. superqode/agents/registry.py +421 -0
  36. superqode/agents/schema.py +72 -0
  37. superqode/agents/unified.py +367 -0
  38. superqode/app/__init__.py +111 -0
  39. superqode/app/constants.py +314 -0
  40. superqode/app/css.py +366 -0
  41. superqode/app/models.py +118 -0
  42. superqode/app/suggester.py +125 -0
  43. superqode/app/widgets.py +1591 -0
  44. superqode/app_enhanced.py +399 -0
  45. superqode/app_main.py +17187 -0
  46. superqode/approval.py +312 -0
  47. superqode/atomic.py +296 -0
  48. superqode/commands/__init__.py +1 -0
  49. superqode/commands/acp.py +965 -0
  50. superqode/commands/agents.py +180 -0
  51. superqode/commands/auth.py +278 -0
  52. superqode/commands/config.py +374 -0
  53. superqode/commands/init.py +826 -0
  54. superqode/commands/providers.py +819 -0
  55. superqode/commands/qe.py +1145 -0
  56. superqode/commands/roles.py +380 -0
  57. superqode/commands/serve.py +172 -0
  58. superqode/commands/suggestions.py +127 -0
  59. superqode/commands/superqe.py +460 -0
  60. superqode/config/__init__.py +51 -0
  61. superqode/config/loader.py +812 -0
  62. superqode/config/schema.py +498 -0
  63. superqode/core/__init__.py +111 -0
  64. superqode/core/roles.py +281 -0
  65. superqode/danger.py +386 -0
  66. superqode/data/superqode-template.yaml +1522 -0
  67. superqode/design_system.py +1080 -0
  68. superqode/dialogs/__init__.py +6 -0
  69. superqode/dialogs/base.py +39 -0
  70. superqode/dialogs/model.py +130 -0
  71. superqode/dialogs/provider.py +870 -0
  72. superqode/diff_view.py +919 -0
  73. superqode/enterprise.py +21 -0
  74. superqode/evaluation/__init__.py +25 -0
  75. superqode/evaluation/adapters.py +93 -0
  76. superqode/evaluation/behaviors.py +89 -0
  77. superqode/evaluation/engine.py +209 -0
  78. superqode/evaluation/scenarios.py +96 -0
  79. superqode/execution/__init__.py +36 -0
  80. superqode/execution/linter.py +538 -0
  81. superqode/execution/modes.py +347 -0
  82. superqode/execution/resolver.py +283 -0
  83. superqode/execution/runner.py +642 -0
  84. superqode/file_explorer.py +811 -0
  85. superqode/file_viewer.py +471 -0
  86. superqode/flash.py +183 -0
  87. superqode/guidance/__init__.py +58 -0
  88. superqode/guidance/config.py +203 -0
  89. superqode/guidance/prompts.py +71 -0
  90. superqode/harness/__init__.py +54 -0
  91. superqode/harness/accelerator.py +291 -0
  92. superqode/harness/config.py +319 -0
  93. superqode/harness/validator.py +147 -0
  94. superqode/history.py +279 -0
  95. superqode/integrations/superopt_runner.py +124 -0
  96. superqode/logging/__init__.py +49 -0
  97. superqode/logging/adapters.py +219 -0
  98. superqode/logging/formatter.py +923 -0
  99. superqode/logging/integration.py +341 -0
  100. superqode/logging/sinks.py +170 -0
  101. superqode/logging/unified_log.py +417 -0
  102. superqode/lsp/__init__.py +26 -0
  103. superqode/lsp/client.py +544 -0
  104. superqode/main.py +1069 -0
  105. superqode/mcp/__init__.py +89 -0
  106. superqode/mcp/auth_storage.py +380 -0
  107. superqode/mcp/client.py +1236 -0
  108. superqode/mcp/config.py +319 -0
  109. superqode/mcp/integration.py +337 -0
  110. superqode/mcp/oauth.py +436 -0
  111. superqode/mcp/oauth_callback.py +385 -0
  112. superqode/mcp/types.py +290 -0
  113. superqode/memory/__init__.py +31 -0
  114. superqode/memory/feedback.py +342 -0
  115. superqode/memory/store.py +522 -0
  116. superqode/notifications.py +369 -0
  117. superqode/optimization/__init__.py +5 -0
  118. superqode/optimization/config.py +33 -0
  119. superqode/permissions/__init__.py +25 -0
  120. superqode/permissions/rules.py +488 -0
  121. superqode/plan.py +323 -0
  122. superqode/providers/__init__.py +33 -0
  123. superqode/providers/gateway/__init__.py +165 -0
  124. superqode/providers/gateway/base.py +228 -0
  125. superqode/providers/gateway/litellm_gateway.py +1170 -0
  126. superqode/providers/gateway/openresponses_gateway.py +436 -0
  127. superqode/providers/health.py +297 -0
  128. superqode/providers/huggingface/__init__.py +74 -0
  129. superqode/providers/huggingface/downloader.py +472 -0
  130. superqode/providers/huggingface/endpoints.py +442 -0
  131. superqode/providers/huggingface/hub.py +531 -0
  132. superqode/providers/huggingface/inference.py +394 -0
  133. superqode/providers/huggingface/transformers_runner.py +516 -0
  134. superqode/providers/local/__init__.py +100 -0
  135. superqode/providers/local/base.py +438 -0
  136. superqode/providers/local/discovery.py +418 -0
  137. superqode/providers/local/lmstudio.py +256 -0
  138. superqode/providers/local/mlx.py +457 -0
  139. superqode/providers/local/ollama.py +486 -0
  140. superqode/providers/local/sglang.py +268 -0
  141. superqode/providers/local/tgi.py +260 -0
  142. superqode/providers/local/tool_support.py +477 -0
  143. superqode/providers/local/vllm.py +258 -0
  144. superqode/providers/manager.py +1338 -0
  145. superqode/providers/models.py +1016 -0
  146. superqode/providers/models_dev.py +578 -0
  147. superqode/providers/openresponses/__init__.py +87 -0
  148. superqode/providers/openresponses/converters/__init__.py +17 -0
  149. superqode/providers/openresponses/converters/messages.py +343 -0
  150. superqode/providers/openresponses/converters/tools.py +268 -0
  151. superqode/providers/openresponses/schema/__init__.py +56 -0
  152. superqode/providers/openresponses/schema/models.py +585 -0
  153. superqode/providers/openresponses/streaming/__init__.py +5 -0
  154. superqode/providers/openresponses/streaming/parser.py +338 -0
  155. superqode/providers/openresponses/tools/__init__.py +21 -0
  156. superqode/providers/openresponses/tools/apply_patch.py +352 -0
  157. superqode/providers/openresponses/tools/code_interpreter.py +290 -0
  158. superqode/providers/openresponses/tools/file_search.py +333 -0
  159. superqode/providers/openresponses/tools/mcp_adapter.py +252 -0
  160. superqode/providers/registry.py +716 -0
  161. superqode/providers/usage.py +332 -0
  162. superqode/pure_mode.py +384 -0
  163. superqode/qr/__init__.py +23 -0
  164. superqode/qr/dashboard.py +781 -0
  165. superqode/qr/generator.py +1018 -0
  166. superqode/qr/templates.py +135 -0
  167. superqode/safety/__init__.py +41 -0
  168. superqode/safety/sandbox.py +413 -0
  169. superqode/safety/warnings.py +256 -0
  170. superqode/server/__init__.py +33 -0
  171. superqode/server/lsp_server.py +775 -0
  172. superqode/server/web.py +250 -0
  173. superqode/session/__init__.py +25 -0
  174. superqode/session/persistence.py +580 -0
  175. superqode/session/sharing.py +477 -0
  176. superqode/session.py +475 -0
  177. superqode/sidebar.py +2991 -0
  178. superqode/stream_view.py +648 -0
  179. superqode/styles/__init__.py +3 -0
  180. superqode/superqe/__init__.py +184 -0
  181. superqode/superqe/acp_runner.py +1064 -0
  182. superqode/superqe/constitution/__init__.py +62 -0
  183. superqode/superqe/constitution/evaluator.py +308 -0
  184. superqode/superqe/constitution/loader.py +432 -0
  185. superqode/superqe/constitution/schema.py +250 -0
  186. superqode/superqe/events.py +591 -0
  187. superqode/superqe/frameworks/__init__.py +65 -0
  188. superqode/superqe/frameworks/base.py +234 -0
  189. superqode/superqe/frameworks/e2e.py +263 -0
  190. superqode/superqe/frameworks/executor.py +237 -0
  191. superqode/superqe/frameworks/javascript.py +409 -0
  192. superqode/superqe/frameworks/python.py +373 -0
  193. superqode/superqe/frameworks/registry.py +92 -0
  194. superqode/superqe/mcp_tools/__init__.py +47 -0
  195. superqode/superqe/mcp_tools/core_tools.py +418 -0
  196. superqode/superqe/mcp_tools/registry.py +230 -0
  197. superqode/superqe/mcp_tools/testing_tools.py +167 -0
  198. superqode/superqe/noise.py +89 -0
  199. superqode/superqe/orchestrator.py +778 -0
  200. superqode/superqe/roles.py +609 -0
  201. superqode/superqe/session.py +713 -0
  202. superqode/superqe/skills/__init__.py +57 -0
  203. superqode/superqe/skills/base.py +106 -0
  204. superqode/superqe/skills/core_skills.py +899 -0
  205. superqode/superqe/skills/registry.py +90 -0
  206. superqode/superqe/verifier.py +101 -0
  207. superqode/superqe_cli.py +76 -0
  208. superqode/tool_call.py +358 -0
  209. superqode/tools/__init__.py +93 -0
  210. superqode/tools/agent_tools.py +496 -0
  211. superqode/tools/base.py +324 -0
  212. superqode/tools/batch_tool.py +133 -0
  213. superqode/tools/diagnostics.py +311 -0
  214. superqode/tools/edit_tools.py +653 -0
  215. superqode/tools/enhanced_base.py +515 -0
  216. superqode/tools/file_tools.py +269 -0
  217. superqode/tools/file_tracking.py +45 -0
  218. superqode/tools/lsp_tools.py +610 -0
  219. superqode/tools/network_tools.py +350 -0
  220. superqode/tools/permissions.py +400 -0
  221. superqode/tools/question_tool.py +324 -0
  222. superqode/tools/search_tools.py +598 -0
  223. superqode/tools/shell_tools.py +259 -0
  224. superqode/tools/todo_tools.py +121 -0
  225. superqode/tools/validation.py +80 -0
  226. superqode/tools/web_tools.py +639 -0
  227. superqode/tui.py +1152 -0
  228. superqode/tui_integration.py +875 -0
  229. superqode/tui_widgets/__init__.py +27 -0
  230. superqode/tui_widgets/widgets/__init__.py +18 -0
  231. superqode/tui_widgets/widgets/progress.py +185 -0
  232. superqode/tui_widgets/widgets/tool_display.py +188 -0
  233. superqode/undo_manager.py +574 -0
  234. superqode/utils/__init__.py +5 -0
  235. superqode/utils/error_handling.py +323 -0
  236. superqode/utils/fuzzy.py +257 -0
  237. superqode/widgets/__init__.py +477 -0
  238. superqode/widgets/agent_collab.py +390 -0
  239. superqode/widgets/agent_store.py +936 -0
  240. superqode/widgets/agent_switcher.py +395 -0
  241. superqode/widgets/animation_manager.py +284 -0
  242. superqode/widgets/code_context.py +356 -0
  243. superqode/widgets/command_palette.py +412 -0
  244. superqode/widgets/connection_status.py +537 -0
  245. superqode/widgets/conversation_history.py +470 -0
  246. superqode/widgets/diff_indicator.py +155 -0
  247. superqode/widgets/enhanced_status_bar.py +385 -0
  248. superqode/widgets/enhanced_toast.py +476 -0
  249. superqode/widgets/file_browser.py +809 -0
  250. superqode/widgets/file_reference.py +585 -0
  251. superqode/widgets/issue_timeline.py +340 -0
  252. superqode/widgets/leader_key.py +264 -0
  253. superqode/widgets/mode_switcher.py +445 -0
  254. superqode/widgets/model_picker.py +234 -0
  255. superqode/widgets/permission_preview.py +1205 -0
  256. superqode/widgets/prompt.py +358 -0
  257. superqode/widgets/provider_connect.py +725 -0
  258. superqode/widgets/pty_shell.py +587 -0
  259. superqode/widgets/qe_dashboard.py +321 -0
  260. superqode/widgets/resizable_sidebar.py +377 -0
  261. superqode/widgets/response_changes.py +218 -0
  262. superqode/widgets/response_display.py +528 -0
  263. superqode/widgets/rich_tool_display.py +613 -0
  264. superqode/widgets/sidebar_panels.py +1180 -0
  265. superqode/widgets/slash_complete.py +356 -0
  266. superqode/widgets/split_view.py +612 -0
  267. superqode/widgets/status_bar.py +273 -0
  268. superqode/widgets/superqode_display.py +786 -0
  269. superqode/widgets/thinking_display.py +815 -0
  270. superqode/widgets/throbber.py +87 -0
  271. superqode/widgets/toast.py +206 -0
  272. superqode/widgets/unified_output.py +1073 -0
  273. superqode/workspace/__init__.py +75 -0
  274. superqode/workspace/artifacts.py +472 -0
  275. superqode/workspace/coordinator.py +353 -0
  276. superqode/workspace/diff_tracker.py +429 -0
  277. superqode/workspace/git_guard.py +373 -0
  278. superqode/workspace/git_snapshot.py +526 -0
  279. superqode/workspace/manager.py +750 -0
  280. superqode/workspace/snapshot.py +357 -0
  281. superqode/workspace/watcher.py +535 -0
  282. superqode/workspace/worktree.py +440 -0
  283. superqode-0.1.5.dist-info/METADATA +204 -0
  284. superqode-0.1.5.dist-info/RECORD +288 -0
  285. superqode-0.1.5.dist-info/WHEEL +5 -0
  286. superqode-0.1.5.dist-info/entry_points.txt +3 -0
  287. superqode-0.1.5.dist-info/licenses/LICENSE +648 -0
  288. superqode-0.1.5.dist-info/top_level.txt +1 -0
@@ -0,0 +1,892 @@
1
+ """
2
+ Agent Loop - Minimal, Transparent Execution.
3
+
4
+ The simplest possible agent loop:
5
+ 1. Send messages + tools to model
6
+ 2. If model calls tools, execute them
7
+ 3. Add results to messages
8
+ 4. Repeat until model responds with text only
9
+
10
+ NO:
11
+ - Complex state management
12
+ - Hidden context injection
13
+ - Automatic retries with modified prompts
14
+ - "Smart" error recovery
15
+
16
+ YES:
17
+ - Transparent execution
18
+ - Raw model behavior
19
+ - Fair comparison between models
20
+
21
+ Performance optimizations:
22
+ - Tool definitions cached at init (not rebuilt each iteration)
23
+ - Message conversion cached with hash-based lookup
24
+ - Parallel tool execution support
25
+ """
26
+
27
+ import asyncio
28
+ import json
29
+ import re
30
+ import uuid
31
+ from dataclasses import dataclass, field
32
+ from functools import lru_cache
33
+ from pathlib import Path
34
+ from typing import Any, AsyncIterator, Awaitable, Callable, Dict, List, Optional, Tuple
35
+
36
+ from ..tools.base import Tool, ToolContext, ToolRegistry, ToolResult
37
+ from ..providers.gateway.base import GatewayInterface, Message, ToolDefinition
38
+ from .system_prompts import SystemPromptLevel, get_system_prompt, get_job_description_prompt
39
+
40
+
41
# Module-level cache so identical configurations reuse one assembled prompt.
@lru_cache(maxsize=32)
def _cached_system_prompt(
    level: SystemPromptLevel,
    working_directory: str,
    custom_prompt: str | None,
    job_description: str | None,
) -> str:
    """Assemble and memoize the full system prompt for one configuration.

    Every argument must be hashable for lru_cache — note the working
    directory is passed as a string and converted back to a Path here.
    """
    parts = [get_system_prompt(level=level, working_directory=Path(working_directory))]
    if custom_prompt:
        parts.append(f"\n\n{custom_prompt}")
    if job_description:
        parts.append(get_job_description_prompt(job_description))
    return "".join(parts)
56
+
57
+
58
+ def _make_hashable(value: Any) -> Any:
59
+ """Convert a value to a hashable type for use in tuples/dict keys.
60
+
61
+ Converts dicts to tuples, lists to tuples, and handles nested structures.
62
+ """
63
+ if isinstance(value, dict):
64
+ # Convert dict to sorted tuple of (key, hashable_value) pairs
65
+ return tuple(sorted((k, _make_hashable(v)) for k, v in value.items()))
66
+ elif isinstance(value, list):
67
+ # Convert list to tuple
68
+ return tuple(_make_hashable(item) for item in value)
69
+ elif isinstance(value, (str, int, float, bool, type(None))):
70
+ # Already hashable
71
+ return value
72
+ else:
73
+ # For other types (objects, etc.), convert to string representation
74
+ # This is safe because we only need unique identification, not exact equality
75
+ return str(value)
76
+
77
+
78
+ def _message_to_tuple(m: "AgentMessage") -> Tuple:
79
+ """Convert message to hashable tuple for caching."""
80
+ if m.tool_calls:
81
+ # Handle tool calls that might be dicts or objects (from LiteLLM)
82
+ tool_calls_list = []
83
+ for tc in m.tool_calls:
84
+ if isinstance(tc, dict):
85
+ # Already a dict - convert to hashable representation
86
+ # Use _make_hashable to handle nested dicts (like function field)
87
+ tool_calls_list.append(_make_hashable(tc))
88
+ else:
89
+ # Object (e.g., ChatCompletionDeltaToolCall) - convert to dict representation
90
+ # Extract key fields that make tool calls unique
91
+ tc_dict = {}
92
+ if hasattr(tc, "id"):
93
+ tc_dict["id"] = getattr(tc, "id", None)
94
+ if hasattr(tc, "function"):
95
+ func = getattr(tc, "function", None)
96
+ if func:
97
+ if isinstance(func, dict):
98
+ func_dict = func
99
+ else:
100
+ func_dict = {}
101
+ if hasattr(func, "name"):
102
+ func_dict["name"] = getattr(func, "name", None)
103
+ if hasattr(func, "arguments"):
104
+ func_dict["arguments"] = getattr(func, "arguments", None)
105
+ tc_dict["function"] = func_dict
106
+ elif hasattr(tc, "get"):
107
+ # Might be a dict-like object
108
+ tc_dict = dict(tc) if hasattr(tc, "__iter__") and hasattr(tc, "keys") else {}
109
+ # Convert to hashable representation
110
+ tool_calls_list.append(_make_hashable(tc_dict))
111
+ tool_calls_tuple = tuple(tool_calls_list)
112
+ else:
113
+ tool_calls_tuple = None
114
+ return (m.role, m.content, tool_calls_tuple, m.tool_call_id, m.name)
115
+
116
+
117
+ def _is_simple_conversational_query(message: str) -> bool:
118
+ """Detect if a query is simple/conversational and doesn't need tools.
119
+
120
+ Simple queries are general knowledge questions, greetings, or basic
121
+ questions that don't require code/file operations.
122
+
123
+ This is conservative - only returns True for very obvious cases.
124
+ """
125
+ message_lower = message.lower().strip()
126
+
127
+ # Very short greetings only
128
+ if message_lower in ["hi", "hello", "hey"]:
129
+ return True
130
+
131
+ # Simple question patterns - detect basic general knowledge questions
132
+ # These should not require tools and some models handle them poorly with tools
133
+ simple_patterns = [
134
+ r"^(what|what\'s|whats) .+\??$", # "What is the capital?", "What's the weather?"
135
+ r"^where .+\??$", # "Where is the capital?"
136
+ r"^who .+\??$", # "Who is the president?"
137
+ r"^when .+\??$", # "When was the war?"
138
+ r"^how (many|much|long|old) .+\??$", # "How many people?", "How old is it?"
139
+ ]
140
+
141
+ for pattern in simple_patterns:
142
+ if re.match(pattern, message_lower):
143
+ # Double-check: no code keywords
144
+ code_keywords = ["file", "code", "function", "class", "read", "write", "edit"]
145
+ if not any(keyword in message_lower for keyword in code_keywords):
146
+ return True
147
+
148
+ # Don't auto-detect other cases - be conservative
149
+ return False
150
+
151
+
152
+ def _is_malformed_tool_call_response(response_content: str, tool_calls: List[Dict]) -> bool:
153
+ """Detect if tool calls look malformed (model trying to return JSON instead of proper tool calls).
154
+
155
+ Some local models return JSON in content when they should return proper tool calls,
156
+ or return tool calls for simple queries that don't need tools.
157
+ """
158
+ if not tool_calls:
159
+ return False
160
+
161
+ # Check if content looks like JSON (common with local models)
162
+ content = (response_content or "").strip()
163
+ if content.startswith("{") and content.endswith("}"):
164
+ try:
165
+ parsed = json.loads(content)
166
+ # If it's a dict with keys like "function", "arguments", "input", "tool" - likely malformed
167
+ if isinstance(parsed, dict) and any(
168
+ key in parsed for key in ["function", "arguments", "input", "tool"]
169
+ ):
170
+ return True
171
+ # Also check if content has answer-like fields (message, content, text, response)
172
+ # This suggests the model returned JSON with the answer instead of tool calls
173
+ if isinstance(parsed, dict) and any(
174
+ key in parsed for key in ["message", "content", "text", "response"]
175
+ ):
176
+ # If we have tool calls but content has answer fields, it's likely malformed
177
+ # (model should either return tool calls OR text, not both in JSON)
178
+ return True
179
+ except json.JSONDecodeError:
180
+ pass
181
+
182
+ # Check if tool calls have suspicious structure
183
+ for tool_call in tool_calls:
184
+ func = tool_call.get("function", {})
185
+ if not isinstance(func, dict):
186
+ return True
187
+ if "name" not in func:
188
+ return True
189
+ # If arguments is a string that's not valid JSON, might be malformed
190
+ args = func.get("arguments", "{}")
191
+ if isinstance(args, str):
192
+ try:
193
+ json.loads(args)
194
+ except json.JSONDecodeError:
195
+ # Arguments should be valid JSON
196
+ return True
197
+
198
+ return False
199
+
200
+
201
@dataclass
class AgentConfig:
    """Configuration for the agent loop.

    Designed for transparency - every setting is explicit; nothing is
    injected or inferred behind the caller's back.
    """

    # Model settings: gateway provider id and model name.
    provider: str
    model: str

    # System prompt level (default: minimal for fair testing)
    system_prompt_level: SystemPromptLevel = SystemPromptLevel.MINIMAL

    # Optional custom system prompt (appended after the level prompt)
    custom_system_prompt: Optional[str] = None

    # Optional job description (role context, appended last)
    job_description: Optional[str] = None

    # Working directory used for the system prompt and tool execution context.
    working_directory: Path = field(default_factory=Path.cwd)

    # Tool settings: when False, no tool definitions are built or sent.
    tools_enabled: bool = True

    # Execution settings
    max_iterations: int = 50  # Prevent infinite loops
    require_confirmation: bool = False  # Ask before tool execution

    # Model parameters (passed through to gateway; None = provider default)
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
234
+
235
+
236
@dataclass
class AgentMessage:
    """A message in the agent conversation (loop-internal transcript entry)."""

    role: str  # "user", "assistant", "tool" (run() also emits "system")
    content: str  # Text content; may be empty when only tool_calls are present
    tool_calls: Optional[List[Dict]] = None  # Tool call dicts from the model, if any
    tool_call_id: Optional[str] = None  # Links a "tool" message to the call it answers
    name: Optional[str] = None  # Tool name for tool messages
245
+
246
+
247
@dataclass
class AgentResponse:
    """Final result returned by the agent loop."""

    content: str  # Final assistant text (or diagnostic text on failure)
    messages: List[AgentMessage]  # Full transcript, including system/user/tool messages
    tool_calls_made: int  # Count of tool invocations across all iterations
    iterations: int  # Model round-trips performed
    stopped_reason: str  # "complete", "max_iterations", "error"
    error: Optional[str] = None  # Exception text when stopped_reason == "error"
257
+
258
+
259
class AgentLoop:
    """Minimal agent loop for fair model testing.

    Usage:
        gateway = LiteLLMGateway()
        tools = ToolRegistry.default()
        config = AgentConfig(provider="anthropic", model="claude-sonnet-4-20250514")

        agent = AgentLoop(gateway, tools, config)
        response = await agent.run("Fix the bug in main.py")

    Performance features:
        - Tool definitions cached at initialization
        - Message conversion cached with hash lookup
        - Parallel tool execution via asyncio.gather
    """

    def __init__(
        self,
        gateway: GatewayInterface,
        tools: ToolRegistry,
        config: AgentConfig,
        on_tool_call: Optional[Callable[[str, Dict], None]] = None,
        on_tool_result: Optional[Callable[[str, ToolResult], None]] = None,
        on_thinking: Optional[Callable[[str], Awaitable[None]]] = None,
        parallel_tools: bool = True,  # Enable parallel tool execution
    ):
        """Wire up the loop.

        Args:
            gateway: Chat-completion backend implementing GatewayInterface.
            tools: Registry of executable tools.
            config: Explicit loop configuration (model, prompts, limits).
            on_tool_call: Sync callback fired before each tool executes.
            on_tool_result: Sync callback fired with each tool's result.
            on_thinking: Async callback for progress/thinking updates.
            parallel_tools: Execute simultaneous tool calls concurrently.
        """
        self.gateway = gateway
        self.tools = tools
        self.config = config
        self.on_tool_call = on_tool_call
        self.on_tool_result = on_tool_result
        self.on_thinking = on_thinking
        self.parallel_tools = parallel_tools

        # Build system prompt (cached via module-level lru_cache function)
        self.system_prompt = self._build_system_prompt()

        # Session ID for tool context
        self.session_id = str(uuid.uuid4())

        # PERFORMANCE: Cache tool definitions at init (compute once)
        self._cached_tool_defs: List[ToolDefinition] = self._compute_tool_definitions()

        # PERFORMANCE: Cache for converted messages (avoid repeated conversions)
        self._message_cache: Dict[Tuple, Message] = {}

        # Cancellation support — flag presumably checked by run(); the
        # checking site is not visible in this view (TODO confirm).
        self._cancelled = False
308
+
309
+ def _build_system_prompt(self) -> str:
310
+ """Build the system prompt based on config (uses cached function)."""
311
+ return _cached_system_prompt(
312
+ level=self.config.system_prompt_level,
313
+ working_directory=str(self.config.working_directory),
314
+ custom_prompt=self.config.custom_system_prompt,
315
+ job_description=self.config.job_description,
316
+ )
317
+
318
+ def _compute_tool_definitions(self) -> List[ToolDefinition]:
319
+ """Compute tool definitions once at init."""
320
+ if not self.config.tools_enabled:
321
+ return []
322
+
323
+ definitions = []
324
+ for tool in self.tools.list():
325
+ definitions.append(
326
+ ToolDefinition(
327
+ name=tool.name,
328
+ description=tool.description,
329
+ parameters=tool.parameters,
330
+ )
331
+ )
332
+ return definitions
333
+
334
    def _get_tool_definitions(self) -> List[ToolDefinition]:
        """Return the tool definitions computed once in __init__ (never rebuilt)."""
        return self._cached_tool_defs
337
+
338
+ def _convert_message(self, m: AgentMessage) -> Message:
339
+ """Convert a single message with caching."""
340
+ key = _message_to_tuple(m)
341
+ if key not in self._message_cache:
342
+ self._message_cache[key] = Message(
343
+ role=m.role,
344
+ content=m.content,
345
+ tool_calls=m.tool_calls,
346
+ tool_call_id=m.tool_call_id,
347
+ name=m.name,
348
+ )
349
+ return self._message_cache[key]
350
+
351
+ def _convert_messages(self, messages: List[AgentMessage]) -> List[Message]:
352
+ """Convert messages to gateway format with caching."""
353
+ return [self._convert_message(m) for m in messages]
354
+
355
+ def _create_tool_context(self) -> ToolContext:
356
+ """Create context for tool execution."""
357
+ return ToolContext(
358
+ session_id=self.session_id,
359
+ working_directory=self.config.working_directory,
360
+ require_confirmation=self.config.require_confirmation,
361
+ tool_registry=self.tools,
362
+ )
363
+
364
+ async def _execute_tool(self, name: str, arguments: Dict[str, Any]) -> ToolResult:
365
+ """Execute a single tool call."""
366
+ tool = self.tools.get(name)
367
+
368
+ if not tool:
369
+ return ToolResult(success=False, output="", error=f"Unknown tool: {name}")
370
+
371
+ ctx = self._create_tool_context()
372
+
373
+ try:
374
+ result = await tool.execute(arguments, ctx)
375
+ return result
376
+ except Exception as e:
377
+ return ToolResult(success=False, output="", error=f"Tool execution error: {str(e)}")
378
+
379
+ async def _execute_tools_parallel(
380
+ self,
381
+ tool_calls: List[Dict],
382
+ ) -> List[Tuple[str, str, Dict, ToolResult]]:
383
+ """Execute multiple tool calls in parallel.
384
+
385
+ Returns list of (tool_name, tool_call_id, tool_args, result) tuples.
386
+ """
387
+
388
+ async def execute_one(tc: Dict) -> Tuple[str, str, Dict, ToolResult]:
389
+ tool_name = tc.get("function", {}).get("name", "")
390
+ tool_args_str = tc.get("function", {}).get("arguments", "{}")
391
+ tool_call_id = tc.get("id", str(uuid.uuid4()))
392
+
393
+ try:
394
+ tool_args = (
395
+ json.loads(tool_args_str) if isinstance(tool_args_str, str) else tool_args_str
396
+ )
397
+ except json.JSONDecodeError:
398
+ tool_args = {}
399
+
400
+ # Callback for tool call
401
+ if self.on_tool_call:
402
+ self.on_tool_call(tool_name, tool_args)
403
+
404
+ result = await self._execute_tool(tool_name, tool_args)
405
+
406
+ # Callback for result
407
+ if self.on_tool_result:
408
+ self.on_tool_result(tool_name, result)
409
+
410
+ return (tool_name, tool_call_id, tool_args, result)
411
+
412
+ # Execute all tools in parallel
413
+ tasks = [execute_one(tc) for tc in tool_calls]
414
+ results = await asyncio.gather(*tasks, return_exceptions=True)
415
+
416
+ # Handle any exceptions
417
+ processed = []
418
+ for i, r in enumerate(results):
419
+ if isinstance(r, Exception):
420
+ tc = tool_calls[i]
421
+ tool_name = tc.get("function", {}).get("name", "unknown")
422
+ tool_call_id = tc.get("id", str(uuid.uuid4()))
423
+ processed.append(
424
+ (
425
+ tool_name,
426
+ tool_call_id,
427
+ {},
428
+ ToolResult(success=False, output="", error=str(r)),
429
+ )
430
+ )
431
+ else:
432
+ processed.append(r)
433
+
434
+ return processed
435
+
436
+ async def run(self, user_message: str) -> AgentResponse:
437
+ """Run the agent loop until completion.
438
+
439
+ Args:
440
+ user_message: The user's request
441
+
442
+ Returns:
443
+ AgentResponse with the final result
444
+
445
+ Performance: Uses cached message conversion and parallel tool execution.
446
+ """
447
+ messages: List[AgentMessage] = []
448
+
449
+ # Add system message if we have one
450
+ if self.system_prompt:
451
+ messages.append(AgentMessage(role="system", content=self.system_prompt))
452
+
453
+ # Add user message
454
+ messages.append(AgentMessage(role="user", content=user_message))
455
+
456
+ tool_calls_made = 0
457
+ iterations = 0
458
+
459
+ # Emit initial processing log
460
+ if self.on_thinking:
461
+ await self.on_thinking("Processing request...")
462
+
463
+ # Get cached tool definitions (computed once at init)
464
+ tool_defs = self._get_tool_definitions()
465
+
466
+ # Always send tools if available - let malformed tool call handling deal with issues
467
+ # This ensures models always get the full context and we handle malformed responses gracefully
468
+ while iterations < self.config.max_iterations:
469
+ iterations += 1
470
+
471
+ # Emit iteration log
472
+ if self.on_thinking:
473
+ await self.on_thinking(
474
+ f"Calling model {self.config.provider}/{self.config.model}... (iteration {iterations})"
475
+ )
476
+
477
+ # PERFORMANCE: Use cached message conversion
478
+ gateway_messages = self._convert_messages(messages)
479
+
480
+ # Check if this is a simple conversational query that doesn't need tools
481
+ # Some models (especially local ones) don't handle tools well for simple questions
482
+ # Local providers generally don't support tools well
483
+ from ..providers.registry import PROVIDERS, ProviderCategory
484
+
485
+ provider_def = PROVIDERS.get(self.config.provider)
486
+ is_local_provider = provider_def and provider_def.category == ProviderCategory.LOCAL
487
+
488
+ is_simple_query = _is_simple_conversational_query(user_message)
489
+ tools_to_send = (
490
+ tool_defs if (tool_defs and not is_simple_query and not is_local_provider) else None
491
+ )
492
+
493
+ # Call the model
494
+ try:
495
+ response = await self.gateway.chat_completion(
496
+ messages=gateway_messages,
497
+ model=self.config.model,
498
+ provider=self.config.provider,
499
+ tools=tools_to_send,
500
+ temperature=self.config.temperature,
501
+ max_tokens=self.config.max_tokens,
502
+ )
503
+ except Exception as e:
504
+ return AgentResponse(
505
+ content="",
506
+ messages=messages,
507
+ tool_calls_made=tool_calls_made,
508
+ iterations=iterations,
509
+ stopped_reason="error",
510
+ error=str(e),
511
+ )
512
+
513
+ # Extract thinking content if available
514
+ if response.thinking_content and self.on_thinking:
515
+ await self.on_thinking(f"[Extended Thinking]\n{response.thinking_content}")
516
+
517
+ # Emit response received log
518
+ if self.on_thinking and response.usage:
519
+ total_tokens = response.usage.total_tokens or 0
520
+ await self.on_thinking(f"Received response ({total_tokens} tokens)")
521
+
522
+ # Extract content - handle None/empty cases
523
+ response_content = response.content if response.content is not None else ""
524
+
525
+ # Check for empty responses from models that should respond
526
+ if not response_content.strip() and not response.tool_calls:
527
+ # Model returned empty content with no tool calls - this is likely a problem
528
+ # Provide a helpful error message instead of empty content
529
+ response_content = f"⚠️ The model '{self.config.provider}/{self.config.model}' returned an empty response. This could mean:\n\n• The model is not responding properly\n• The model may be overloaded or unavailable\n• The model format may not be compatible\n\nTry a different model or check your provider configuration."
530
+
531
+ # Check for tool calls
532
+ if response.tool_calls:
533
+ # Check if tool calls look malformed (common with local models)
534
+ if _is_malformed_tool_call_response(response_content, response.tool_calls):
535
+ # For malformed tool calls, try to extract text from content
536
+ # or if it's a simple query, just return the content as-is
537
+ content = response_content
538
+
539
+ # Try to extract text from JSON if content is JSON
540
+ if content.strip().startswith("{"):
541
+ try:
542
+ parsed = json.loads(content)
543
+ if isinstance(parsed, dict):
544
+ # Try common fields that might contain the answer
545
+ extracted = (
546
+ parsed.get("message")
547
+ or parsed.get("content")
548
+ or parsed.get("text")
549
+ or parsed.get("response")
550
+ or str(parsed)
551
+ )
552
+ if isinstance(extracted, dict):
553
+ extracted = extracted.get("content", str(extracted))
554
+ content = str(extracted) if extracted else content
555
+ except (json.JSONDecodeError, AttributeError):
556
+ pass
557
+
558
+ # If we have content, return it (ignore malformed tool calls)
559
+ if content.strip():
560
+ return AgentResponse(
561
+ content=content,
562
+ messages=messages,
563
+ tool_calls_made=tool_calls_made,
564
+ iterations=iterations,
565
+ stopped_reason="complete",
566
+ )
567
+
568
+ # No content extracted - continue to normal tool call handling
569
+ # (might be a false positive on malformed detection)
570
+
571
+ # Add assistant message with tool calls
572
+ messages.append(
573
+ AgentMessage(
574
+ role="assistant",
575
+ content=response_content,
576
+ tool_calls=response.tool_calls,
577
+ )
578
+ )
579
+
580
+ # Emit tool execution log
581
+ if self.on_thinking:
582
+ tool_count = len(response.tool_calls)
583
+ await self.on_thinking(
584
+ f"Executing {tool_count} tool call{'s' if tool_count != 1 else ''}..."
585
+ )
586
+
587
+ # PERFORMANCE: Execute tools in parallel or sequential
588
+ if self.parallel_tools and len(response.tool_calls) > 1:
589
+ # Parallel execution for multiple tools
590
+ results = await self._execute_tools_parallel(response.tool_calls)
591
+ for tool_name, tool_call_id, tool_args, result in results:
592
+ tool_calls_made += 1
593
+ messages.append(
594
+ AgentMessage(
595
+ role="tool",
596
+ content=result.to_message(),
597
+ tool_call_id=tool_call_id,
598
+ name=tool_name,
599
+ )
600
+ )
601
+ else:
602
+ # Sequential execution (single tool or parallel disabled)
603
+ for tool_call in response.tool_calls:
604
+ tool_name = tool_call.get("function", {}).get("name", "")
605
+ tool_args_str = tool_call.get("function", {}).get("arguments", "{}")
606
+ tool_call_id = tool_call.get("id", str(uuid.uuid4()))
607
+
608
+ try:
609
+ tool_args = (
610
+ json.loads(tool_args_str)
611
+ if isinstance(tool_args_str, str)
612
+ else tool_args_str
613
+ )
614
+ except json.JSONDecodeError:
615
+ tool_args = {}
616
+
617
+ if self.on_tool_call:
618
+ self.on_tool_call(tool_name, tool_args)
619
+
620
+ result = await self._execute_tool(tool_name, tool_args)
621
+ tool_calls_made += 1
622
+
623
+ if self.on_tool_result:
624
+ self.on_tool_result(tool_name, result)
625
+
626
+ messages.append(
627
+ AgentMessage(
628
+ role="tool",
629
+ content=result.to_message(),
630
+ tool_call_id=tool_call_id,
631
+ name=tool_name,
632
+ )
633
+ )
634
+
635
+ # Emit iteration complete log
636
+ if self.on_thinking:
637
+ await self.on_thinking(f"Iteration {iterations} complete")
638
+ else:
639
+ # No tool calls - return the response content
640
+ if self.on_thinking:
641
+ await self.on_thinking("Response complete")
642
+ return AgentResponse(
643
+ content=response_content,
644
+ messages=messages,
645
+ tool_calls_made=tool_calls_made,
646
+ iterations=iterations,
647
+ stopped_reason="complete",
648
+ )
649
+
650
+ # Hit max iterations
651
+ if self.on_thinking:
652
+ await self.on_thinking(f"Reached maximum iterations ({self.config.max_iterations})")
653
+ return AgentResponse(
654
+ content="",
655
+ messages=messages,
656
+ tool_calls_made=tool_calls_made,
657
+ iterations=iterations,
658
+ stopped_reason="max_iterations",
659
+ error=f"Reached maximum iterations ({self.config.max_iterations})",
660
+ )
661
+
662
+ async def run_streaming(
663
+ self,
664
+ user_message: str,
665
+ ) -> AsyncIterator[str]:
666
+ """Run the agent loop with streaming output.
667
+
668
+ Yields text chunks as they come from the model.
669
+ Tool calls are executed between chunks.
670
+
671
+ Performance: Uses cached message conversion and parallel tool execution.
672
+ """
673
+ messages: List[AgentMessage] = []
674
+
675
+ if self.system_prompt:
676
+ messages.append(AgentMessage(role="system", content=self.system_prompt))
677
+
678
+ messages.append(AgentMessage(role="user", content=user_message))
679
+
680
+ iterations = 0
681
+ tool_calls_made = 0
682
+
683
+ # Emit initial processing log
684
+ if self.on_thinking:
685
+ await self.on_thinking("Processing request...")
686
+
687
+ # Get cached tool definitions
688
+ tool_defs = self._get_tool_definitions()
689
+
690
+ while iterations < self.config.max_iterations:
691
+ # Check for cancellation
692
+ if self._cancelled:
693
+ if self.on_thinking:
694
+ await self.on_thinking("Operation cancelled by user")
695
+ return
696
+
697
+ iterations += 1
698
+
699
+ # Emit iteration log
700
+ if self.on_thinking:
701
+ await self.on_thinking(
702
+ f"Calling model {self.config.provider}/{self.config.model}... (iteration {iterations})"
703
+ )
704
+
705
+ # PERFORMANCE: Use cached message conversion
706
+ gateway_messages = self._convert_messages(messages)
707
+
708
+ # Check if this is a simple conversational query that doesn't need tools
709
+ # Some models (especially local ones) don't handle tools well for simple questions
710
+ # Local providers generally don't support tools well
711
+ from ..providers.registry import PROVIDERS, ProviderCategory
712
+
713
+ provider_def = PROVIDERS.get(self.config.provider)
714
+ is_local_provider = provider_def and provider_def.category == ProviderCategory.LOCAL
715
+
716
+ is_simple_query = _is_simple_conversational_query(user_message)
717
+ tools_to_send = (
718
+ tool_defs if (tool_defs and not is_simple_query and not is_local_provider) else None
719
+ )
720
+
721
+ # Stream response
722
+ full_content = ""
723
+ tool_calls = []
724
+ had_content = False
725
+
726
+ # Buffer for accumulating thinking content chunks
727
+ # Local models stream thinking in tiny pieces - accumulate for readable display
728
+ thinking_buffer = ""
729
+ import time as _time
730
+
731
+ last_thinking_emit = _time.time()
732
+
733
+ try:
734
+ async for chunk in self.gateway.stream_completion(
735
+ messages=gateway_messages,
736
+ model=self.config.model,
737
+ provider=self.config.provider,
738
+ tools=tools_to_send,
739
+ temperature=self.config.temperature,
740
+ max_tokens=self.config.max_tokens,
741
+ ):
742
+ # Check for cancellation during streaming
743
+ if self._cancelled:
744
+ if self.on_thinking:
745
+ await self.on_thinking("Operation cancelled by user")
746
+ return
747
+
748
+ # Handle thinking content if available - BUFFER for readable display
749
+ if chunk.thinking_content:
750
+ thinking_buffer += chunk.thinking_content
751
+ current_time = _time.time()
752
+
753
+ # Emit thinking content when:
754
+ # 1. Buffer has a complete sentence (ends with . ? ! or newline)
755
+ # 2. Buffer exceeds 200 chars (long enough to be readable)
756
+ # 3. 500ms has passed since last emit (prevent stale buffer)
757
+ should_emit = (
758
+ thinking_buffer.rstrip().endswith((".", "?", "!", "\n"))
759
+ or len(thinking_buffer) > 200
760
+ or (
761
+ current_time - last_thinking_emit > 0.5
762
+ and len(thinking_buffer) > 20
763
+ )
764
+ )
765
+
766
+ if should_emit and self.on_thinking and thinking_buffer.strip():
767
+ await self.on_thinking(thinking_buffer.strip())
768
+ thinking_buffer = ""
769
+ last_thinking_emit = current_time
770
+
771
+ if chunk.content:
772
+ full_content += chunk.content
773
+ had_content = True
774
+ yield chunk.content
775
+
776
+ if chunk.tool_calls:
777
+ tool_calls.extend(chunk.tool_calls)
778
+
779
+ # Flush any remaining thinking content after streaming completes
780
+ if thinking_buffer.strip() and self.on_thinking:
781
+ await self.on_thinking(thinking_buffer.strip())
782
+ thinking_buffer = ""
783
+
784
+ except Exception as e:
785
+ # Flush thinking buffer before handling error
786
+ if thinking_buffer.strip() and self.on_thinking:
787
+ await self.on_thinking(thinking_buffer.strip())
788
+
789
+ error_msg = str(e)
790
+ error_type = type(e).__name__
791
+ # Yield error message so it's displayed
792
+ yield f"\n\n[Error: {error_type}] {error_msg}"
793
+ # Don't return immediately - let the error be displayed
794
+ # But mark that we had an error so we don't continue the loop
795
+ full_content = f"[Error: {error_type}] {error_msg}"
796
+ return
797
+
798
+ # Handle tool calls
799
+ if tool_calls:
800
+ messages.append(
801
+ AgentMessage(
802
+ role="assistant",
803
+ content=full_content,
804
+ tool_calls=tool_calls,
805
+ )
806
+ )
807
+
808
+ # Emit tool execution log
809
+ if self.on_thinking:
810
+ tool_count = len(tool_calls)
811
+ await self.on_thinking(
812
+ f"Executing {tool_count} tool call{'s' if tool_count != 1 else ''}..."
813
+ )
814
+
815
+ # PERFORMANCE: Execute tools in parallel or sequential
816
+ if self.parallel_tools and len(tool_calls) > 1:
817
+ results = await self._execute_tools_parallel(tool_calls)
818
+ for tool_name, tool_call_id, tool_args, result in results:
819
+ tool_calls_made += 1
820
+ messages.append(
821
+ AgentMessage(
822
+ role="tool",
823
+ content=result.to_message(),
824
+ tool_call_id=tool_call_id,
825
+ name=tool_name,
826
+ )
827
+ )
828
+ else:
829
+ for tool_call in tool_calls:
830
+ tool_name = tool_call.get("function", {}).get("name", "")
831
+ tool_args_str = tool_call.get("function", {}).get("arguments", "{}")
832
+ tool_call_id = tool_call.get("id", str(uuid.uuid4()))
833
+
834
+ try:
835
+ tool_args = (
836
+ json.loads(tool_args_str)
837
+ if isinstance(tool_args_str, str)
838
+ else tool_args_str
839
+ )
840
+ except json.JSONDecodeError:
841
+ tool_args = {}
842
+
843
+ if self.on_tool_call:
844
+ self.on_tool_call(tool_name, tool_args)
845
+
846
+ result = await self._execute_tool(tool_name, tool_args)
847
+ tool_calls_made += 1
848
+
849
+ if self.on_tool_result:
850
+ self.on_tool_result(tool_name, result)
851
+
852
+ messages.append(
853
+ AgentMessage(
854
+ role="tool",
855
+ content=result.to_message(),
856
+ tool_call_id=tool_call_id,
857
+ name=tool_name,
858
+ )
859
+ )
860
+
861
+ # Emit iteration complete log
862
+ if self.on_thinking:
863
+ await self.on_thinking(f"Iteration {iterations} complete")
864
+
865
+ # Continue loop to get final response after tool execution
866
+ # The next iteration will stream the final response with tool results
867
+ # Important: The model should provide a summary after seeing tool results
868
+ else:
869
+ # No tool calls - we have the final response
870
+ # If we had tool calls in previous iterations but no content now,
871
+ # the model should still provide a summary
872
+ if self.on_thinking:
873
+ await self.on_thinking("Response complete")
874
+ if full_content:
875
+ # Content was already yielded during streaming
876
+ pass
877
+ # Done - return (final response was already streamed)
878
+ return
879
+
880
+ # Hit max iterations (unless cancelled)
881
+ if not self._cancelled:
882
+ if self.on_thinking:
883
+ await self.on_thinking(f"Reached maximum iterations ({self.config.max_iterations})")
884
+ yield f"\n\n[Reached maximum iterations ({self.config.max_iterations})]"
885
+
886
+ def cancel(self):
887
+ """Cancel the current agent operation."""
888
+ self._cancelled = True
889
+
890
+ def reset_cancellation(self):
891
+ """Reset cancellation flag for a new operation."""
892
+ self._cancelled = False