klaude_code-1.2.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/__init__.py +0 -0
- klaude_code/cli/__init__.py +1 -0
- klaude_code/cli/main.py +298 -0
- klaude_code/cli/runtime.py +331 -0
- klaude_code/cli/session_cmd.py +80 -0
- klaude_code/command/__init__.py +43 -0
- klaude_code/command/clear_cmd.py +20 -0
- klaude_code/command/command_abc.py +92 -0
- klaude_code/command/diff_cmd.py +138 -0
- klaude_code/command/export_cmd.py +86 -0
- klaude_code/command/help_cmd.py +51 -0
- klaude_code/command/model_cmd.py +43 -0
- klaude_code/command/prompt-dev-docs-update.md +56 -0
- klaude_code/command/prompt-dev-docs.md +46 -0
- klaude_code/command/prompt-init.md +45 -0
- klaude_code/command/prompt_command.py +69 -0
- klaude_code/command/refresh_cmd.py +43 -0
- klaude_code/command/registry.py +110 -0
- klaude_code/command/status_cmd.py +111 -0
- klaude_code/command/terminal_setup_cmd.py +252 -0
- klaude_code/config/__init__.py +11 -0
- klaude_code/config/config.py +177 -0
- klaude_code/config/list_model.py +162 -0
- klaude_code/config/select_model.py +67 -0
- klaude_code/const/__init__.py +133 -0
- klaude_code/core/__init__.py +0 -0
- klaude_code/core/agent.py +165 -0
- klaude_code/core/executor.py +485 -0
- klaude_code/core/manager/__init__.py +19 -0
- klaude_code/core/manager/agent_manager.py +127 -0
- klaude_code/core/manager/llm_clients.py +42 -0
- klaude_code/core/manager/llm_clients_builder.py +49 -0
- klaude_code/core/manager/sub_agent_manager.py +86 -0
- klaude_code/core/prompt.py +89 -0
- klaude_code/core/prompts/prompt-claude-code.md +98 -0
- klaude_code/core/prompts/prompt-codex.md +331 -0
- klaude_code/core/prompts/prompt-gemini.md +43 -0
- klaude_code/core/prompts/prompt-subagent-explore.md +27 -0
- klaude_code/core/prompts/prompt-subagent-oracle.md +23 -0
- klaude_code/core/prompts/prompt-subagent-webfetch.md +46 -0
- klaude_code/core/prompts/prompt-subagent.md +8 -0
- klaude_code/core/reminders.py +445 -0
- klaude_code/core/task.py +237 -0
- klaude_code/core/tool/__init__.py +75 -0
- klaude_code/core/tool/file/__init__.py +0 -0
- klaude_code/core/tool/file/apply_patch.py +492 -0
- klaude_code/core/tool/file/apply_patch_tool.md +1 -0
- klaude_code/core/tool/file/apply_patch_tool.py +204 -0
- klaude_code/core/tool/file/edit_tool.md +9 -0
- klaude_code/core/tool/file/edit_tool.py +274 -0
- klaude_code/core/tool/file/multi_edit_tool.md +42 -0
- klaude_code/core/tool/file/multi_edit_tool.py +199 -0
- klaude_code/core/tool/file/read_tool.md +14 -0
- klaude_code/core/tool/file/read_tool.py +326 -0
- klaude_code/core/tool/file/write_tool.md +8 -0
- klaude_code/core/tool/file/write_tool.py +146 -0
- klaude_code/core/tool/memory/__init__.py +0 -0
- klaude_code/core/tool/memory/memory_tool.md +16 -0
- klaude_code/core/tool/memory/memory_tool.py +462 -0
- klaude_code/core/tool/memory/skill_loader.py +245 -0
- klaude_code/core/tool/memory/skill_tool.md +24 -0
- klaude_code/core/tool/memory/skill_tool.py +97 -0
- klaude_code/core/tool/shell/__init__.py +0 -0
- klaude_code/core/tool/shell/bash_tool.md +43 -0
- klaude_code/core/tool/shell/bash_tool.py +123 -0
- klaude_code/core/tool/shell/command_safety.py +363 -0
- klaude_code/core/tool/sub_agent_tool.py +83 -0
- klaude_code/core/tool/todo/__init__.py +0 -0
- klaude_code/core/tool/todo/todo_write_tool.md +182 -0
- klaude_code/core/tool/todo/todo_write_tool.py +121 -0
- klaude_code/core/tool/todo/update_plan_tool.md +3 -0
- klaude_code/core/tool/todo/update_plan_tool.py +104 -0
- klaude_code/core/tool/tool_abc.py +25 -0
- klaude_code/core/tool/tool_context.py +106 -0
- klaude_code/core/tool/tool_registry.py +78 -0
- klaude_code/core/tool/tool_runner.py +252 -0
- klaude_code/core/tool/truncation.py +170 -0
- klaude_code/core/tool/web/__init__.py +0 -0
- klaude_code/core/tool/web/mermaid_tool.md +21 -0
- klaude_code/core/tool/web/mermaid_tool.py +76 -0
- klaude_code/core/tool/web/web_fetch_tool.md +8 -0
- klaude_code/core/tool/web/web_fetch_tool.py +159 -0
- klaude_code/core/turn.py +220 -0
- klaude_code/llm/__init__.py +21 -0
- klaude_code/llm/anthropic/__init__.py +3 -0
- klaude_code/llm/anthropic/client.py +221 -0
- klaude_code/llm/anthropic/input.py +200 -0
- klaude_code/llm/client.py +49 -0
- klaude_code/llm/input_common.py +239 -0
- klaude_code/llm/openai_compatible/__init__.py +3 -0
- klaude_code/llm/openai_compatible/client.py +211 -0
- klaude_code/llm/openai_compatible/input.py +109 -0
- klaude_code/llm/openai_compatible/tool_call_accumulator.py +80 -0
- klaude_code/llm/openrouter/__init__.py +3 -0
- klaude_code/llm/openrouter/client.py +200 -0
- klaude_code/llm/openrouter/input.py +160 -0
- klaude_code/llm/openrouter/reasoning_handler.py +209 -0
- klaude_code/llm/registry.py +22 -0
- klaude_code/llm/responses/__init__.py +3 -0
- klaude_code/llm/responses/client.py +216 -0
- klaude_code/llm/responses/input.py +167 -0
- klaude_code/llm/usage.py +109 -0
- klaude_code/protocol/__init__.py +4 -0
- klaude_code/protocol/commands.py +21 -0
- klaude_code/protocol/events.py +163 -0
- klaude_code/protocol/llm_param.py +147 -0
- klaude_code/protocol/model.py +287 -0
- klaude_code/protocol/op.py +89 -0
- klaude_code/protocol/op_handler.py +28 -0
- klaude_code/protocol/sub_agent.py +348 -0
- klaude_code/protocol/tools.py +15 -0
- klaude_code/session/__init__.py +4 -0
- klaude_code/session/export.py +624 -0
- klaude_code/session/selector.py +76 -0
- klaude_code/session/session.py +474 -0
- klaude_code/session/templates/export_session.html +1434 -0
- klaude_code/trace/__init__.py +3 -0
- klaude_code/trace/log.py +168 -0
- klaude_code/ui/__init__.py +91 -0
- klaude_code/ui/core/__init__.py +1 -0
- klaude_code/ui/core/display.py +103 -0
- klaude_code/ui/core/input.py +71 -0
- klaude_code/ui/core/stage_manager.py +55 -0
- klaude_code/ui/modes/__init__.py +1 -0
- klaude_code/ui/modes/debug/__init__.py +1 -0
- klaude_code/ui/modes/debug/display.py +36 -0
- klaude_code/ui/modes/exec/__init__.py +1 -0
- klaude_code/ui/modes/exec/display.py +63 -0
- klaude_code/ui/modes/repl/__init__.py +51 -0
- klaude_code/ui/modes/repl/clipboard.py +152 -0
- klaude_code/ui/modes/repl/completers.py +429 -0
- klaude_code/ui/modes/repl/display.py +60 -0
- klaude_code/ui/modes/repl/event_handler.py +375 -0
- klaude_code/ui/modes/repl/input_prompt_toolkit.py +198 -0
- klaude_code/ui/modes/repl/key_bindings.py +170 -0
- klaude_code/ui/modes/repl/renderer.py +281 -0
- klaude_code/ui/renderers/__init__.py +0 -0
- klaude_code/ui/renderers/assistant.py +21 -0
- klaude_code/ui/renderers/common.py +8 -0
- klaude_code/ui/renderers/developer.py +158 -0
- klaude_code/ui/renderers/diffs.py +215 -0
- klaude_code/ui/renderers/errors.py +16 -0
- klaude_code/ui/renderers/metadata.py +190 -0
- klaude_code/ui/renderers/sub_agent.py +71 -0
- klaude_code/ui/renderers/thinking.py +39 -0
- klaude_code/ui/renderers/tools.py +551 -0
- klaude_code/ui/renderers/user_input.py +65 -0
- klaude_code/ui/rich/__init__.py +1 -0
- klaude_code/ui/rich/live.py +65 -0
- klaude_code/ui/rich/markdown.py +308 -0
- klaude_code/ui/rich/quote.py +34 -0
- klaude_code/ui/rich/searchable_text.py +71 -0
- klaude_code/ui/rich/status.py +240 -0
- klaude_code/ui/rich/theme.py +274 -0
- klaude_code/ui/terminal/__init__.py +1 -0
- klaude_code/ui/terminal/color.py +244 -0
- klaude_code/ui/terminal/control.py +147 -0
- klaude_code/ui/terminal/notifier.py +107 -0
- klaude_code/ui/terminal/progress_bar.py +87 -0
- klaude_code/ui/utils/__init__.py +1 -0
- klaude_code/ui/utils/common.py +108 -0
- klaude_code/ui/utils/debouncer.py +42 -0
- klaude_code/version.py +163 -0
- klaude_code-1.2.6.dist-info/METADATA +178 -0
- klaude_code-1.2.6.dist-info/RECORD +167 -0
- klaude_code-1.2.6.dist-info/WHEEL +4 -0
- klaude_code-1.2.6.dist-info/entry_points.txt +3 -0

klaude_code/protocol/llm_param.py
@@ -0,0 +1,147 @@
+from enum import Enum
+from typing import Any, Literal
+
+from pydantic import BaseModel
+from pydantic.json_schema import JsonSchemaValue
+
+from klaude_code.protocol.model import ConversationItem
+
+
+class LLMClientProtocol(Enum):
+    OPENAI = "openai"
+    RESPONSES = "responses"
+    OPENROUTER = "openrouter"
+    ANTHROPIC = "anthropic"
+
+
+class ToolSchema(BaseModel):
+    name: str
+    type: Literal["function"]
+    description: str
+    parameters: JsonSchemaValue
+
+
+class Thinking(BaseModel):
+    """
+    Unified Thinking & Reasoning Configuration
+    """
+
+    # OpenAI Reasoning Style
+    reasoning_effort: Literal["high", "medium", "low", "minimal", "none"] | None = None
+    reasoning_summary: Literal["auto", "concise", "detailed"] | None = None
+
+    # Claude/Gemini Thinking Style
+    type: Literal["enabled", "disabled"] | None = None
+    budget_tokens: int | None = None
+
+
+class Cost(BaseModel):
+    """Cost configuration per million tokens (USD)."""
+
+    input: float  # Input token price per million tokens
+    output: float  # Output token price per million tokens
+    cache_read: float = 0.0  # Cache read price per million tokens
+    cache_write: float = 0.0  # Cache write price per million tokens (ignored in calculation for now)
+
+
+class OpenRouterProviderRouting(BaseModel):
+    """
+    https://openrouter.ai/docs/features/provider-routing#json-schema-for-provider-preferences
+    """
+
+    allow_fallbacks: bool | None = None
+    require_parameters: bool | None = None
+
+    # Data collection setting: allow (default) or deny
+    data_collection: Literal["deny", "allow"] | None = None
+
+    # Provider lists
+    order: list[str] | None = None
+    only: list[str] | None = None
+    ignore: list[str] | None = None
+
+    # Quantization filters
+    quantizations: list[Literal["int4", "int8", "fp4", "fp6", "fp8", "fp16", "bf16", "fp32", "unknown"]] | None = None
+
+    # Sorting strategy when order is not specified
+    sort: Literal["price", "throughput", "latency"] | None = None
+
+    class MaxPrice(BaseModel):
+        # USD price per million tokens (or provider-specific string); OpenRouter also
+        # accepts other JSON types according to the schema, so Any covers that.
+        prompt: float | str | Any | None = None
+        completion: float | str | Any | None = None
+        image: float | str | Any | None = None
+        audio: float | str | Any | None = None
+        request: float | str | Any | None = None
+
+    max_price: MaxPrice | None = None
+
+    class Experimental(BaseModel):
+        # Placeholder for future experimental settings (no properties allowed in schema)
+        pass
+
+    experimental: Experimental | None = None
+
+
+class LLMConfigProviderParameter(BaseModel):
+    provider_name: str = ""
+    protocol: LLMClientProtocol
+    base_url: str | None = None
+    api_key: str | None = None
+    is_azure: bool = False
+    azure_api_version: str | None = None
+
+
+class LLMConfigModelParameter(BaseModel):
+    model: str | None = None
+    temperature: float | None = None
+    max_tokens: int | None = None
+    context_limit: int | None = None
+
+    # OpenAI GPT-5
+    verbosity: Literal["low", "medium", "high"] | None = None
+
+    # Unified Thinking & Reasoning
+    thinking: Thinking | None = None
+
+    # OpenRouter Provider Routing Preferences
+    provider_routing: OpenRouterProviderRouting | None = None
+
+    # Cost configuration (USD per million tokens)
+    cost: Cost | None = None
+
+
+class LLMConfigParameter(LLMConfigProviderParameter, LLMConfigModelParameter):
+    """
+    Parameter support in config yaml
+
+    When adding a new parameter, please also modify the following:
+    - llm_parameter.py#apply_config_defaults
+    - llm/*/client.py, handle the new parameter, e.g. add it to extra_body
+    - ui/repl_display.py#display_welcome
+    - config/list_models.py#display_models_and_providers
+    - config/select_model.py#select_model_from_config
+    """
+
+    pass
+
+
+class LLMCallParameter(LLMConfigModelParameter):
+    """
+    Parameters for a single agent call
+    """
+
+    # Agent
+    input: list[ConversationItem]
+    system: str | None = None
+    tools: list[ToolSchema] | None = None
+
+    stream: Literal[True] = True  # Always True
+
+    # OpenAI Responses
+    include: list[str] | None = None
+    store: bool = True
+    previous_response_id: str | None = None
+
+    session_id: str | None = None
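
To make the relationships concrete, here is a minimal, illustrative sketch of how these parameter models might compose into a single call, assuming klaude-code 1.2.6 is installed. The model name, tool schema, and prompt text are invented for the example and are not package defaults.

from klaude_code.protocol.llm_param import LLMCallParameter, Thinking, ToolSchema
from klaude_code.protocol.model import UserMessageItem

# Hypothetical values; only the class and field names come from llm_param.py above.
params = LLMCallParameter(
    model="gpt-5",  # assumption: whatever model name your configured provider accepts
    thinking=Thinking(reasoning_effort="medium", reasoning_summary="auto"),
    system="You are a coding agent.",
    input=[UserMessageItem(content="List the files in the repo root.")],
    tools=[
        ToolSchema(
            name="Bash",  # illustrative tool schema, not the package's real Bash schema
            type="function",
            description="Run a shell command.",
            parameters={"type": "object", "properties": {"command": {"type": "string"}}},
        )
    ],
)
print(params.stream)  # always True per the model definition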

klaude_code/protocol/model.py
@@ -0,0 +1,287 @@
+from datetime import datetime
+from enum import Enum
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from klaude_code.protocol.commands import CommandName
+from klaude_code.protocol.tools import SubAgentType
+
+RoleType = Literal["system", "developer", "user", "assistant", "tool"]
+TodoStatusType = Literal["pending", "in_progress", "completed"]
+
+
+class Usage(BaseModel):
+    input_tokens: int = 0
+    cached_tokens: int = 0
+    reasoning_tokens: int = 0
+    output_tokens: int = 0
+    total_tokens: int = 0
+    context_usage_percent: float | None = None
+    throughput_tps: float | None = None
+    first_token_latency_ms: float | None = None
+
+    # Cost in USD (calculated from token counts and cost config)
+    input_cost: float | None = None  # Cost for non-cached input tokens
+    output_cost: float | None = None  # Cost for output tokens (including reasoning)
+    cache_read_cost: float | None = None  # Cost for cached tokens
+    total_cost: float | None = None  # Total cost (input + output + cache_read)
+
+
+class TodoItem(BaseModel):
+    content: str
+    status: TodoStatusType
+    activeForm: str = ""
+
+
+class TodoUIExtra(BaseModel):
+    todos: list[TodoItem]
+    new_completed: list[str]
+
+
+class ToolResultUIExtraType(str, Enum):
+    DIFF_TEXT = "diff_text"
+    TODO_LIST = "todo_list"
+    SESSION_ID = "session_id"
+    MERMAID_LINK = "mermaid_link"
+    TRUNCATION = "truncation"
+    SESSION_STATUS = "session_status"
+
+
+class ToolSideEffect(str, Enum):
+    TODO_CHANGE = "todo_change"
+
+
+class MermaidLinkUIExtra(BaseModel):
+    link: str
+    line_count: int
+
+
+class TruncationUIExtra(BaseModel):
+    saved_file_path: str
+    original_length: int
+    truncated_length: int
+
+
+class SessionStatusUIExtra(BaseModel):
+    usage: "Usage"
+    task_count: int
+
+
+class ToolResultUIExtra(BaseModel):
+    type: ToolResultUIExtraType
+    diff_text: str | None = None
+    todo_list: TodoUIExtra | None = None
+    session_id: str | None = None
+    mermaid_link: MermaidLinkUIExtra | None = None
+    truncation: TruncationUIExtra | None = None
+    session_status: SessionStatusUIExtra | None = None
+
+
+class AtPatternParseResult(BaseModel):
+    path: str
+    tool_name: str
+    result: str
+    tool_args: str
+    operation: Literal["Read", "List"]
+    images: list["ImageURLPart"] | None = None
+
+
+class CommandOutput(BaseModel):
+    command_name: CommandName
+    ui_extra: ToolResultUIExtra | None = None
+    is_error: bool = False
+
+
+class SubAgentState(BaseModel):
+    sub_agent_type: SubAgentType
+    sub_agent_desc: str
+    sub_agent_prompt: str
+
+
+"""
+Models for LLM API input and response items.
+
+A typical sequence of response items is:
+- [StartItem]
+- [ReasoningTextItem | ReasoningEncryptedItem]
+- [AssistantMessageDelta] × n
+- [AssistantMessageItem]
+- [ToolCallItem] × n
+- [ResponseMetadataItem]
+- Done
+
+A conversation history input contains:
+- [UserMessageItem]
+- [ReasoningTextItem | ReasoningEncryptedItem]
+- [AssistantMessageItem]
+- [ToolCallItem]
+- [ToolResultItem]
+- [InterruptItem]
+- [DeveloperMessageItem]
+
+When adding a new item, please also modify the following:
+- session.py#_TypeMap
+"""
+
+
+class StartItem(BaseModel):
+    response_id: str
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class InterruptItem(BaseModel):
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class SystemMessageItem(BaseModel):
+    id: str | None = None
+    role: RoleType = "system"
+    content: str | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class DeveloperMessageItem(BaseModel):
+    id: str | None = None
+    role: RoleType = "developer"
+    content: str | None = None  # For LLM input
+    images: list["ImageURLPart"] | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
+
+    # Special fields for reminders UI
+    memory_paths: list[str] | None = None
+    external_file_changes: list[str] | None = None
+    todo_use: bool | None = None
+    at_files: list[AtPatternParseResult] | None = None
+    command_output: CommandOutput | None = None
+    user_image_count: int | None = None
+
+
+class ImageURLPart(BaseModel):
+    class ImageURL(BaseModel):
+        url: str
+        id: str | None = None
+
+    image_url: ImageURL
+
+
+class UserInputPayload(BaseModel):
+    """Structured payload for user input containing text and optional images.
+
+    This is the unified data structure for user input across the entire
+    UI -> CLI -> Executor -> Agent -> Task chain.
+    """
+
+    text: str
+    images: list["ImageURLPart"] | None = None
+
+
+class UserMessageItem(BaseModel):
+    id: str | None = None
+    role: RoleType = "user"
+    content: str | None = None
+    images: list[ImageURLPart] | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class AssistantMessageItem(BaseModel):
+    id: str | None = None
+    role: RoleType = "assistant"
+    content: str | None = None
+    response_id: str | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class ReasoningTextItem(BaseModel):
+    id: str | None = None
+    response_id: str | None = None
+    content: str
+    model: str | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class ReasoningEncryptedItem(BaseModel):
+    id: str | None = None
+    response_id: str | None = None
+    encrypted_content: str  # OpenAI encrypted content or Anthropic thinking signature
+    format: str | None = None
+    model: str | None
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class ToolCallStartItem(BaseModel):
+    """Transient streaming signal when LLM starts a tool call.
+
+    This is NOT persisted to conversation history. Used only for
+    real-time UI feedback (e.g., "Calling Bash ...").
+    """
+
+    response_id: str | None = None
+    call_id: str
+    name: str
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class ToolCallItem(BaseModel):
+    id: str | None = None
+    response_id: str | None = None
+    call_id: str
+    name: str
+    arguments: str
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class ToolResultItem(BaseModel):
+    call_id: str = ""  # This field will auto set by tool registry's run_tool
+    output: str | None = None
+    status: Literal["success", "error"]
+    tool_name: str | None = None  # This field will auto set by tool registry's run_tool
+    ui_extra: ToolResultUIExtra | None = None  # Extra data for UI display, e.g. diff render
+    images: list[ImageURLPart] | None = None
+    side_effects: list[ToolSideEffect] | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class AssistantMessageDelta(BaseModel):
+    response_id: str | None = None
+    content: str
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class StreamErrorItem(BaseModel):
+    error: str
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+class ResponseMetadataItem(BaseModel):
+    response_id: str | None = None
+    usage: Usage | None = None
+    model_name: str = ""
+    provider: str | None = None  # OpenRouter's provider name
+    task_duration_s: float | None = None
+    status: str | None = None
+    error_reason: str | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
+MessageItem = (
+    UserMessageItem
+    | AssistantMessageItem
+    | SystemMessageItem
+    | DeveloperMessageItem
+    | ReasoningTextItem
+    | ReasoningEncryptedItem
+    | ToolCallItem
+    | ToolResultItem
+)
+
+
+StreamItem = AssistantMessageDelta
+
+ConversationItem = (
+    StartItem | InterruptItem | StreamErrorItem | StreamItem | MessageItem | ResponseMetadataItem | ToolCallStartItem
+)
+
+
+def todo_list_str(todos: list[TodoItem]) -> str:
+    return "[" + "\n".join(f"[{todo.status}] {todo.content}" for todo in todos) + "]\n"
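
The module docstring above describes the item sequence for a conversation history. The following is a small, illustrative sketch of that shape, assuming the package is installed; the ids, command, and message text are made up, and only the class and field names come from model.py.

from klaude_code.protocol.model import (
    AssistantMessageItem,
    ConversationItem,
    TodoItem,
    ToolCallItem,
    ToolResultItem,
    UserMessageItem,
    todo_list_str,
)

# A tiny history following the documented order: user -> tool call -> tool result -> assistant.
history: list[ConversationItem] = [
    UserMessageItem(content="Rename foo.py to bar.py"),
    ToolCallItem(call_id="call_1", name="Bash", arguments='{"command": "git mv foo.py bar.py"}'),
    ToolResultItem(call_id="call_1", status="success", output="", tool_name="Bash"),
    AssistantMessageItem(content="Renamed foo.py to bar.py.", response_id="resp_1"),
]

# todo_list_str renders TodoItem entries into the bracketed text form used for LLM input.
print(todo_list_str([TodoItem(content="rename file", status="completed")]))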

klaude_code/protocol/op.py
@@ -0,0 +1,89 @@
+"""
+Operation protocol for the executor system.
+
+This module defines the operation types and submission structure
+that the executor uses to handle different types of requests.
+"""
+
+from __future__ import annotations
+
+from enum import Enum
+from typing import TYPE_CHECKING
+from uuid import uuid4
+
+from pydantic import BaseModel, Field
+
+from klaude_code.protocol.model import UserInputPayload
+
+if TYPE_CHECKING:
+    from klaude_code.protocol.op_handler import OperationHandler
+
+
+class OperationType(Enum):
+    """Enumeration of supported operation types."""
+
+    USER_INPUT = "user_input"
+    INTERRUPT = "interrupt"
+    INIT_AGENT = "init_agent"
+    END = "end"
+
+
+class Operation(BaseModel):
+    """Base class for all operations that can be submitted to the executor."""
+
+    type: OperationType
+    id: str = Field(default_factory=lambda: uuid4().hex)
+
+    async def execute(self, handler: OperationHandler) -> None:
+        """Execute this operation using the given handler."""
+        raise NotImplementedError("Subclasses must implement execute()")
+
+
+class UserInputOperation(Operation):
+    """Operation for handling user input (text and optional images) that should be processed by an agent."""
+
+    type: OperationType = OperationType.USER_INPUT
+    input: UserInputPayload
+    session_id: str | None = None
+
+    async def execute(self, handler: OperationHandler) -> None:
+        """Execute user input by running it through an agent."""
+        await handler.handle_user_input(self)
+
+
+class InterruptOperation(Operation):
+    """Operation for interrupting currently running tasks."""
+
+    type: OperationType = OperationType.INTERRUPT
+    target_session_id: str | None = None  # If None, interrupt all sessions
+
+    async def execute(self, handler: OperationHandler) -> None:
+        """Execute interrupt by cancelling active tasks."""
+        await handler.handle_interrupt(self)
+
+
+class InitAgentOperation(Operation):
+    """Operation for initializing an agent and replaying history if any."""
+
+    type: OperationType = OperationType.INIT_AGENT
+    session_id: str | None = None
+
+    async def execute(self, handler: OperationHandler) -> None:
+        await handler.handle_init_agent(self)
+
+
+class EndOperation(Operation):
+    """Operation for gracefully stopping the executor."""
+
+    type: OperationType = OperationType.END
+
+    async def execute(self, handler: OperationHandler) -> None:
+        """Execute end operation - this is a no-op, just signals the executor to stop."""
+        pass
+
+
+class Submission(BaseModel):
+    """A submission represents a request sent to the executor for processing."""
+
+    id: str
+    operation: Operation
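
For orientation, an illustrative sketch of how a user request might be wrapped for the executor using these types, assuming the package is installed; the session id and prompt text are invented, while Submission, UserInputOperation, and UserInputPayload are the classes defined above.

from uuid import uuid4

from klaude_code.protocol.model import UserInputPayload
from klaude_code.protocol.op import Submission, UserInputOperation

# Wrap a piece of user input in an operation, then in a submission the executor can queue.
op = UserInputOperation(
    input=UserInputPayload(text="Summarize the README"),
    session_id="session-123",  # hypothetical session id
)
submission = Submission(id=uuid4().hex, operation=op)
print(submission.id, op.type)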

klaude_code/protocol/op_handler.py
@@ -0,0 +1,28 @@
+"""
+Operation handler protocol for the executor system.
+
+This module defines the protocol that operation handlers must implement.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Protocol
+
+if TYPE_CHECKING:
+    from klaude_code.protocol.op import InitAgentOperation, InterruptOperation, UserInputOperation
+
+
+class OperationHandler(Protocol):
+    """Protocol defining the interface for handling operations."""
+
+    async def handle_user_input(self, operation: UserInputOperation) -> None:
+        """Handle a user input operation."""
+        ...
+
+    async def handle_interrupt(self, operation: InterruptOperation) -> None:
+        """Handle an interrupt operation."""
+        ...
+
+    async def handle_init_agent(self, operation: InitAgentOperation) -> None:
+        """Handle an init agent operation."""
+        ...
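
Because OperationHandler is a structural Protocol, any object that defines these three async methods satisfies it, and each Operation subclass dispatches onto the matching method via execute(). The sketch below is illustrative only: the LoggingHandler class and its print statements are invented, not part of the package.

import asyncio

from klaude_code.protocol.model import UserInputPayload
from klaude_code.protocol.op import InitAgentOperation, InterruptOperation, UserInputOperation


class LoggingHandler:
    """Hypothetical handler; satisfies OperationHandler structurally."""

    async def handle_user_input(self, operation: UserInputOperation) -> None:
        print("user input:", operation.input.text)

    async def handle_interrupt(self, operation: InterruptOperation) -> None:
        print("interrupt for session:", operation.target_session_id)

    async def handle_init_agent(self, operation: InitAgentOperation) -> None:
        print("init agent for session:", operation.session_id)


async def main() -> None:
    handler = LoggingHandler()
    # Operation.execute forwards each operation to the corresponding handler method.
    await UserInputOperation(input=UserInputPayload(text="hello")).execute(handler)
    await InterruptOperation().execute(handler)


asyncio.run(main())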