tunacode-cli 0.0.66__py3-none-any.whl → 0.0.68__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tunacode-cli has been flagged as potentially problematic; see the registry's advisory page for details.
- tunacode/cli/commands/__init__.py +2 -0
- tunacode/cli/commands/implementations/__init__.py +2 -0
- tunacode/cli/commands/implementations/command_reload.py +48 -0
- tunacode/cli/commands/implementations/quickstart.py +43 -0
- tunacode/cli/commands/implementations/system.py +27 -3
- tunacode/cli/commands/registry.py +131 -1
- tunacode/cli/commands/slash/__init__.py +32 -0
- tunacode/cli/commands/slash/command.py +157 -0
- tunacode/cli/commands/slash/loader.py +134 -0
- tunacode/cli/commands/slash/processor.py +294 -0
- tunacode/cli/commands/slash/types.py +93 -0
- tunacode/cli/commands/slash/validator.py +399 -0
- tunacode/cli/main.py +4 -1
- tunacode/cli/repl.py +25 -0
- tunacode/configuration/defaults.py +1 -0
- tunacode/constants.py +1 -1
- tunacode/core/agents/agent_components/agent_helpers.py +14 -13
- tunacode/core/agents/main.py +1 -1
- tunacode/core/agents/utils.py +4 -3
- tunacode/core/setup/config_setup.py +231 -6
- tunacode/core/setup/coordinator.py +13 -5
- tunacode/core/setup/git_safety_setup.py +5 -1
- tunacode/exceptions.py +119 -5
- tunacode/setup.py +5 -2
- tunacode/tools/glob.py +9 -46
- tunacode/tools/grep.py +9 -51
- tunacode/tools/xml_helper.py +83 -0
- tunacode/tutorial/__init__.py +9 -0
- tunacode/tutorial/content.py +98 -0
- tunacode/tutorial/manager.py +182 -0
- tunacode/tutorial/steps.py +124 -0
- tunacode/ui/output.py +1 -1
- tunacode/utils/user_configuration.py +45 -0
- tunacode_cli-0.0.68.dist-info/METADATA +192 -0
- {tunacode_cli-0.0.66.dist-info → tunacode_cli-0.0.68.dist-info}/RECORD +38 -25
- tunacode_cli-0.0.66.dist-info/METADATA +0 -327
- {tunacode_cli-0.0.66.dist-info → tunacode_cli-0.0.68.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.66.dist-info → tunacode_cli-0.0.68.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.66.dist-info → tunacode_cli-0.0.68.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
"""SlashCommandLoader for discovering and loading markdown-based commands."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any, Dict, List, Tuple
|
|
6
|
+
|
|
7
|
+
from .command import SlashCommand
|
|
8
|
+
from .types import CommandDiscoveryResult, CommandSource
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class SlashCommandLoader:
    """Discovers and loads markdown-based slash commands with precedence rules.

    Commands are loaded from four directories, searched highest priority
    first: project ``.tunacode/commands``, project ``.claude/commands``,
    user ``.tunacode/commands``, user ``.claude/commands``.  When the same
    command name appears in several directories, the source with the lower
    ``CommandSource`` value wins.
    """

    def __init__(self, project_root: Path, user_home: Path):
        """Initialize the loader.

        Args:
            project_root: Root directory of the current project.
            user_home: The user's home directory.
        """
        self.project_root = project_root
        self.user_home = user_home
        self.directories = self._build_directory_list()
        # Reserved for caching loaded commands; cleared by reload_commands().
        self._cache: Dict[str, SlashCommand] = {}

    def _build_directory_list(self) -> List[Tuple[Path, CommandSource, str]]:
        """Build prioritized directory list with sources and namespaces.

        Returns:
            List of (directory, source, namespace) tuples, highest
            priority first.
        """
        return [
            (
                self.project_root / ".tunacode" / "commands",
                CommandSource.PROJECT_TUNACODE,
                "project",
            ),
            (self.project_root / ".claude" / "commands", CommandSource.PROJECT_CLAUDE, "project"),
            (self.user_home / ".tunacode" / "commands", CommandSource.USER_TUNACODE, "user"),
            (self.user_home / ".claude" / "commands", CommandSource.USER_CLAUDE, "user"),
        ]

    def discover_commands(self) -> CommandDiscoveryResult:
        """Main discovery method with conflict resolution.

        Scans every known directory, merges the commands it finds using
        source precedence, and records conflicts, per-directory scan
        errors, and summary statistics.

        Returns:
            CommandDiscoveryResult with the winning command per name.
        """
        all_commands: Dict[str, Any] = {}
        conflicts: List[Tuple[str, List[Path]]] = []
        errors: List[Tuple[Path, Exception]] = []
        stats = {"scanned_dirs": 0, "found_files": 0, "loaded_commands": 0}

        for directory, source, namespace in self.directories:
            if not directory.exists():
                continue

            stats["scanned_dirs"] += 1

            try:
                dir_commands = self._scan_directory(directory, source, namespace)
                stats["found_files"] += len(dir_commands)

                # Handle conflicts with precedence (lower source value wins).
                for cmd_name, cmd in dir_commands.items():
                    if cmd_name in all_commands:
                        existing_cmd = all_commands[cmd_name]
                        # A command with no metadata ranks below every source.
                        existing_priority = (
                            existing_cmd._metadata.source.value
                            if existing_cmd._metadata
                            else float("inf")
                        )
                        if source.value < existing_priority:
                            conflicts.append((cmd_name, [existing_cmd.file_path, cmd.file_path]))
                            all_commands[cmd_name] = cmd
                            logger.info(f"Command '{cmd_name}' overridden by {source.name}")
                    else:
                        all_commands[cmd_name] = cmd

            except Exception as e:
                errors.append((directory, e))
                logger.error(f"Error scanning {directory}: {e}")

        # BUGFIX: this was previously accumulated per directory as
        # len([c for c in dir_commands.values() if c.name in all_commands]),
        # which is always len(dir_commands) (every scanned command is in the
        # merged map under its own name), so commands defined in multiple
        # directories were counted once per directory.  The number of loaded
        # commands is simply the size of the merged map.
        stats["loaded_commands"] = len(all_commands)

        logger.info(
            f"Discovered {len(all_commands)} slash commands from {stats['scanned_dirs']} directories"
        )
        return CommandDiscoveryResult(all_commands, conflicts, errors, stats)

    def _scan_directory(
        self, directory: Path, source: CommandSource, namespace: str
    ) -> Dict[str, SlashCommand]:
        """Recursively scan a directory for markdown command files.

        Each ``*.md`` file becomes one command; subdirectories contribute
        additional command-name parts.  Files that fail to load are logged
        and skipped.

        Returns:
            Mapping of command name to SlashCommand.
        """
        commands = {}

        for md_file in directory.rglob("*.md"):
            try:
                # Command parts come from the path: directories plus the
                # filename without its .md suffix.
                relative_path = md_file.relative_to(directory)
                command_parts = list(relative_path.parts[:-1])  # Directories
                command_parts.append(relative_path.stem)  # Filename without .md

                command = SlashCommand(md_file, namespace, command_parts)
                # Record the source in metadata; discover_commands() uses it
                # for precedence decisions.
                if not hasattr(command, "_metadata") or command._metadata is None:
                    from .types import SlashCommandMetadata

                    command._metadata = SlashCommandMetadata(description="", source=source)
                else:
                    command._metadata.source = source

                command_name = command.name
                commands[command_name] = command

            except Exception as e:
                logger.warning(f"Failed to load command from {md_file}: {e}")

        return commands

    def reload_commands(self) -> CommandDiscoveryResult:
        """Reload all commands (useful for development)."""
        self._cache.clear()
        return self.discover_commands()

    def get_command_by_path(self, file_path: Path) -> SlashCommand:
        """Build a SlashCommand for a specific file path.

        The namespace and command parts are derived from whichever known
        command directory contains the path; unknown paths fall back to the
        "project" namespace with underscore-separated name parts.
        """
        for directory, _source, namespace in self.directories:
            try:
                if file_path.is_relative_to(directory):
                    relative_path = file_path.relative_to(directory)
                    command_parts = list(relative_path.parts[:-1])
                    command_parts.append(relative_path.stem)

                    command = SlashCommand(file_path, namespace, command_parts)
                    return command
            except (ValueError, AttributeError):
                continue

        # Fallback to project namespace if path not in known directories
        parts = file_path.stem.split("_") if file_path.stem else ["unknown"]
        return SlashCommand(file_path, "project", parts)
|
@@ -0,0 +1,294 @@
|
|
|
1
|
+
"""Template processing engine for markdown slash commands."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import os
|
|
5
|
+
import re
|
|
6
|
+
import subprocess
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple
|
|
9
|
+
|
|
10
|
+
import yaml # type: ignore[import-untyped]
|
|
11
|
+
|
|
12
|
+
from .types import ContextInjectionResult
|
|
13
|
+
from .validator import CommandValidator
|
|
14
|
+
|
|
15
|
+
if TYPE_CHECKING:
|
|
16
|
+
from ....types import CommandContext
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class MarkdownTemplateProcessor:
    """Processes markdown templates with variable substitution and command execution.

    Supported template syntax:
      - ``$ARGUMENTS``      replaced with the space-joined command arguments
      - ``$ENV_VAR``        replaced with the environment variable's value
      - ``!`command```      replaced with the validated shell command's stdout
      - ``@path/to/file``   replaced with the file's contents
      - ``@@glob/pattern``  replaced with the contents of all matching files

    All injected context is tracked so size and file-count limits can be
    enforced and warnings surfaced to the caller.
    """

    def __init__(self, max_context_size: int = 100_000, max_files: int = 50):
        """Initialize the processor.

        Args:
            max_context_size: Maximum total characters of injected context.
            max_files: Maximum number of files that may be included.
        """
        self.validator = CommandValidator()
        self.max_context_size = max_context_size
        self.max_files = max_files

        # Regex patterns for template syntax
        self.argument_pattern = re.compile(r"\$ARGUMENTS\b")
        self.env_var_pattern = re.compile(r"\$([A-Z_][A-Z0-9_]*)\b")
        self.command_pattern = re.compile(r"!\`([^`]+)\`")
        # BUGFIX: the previous pattern r"@([^\s\)\],]+)" also matched the
        # first '@' of an '@@glob' token (the char class admits '@'), and
        # since file inclusion runs before glob expansion every '@@pattern'
        # was consumed as a bogus '@pattern' file path.  The lookaround
        # assertions restrict this pattern to a single, isolated '@'.
        self.file_pattern = re.compile(r"(?<!@)@(?!@)([^\s\)\],]+)")
        self.glob_pattern = re.compile(r"@@([^\s\)\],]+)")

        # Context tracking (reset at the start of each template run)
        self._included_files: Set[Path] = set()
        self._total_context_size = 0
        self._warnings: List[str] = []

    def parse_frontmatter(self, content: str) -> Tuple[Optional[Dict], str]:
        """Parse YAML frontmatter from markdown content.

        Returns:
            Tuple of (frontmatter dict, markdown body).  An empty dict is
            returned when no frontmatter block exists; on YAML errors the
            original content is returned unchanged.
        """
        if not content.strip().startswith("---"):
            return {}, content

        try:
            # Split on the first two --- boundaries: anything before the
            # opening marker, the frontmatter itself, and the body.
            parts = content.split("---", 2)
            if len(parts) < 3:
                return {}, content

            frontmatter_text = parts[1].strip()
            markdown_content = parts[2].lstrip("\n")

            if not frontmatter_text:
                return {}, markdown_content

            # safe_load: command files are user-editable, never execute
            # arbitrary YAML tags.
            frontmatter = yaml.safe_load(frontmatter_text)
            return frontmatter, markdown_content

        except yaml.YAMLError as e:
            logger.warning(f"Invalid YAML frontmatter: {e}")
            return {}, content

    def process_template_with_context(
        self, content: str, args: List[str], context: "CommandContext"
    ) -> ContextInjectionResult:
        """Process a template with comprehensive context-injection tracking.

        Substitution passes run in a fixed order: arguments, environment
        variables, shell command blocks, single-file inclusions, then glob
        inclusions.

        Returns:
            ContextInjectionResult with the processed content plus the
            files included, commands executed, total size, and warnings.
        """

        # Reset tracking from any previous run
        self._included_files.clear()
        self._total_context_size = len(content)
        self._warnings.clear()
        executed_commands = []

        processed = content

        # 1. Replace $ARGUMENTS
        args_string = " ".join(args) if args else ""
        processed = self.argument_pattern.sub(args_string, processed)

        # 2. Replace environment variables
        processed = self._process_env_vars(processed)

        # 3. Execute !`command` blocks (track executed commands)
        processed, cmd_list = self._process_command_blocks_with_tracking(processed, context)
        executed_commands.extend(cmd_list)

        # 4. Include @file contents (with size tracking)
        processed = self._process_file_inclusions_with_tracking(processed, context)

        # 5. Process @@glob patterns (with limits)
        processed = self._process_glob_inclusions_with_tracking(processed, context)

        return ContextInjectionResult(
            processed_content=processed,
            included_files=list(self._included_files),
            executed_commands=executed_commands,
            total_size=self._total_context_size,
            warnings=self._warnings.copy(),
        )

    def _process_env_vars(self, content: str) -> str:
        """Replace ``$UPPER_CASE`` environment variables in *content*."""

        def replace_env_var(match):
            var_name = match.group(1)
            # Leave the token unchanged when the variable is not set
            return os.environ.get(var_name, f"${var_name}")

        return self.env_var_pattern.sub(replace_env_var, content)

    def _process_command_blocks_with_tracking(
        self, content: str, context: "CommandContext"
    ) -> Tuple[str, List[str]]:
        """Execute ``!`command``` blocks, returning (content, executed commands).

        Each command is security-validated before execution; blocked,
        failing, or timed-out commands are replaced with bracketed
        diagnostic markers instead of output.
        """
        executed_commands = []

        def replace_command(match):
            command = match.group(1).strip()
            executed_commands.append(command)

            # Security validation
            validation_result = self.validator.validate_shell_command(command)
            if not validation_result.allowed:
                error_violations = [
                    v for v in validation_result.violations if v.severity == "error"
                ]
                if error_violations:
                    return f"[BLOCKED: Unsafe command '{command}']"

            try:
                # shell=True is required for the template's free-form command
                # strings; the validator above is the gate against unsafe input.
                result = subprocess.run(
                    command,
                    shell=True,
                    capture_output=True,
                    text=True,
                    timeout=10,  # 10 second timeout
                    cwd=getattr(context.state_manager.config, "current_directory", os.getcwd()),
                )

                if result.returncode == 0:
                    output = result.stdout.strip()
                    self._total_context_size += len(output)

                    # Check context size limit
                    if self._total_context_size > self.max_context_size:
                        self._warnings.append(
                            f"Command output truncated due to size limit: {command}"
                        )
                        # NOTE(review): _total_context_size still reflects the
                        # full output length even though only 1000 chars are
                        # kept — confirm whether the over-count is intended.
                        return output[:1000] + "...[truncated]"

                    return output
                else:
                    error_msg = f"[ERROR: Command failed with code {result.returncode}]"
                    if result.stderr:
                        error_msg += f"\n{result.stderr.strip()}"
                    return error_msg

            except subprocess.TimeoutExpired:
                return "[ERROR: Command timed out]"
            except Exception as e:
                return f"[ERROR: {str(e)}]"

        processed_content = self.command_pattern.sub(replace_command, content)
        return processed_content, executed_commands

    def _process_file_inclusions_with_tracking(
        self, content: str, context: "CommandContext"
    ) -> str:
        """Replace ``@file`` references with file contents, tracking limits.

        Enforces the file-count limit, the total context-size limit,
        path-safety validation, and circular-inclusion detection.
        """

        def replace_file(match):
            file_path = match.group(1).strip()

            # Check file limit
            if len(self._included_files) >= self.max_files:
                self._warnings.append(f"File inclusion limit reached, skipping: {file_path}")
                return f"[LIMIT: Too many files included, skipping '{file_path}']"

            try:
                base_path = Path(
                    getattr(context.state_manager.config, "current_directory", os.getcwd())
                )
                full_path = (base_path / file_path).resolve()

                # Security validation
                validation_result = self.validator.validate_file_path(file_path, base_path)
                if not validation_result.allowed:
                    error_violations = [
                        v for v in validation_result.violations if v.severity == "error"
                    ]
                    if error_violations:
                        return f"[BLOCKED: Unsafe file path '{file_path}']"

                # Check for circular inclusion
                if full_path in self._included_files:
                    self._warnings.append(f"Circular file inclusion detected: {file_path}")
                    return f"[CIRCULAR: File already included '{file_path}']"

                if full_path.exists() and full_path.is_file():
                    file_content = full_path.read_text(encoding="utf-8")

                    # Truncate to whatever context budget remains
                    if self._total_context_size + len(file_content) > self.max_context_size:
                        self._warnings.append(
                            f"File content truncated due to size limit: {file_path}"
                        )
                        remaining_space = self.max_context_size - self._total_context_size
                        file_content = file_content[:remaining_space] + "...[truncated]"

                    self._included_files.add(full_path)
                    self._total_context_size += len(file_content)

                    # Add file header for context
                    return f"\n# File: {file_path}\n{file_content}\n# End of {file_path}\n"
                else:
                    return f"[ERROR: File not found '{file_path}']"

            except Exception as e:
                return f"[ERROR: Cannot read file '{file_path}': {str(e)}]"

        return self.file_pattern.sub(replace_file, content)

    def _process_glob_inclusions_with_tracking(
        self, content: str, context: "CommandContext"
    ) -> str:
        """Replace ``@@pattern`` references with aggregated file contents.

        Applies pattern-safety validation, the file-count limit, the total
        context-size limit, and skips files already included individually.
        """

        def replace_glob(match):
            pattern = match.group(1).strip()

            # Security validation
            validation_result = self.validator.validate_glob_pattern(pattern)
            if not validation_result.allowed:
                error_violations = [
                    v for v in validation_result.violations if v.severity == "error"
                ]
                if error_violations:
                    return f"[BLOCKED: Unsafe glob pattern '{pattern}']"

            try:
                base_path = Path(
                    getattr(context.state_manager.config, "current_directory", os.getcwd())
                )
                matching_files = list(base_path.glob(pattern))

                # Limit number of files to the remaining file budget
                if len(matching_files) > self.max_files - len(self._included_files):
                    self._warnings.append(
                        f"Glob pattern matched too many files, truncating: {pattern}"
                    )
                    matching_files = matching_files[: self.max_files - len(self._included_files)]

                if not matching_files:
                    return f"[INFO: No files matched pattern '{pattern}']"

                # Aggregate file contents under a pattern header
                aggregated_content = []
                aggregated_content.append(f"\n# Files matching pattern: {pattern}")

                for file_path in sorted(matching_files):
                    if file_path in self._included_files:
                        continue  # Skip already included files

                    if not file_path.is_file():
                        continue  # Skip directories

                    try:
                        file_content = file_path.read_text(encoding="utf-8")

                        # Stop (rather than truncate) once the budget is hit
                        if self._total_context_size + len(file_content) > self.max_context_size:
                            self._warnings.append(
                                f"Glob inclusion stopped due to size limit at: {file_path}"
                            )
                            break

                        relative_path = file_path.relative_to(base_path)
                        aggregated_content.append(f"\n## File: {relative_path}")
                        aggregated_content.append(file_content)

                        self._included_files.add(file_path)
                        self._total_context_size += len(file_content)

                    except Exception as e:
                        aggregated_content.append(f"\n## Error reading {file_path}: {str(e)}")

                aggregated_content.append(f"\n# End of pattern: {pattern}\n")
                return "\n".join(aggregated_content)

            except Exception as e:
                return f"[ERROR: Glob pattern failed '{pattern}': {str(e)}]"

        return self.glob_pattern.sub(replace_glob, content)
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
"""Core types and data structures for slash command system."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from enum import Enum
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
pass
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class CommandSource(Enum):
    """Command source with priority ordering (lower value = higher priority).

    The numeric values are compared directly when two sources define the
    same command name, so the ordering below is load-bearing.
    """

    PROJECT_TUNACODE = 1  # Highest priority: project-local .tunacode commands
    PROJECT_CLAUDE = 2  # Project fallback: project-local .claude commands
    USER_TUNACODE = 3  # User primary: ~/.tunacode commands
    USER_CLAUDE = 4  # Lowest priority: ~/.claude commands
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class SecurityLevel(Enum):
    """Security validation levels for slash-command content."""

    STRICT = "strict"  # Minimal commands allowed
    MODERATE = "moderate"  # Balanced security (default)
    PERMISSIVE = "permissive"  # More commands allowed
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@dataclass
class SlashCommandMetadata:
    """Metadata parsed from a command file's YAML frontmatter."""

    description: str  # Human-readable command description
    allowed_tools: Optional[List[str]] = None  # Tool whitelist; None = no restriction
    timeout: Optional[int] = None  # Execution timeout; None = default
    parameters: Dict[str, str] = field(default_factory=dict)  # Declared parameters
    source: CommandSource = CommandSource.PROJECT_TUNACODE  # Where the command was loaded from
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@dataclass
class CommandDiscoveryResult:
    """Result of the command discovery process."""

    commands: Dict[str, Any]  # SlashCommand instances, keyed by command name
    conflicts: List[Tuple[str, List[Path]]]  # (name, paths) for commands defined in >1 place
    errors: List[Tuple[Path, Exception]]  # Directories/files that failed to load
    stats: Dict[str, int] = field(default_factory=dict)  # e.g. scanned_dirs, found_files
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
@dataclass
class ContextInjectionResult:
    """Result of template processing with context injection."""

    processed_content: str  # Template after all substitutions
    included_files: List[Path]  # Files whose contents were injected
    executed_commands: List[str]  # Shell commands run for !`...` blocks
    total_size: int  # Total characters of context produced
    warnings: List[str]  # Truncation / limit / circular-inclusion notices
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
@dataclass
class SecurityViolation:
    """Details about a single security violation found during validation."""

    type: str  # Violation category identifier
    message: str  # Human-readable explanation
    command: str  # The offending command or path text
    severity: str  # "error", "warning", "info"
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
@dataclass
class ValidationResult:
    """Result of a security validation check."""

    allowed: bool  # False when execution must be blocked
    violations: List[SecurityViolation]  # All violations found (any severity)
    sanitized_command: Optional[str] = None  # Cleaned-up command, if one was produced
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@dataclass
class AuditEntry:
    """Single audit log entry for security monitoring of command execution."""

    timestamp: datetime  # When the command ran
    command_name: str  # Name of the slash command
    user: str  # Who invoked it
    command_content: str  # Processed command content
    included_files: List[str]  # Files injected into the context
    executed_commands: List[str]  # Shell commands executed
    security_violations: List[Dict]  # Serialized SecurityViolation records
    success: bool  # Whether execution completed without error
    error_message: Optional[str] = None  # Failure detail, if success is False
|