llm-ide-rules 0.6.0__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_ide_rules/__init__.py +20 -1
- llm_ide_rules/agents/__init__.py +4 -0
- llm_ide_rules/agents/agents.py +124 -0
- llm_ide_rules/agents/base.py +63 -6
- llm_ide_rules/agents/claude.py +31 -4
- llm_ide_rules/agents/cursor.py +64 -14
- llm_ide_rules/agents/gemini.py +44 -4
- llm_ide_rules/agents/github.py +49 -13
- llm_ide_rules/agents/opencode.py +14 -4
- llm_ide_rules/agents/vscode.py +88 -0
- llm_ide_rules/commands/config.py +46 -0
- llm_ide_rules/commands/delete.py +111 -5
- llm_ide_rules/commands/download.py +123 -14
- llm_ide_rules/commands/explode.py +169 -170
- llm_ide_rules/commands/implode.py +25 -56
- llm_ide_rules/commands/mcp.py +1 -1
- llm_ide_rules/constants.py +1 -27
- llm_ide_rules/markdown_parser.py +108 -0
- llm_ide_rules/utils.py +118 -0
- {llm_ide_rules-0.6.0.dist-info → llm_ide_rules-0.8.0.dist-info}/METADATA +4 -3
- llm_ide_rules-0.8.0.dist-info/RECORD +27 -0
- llm_ide_rules/sections.json +0 -17
- llm_ide_rules-0.6.0.dist-info/RECORD +0 -23
- {llm_ide_rules-0.6.0.dist-info → llm_ide_rules-0.8.0.dist-info}/WHEEL +0 -0
- {llm_ide_rules-0.6.0.dist-info → llm_ide_rules-0.8.0.dist-info}/entry_points.txt +0 -0

llm_ide_rules/commands/download.py:

@@ -9,6 +9,8 @@ import requests
 import typer
 from typing_extensions import Annotated
 
+from llm_ide_rules.commands.explode import explode_implementation
+from llm_ide_rules.constants import VALID_AGENTS
 from llm_ide_rules.log import log
 
 DEFAULT_REPO = "iloveitaly/llm-ide-rules"

@@ -38,17 +40,39 @@ def normalize_repo(repo: str) -> str:
 
 
 # Define what files/directories each instruction type includes
+# For agents supported by 'explode' (cursor, github, gemini, claude, opencode),
+# we don't download specific directories anymore. Instead, we download the source
+# files (instructions.md, commands.md) and generate them locally using explode.
+# The directories listed here are what gets created by explode and what delete removes.
 INSTRUCTION_TYPES = {
-    "cursor": {
+    "cursor": {
+        "directories": [".cursor/rules", ".cursor/commands"],
+        "files": [],
+        "include_patterns": [],
+    },
     "github": {
-        "directories": [".github"],
+        "directories": [".github/instructions", ".github/prompts"],
+        "files": [".github/copilot-instructions.md"],
+        "include_patterns": [],
+    },
+    "gemini": {
+        "directories": [".gemini/commands"],
+        "files": [],
+        "generated_files": [],
+        "include_patterns": [],
+    },
+    "claude": {
+        "directories": [".claude/commands"],
         "files": [],
-        "
+        "generated_files": ["CLAUDE.md"],
+        "include_patterns": [],
     },
-    "
-
-
-
+    "opencode": {
+        "directories": [".opencode/commands"],
+        "files": [],
+        "include_patterns": [],
+    },
+    "agents": {"directories": [], "files": [], "generated_files": ["AGENTS.md"]},
 }
 
 # Default types to download when no specific types are specified

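The comment block above doubles as the deletion manifest: what `explode` generates is exactly what `delete` is expected to clean up. A minimal sketch (not code from the package; `paths_to_remove` is a hypothetical helper, and the import assumes `INSTRUCTION_TYPES` lives in `llm_ide_rules.commands.download`, as this hunk suggests) of how one of these config entries could be consumed:

```python
from pathlib import Path

from llm_ide_rules.commands.download import INSTRUCTION_TYPES  # assumed module path


def paths_to_remove(config: dict, target: Path) -> list[Path]:
    """Collect the directories and generated files an agent entry points at."""
    candidates = [
        *config.get("directories", []),
        *config.get("files", []),
        *config.get("generated_files", []),
    ]
    return [target / rel for rel in candidates if (target / rel).exists()]


# With the "claude" entry above, this might return
# [PosixPath('.claude/commands'), PosixPath('CLAUDE.md')] if both exist.
paths = paths_to_remove(INSTRUCTION_TYPES["claude"], Path("."))
```
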
@@ -131,7 +155,10 @@ def copy_instruction_files(
 
     # Copy all files from source to target
     copy_directory_contents(
-        source_dir,
+        source_dir,
+        target_subdir,
+        config.get("exclude_patterns", []),
+        config.get("include_patterns", []),
     )
     copied_items.append(f"{dir_name}/")
 

@@ -208,7 +235,10 @@ def copy_recursive_files(
 
 
 def copy_directory_contents(
-    source_dir: Path,
+    source_dir: Path,
+    target_dir: Path,
+    exclude_patterns: list[str],
+    include_patterns: list[str] = [],
 ):
     """Recursively copy directory contents, excluding specified patterns."""
     for item in source_dir.rglob("*"):

@@ -234,6 +264,22 @@ def copy_directory_contents(
                 log.debug("excluding file", file=relative_str, pattern=pattern)
                 continue
 
+        # Check if file matches any include pattern (if any provided)
+        if include_patterns:
+            matched_include = False
+            for include_pattern in include_patterns:
+                # Match against filename only, or full relative path
+                if item.match(include_pattern):
+                    matched_include = True
+                    break
+
+            if not matched_include:
+                log.debug(
+                    "skipping file (not matched in include_patterns)",
+                    file=relative_str,
+                )
+                continue
+
         target_file = target_dir / relative_path
         target_file.parent.mkdir(parents=True, exist_ok=True)
         target_file.write_bytes(item.read_bytes())

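The include filter above relies on `pathlib.Path.match`, which anchors the pattern at the right-hand end of the path. A standalone illustration (paths and patterns here are made up, not taken from the package):

```python
from pathlib import Path

print(Path("docs/setup.md").match("*.md"))       # True: matches the final component
print(Path("docs/setup.md").match("docs/*.md"))  # True: matches the trailing segments
print(Path("docs/setup.md").match("*.py"))       # False: such a file would be skipped
```
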
@@ -243,7 +289,7 @@ def download_main(
     instruction_types: Annotated[
         list[str] | None,
         typer.Argument(
-            help="Types of instructions to download (cursor, github, gemini, claude,
+            help="Types of instructions to download (cursor, github, gemini, claude, opencode, agents). Downloads everything by default."
         ),
     ] = None,
     repo: Annotated[

@@ -310,16 +356,79 @@ def download_main(
 
     try:
         # Copy instruction files
-        copied_items =
+        copied_items = [
+            f"Downloaded: {item}"
+            for item in copy_instruction_files(repo_dir, instruction_types, target_path)
+        ]
+
+        # Check for source files (instructions.md, commands.md) and copy them if available
+        # These are needed for 'explode' logic
+        source_files = ["instructions.md", "commands.md"]
+        sources_copied = False
+
+        # Only copy source files if we have at least one agent that uses explode
+        has_explode_agent = any(t in VALID_AGENTS for t in instruction_types)
+
+        if has_explode_agent:
+            for source_file in source_files:
+                src = repo_dir / source_file
+                dst = target_path / source_file
+                if src.exists():
+                    log.info("copying source file", source=str(src), target=str(dst))
+                    dst.parent.mkdir(parents=True, exist_ok=True)
+                    dst.write_bytes(src.read_bytes())
+                    copied_items.append(f"Downloaded: {source_file}")
+                    sources_copied = True
+
+        # Generate rule files locally for supported agents
+        explodable_agents = [t for t in instruction_types if t in VALID_AGENTS]
+
+        if explodable_agents:
+            if not sources_copied:
+                # Check if they existed in target already?
+                if not (target_path / "instructions.md").exists():
+                    log.warning(
+                        "source file instructions.md missing, generation might fail"
+                    )
+
+            for agent in explodable_agents:
+                log.info("generating rules locally", agent=agent)
+                try:
+                    explode_implementation(
+                        input_file="instructions.md",
+                        agent=agent,
+                        working_dir=target_path,
+                    )
+                    copied_items.append(f"Generated: {agent} rules")
+                except Exception as e:
+                    log.error("failed to generate rules", agent=agent, error=str(e))
+                    typer.echo(
+                        f"Warning: Failed to generate rules for {agent}: {e}", err=True
+                    )
 
         if copied_items:
-            success_msg = f"Downloaded
+            success_msg = f"Downloaded/Generated items in {target_path}:"
             typer.echo(typer.style(success_msg, fg=typer.colors.GREEN))
             for item in copied_items:
                 typer.echo(f"  - {item}")
         else:
-            log.
-
+            log.info("no files were copied or generated")
+
+            # Build list of expected files
+            expected_files = []
+            for inst_type in instruction_types:
+                config = INSTRUCTION_TYPES[inst_type]
+                expected_files.extend(config.get("directories", []))
+                expected_files.extend(config.get("files", []))
+                expected_files.extend(config.get("recursive_files", []))
+
+            error_msg = "No matching instruction files found in the repository."
+            typer.echo(typer.style(error_msg, fg=typer.colors.RED), err=True)
+
+            if expected_files:
+                typer.echo("\nExpected files/directories:", err=True)
+                for expected in expected_files:
+                    typer.echo(f"  - {expected}", err=True)
 
     finally:
         # Clean up temporary directory

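The download command now delegates rule generation to `explode_implementation` rather than fetching pre-generated directories. A sketch of the equivalent direct call, assuming a target directory that already contains `instructions.md` (the path is made up):

```python
from pathlib import Path

from llm_ide_rules.commands.explode import explode_implementation

target = Path("/tmp/my-project")  # hypothetical checkout containing instructions.md

explode_implementation(
    input_file="instructions.md",
    agent="claude",  # any value accepted by VALID_AGENTS
    working_dir=target,
)
# Per the INSTRUCTION_TYPES entry above, this should produce .claude/commands/
# and a generated CLAUDE.md under the target directory.
```
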
llm_ide_rules/commands/explode.py:

@@ -12,59 +12,8 @@ from llm_ide_rules.agents.base import (
     write_rule_file,
 )
 from llm_ide_rules.log import log
-from llm_ide_rules.constants import
-
-
-def extract_general(lines: list[str]) -> list[str]:
-    """Extract lines before the first section header '## '."""
-    general = []
-    for line in lines:
-        if line.startswith("## "):
-            break
-        general.append(line)
-
-    return general
-
-
-def extract_section(lines: list[str], header: str) -> list[str]:
-    """Extract lines under a given section header until the next header or EOF.
-
-    Includes the header itself in the output.
-    """
-    content = []
-    in_section = False
-    for line in lines:
-        if in_section:
-            if line.startswith("## "):
-                break
-            content.append(line)
-        elif line.strip().lower() == header.lower():
-            in_section = True
-            content.append(line)
-
-    return content
-
-
-def extract_all_sections(lines: list[str]) -> dict[str, list[str]]:
-    """Extract all sections from lines, returning dict of section_name -> content_lines."""
-    sections: dict[str, list[str]] = {}
-    current_section: str | None = None
-    current_content: list[str] = []
-
-    for line in lines:
-        if line.startswith("## "):
-            if current_section:
-                sections[current_section] = current_content
-
-            current_section = line.strip()[3:]
-            current_content = [line]
-        elif current_section:
-            current_content.append(line)
-
-    if current_section:
-        sections[current_section] = current_content
-
-    return sections
+from llm_ide_rules.constants import header_to_filename, VALID_AGENTS
+from llm_ide_rules.markdown_parser import parse_sections
 
 
 def process_command_section(

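The removed `extract_*` helpers are superseded by `parse_sections` from the new `llm_ide_rules/markdown_parser.py`, whose source is not part of this diff. A hypothetical sketch of its contract, inferred only from the call sites below (a general preamble plus named `## ` sections exposing `.content` and `.glob_pattern`); how glob directives are actually detected is not visible here, so they are left as `None`:

```python
from dataclasses import dataclass


@dataclass
class Section:
    content: list[str]        # section lines, including the "## " header line
    glob_pattern: str | None  # a glob directive, "manual", or None


def parse_sections(text: str) -> tuple[list[str], dict[str, Section]]:
    """Split markdown into preamble lines and '## '-delimited sections."""
    general: list[str] = []
    sections: dict[str, Section] = {}
    current: str | None = None

    for line in text.splitlines():
        if line.startswith("## "):
            current = line[3:].strip()
            sections[current] = Section(content=[line], glob_pattern=None)
        elif current is None:
            general.append(line)
        else:
            sections[current].content.append(line)

    return general, sections
```
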
@@ -105,31 +54,31 @@ def process_unmapped_as_always_apply(
     section_content = replace_header_with_proper_casing(section_content, section_name)
 
     cursor_agent.write_rule(
-        section_content,
+        section_content,
+        filename,
+        cursor_rules_dir,
+        glob_pattern=None,
+        description=section_name,
+    )
+    github_agent.write_rule(
+        section_content,
+        filename,
+        copilot_dir,
+        glob_pattern=None,
+        description=section_name,
     )
-    github_agent.write_rule(section_content, filename, copilot_dir, glob_pattern=None)
 
     return True
 
 
-def
-    input_file:
-
-
-    config: Annotated[
-        str | None,
-        typer.Option("--config", "-c", help="Custom configuration file path"),
-    ] = None,
-    agent: Annotated[
-        str,
-        typer.Option(
-            "--agent",
-            "-a",
-            help="Agent to explode for (cursor, github, claude, gemini, or all)",
-        ),
-    ] = "all",
+def explode_implementation(
+    input_file: str = "instructions.md",
+    agent: str = "all",
+    working_dir: Path | None = None,
 ) -> None:
-    """
+    """Core implementation of explode command."""
+    if working_dir is None:
+        working_dir = Path.cwd()
 
     if agent not in VALID_AGENTS:
         log.error("invalid agent", agent=agent, valid_agents=VALID_AGENTS)

@@ -139,18 +88,24 @@ def explode_main(
         typer.echo(typer.style(error_msg, fg=typer.colors.RED), err=True)
         raise typer.Exit(1)
 
-    section_globs = load_section_globs(config)
-
     log.info(
-        "starting explode operation",
+        "starting explode operation",
+        input_file=input_file,
+        agent=agent,
+        working_dir=str(working_dir),
     )
 
-    cwd = Path.cwd()
-
     # Initialize only the agents we need
     agents_to_process = []
     if agent == "all":
-        agents_to_process = [
+        agents_to_process = [
+            "cursor",
+            "github",
+            "claude",
+            "gemini",
+            "opencode",
+            "agents",
+        ]
     else:
         agents_to_process = [agent]
 

@@ -163,21 +118,21 @@ def explode_main(
 
         if agent_name in ["cursor", "github"]:
             # These agents have both rules and commands
-            rules_dir =
-            commands_dir =
-            rules_dir.mkdir(parents=True, exist_ok=True)
-            commands_dir.mkdir(parents=True, exist_ok=True)
+            rules_dir = working_dir / agent_instances[agent_name].rules_dir
+            commands_dir = working_dir / agent_instances[agent_name].commands_dir
             agent_dirs[agent_name] = {"rules": rules_dir, "commands": commands_dir}
-
+        elif agent_instances[agent_name].commands_dir:
             # claude, gemini, and opencode only have commands
-            commands_dir =
-            commands_dir.mkdir(parents=True, exist_ok=True)
+            commands_dir = working_dir / agent_instances[agent_name].commands_dir
             agent_dirs[agent_name] = {"commands": commands_dir}
+        else:
+            # agents has neither rules nor commands dirs (only generates root doc)
+            agent_dirs[agent_name] = {}
 
-    input_path =
+    input_path = working_dir / input_file
 
     try:
-
+        input_text = input_path.read_text()
     except FileNotFoundError:
         log.error("input file not found", input_file=str(input_path))
         error_msg = f"Input file not found: {input_path}"

@@ -185,17 +140,20 @@ def explode_main(
         raise typer.Exit(1)
 
     commands_path = input_path.parent / "commands.md"
-
+    commands_text = ""
     if commands_path.exists():
-
+        commands_text = commands_path.read_text()
         log.info("found commands file", commands_file=str(commands_path))
 
+    # Parse instructions
+    general, instruction_sections = parse_sections(input_text)
+
     # Process general instructions for agents that support rules
-    general = extract_general(lines)
     if any(line.strip() for line in general):
         general_header = """
 ---
-description:
+description: General Instructions
+globs:
 alwaysApply: true
 ---
 """

@@ -204,26 +162,61 @@ alwaysApply: true
             agent_dirs["cursor"]["rules"] / "general.mdc", general_header, general
         )
     if "github" in agent_instances:
-        agent_instances["github"].write_general_instructions(general,
-
-    # Process
-
-
-
-
-
-
-
-            section_content = replace_header_with_proper_casing(
-                section_content, section_name
-            )
+        agent_instances["github"].write_general_instructions(general, working_dir)
+
+    # Process sections for agents that support rules
+    rules_sections: dict[str, list[str]] = {}
+    section_globs: dict[str, str | None] = {}
+
+    for section_name, section_data in instruction_sections.items():
+        content = section_data.content
+        glob_pattern = section_data.glob_pattern
 
+        if not any(line.strip() for line in content):
+            continue
+
+        rules_sections[section_name] = content
+        section_globs[section_name] = glob_pattern
+        filename = header_to_filename(section_name)
+
+        section_content = replace_header_with_proper_casing(content, section_name)
+
+        if glob_pattern is None:
+            # No directive = alwaysApply
+            if "cursor" in agent_instances and "github" in agent_instances:
+                process_unmapped_as_always_apply(
+                    section_name,
+                    section_content,
+                    agent_instances["cursor"],
+                    agent_instances["github"],
+                    agent_dirs["cursor"]["rules"],
+                    agent_dirs["github"]["rules"],
+                )
+            elif "cursor" in agent_instances:
+                agent_instances["cursor"].write_rule(
+                    section_content,
+                    filename,
+                    agent_dirs["cursor"]["rules"],
+                    glob_pattern=None,
+                    description=section_name,
+                )
+            elif "github" in agent_instances:
+                agent_instances["github"].write_rule(
+                    section_content,
+                    filename,
+                    agent_dirs["github"]["rules"],
+                    glob_pattern=None,
+                    description=section_name,
+                )
+        elif glob_pattern != "manual":
+            # Has glob pattern = file-specific rule
             if "cursor" in agent_instances:
                 agent_instances["cursor"].write_rule(
                     section_content,
                     filename,
                     agent_dirs["cursor"]["rules"],
                     glob_pattern,
+                    description=section_name,
                 )
             if "github" in agent_instances:
                 agent_instances["github"].write_rule(

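The branch above distinguishes three cases: no directive (always-apply rule for Cursor and Copilot), a glob directive (file-scoped rule), and a "manual" directive (recorded for the root document only, with no rule file written). The exact `.mdc` header that `write_rule` emits is not shown in this diff; extrapolating from the general-instructions header earlier, the two written cases would plausibly differ along these lines (the section name and glob below are invented for illustration):

```python
# Always-apply rule (glob_pattern is None)
always_apply_header = """---
description: Python Conventions
globs:
alwaysApply: true
---
"""

# File-scoped rule (glob_pattern like "**/*.py")
glob_scoped_header = """---
description: Python Conventions
globs: **/*.py
alwaysApply: false
---
"""
```
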
@@ -231,73 +224,40 @@ alwaysApply: true
                     filename,
                     agent_dirs["github"]["rules"],
                     glob_pattern,
+                    description=section_name,
                 )
 
-    for section_name in section_globs:
-        if section_name not in found_sections:
-            log.warning("section not found in file", section=section_name)
-
-    # Process unmapped sections for agents that support rules
-    if "cursor" in agent_instances or "github" in agent_instances:
-        for line in lines:
-            if line.startswith("## "):
-                section_name = line.strip()[3:]
-                if not any(
-                    section_name.lower() == mapped_section.lower()
-                    for mapped_section in section_globs
-                ):
-                    log.warning(
-                        "unmapped section in instructions.md, treating as always-apply rule",
-                        section=section_name,
-                    )
-                    section_content = extract_section(lines, f"## {section_name}")
-
-                    if "cursor" in agent_instances and "github" in agent_instances:
-                        process_unmapped_as_always_apply(
-                            section_name,
-                            section_content,
-                            agent_instances["cursor"],
-                            agent_instances["github"],
-                            agent_dirs["cursor"]["rules"],
-                            agent_dirs["github"]["rules"],
-                        )
-                    elif "cursor" in agent_instances:
-                        # Only cursor - write just cursor rules
-                        if any(line.strip() for line in section_content):
-                            filename = header_to_filename(section_name)
-                            section_content = replace_header_with_proper_casing(
-                                section_content, section_name
-                            )
-                            agent_instances["cursor"].write_rule(
-                                section_content,
-                                filename,
-                                agent_dirs["cursor"]["rules"],
-                                glob_pattern=None,
-                            )
-                    elif "github" in agent_instances:
-                        # Only github - write just github rules
-                        if any(line.strip() for line in section_content):
-                            filename = header_to_filename(section_name)
-                            section_content = replace_header_with_proper_casing(
-                                section_content, section_name
-                            )
-                            agent_instances["github"].write_rule(
-                                section_content,
-                                filename,
-                                agent_dirs["github"]["rules"],
-                                glob_pattern=None,
-                            )
-
     # Process commands for all agents
-
-
-
+    command_sections_data = {}
+    command_sections = {}
+    if commands_text:
+        _, command_sections_data = parse_sections(commands_text)
+    agents_with_commands = [
+        agent_instances[name]
+        for name in agents_to_process
+        if agent_instances[name].commands_dir
+    ]
     command_dirs = {
-        name: agent_dirs[name]["commands"]
+        name: agent_dirs[name]["commands"]
+        for name in agents_to_process
+        if "commands" in agent_dirs[name]
     }
 
-    for section_name,
-
+    for section_name, section_data in command_sections_data.items():
+        command_sections[section_name] = section_data.content
+        process_command_section(
+            section_name, section_data.content, agents_with_commands, command_dirs
+        )
+
+    # Generate root documentation (CLAUDE.md, GEMINI.md, etc.)
+    for agent_name, agent_inst in agent_instances.items():
+        agent_inst.generate_root_doc(
+            general,
+            rules_sections,
+            command_sections,
+            working_dir,
+            section_globs=section_globs,
+        )
 
     # Build log message and user output based on processed agents
     log_data = {"agent": agent}

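The per-agent `generate_root_doc` hook is new in this release; its implementations live in `llm_ide_rules/agents/*.py` and are not shown here. A hypothetical sketch of what such a hook could do for an AGENTS.md-style agent, matching only the call signature visible above (written as a free function for brevity; the real hook is a method on the agent classes, and everything beyond the signature is assumption):

```python
from pathlib import Path


def generate_root_doc(
    general: list[str],
    rules_sections: dict[str, list[str]],
    command_sections: dict[str, list[str]],
    working_dir: Path,
    *,
    section_globs: dict[str, str | None] | None = None,
) -> None:
    """Assemble a single root document from the parsed sections."""
    parts = ["\n".join(general)]
    for _name, lines in rules_sections.items():
        parts.append("\n".join(lines))
    # command_sections and section_globs could be summarized as well;
    # they are ignored in this sketch.
    (working_dir / "AGENTS.md").write_text("\n\n".join(parts) + "\n")
```
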
@@ -308,13 +268,52 @@ alwaysApply: true
             log_data[f"{agent_name}_rules"] = str(agent_dirs[agent_name]["rules"])
             log_data[f"{agent_name}_commands"] = str(agent_dirs[agent_name]["commands"])
             created_dirs.append(f".{agent_name}/")
-
+        elif agent_dirs[agent_name]:
+            # Has commands directory
             log_data[f"{agent_name}_commands"] = str(agent_dirs[agent_name]["commands"])
             created_dirs.append(f".{agent_name}/")
+        # else: agent has no directories (e.g., agents which only generates root doc)
 
-    if
-
-
+    if "gemini" in agent_instances:
+        if not agent_instances["gemini"].check_agents_md_config(working_dir):
+            typer.secho(
+                "Warning: Gemini CLI configuration missing for AGENTS.md.",
+                fg=typer.colors.YELLOW,
+            )
+            typer.secho(
+                "Run this command to configure it:",
+                fg=typer.colors.YELLOW,
+            )
+            typer.secho(
+                "  gemini config set agent.instructionFile AGENTS.md",
+                fg=typer.colors.YELLOW,
+            )
+
+    if created_dirs:
+        if len(created_dirs) == 1:
+            success_msg = f"Created files in {created_dirs[0]} directory"
+            typer.echo(typer.style(success_msg, fg=typer.colors.GREEN))
+        else:
+            success_msg = f"Created files in {', '.join(created_dirs)} directories"
+            typer.echo(typer.style(success_msg, fg=typer.colors.GREEN))
     else:
-
+        # No directories created (e.g., agents that only generate root docs)
+        success_msg = "Created root documentation files"
         typer.echo(typer.style(success_msg, fg=typer.colors.GREEN))
+
+
+def explode_main(
+    input_file: Annotated[
+        str, typer.Argument(help="Input markdown file")
+    ] = "instructions.md",
+    agent: Annotated[
+        str,
+        typer.Option(
+            "--agent",
+            "-a",
+            help="Agent to explode for (cursor, github, claude, gemini, or all)",
+        ),
+    ] = "all",
+) -> None:
+    """Convert instruction file to separate rule files."""
+    explode_implementation(input_file, agent, Path.cwd())