monoco-toolkit 0.2.8__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- monoco/cli/project.py +35 -31
- monoco/cli/workspace.py +26 -16
- monoco/core/agent/__init__.py +0 -2
- monoco/core/agent/action.py +44 -20
- monoco/core/agent/adapters.py +20 -16
- monoco/core/agent/protocol.py +5 -4
- monoco/core/agent/state.py +21 -21
- monoco/core/config.py +90 -33
- monoco/core/execution.py +21 -16
- monoco/core/feature.py +8 -5
- monoco/core/git.py +61 -30
- monoco/core/hooks.py +57 -0
- monoco/core/injection.py +47 -44
- monoco/core/integrations.py +50 -35
- monoco/core/lsp.py +12 -1
- monoco/core/output.py +35 -16
- monoco/core/registry.py +3 -2
- monoco/core/setup.py +190 -124
- monoco/core/skills.py +121 -107
- monoco/core/state.py +12 -10
- monoco/core/sync.py +85 -56
- monoco/core/telemetry.py +10 -6
- monoco/core/workspace.py +26 -19
- monoco/daemon/app.py +123 -79
- monoco/daemon/commands.py +14 -13
- monoco/daemon/models.py +11 -3
- monoco/daemon/reproduce_stats.py +8 -8
- monoco/daemon/services.py +32 -33
- monoco/daemon/stats.py +59 -40
- monoco/features/config/commands.py +38 -25
- monoco/features/i18n/adapter.py +4 -5
- monoco/features/i18n/commands.py +83 -49
- monoco/features/i18n/core.py +94 -54
- monoco/features/issue/adapter.py +6 -7
- monoco/features/issue/commands.py +468 -272
- monoco/features/issue/core.py +419 -312
- monoco/features/issue/domain/lifecycle.py +33 -23
- monoco/features/issue/domain/models.py +71 -38
- monoco/features/issue/domain/parser.py +92 -69
- monoco/features/issue/domain/workspace.py +19 -16
- monoco/features/issue/engine/__init__.py +3 -3
- monoco/features/issue/engine/config.py +18 -25
- monoco/features/issue/engine/machine.py +72 -39
- monoco/features/issue/engine/models.py +4 -2
- monoco/features/issue/linter.py +287 -157
- monoco/features/issue/lsp/definition.py +26 -19
- monoco/features/issue/migration.py +45 -34
- monoco/features/issue/models.py +29 -13
- monoco/features/issue/monitor.py +24 -8
- monoco/features/issue/resources/en/SKILL.md +6 -2
- monoco/features/issue/validator.py +395 -208
- monoco/features/skills/__init__.py +0 -1
- monoco/features/skills/core.py +24 -18
- monoco/features/spike/adapter.py +4 -5
- monoco/features/spike/commands.py +51 -38
- monoco/features/spike/core.py +24 -16
- monoco/main.py +34 -21
- {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.1.dist-info}/METADATA +1 -1
- monoco_toolkit-0.3.1.dist-info/RECORD +84 -0
- monoco_toolkit-0.2.8.dist-info/RECORD +0 -83
- {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.1.dist-info}/WHEEL +0 -0
- {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.1.dist-info}/entry_points.txt +0 -0
- {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.1.dist-info}/licenses/LICENSE +0 -0
monoco/features/i18n/commands.py
CHANGED
@@ -4,20 +4,36 @@ from rich.console import Console
 from rich.table import Table
 from rich.panel import Panel
 
-from typing import Optional, Annotated
 from monoco.core.config import get_config, find_monoco_root
 from monoco.core.output import AgentOutput, OutputManager
 from . import core
 
-app = typer.Typer(…
+app = typer.Typer(
+    help="Management tools for Documentation Internationalization (i18n)."
+)
 console = Console()
 
+
 @app.command("scan")
 def scan(
-    root: str = typer.Option(…
-    …
-    …
-    …
+    root: str = typer.Option(
+        None,
+        "--root",
+        help="Target root directory to scan. Defaults to the project root.",
+    ),
+    limit: int = typer.Option(
+        10,
+        "--limit",
+        help="Maximum number of missing files to display. Use 0 for unlimited.",
+    ),
+    check_issues: bool = typer.Option(
+        False, "--check-issues", help="Include Issues directory in the scan."
+    ),
+    check_source_lang: bool = typer.Option(
+        False,
+        "--check-source-lang",
+        help="Verify if source files content matches source language (heuristic).",
+    ),
    json: AgentOutput = False,
 ):
     """
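The expanded signature is standard Typer style: default value, flag name, and help text each on their own line. A minimal, self-contained sketch of the same pattern (illustrative command and option names, not the packaged module):

```python
# Minimal sketch of the multi-line typer.Option style adopted above.
# Standalone and illustrative only; the real command lives in
# monoco/features/i18n/commands.py.
import typer

app = typer.Typer(help="Demo of multi-line option declarations.")


@app.command("scan")
def scan(
    root: str = typer.Option(
        None,
        "--root",
        help="Target root directory to scan.",
    ),
    limit: int = typer.Option(
        10,
        "--limit",
        help="Maximum number of rows to display. Use 0 for unlimited.",
    ),
):
    # A None default lets the command fall back to a discovered root at runtime.
    typer.echo(f"root={root!r} limit={limit}")


if __name__ == "__main__":
    app()
```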
@@ -39,15 +55,21 @@ def scan(
     config = get_config(project_root=str(target_root))
     target_langs = config.i18n.target_langs
     source_lang = config.i18n.source_lang
-
+
     if not OutputManager.is_agent_mode():
-        console.print(…
-        …
-        …
+        console.print(
+            f"Scanning i18n coverage in [bold cyan]{target_root}[/bold cyan]..."
+        )
+        console.print(
+            f"Target Languages: [bold yellow]{', '.join(target_langs)}[/bold yellow] (Source: {source_lang})"
+        )
+
     all_files = core.discover_markdown_files(target_root, include_issues=check_issues)
-
-    source_files = [
-        …
+
+    source_files = [
+        f for f in all_files if not core.is_translation_file(f, target_langs)
+    ]
+
     # Store missing results: { file_path: [missing_langs] }
     missing_map = {}
     # Store lang mismatch results: [file_path]
@@ -55,16 +77,18 @@ def scan(
 
     total_checks = len(source_files) * len(target_langs)
     found_count = 0
-
+
     for f in source_files:
         # Check translation existence
-        missing_langs = core.check_translation_exists(…
+        missing_langs = core.check_translation_exists(
+            f, target_root, target_langs, source_lang
+        )
         if missing_langs:
             missing_map[f] = missing_langs
-            found_count += …
+            found_count += len(target_langs) - len(missing_langs)
         else:
             found_count += len(target_langs)
-
+
         # Check source content language if enabled
         if check_source_lang:
             if not core.is_content_source_language(f, source_lang):
@@ -75,10 +99,10 @@ def scan(
                 except:
                     detected = "unknown"
                 lang_mismatch_files.append((f, detected))
-
+
     # Reporting
     coverage = (found_count / total_checks * 100) if total_checks > 0 else 100
-
+
     # Sort missing_map by file path for stable output
     sorted_missing = sorted(missing_map.items(), key=lambda x: str(x[0]))
 
@@ -94,23 +118,27 @@ def scan(
                 "found_translations": found_count,
                 "coverage_percent": round(coverage, 2),
                 "missing_files_count": len(sorted_missing),
-                "mismatch_files_count": len(lang_mismatch_files)
+                "mismatch_files_count": len(lang_mismatch_files),
             },
             "missing_files": [
                 {
                     "file": str(f.relative_to(target_root)),
                     "missing_langs": langs,
                     "expected_paths": [
-                        str(…
+                        str(
+                            core.get_target_translation_path(
+                                f, target_root, l, source_lang
+                            ).relative_to(target_root)
+                        )
                        for l in langs
-                    ]
+                    ],
                 }
                 for f, langs in sorted_missing
             ],
             "language_mismatches": [
                 {"file": str(f.relative_to(target_root)), "detected": detected}
                 for f, detected in lang_mismatch_files
-            ]
+            ],
         }
         OutputManager.print(report)
         return
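In agent mode the command now emits a machine-readable report instead of Rich tables. Pieced together from the keys visible in this hunk (the enclosing key of the first block is cut off above, so `summary` below is a guess, and all values are invented):

```python
# Approximate shape of the agent-mode report, inferred from the hunk above.
# "summary" is an assumed key (its opening line is outside the hunk); the
# values are illustrative only.
report = {
    "summary": {
        "found_translations": 42,
        "coverage_percent": 87.5,
        "missing_files_count": 3,
        "mismatch_files_count": 1,
    },
    "missing_files": [
        {
            "file": "docs/guide.md",
            "missing_langs": ["zh"],
            "expected_paths": ["docs/zh/guide.md"],
        },
    ],
    "language_mismatches": [
        {"file": "docs/notes.md", "detected": "zh"},
    ],
}
```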
@@ -120,62 +148,68 @@ def scan(
     total_missing_files = len(sorted_missing)
     display_limit = limit if limit > 0 else total_missing_files
     displayed_missing = sorted_missing[:display_limit]
-
+
     # Build table title with count info
     table_title = "i18n Availability Report"
     if total_missing_files > 0:
         if display_limit < total_missing_files:
             table_title = f"i18n Availability Report (Showing {display_limit} / {total_missing_files} missing files)"
         else:
-            table_title = …
-            …
+            table_title = (
+                f"i18n Availability Report ({total_missing_files} missing files)"
+            )
+
     table = Table(title=table_title, box=None)
     table.add_column("Source File", style="cyan", no_wrap=True, overflow="fold")
     table.add_column("Missing Languages", style="red")
     table.add_column("Expected Paths", style="dim", no_wrap=True, overflow="fold")
-
+
     for f, langs in displayed_missing:
         rel_path = f.relative_to(target_root)
         expected_paths = []
         for lang in langs:
             target = core.get_target_translation_path(f, target_root, lang, source_lang)
             expected_paths.append(str(target.relative_to(target_root)))
-
-        table.add_row(
-            …
-            ", ".join(langs),
-            "\n".join(expected_paths)
-        )
-
+
+        table.add_row(str(rel_path), ", ".join(langs), "\n".join(expected_paths))
+
     console.print(table)
-
+
     # Show Language Mismatch Warnings
     if lang_mismatch_files:
         console.print("\n")
-        mismatch_table = Table(…
+        mismatch_table = Table(
+            title=f"Source Language Mismatch (Expected: {source_lang})", box=None
+        )
         mismatch_table.add_column("File", style="yellow")
         mismatch_table.add_column("Detected", style="red")
-
+
         limit_mismatch = 10
         for f, detected in lang_mismatch_files[:limit_mismatch]:
-            …
-            …
+            mismatch_table.add_row(str(f.relative_to(target_root)), detected)
+
         console.print(mismatch_table)
         if len(lang_mismatch_files) > limit_mismatch:
-            …
+            console.print(
+                f"[dim]... and {len(lang_mismatch_files) - limit_mismatch} more.[/dim]"
+            )
 
     # Show hint if output was truncated
     if display_limit < total_missing_files:
-        console.print(…
-        …
+        console.print(
+            f"\n[dim]💡 Tip: Use [bold]--limit 0[/bold] to show all {total_missing_files} missing files.[/dim]\n"
+        )
+
     # Calculate partial vs complete missing
-    partial_missing = sum(…
+    partial_missing = sum(
+        1 for _, langs in sorted_missing if len(langs) < len(target_langs)
+    )
     complete_missing = total_missing_files - partial_missing
-
+
     status_color = "green" if coverage == 100 else "yellow"
     if coverage < 50:
         status_color = "red"
-
+
     summary_lines = [
         f"Total Source Files: {len(source_files)}",
         f"Target Languages: {len(target_langs)}",
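The partial/complete split introduced here counts a file as "partial missing" when at least one target translation exists and "complete missing" when every target language is absent. A toy run of the same expression:

```python
# How the partial/complete split works, using the same expression as the
# hunk above. Toy data; file names are illustrative.
target_langs = ["zh", "ja"]
sorted_missing = [
    ("docs/a.md", ["zh"]),        # one of two langs missing -> partial
    ("docs/b.md", ["zh", "ja"]),  # all langs missing -> complete
]
partial_missing = sum(
    1 for _, langs in sorted_missing if len(langs) < len(target_langs)
)
complete_missing = len(sorted_missing) - partial_missing
assert (partial_missing, complete_missing) == (1, 1)
```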
@@ -183,16 +217,16 @@ def scan(
         f"Found Translations: {found_count}",
         f"Missing Files: {total_missing_files}",
     ]
-
+
     if total_missing_files > 0:
         summary_lines.append(f" - Partial Missing: {partial_missing}")
         summary_lines.append(f" - Complete Missing: {complete_missing}")
-
+
     if lang_mismatch_files:
         summary_lines.append(f"Language Mismatches: {len(lang_mismatch_files)}")
-
+
     summary_lines.append(f"Coverage: [{status_color}]{coverage:.1f}%[/{status_color}]")
-
+
     summary = "\n".join(summary_lines)
     console.print(Panel(summary, title="I18N STATUS", expand=False))
 
monoco/features/i18n/core.py
CHANGED
@@ -1,24 +1,44 @@
-import os
 import fnmatch
 from pathlib import Path
-from typing import List, …
+from typing import List, Optional
 import re
 
 DEFAULT_EXCLUDES = [
-    ".git",
-    ".…
+    ".git",
+    ".reference",
+    "dist",
+    "build",
+    "node_modules",
+    "__pycache__",
+    ".agent",
+    ".mono",
+    ".venv",
+    "venv",
+    "ENV",
     # Agent Integration Directories
-    ".claude",
+    ".claude",
+    ".gemini",
+    ".qwen",
+    ".openai",
+    ".cursor",
+    ".vscode",
+    ".idea",
+    ".fleet",
     # System Prompts & Agent Configs
-    "AGENTS.md",
+    "AGENTS.md",
+    "CLAUDE.md",
+    "GEMINI.md",
+    "QWEN.md",
+    "SKILL.md",
 ]
 
+
 def load_gitignore_patterns(root: Path) -> List[str]:
     """Load patterns from .gitignore file."""
     gitignore_path = root / ".gitignore"
     if not gitignore_path.exists():
         return []
-
+
     patterns = []
     try:
         with open(gitignore_path, "r", encoding="utf-8") as f:
@@ -33,17 +53,20 @@ def load_gitignore_patterns(root: Path) -> List[str]:
             pass
     return patterns
 
-def is_excluded(path: Path, root: Path, patterns: List[str], excludes: Optional[…
+
+def is_excluded(
+    path: Path, root: Path, patterns: List[str], excludes: Optional[List[str]] = None
+) -> bool:
     """Check if a path should be excluded based on patterns and defaults."""
     rel_path = str(path.relative_to(root))
-
+
     final_excludes = excludes if excludes is not None else DEFAULT_EXCLUDES
 
     # 1. Check default excludes (exact match for any path component, case-insensitive)
     for part in path.parts:
         if part.lower() in [e.lower() for e in final_excludes]:
             return True
-
+
     # 2. Check gitignore patterns
     for pattern in patterns:
         # Check against relative path
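Exclusion short-circuits on path components before gitignore patterns are consulted: any component (directory or file name) that equals an exclude entry, case-insensitively, rules the path out. A small sketch of that first check (hypothetical helper name; the real logic sits inside `is_excluded`):

```python
# Sketch of the component-based exclusion check above, extracted for
# illustration. component_excluded is a hypothetical helper name.
from pathlib import Path

EXCLUDES = [".git", "node_modules", "AGENTS.md"]


def component_excluded(path: Path, excludes: list) -> bool:
    # Case-insensitive match of every path component against the excludes.
    lowered = [e.lower() for e in excludes]
    return any(part.lower() in lowered for part in path.parts)


assert component_excluded(Path("pkg/node_modules/x/README.md"), EXCLUDES)
assert component_excluded(Path("docs/AGENTS.md"), EXCLUDES)  # file names match too
assert not component_excluded(Path("docs/guide.md"), EXCLUDES)
```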
@@ -65,27 +88,29 @@ def is_excluded(path: Path, root: Path, patterns: List[str], excludes: Optional[
 
     return False
 
+
 def discover_markdown_files(root: Path, include_issues: bool = False) -> List[Path]:
     """Recursively find markdown files while respecting exclusion rules."""
     patterns = load_gitignore_patterns(root)
     all_md_files = []
-
+
     excludes = list(DEFAULT_EXCLUDES)
     if not include_issues:
         excludes.append("Issues")
 
-    # We walk to ensure we can skip directories early if needed,
+    # We walk to ensure we can skip directories early if needed,
     # but for now rglob + filter is simpler.
     for p in root.rglob("*.md"):
         if p.is_file() and not is_excluded(p, root, patterns, excludes=excludes):
             all_md_files.append(p)
-
+
     return sorted(all_md_files)
 
+
 def is_translation_file(path: Path, target_langs: List[str]) -> bool:
     """Check if the given path is a translation file (target)."""
     normalized_langs = [lang.lower() for lang in target_langs]
-
+
     # Suffix check (case-insensitive)
     stem_upper = path.stem.upper()
     for lang in normalized_langs:
@@ -95,21 +120,24 @@ def is_translation_file(path: Path, target_langs: List[str]) -> bool:
     # Generic Suffix Check: Detect any _XX suffix where XX is 2-3 letters
     # This prevents files like README_ZH.md from being treated as source files
     # even if 'zh' is not in target_langs (e.g. when scanning for 'en' gaps).
-    if re.search(r…
+    if re.search(r"_[A-Z]{2,3}$", stem_upper):
         return True
-
+
     # Subdir check (case-insensitive)
     path_parts_lower = [p.lower() for p in path.parts]
     for lang in normalized_langs:
         if lang in path_parts_lower:
             return True
-
+
     return False
 
-def get_target_translation_path(path: Path, root: Path, lang: str, source_lang:…
+
+def get_target_translation_path(
+    path: Path, root: Path, lang: str, source_lang: str = "en"
+) -> Path:
     """Calculate the expected translation path for a specific language."""
     lang = lang.lower()
-
+
     # Parallel Directory Mode: docs/en/... -> docs/zh/...
     path_parts = list(path.parts)
     # Search for source_lang component to replace
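The generic-suffix rule is the behavioral change worth calling out: any stem ending in `_XX` or `_XXX` (two or three letters) is now treated as a translation even when that language is not configured. In isolation (hypothetical helper, same regex as the hunk):

```python
# The generic-suffix rule above, isolated for illustration. has_lang_suffix
# is a hypothetical helper; the regex is the one added in the diff.
import re
from pathlib import Path


def has_lang_suffix(path: Path) -> bool:
    return re.search(r"_[A-Z]{2,3}$", path.stem.upper()) is not None


assert has_lang_suffix(Path("README_ZH.md"))
assert has_lang_suffix(Path("guide_fra.md"))   # case-insensitive via .upper()
assert not has_lang_suffix(Path("README.md"))
assert not has_lang_suffix(Path("CODE_OF_CONDUCT.md"))  # suffix too long to match
```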
@@ -123,27 +151,37 @@ def get_target_translation_path(path: Path, root: Path, lang: str, source_lang:
     stem = path.stem
     source_suffix = f"_{source_lang.upper()}"
     if stem.upper().endswith(source_suffix):
-        …
-        …
+        stem = stem[: -len(source_suffix)]
+
     if path.parent == root:
         return path.with_name(f"{stem}_{lang.upper()}{path.suffix}")
-
+
     # Subdir Mode: for documentation directories (fallback)
     return path.parent / lang / path.name
 
-def check_translation_exists(path: Path, root: Path, target_langs: List[str], so…
+
+def check_translation_exists(
+    path: Path, root: Path, target_langs: List[str], source_lang: str = "en"
+) -> List[str]:
     """
     Verify which target languages have translations.
     Returns a list of missing language codes.
     """
     if is_translation_file(path, target_langs):
-        return []
-
+        return []  # Already a translation, skip
+
     # Special handling for standard files: always treat as EN source
     effective_source_lang = source_lang
-    if path.name.upper() in […
+    if path.name.upper() in [
+        "README.MD",
+        "CHANGELOG.MD",
+        "CODE_OF_CONDUCT.MD",
+        "CONTRIBUTING.MD",
+        "LICENSE.MD",
+        "SECURITY.MD",
+    ]:
         effective_source_lang = "en"
-
+
     missing = []
     for lang in target_langs:
         # Skip if target language matches the effective source language
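Only the suffix branch of `get_target_translation_path` is fully visible in this hunk, so the sketch below reproduces just that branch; the parallel-directory mode (`docs/en/... -> docs/zh/...`) is noted in comments. Hypothetical re-implementation for illustration:

```python
# Suffix mode as shown in the hunk above, extracted for illustration.
# suffix_mode_target is a hypothetical helper, not the packaged function.
from pathlib import Path


def suffix_mode_target(path: Path, lang: str, source_lang: str = "en") -> Path:
    stem = path.stem
    source_suffix = f"_{source_lang.upper()}"
    if stem.upper().endswith(source_suffix):
        stem = stem[: -len(source_suffix)]  # strip the source-language suffix
    return path.with_name(f"{stem}_{lang.upper()}{path.suffix}")


assert suffix_mode_target(Path("README.md"), "zh") == Path("README_ZH.md")
assert suffix_mode_target(Path("README_EN.md"), "zh") == Path("README_ZH.md")
# Non-root files fall back to a language subdirectory instead:
#   docs/guide.md -> docs/zh/guide.md
```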
@@ -155,41 +193,43 @@ def check_translation_exists(path: Path, root: Path, target_langs: List[str], so
             missing.append(lang)
     return missing
 
+
 def detect_language(content: str) -> str:
     """
     Detect the language of the content using simple heuristics.
     Returns: 'zh', 'en', or 'unknown'
     """
     if not content:
-        return …
-
+        return "unknown"
+
     # Strip YAML Frontmatter if present
     # Matches --- at start, followed by anything, followed by ---
-    frontmatter_pattern = re.compile(r…
-    content = frontmatter_pattern.sub(…
-
+    frontmatter_pattern = re.compile(r"^---\n.*?\n---\n", re.DOTALL)
+    content = frontmatter_pattern.sub("", content)
+
     if not content.strip():
-        return …
+        return "unknown"
 
     # 1. Check for CJK characters (Chinese/Japanese/Korean)
     # Range: \u4e00-\u9fff (Common CJK Unified Ideographs)
     # Heuristic: If CJK count > threshold, it's likely Asian (we assume ZH for now in this context)
     total_chars = len(content)
-    cjk_count = sum(1 for c in content if …
-
+    cjk_count = sum(1 for c in content if "\u4e00" <= c <= "\u9fff")
+
     # If > 5% chars are CJK, highly likely to be Chinese document
     if total_chars > 0 and cjk_count / total_chars > 0.05:
-        return …
-
+        return "zh"
+
     # 2. Check for English
     # Heuristic: High ASCII ratio and low CJK
     non_ascii = sum(1 for c in content if ord(c) > 127)
-
+
     # If < 10% non-ASCII, likely English (or code)
     if total_chars > 0 and non_ascii / total_chars < 0.1:
-        return …
-
-    return …
+        return "en"
+
+    return "unknown"
+
 
 def is_content_source_language(path: Path, source_lang: str = "en") -> bool:
     """
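The detector reduces to two thresholds: more than 5% CJK characters classifies the text as `zh`; otherwise fewer than 10% non-ASCII characters classifies it as `en`; anything else is `unknown`. Applied to toy strings:

```python
# The two thresholds from detect_language above, applied to toy strings.
zh = "这是一个测试文档。" * 3
en = "This is a test document." * 3

cjk = sum(1 for c in zh if "\u4e00" <= c <= "\u9fff")
assert cjk / len(zh) > 0.05          # would classify as "zh"

non_ascii = sum(1 for c in en if ord(c) > 127)
assert non_ascii / len(en) < 0.1     # would classify as "en"
```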
@@ -202,22 +242,24 @@ def is_content_source_language(path: Path, source_lang: str = "en") -> bool:
 
         content = path.read_text(encoding="utf-8")
         detected = detect_language(content)
-
+
         # 'unknown' is leniently accepted as valid to avoid false positives on code-heavy files
-        if detected == …
+        if detected == "unknown":
             return True
-
+
         # Normalize source_lang
         expected = source_lang.lower()
-        if expected == …
-            return detected == …
-        elif expected == …
-            return detected == …
-
+        if expected == "zh" or expected == "cn":
+            return detected == "zh"
+        elif expected == "en":
+            return detected == "en"
+
         # For other languages, we don't have detectors yet
         return True
     except Exception:
-        return True
+        return True  # Assume valid on error
+
+
 # ... (Existing code) ...
 
 SKILL_CONTENT = """---
@@ -257,8 +299,6 @@ def init(root: Path):
         pass
 
     return {
-        "skills": {
-            …
-        },
-        "prompts": {} # Handled by adapter via resource files
+        "skills": {"i18n": SKILL_CONTENT},
+        "prompts": {},  # Handled by adapter via resource files
    }
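The flattened return value is the feature-contribution contract in miniature: skills keyed by name, prompts deferred to the adapter. Sketched with a placeholder for the skill markdown defined earlier in the module:

```python
# Shape of init()'s return value after this change. SKILL_CONTENT here is a
# placeholder for the module's real skill markdown; init_payload is a
# hypothetical stand-in for the packaged init().
SKILL_CONTENT = "---\n...\n"  # placeholder


def init_payload() -> dict:
    return {
        "skills": {"i18n": SKILL_CONTENT},
        "prompts": {},  # Handled by adapter via resource files
    }
```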
monoco/features/issue/adapter.py
CHANGED
@@ -3,6 +3,7 @@ from typing import Dict
 from monoco.core.feature import MonocoFeature, IntegrationData
 from monoco.features.issue import core
 
+
 class IssueFeature(MonocoFeature):
     @property
     def name(self) -> str:
@@ -15,20 +16,18 @@ class IssueFeature(MonocoFeature):
     def integrate(self, root: Path, config: Dict) -> IntegrationData:
         # Determine language from config, default to 'en'
         lang = config.get("i18n", {}).get("source_lang", "en")
-
+
         # Current file is in monoco/features/issue/adapter.py
         # Resource path: monoco/features/issue/resources/{lang}/AGENTS.md
         base_dir = Path(__file__).parent / "resources"
-
+
         # Try specific language, fallback to 'en'
         prompt_file = base_dir / lang / "AGENTS.md"
         if not prompt_file.exists():
             prompt_file = base_dir / "en" / "AGENTS.md"
-
+
         content = ""
         if prompt_file.exists():
             content = prompt_file.read_text(encoding="utf-8").strip()
-
-        return IntegrationData(
-            system_prompts={"Issue Management": content}
-        )
+
+        return IntegrationData(system_prompts={"Issue Management": content})