monoco-toolkit 0.1.1__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. monoco/cli/__init__.py +0 -0
  2. monoco/cli/project.py +87 -0
  3. monoco/cli/workspace.py +46 -0
  4. monoco/core/agent/__init__.py +5 -0
  5. monoco/core/agent/action.py +144 -0
  6. monoco/core/agent/adapters.py +106 -0
  7. monoco/core/agent/protocol.py +31 -0
  8. monoco/core/agent/state.py +106 -0
  9. monoco/core/config.py +152 -17
  10. monoco/core/execution.py +62 -0
  11. monoco/core/feature.py +58 -0
  12. monoco/core/git.py +51 -2
  13. monoco/core/injection.py +196 -0
  14. monoco/core/integrations.py +234 -0
  15. monoco/core/lsp.py +61 -0
  16. monoco/core/output.py +13 -2
  17. monoco/core/registry.py +36 -0
  18. monoco/core/resources/en/AGENTS.md +8 -0
  19. monoco/core/resources/en/SKILL.md +66 -0
  20. monoco/core/resources/zh/AGENTS.md +8 -0
  21. monoco/core/resources/zh/SKILL.md +66 -0
  22. monoco/core/setup.py +88 -110
  23. monoco/core/skills.py +444 -0
  24. monoco/core/state.py +53 -0
  25. monoco/core/sync.py +224 -0
  26. monoco/core/telemetry.py +4 -1
  27. monoco/core/workspace.py +85 -20
  28. monoco/daemon/app.py +127 -58
  29. monoco/daemon/models.py +4 -0
  30. monoco/daemon/services.py +56 -155
  31. monoco/features/agent/commands.py +166 -0
  32. monoco/features/agent/doctor.py +30 -0
  33. monoco/features/config/commands.py +125 -44
  34. monoco/features/i18n/adapter.py +29 -0
  35. monoco/features/i18n/commands.py +89 -10
  36. monoco/features/i18n/core.py +113 -27
  37. monoco/features/i18n/resources/en/AGENTS.md +8 -0
  38. monoco/features/i18n/resources/en/SKILL.md +94 -0
  39. monoco/features/i18n/resources/zh/AGENTS.md +8 -0
  40. monoco/features/i18n/resources/zh/SKILL.md +94 -0
  41. monoco/features/issue/adapter.py +34 -0
  42. monoco/features/issue/commands.py +183 -65
  43. monoco/features/issue/core.py +172 -77
  44. monoco/features/issue/linter.py +215 -116
  45. monoco/features/issue/migration.py +134 -0
  46. monoco/features/issue/models.py +23 -19
  47. monoco/features/issue/monitor.py +94 -0
  48. monoco/features/issue/resources/en/AGENTS.md +15 -0
  49. monoco/features/issue/resources/en/SKILL.md +87 -0
  50. monoco/features/issue/resources/zh/AGENTS.md +15 -0
  51. monoco/features/issue/resources/zh/SKILL.md +114 -0
  52. monoco/features/issue/validator.py +269 -0
  53. monoco/features/pty/core.py +185 -0
  54. monoco/features/pty/router.py +138 -0
  55. monoco/features/pty/server.py +56 -0
  56. monoco/features/spike/adapter.py +30 -0
  57. monoco/features/spike/commands.py +45 -24
  58. monoco/features/spike/core.py +4 -21
  59. monoco/features/spike/resources/en/AGENTS.md +7 -0
  60. monoco/features/spike/resources/en/SKILL.md +74 -0
  61. monoco/features/spike/resources/zh/AGENTS.md +7 -0
  62. monoco/features/spike/resources/zh/SKILL.md +74 -0
  63. monoco/main.py +115 -2
  64. {monoco_toolkit-0.1.1.dist-info → monoco_toolkit-0.2.5.dist-info}/METADATA +2 -2
  65. monoco_toolkit-0.2.5.dist-info/RECORD +77 -0
  66. monoco_toolkit-0.1.1.dist-info/RECORD +0 -33
  67. {monoco_toolkit-0.1.1.dist-info → monoco_toolkit-0.2.5.dist-info}/WHEEL +0 -0
  68. {monoco_toolkit-0.1.1.dist-info → monoco_toolkit-0.2.5.dist-info}/entry_points.txt +0 -0
  69. {monoco_toolkit-0.1.1.dist-info → monoco_toolkit-0.2.5.dist-info}/licenses/LICENSE +0 -0
monoco/features/i18n/commands.py
@@ -4,7 +4,9 @@ from rich.console import Console
 from rich.table import Table
 from rich.panel import Panel
 
-from monoco.core.config import get_config
+from typing import Optional, Annotated
+from monoco.core.config import get_config, find_monoco_root
+from monoco.core.output import AgentOutput, OutputManager
 from . import core
 
 app = typer.Typer(help="Management tools for Documentation Internationalization (i18n).")
@@ -14,6 +16,9 @@ console = Console()
 def scan(
     root: str = typer.Option(None, "--root", help="Target root directory to scan. Defaults to the project root."),
     limit: int = typer.Option(10, "--limit", help="Maximum number of missing files to display. Use 0 for unlimited."),
+    check_issues: bool = typer.Option(False, "--check-issues", help="Include Issues directory in the scan."),
+    check_source_lang: bool = typer.Option(False, "--check-source-lang", help="Verify if source files content matches source language (heuristic)."),
+    json: AgentOutput = False,
 ):
     """
     Scan the project for internationalization (i18n) status.
@@ -25,36 +30,92 @@ def scan(
 
     Returns a report of files missing translations in the checking target languages.
     """
-    config = get_config()
-    target_root = Path(root).resolve() if root else Path(config.paths.root)
+    if root:
+        target_root = Path(root).resolve()
+    else:
+        target_root = find_monoco_root(Path.cwd())
+
+    # Load config with correct root
+    config = get_config(project_root=str(target_root))
     target_langs = config.i18n.target_langs
+    source_lang = config.i18n.source_lang
 
-    console.print(f"Scanning i18n coverage in [bold cyan]{target_root}[/bold cyan]...")
-    console.print(f"Target Languages: [bold yellow]{', '.join(target_langs)}[/bold yellow] (Source: {config.i18n.source_lang})")
+    if not OutputManager.is_agent_mode():
+        console.print(f"Scanning i18n coverage in [bold cyan]{target_root}[/bold cyan]...")
+        console.print(f"Target Languages: [bold yellow]{', '.join(target_langs)}[/bold yellow] (Source: {source_lang})")
 
-    all_files = core.discover_markdown_files(target_root)
+    all_files = core.discover_markdown_files(target_root, include_issues=check_issues)
 
     source_files = [f for f in all_files if not core.is_translation_file(f, target_langs)]
 
     # Store missing results: { file_path: [missing_langs] }
     missing_map = {}
+    # Store lang mismatch results: [file_path]
+    lang_mismatch_files = []
+
     total_checks = len(source_files) * len(target_langs)
     found_count = 0
 
     for f in source_files:
-        missing_langs = core.check_translation_exists(f, target_root, target_langs)
+        # Check translation existence
+        missing_langs = core.check_translation_exists(f, target_root, target_langs, source_lang)
         if missing_langs:
             missing_map[f] = missing_langs
             found_count += (len(target_langs) - len(missing_langs))
         else:
             found_count += len(target_langs)
 
+        # Check source content language if enabled
+        if check_source_lang:
+            if not core.is_content_source_language(f, source_lang):
+                # Try to detect actual language for better error message
+                try:
+                    content = f.read_text(encoding="utf-8")
+                    detected = core.detect_language(content)
+                except:
+                    detected = "unknown"
+                lang_mismatch_files.append((f, detected))
+
     # Reporting
     coverage = (found_count / total_checks * 100) if total_checks > 0 else 100
 
     # Sort missing_map by file path for stable output
     sorted_missing = sorted(missing_map.items(), key=lambda x: str(x[0]))
-
+
+    if OutputManager.is_agent_mode():
+        # JSON Output
+        report = {
+            "root": str(target_root),
+            "source_lang": source_lang,
+            "target_langs": target_langs,
+            "stats": {
+                "total_source_files": len(source_files),
+                "total_checks": total_checks,
+                "found_translations": found_count,
+                "coverage_percent": round(coverage, 2),
+                "missing_files_count": len(sorted_missing),
+                "mismatch_files_count": len(lang_mismatch_files)
+            },
+            "missing_files": [
+                {
+                    "file": str(f.relative_to(target_root)),
+                    "missing_langs": langs,
+                    "expected_paths": [
+                        str(core.get_target_translation_path(f, target_root, l, source_lang).relative_to(target_root))
+                        for l in langs
+                    ]
+                }
+                for f, langs in sorted_missing
+            ],
+            "language_mismatches": [
+                {"file": str(f.relative_to(target_root)), "detected": detected}
+                for f, detected in lang_mismatch_files
+            ]
+        }
+        OutputManager.print(report)
+        return
+
+    # Human Output
     # Apply limit
     total_missing_files = len(sorted_missing)
     display_limit = limit if limit > 0 else total_missing_files
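Reviewer note: in agent mode the command now emits the `report` dict above as machine-readable output instead of Rich tables. A hedged sketch of consuming it from a wrapper script follows; the `--json` flag name is an assumption read off the `json: AgentOutput` parameter, not a documented interface.

```python
import json
import subprocess

# Hypothetical consumer of `monoco i18n scan` in agent mode.
# Assumes the AgentOutput parameter surfaces as a --json flag and that
# OutputManager.print() writes the report dict as JSON on stdout.
proc = subprocess.run(
    ["monoco", "i18n", "scan", "--json"],
    capture_output=True,
    text=True,
)
report = json.loads(proc.stdout)

print(f"coverage: {report['stats']['coverage_percent']}%")
for item in report["missing_files"]:
    print(item["file"], "missing:", ", ".join(item["missing_langs"]))

# Exit code 1 signals missing translations or language mismatches,
# so a non-zero returncode is expected whenever gaps are reported.
```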
@@ -77,7 +138,7 @@ def scan(
         rel_path = f.relative_to(target_root)
         expected_paths = []
         for lang in langs:
-            target = core.get_target_translation_path(f, target_root, lang)
+            target = core.get_target_translation_path(f, target_root, lang, source_lang)
             expected_paths.append(str(target.relative_to(target_root)))
 
         table.add_row(
@@ -88,6 +149,21 @@
 
     console.print(table)
 
+    # Show Language Mismatch Warnings
+    if lang_mismatch_files:
+        console.print("\n")
+        mismatch_table = Table(title=f"Source Language Mismatch (Expected: {source_lang})", box=None)
+        mismatch_table.add_column("File", style="yellow")
+        mismatch_table.add_column("Detected", style="red")
+
+        limit_mismatch = 10
+        for f, detected in lang_mismatch_files[:limit_mismatch]:
+            mismatch_table.add_row(str(f.relative_to(target_root)), detected)
+
+        console.print(mismatch_table)
+        if len(lang_mismatch_files) > limit_mismatch:
+            console.print(f"[dim]... and {len(lang_mismatch_files) - limit_mismatch} more.[/dim]")
+
     # Show hint if output was truncated
     if display_limit < total_missing_files:
         console.print(f"\n[dim]💡 Tip: Use [bold]--limit 0[/bold] to show all {total_missing_files} missing files.[/dim]\n")
@@ -111,11 +187,14 @@ def scan(
     if total_missing_files > 0:
         summary_lines.append(f" - Partial Missing: {partial_missing}")
         summary_lines.append(f" - Complete Missing: {complete_missing}")
+
+    if lang_mismatch_files:
+        summary_lines.append(f"Language Mismatches: {len(lang_mismatch_files)}")
 
     summary_lines.append(f"Coverage: [{status_color}]{coverage:.1f}%[/{status_color}]")
 
     summary = "\n".join(summary_lines)
     console.print(Panel(summary, title="I18N STATUS", expand=False))
 
-    if missing_map:
+    if missing_map or lang_mismatch_files:
         raise typer.Exit(code=1)
monoco/features/i18n/core.py
@@ -1,9 +1,17 @@
 import os
 import fnmatch
 from pathlib import Path
-from typing import List, Set, Dict, Any
+from typing import List, Set, Dict, Any, Optional
+import re
 
-DEFAULT_EXCLUDES = [".git", ".reference", "dist", "build", "node_modules", "__pycache__", ".agent", ".mono", ".venv", "venv", "ENV", "Issues"]
+DEFAULT_EXCLUDES = [
+    ".git", ".reference", "dist", "build", "node_modules", "__pycache__",
+    ".agent", ".mono", ".venv", "venv", "ENV",
+    # Agent Integration Directories
+    ".claude", ".gemini", ".qwen", ".openai", ".cursor", ".vscode", ".idea", ".fleet",
+    # System Prompts & Agent Configs
+    "AGENTS.md", "CLAUDE.md", "GEMINI.md", "QWEN.md", "SKILL.md"
+]
 
 def load_gitignore_patterns(root: Path) -> List[str]:
     """Load patterns from .gitignore file."""
@@ -25,13 +33,15 @@ def load_gitignore_patterns(root: Path) -> List[str]:
             pass
     return patterns
 
-def is_excluded(path: Path, root: Path, patterns: List[str]) -> bool:
+def is_excluded(path: Path, root: Path, patterns: List[str], excludes: Optional[List[str]] = None) -> bool:
     """Check if a path should be excluded based on patterns and defaults."""
     rel_path = str(path.relative_to(root))
 
+    final_excludes = excludes if excludes is not None else DEFAULT_EXCLUDES
+
     # 1. Check default excludes (exact match for any path component, case-insensitive)
     for part in path.parts:
-        if part.lower() in [e.lower() for e in DEFAULT_EXCLUDES]:
+        if part.lower() in [e.lower() for e in final_excludes]:
            return True
 
     # 2. Check gitignore patterns
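The widened default list now covers agent-integration directories and prompt files via component match, and the new `excludes` parameter lets callers substitute their own list. A small sketch of the matching behavior, assuming the 0.2.5 wheel is importable:

```python
from pathlib import Path
from monoco.features.i18n.core import is_excluded

root = Path("/repo")

# Any path component matching an exclude entry (case-insensitive) wins,
# so files under .claude/ are now skipped by default.
print(is_excluded(Path("/repo/.claude/notes.md"), root, patterns=[]))  # True
print(is_excluded(Path("/repo/docs/guide.md"), root, patterns=[]))     # False

# Passing excludes= replaces DEFAULT_EXCLUDES entirely for this call.
print(is_excluded(Path("/repo/Issues/0001.md"), root, [], excludes=["Issues"]))  # True
```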
@@ -55,15 +65,19 @@ def is_excluded(path: Path, root: Path, patterns: List[str]) -> bool:
 
     return False
 
-def discover_markdown_files(root: Path) -> List[Path]:
+def discover_markdown_files(root: Path, include_issues: bool = False) -> List[Path]:
     """Recursively find markdown files while respecting exclusion rules."""
     patterns = load_gitignore_patterns(root)
     all_md_files = []
 
+    excludes = list(DEFAULT_EXCLUDES)
+    if not include_issues:
+        excludes.append("Issues")
+
     # We walk to ensure we can skip directories early if needed,
     # but for now rglob + filter is simpler.
     for p in root.rglob("*.md"):
-        if p.is_file() and not is_excluded(p, root, patterns):
+        if p.is_file() and not is_excluded(p, root, patterns, excludes=excludes):
             all_md_files.append(p)
 
     return sorted(all_md_files)
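Note the behavioral shift here: `Issues` left `DEFAULT_EXCLUDES` and is now appended per call unless `include_issues=True`, which is what the new `--check-issues` CLI flag toggles. Illustrative usage, assuming the package is installed:

```python
from pathlib import Path
from monoco.features.i18n.core import discover_markdown_files

root = Path(".")

# Default: Issues/ is excluded, matching pre-0.2.5 behavior.
docs_only = discover_markdown_files(root)

# Opt in to scanning issue markdown as well (CLI: `monoco i18n scan --check-issues`).
with_issues = discover_markdown_files(root, include_issues=True)

print(len(with_issues) - len(docs_only), "markdown files found under Issues/")
```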
@@ -77,6 +91,12 @@ def is_translation_file(path: Path, target_langs: List[str]) -> bool:
     for lang in normalized_langs:
         if stem_upper.endswith(f"_{lang.upper()}"):
             return True
+
+    # Generic Suffix Check: Detect any _XX suffix where XX is 2-3 letters
+    # This prevents files like README_ZH.md from being treated as source files
+    # even if 'zh' is not in target_langs (e.g. when scanning for 'en' gaps).
+    if re.search(r'_[A-Z]{2,3}$', stem_upper):
+        return True
 
     # Subdir check (case-insensitive)
     path_parts_lower = [p.lower() for p in path.parts]
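One consequence worth flagging in review: the generic check fires on any 2-3 letter upper-case suffix, including non-language stems. A standalone sketch of the regex (illustrative helper, not package code):

```python
import re

def has_lang_suffix(stem: str) -> bool:
    # Same pattern as the diff: a trailing _XX or _XXX on the upper-cased stem.
    return re.search(r'_[A-Z]{2,3}$', stem.upper()) is not None

assert has_lang_suffix("README_ZH")        # treated as a translation
assert has_lang_suffix("guide_fra")        # case-insensitive via .upper()
assert not has_lang_suffix("SETUP")        # no suffix: stays a source file
assert not has_lang_suffix("NOTES_2024")   # digits never match
assert has_lang_suffix("HOW_TO")           # false positive: _TO looks like a code
```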
@@ -86,29 +106,32 @@ def is_translation_file(path: Path, target_langs: List[str]) -> bool:
 
     return False
 
-def get_target_translation_path(path: Path, root: Path, lang: str) -> Path:
+def get_target_translation_path(path: Path, root: Path, lang: str, source_lang: str = "en") -> Path:
     """Calculate the expected translation path for a specific language."""
     lang = lang.lower()
 
     # Parallel Directory Mode: docs/en/... -> docs/zh/...
-    # We assume 'en' is the source language for now.
     path_parts = list(path.parts)
-    # Search for 'en' component to replace
-    # We iterate from root relative parts to be safe, but simple replacement of the first 'en'
-    # component (if not part of filename) is a good heuristic for docs structure.
+    # Search for source_lang component to replace
     for i, part in enumerate(path_parts):
-        if part.lower() == 'en':
+        if part.lower() == source_lang.lower():
            path_parts[i] = lang
            return Path(*path_parts)
 
-    # Suffix Mode: for root files
+    # Suffix Mode:
+    # If stem ends with _{SOURCE_LANG}, strip it.
+    stem = path.stem
+    source_suffix = f"_{source_lang.upper()}"
+    if stem.upper().endswith(source_suffix):
+        stem = stem[:-len(source_suffix)]
+
     if path.parent == root:
-        return path.with_name(f"{path.stem}_{lang.upper()}{path.suffix}")
+        return path.with_name(f"{stem}_{lang.upper()}{path.suffix}")
 
     # Subdir Mode: for documentation directories (fallback)
     return path.parent / lang / path.name
 
-def check_translation_exists(path: Path, root: Path, target_langs: List[str]) -> List[str]:
+def check_translation_exists(path: Path, root: Path, target_langs: List[str], source_lang: str = "en") -> List[str]:
     """
     Verify which target languages have translations.
     Returns a list of missing language codes.
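Read together, the function resolves translations in three tiers: parallel directory, root-file suffix, then subdirectory fallback. A hedged walkthrough (paths are illustrative; assumes the function behaves exactly as diffed above):

```python
from pathlib import Path
from monoco.features.i18n.core import get_target_translation_path

root = Path("/repo")

# 1. Parallel Directory Mode: the first component equal to source_lang
#    is swapped for the target language.
get_target_translation_path(Path("/repo/docs/en/guide.md"), root, "zh")
# -> /repo/docs/zh/guide.md

# 2. Suffix Mode for root-level files; a _{SOURCE_LANG} stem suffix is
#    stripped before appending the target suffix.
get_target_translation_path(Path("/repo/README.md"), root, "zh")
# -> /repo/README_ZH.md
get_target_translation_path(Path("/repo/README_EN.md"), root, "zh")
# -> /repo/README_ZH.md (not README_EN_ZH.md)

# 3. Subdir fallback for nested files with no language component.
get_target_translation_path(Path("/repo/guides/setup.md"), root, "zh")
# -> /repo/guides/zh/setup.md
```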
@@ -116,12 +139,85 @@ def check_translation_exists(path: Path, root: Path, target_langs: List[str]) ->
     if is_translation_file(path, target_langs):
         return [] # Already a translation, skip
 
+    # Special handling for standard files: always treat as EN source
+    effective_source_lang = source_lang
+    if path.name.upper() in ["README.MD", "CHANGELOG.MD", "CODE_OF_CONDUCT.MD", "CONTRIBUTING.MD", "LICENSE.MD", "SECURITY.MD"]:
+        effective_source_lang = "en"
+
     missing = []
     for lang in target_langs:
-        target = get_target_translation_path(path, root, lang)
+        # Skip if target language matches the effective source language
+        if lang.lower() == effective_source_lang.lower():
+            continue
+
+        target = get_target_translation_path(path, root, lang, effective_source_lang)
         if not target.exists():
             missing.append(lang)
     return missing
+
+def detect_language(content: str) -> str:
+    """
+    Detect the language of the content using simple heuristics.
+    Returns: 'zh', 'en', or 'unknown'
+    """
+    if not content:
+        return 'unknown'
+
+    # Strip YAML Frontmatter if present
+    # Matches --- at start, followed by anything, followed by ---
+    frontmatter_pattern = re.compile(r'^---\n.*?\n---\n', re.DOTALL)
+    content = frontmatter_pattern.sub('', content)
+
+    if not content.strip():
+        return 'unknown'
+
+    # 1. Check for CJK characters (Chinese/Japanese/Korean)
+    # Range: \u4e00-\u9fff (Common CJK Unified Ideographs)
+    # Heuristic: If CJK count > threshold, it's likely Asian (we assume ZH for now in this context)
+    total_chars = len(content)
+    cjk_count = sum(1 for c in content if '\u4e00' <= c <= '\u9fff')
+
+    # If > 5% chars are CJK, highly likely to be Chinese document
+    if total_chars > 0 and cjk_count / total_chars > 0.05:
+        return 'zh'
+
+    # 2. Check for English
+    # Heuristic: High ASCII ratio and low CJK
+    non_ascii = sum(1 for c in content if ord(c) > 127)
+
+    # If < 10% non-ASCII, likely English (or code)
+    if total_chars > 0 and non_ascii / total_chars < 0.1:
+        return 'en'
+
+    return 'unknown'
+
+def is_content_source_language(path: Path, source_lang: str = "en") -> bool:
+    """
+    Check if file content appears to be in the source language.
+    """
+    try:
+        # Special handling for README/CHANGELOG
+        if path.name.upper() in ["README.MD", "CHANGELOG.MD"]:
+            source_lang = "en"
+
+        content = path.read_text(encoding="utf-8")
+        detected = detect_language(content)
+
+        # 'unknown' is leniently accepted as valid to avoid false positives on code-heavy files
+        if detected == 'unknown':
+            return True
+
+        # Normalize source_lang
+        expected = source_lang.lower()
+        if expected == 'zh' or expected == 'cn':
+            return detected == 'zh'
+        elif expected == 'en':
+            return detected == 'en'
+
+        # For other languages, we don't have detectors yet
+        return True
+    except Exception:
+        return True # Assume valid on error
 # ... (Existing code) ...
 
 SKILL_CONTENT = """---
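The detector is deliberately crude: frontmatter is stripped, then two character-ratio thresholds decide. Roughly, more than 5% CJK characters reads as `zh`, less than 10% non-ASCII reads as `en`, and anything between falls to `unknown`, which `is_content_source_language` accepts as valid. A quick hedged illustration, assuming the wheel is importable:

```python
from monoco.features.i18n.core import detect_language

# Nearly all ASCII: below the 10% non-ASCII ceiling -> 'en'.
print(detect_language("# Setup\n\nRun the installer and follow the prompts."))

# Mostly CJK: far above the 5% threshold -> 'zh'.
print(detect_language("# 安装指南\n\n运行安装程序并按照提示操作。"))

# Accented Latin text trips neither detector -> 'unknown', which the
# caller treats as a pass to avoid flagging code-heavy or other-language files.
print(detect_language("Déjà vu : un café très serré, naïveté générale, s'il vous plaît."))
```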
@@ -154,25 +250,15 @@ i18n is a "first-class citizen" in Monoco.
 - Run `monoco i18n scan` to verify coverage.
 """
 
-PROMPT_CONTENT = """### Documentation I18n
-Manage internationalization.
-- **Scan**: `monoco i18n scan` (Check for missing translations)
-- **Structure**:
-  - Root files: `FILE_ZH.md`
-  - Subdirs: `folder/zh/file.md`"""
 
 def init(root: Path):
     """Initialize I18n environment (No-op currently as it relies on config)."""
     # In future, could generate i18n config section if missing.
     pass
 
-def get_resources() -> Dict[str, Any]:
     return {
         "skills": {
             "i18n": SKILL_CONTENT
         },
-        "prompts": {
-            "i18n": PROMPT_CONTENT
-        }
+        "prompts": {} # Handled by adapter via resource files
     }
-
monoco/features/i18n/resources/en/AGENTS.md (new file)
@@ -0,0 +1,8 @@
+### Documentation I18n
+
+Manage internationalization.
+
+- **Scan**: `monoco i18n scan` (Check for missing translations)
+- **Structure**:
+  - Root files: `FILE_ZH.md`
+  - Subdirs: `folder/zh/file.md`
monoco/features/i18n/resources/en/SKILL.md (new file)
@@ -0,0 +1,94 @@
+---
+name: monoco-i18n
+description: Internationalization quality control for documentation. Ensures multi-language documentation stays synchronized.
+---
+
+# Documentation I18n
+
+Manage internationalization for Monoco project documentation.
+
+## Overview
+
+The I18n feature provides:
+
+- **Automatic scanning** for missing translations
+- **Standardized structure** for multi-language documentation
+- **Quality control** to maintain documentation parity
+
+## Key Commands
+
+### Scan for Missing Translations
+
+```bash
+monoco i18n scan
+```
+
+Scans the project for markdown files and reports missing translations.
+
+**Output**:
+
+- Lists source files without corresponding translations
+- Shows which target languages are missing
+- Respects `.gitignore` and default exclusions
+
+## Configuration
+
+I18n settings are configured in `.monoco/config.yaml`:
+
+```yaml
+i18n:
+  source_lang: en # Source language code
+  target_langs:   # Target language codes
+    - zh
+    - ja
+```
+
+## Documentation Structure
+
+### Root Files (Suffix Pattern)
+
+For files in the project root:
+
+- Source: `README.md`
+- Chinese: `README_ZH.md`
+- Japanese: `README_JA.md`
+
+### Subdirectory Files (Directory Pattern)
+
+For files in `docs/` or other directories:
+
+```
+docs/
+├── en/
+│   ├── guide.md
+│   └── api.md
+├── zh/
+│   ├── guide.md
+│   └── api.md
+└── ja/
+    ├── guide.md
+    └── api.md
+```
+
+## Exclusion Rules
+
+The following are automatically excluded from i18n scanning:
+
+- `.gitignore` patterns (respected automatically)
+- `.references/` directory
+- Build artifacts (`dist/`, `build/`, `node_modules/`)
+- `Issues/` directory
+
+## Best Practices
+
+1. **Create English First**: Write documentation in the source language first
+2. **Follow Naming Convention**: Use the appropriate pattern (suffix or directory)
+3. **Run Scan Regularly**: Use `monoco i18n scan` to verify coverage
+4. **Commit All Languages**: Keep translations in version control
+
+## Workflow
+
+1. Write documentation in source language (e.g., English)
+2. Create translation files following the naming convention
+3. Run `monoco i18n scan` to verify all translations exist
+4. Fix any missing translations reported by the scan
monoco/features/i18n/resources/zh/AGENTS.md (new file)
@@ -0,0 +1,8 @@
+### 文档国际化
+
+管理国际化。
+
+- **扫描**: `monoco i18n scan` (检查缺失的翻译)
+- **结构**:
+  - 根文件: `FILE_ZH.md`
+  - 子目录: `folder/zh/file.md`
monoco/features/i18n/resources/zh/SKILL.md (new file)
@@ -0,0 +1,94 @@
+---
+name: monoco-i18n
+description: 文档国际化质量控制。确保多语言文档保持同步。
+---
+
+# 文档国际化
+
+管理 Monoco 项目文档的国际化。
+
+## 概述
+
+I18n 功能提供:
+
+- **自动扫描**缺失的翻译
+- **标准化结构**用于多语言文档
+- **质量控制**以维护文档一致性
+
+## 核心命令
+
+### 扫描缺失的翻译
+
+```bash
+monoco i18n scan
+```
+
+扫描项目中的 markdown 文件并报告缺失的翻译。
+
+**输出**:
+
+- 列出没有对应翻译的源文件
+- 显示缺少哪些目标语言
+- 遵循 `.gitignore` 和默认排除规则
+
+## 配置
+
+I18n 设置在 `.monoco/config.yaml` 中配置:
+
+```yaml
+i18n:
+  source_lang: en # 源语言代码
+  target_langs:   # 目标语言代码
+    - zh
+    - ja
+```
+
+## 文档结构
+
+### 根文件(后缀模式)
+
+对于项目根目录中的文件:
+
+- 源文件: `README.md`
+- 中文: `README_ZH.md`
+- 日文: `README_JA.md`
+
+### 子目录文件(目录模式)
+
+对于 `docs/` 或其他目录中的文件:

+```
+docs/
+├── en/
+│   ├── guide.md
+│   └── api.md
+├── zh/
+│   ├── guide.md
+│   └── api.md
+└── ja/
+    ├── guide.md
+    └── api.md
+```
+
+## 排除规则
+
+以下内容会自动从 i18n 扫描中排除:
+
+- `.gitignore` 模式(自动遵循)
+- `.references/` 目录
+- 构建产物(`dist/`, `build/`, `node_modules/`)
+- `Issues/` 目录
+
+## 最佳实践
+
+1. **先创建英文版**: 首先用源语言编写文档
+2. **遵循命名约定**: 使用适当的模式(后缀或目录)
+3. **定期运行扫描**: 使用 `monoco i18n scan` 验证覆盖率
+4. **提交所有语言**: 将翻译保存在版本控制中
+
+## 工作流程
+
+1. 用源语言(如英语)编写文档
+2. 按照命名约定创建翻译文件
+3. 运行 `monoco i18n scan` 验证所有翻译是否存在
+4. 修复扫描报告的任何缺失翻译
monoco/features/issue/adapter.py (new file)
@@ -0,0 +1,34 @@
+from pathlib import Path
+from typing import Dict
+from monoco.core.feature import MonocoFeature, IntegrationData
+from monoco.features.issue import core
+
+class IssueFeature(MonocoFeature):
+    @property
+    def name(self) -> str:
+        return "issue"
+
+    def initialize(self, root: Path, config: Dict) -> None:
+        issues_path = root / config.get("paths", {}).get("issues", "Issues")
+        core.init(issues_path)
+
+    def integrate(self, root: Path, config: Dict) -> IntegrationData:
+        # Determine language from config, default to 'en'
+        lang = config.get("i18n", {}).get("source_lang", "en")
+
+        # Current file is in monoco/features/issue/adapter.py
+        # Resource path: monoco/features/issue/resources/{lang}/AGENTS.md
+        base_dir = Path(__file__).parent / "resources"
+
+        # Try specific language, fallback to 'en'
+        prompt_file = base_dir / lang / "AGENTS.md"
+        if not prompt_file.exists():
+            prompt_file = base_dir / "en" / "AGENTS.md"
+
+        content = ""
+        if prompt_file.exists():
+            content = prompt_file.read_text(encoding="utf-8").strip()
+
+        return IntegrationData(
+            system_prompts={"Issue Management": content}
+        )
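The adapter resolves a language-specific AGENTS.md with an English fallback and hands it back as a system prompt. A hedged sketch of driving it directly (assumes `IntegrationData.system_prompts` is a plain dict, as the constructor call above suggests):

```python
from pathlib import Path
from monoco.features.issue.adapter import IssueFeature

feature = IssueFeature()

# source_lang selects resources/<lang>/AGENTS.md; a missing language
# directory falls back to resources/en/AGENTS.md.
data = feature.integrate(Path("."), {"i18n": {"source_lang": "zh"}})
print(data.system_prompts["Issue Management"][:120])

# An unsupported language exercises the English fallback path.
data = feature.integrate(Path("."), {"i18n": {"source_lang": "fr"}})
```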