monoco-toolkit 0.2.8__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. monoco/cli/project.py +35 -31
  2. monoco/cli/workspace.py +26 -16
  3. monoco/core/agent/__init__.py +0 -2
  4. monoco/core/agent/action.py +44 -20
  5. monoco/core/agent/adapters.py +20 -16
  6. monoco/core/agent/protocol.py +5 -4
  7. monoco/core/agent/state.py +21 -21
  8. monoco/core/config.py +90 -33
  9. monoco/core/execution.py +21 -16
  10. monoco/core/feature.py +8 -5
  11. monoco/core/git.py +61 -30
  12. monoco/core/hooks.py +57 -0
  13. monoco/core/injection.py +47 -44
  14. monoco/core/integrations.py +50 -35
  15. monoco/core/lsp.py +12 -1
  16. monoco/core/output.py +35 -16
  17. monoco/core/registry.py +3 -2
  18. monoco/core/setup.py +190 -124
  19. monoco/core/skills.py +121 -107
  20. monoco/core/state.py +12 -10
  21. monoco/core/sync.py +85 -56
  22. monoco/core/telemetry.py +10 -6
  23. monoco/core/workspace.py +26 -19
  24. monoco/daemon/app.py +123 -79
  25. monoco/daemon/commands.py +14 -13
  26. monoco/daemon/models.py +11 -3
  27. monoco/daemon/reproduce_stats.py +8 -8
  28. monoco/daemon/services.py +32 -33
  29. monoco/daemon/stats.py +59 -40
  30. monoco/features/config/commands.py +38 -25
  31. monoco/features/i18n/adapter.py +4 -5
  32. monoco/features/i18n/commands.py +83 -49
  33. monoco/features/i18n/core.py +94 -54
  34. monoco/features/issue/adapter.py +6 -7
  35. monoco/features/issue/commands.py +468 -272
  36. monoco/features/issue/core.py +419 -312
  37. monoco/features/issue/domain/lifecycle.py +33 -23
  38. monoco/features/issue/domain/models.py +71 -38
  39. monoco/features/issue/domain/parser.py +92 -69
  40. monoco/features/issue/domain/workspace.py +19 -16
  41. monoco/features/issue/engine/__init__.py +3 -3
  42. monoco/features/issue/engine/config.py +18 -25
  43. monoco/features/issue/engine/machine.py +72 -39
  44. monoco/features/issue/engine/models.py +4 -2
  45. monoco/features/issue/linter.py +287 -157
  46. monoco/features/issue/lsp/definition.py +26 -19
  47. monoco/features/issue/migration.py +45 -34
  48. monoco/features/issue/models.py +29 -13
  49. monoco/features/issue/monitor.py +24 -8
  50. monoco/features/issue/resources/en/SKILL.md +6 -2
  51. monoco/features/issue/validator.py +383 -208
  52. monoco/features/skills/__init__.py +0 -1
  53. monoco/features/skills/core.py +24 -18
  54. monoco/features/spike/adapter.py +4 -5
  55. monoco/features/spike/commands.py +51 -38
  56. monoco/features/spike/core.py +24 -16
  57. monoco/main.py +34 -21
  58. {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.0.dist-info}/METADATA +1 -1
  59. monoco_toolkit-0.3.0.dist-info/RECORD +84 -0
  60. monoco_toolkit-0.2.8.dist-info/RECORD +0 -83
  61. {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.0.dist-info}/WHEEL +0 -0
  62. {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.0.dist-info}/entry_points.txt +0 -0
  63. {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.0.dist-info}/licenses/LICENSE +0 -0
monoco/features/issue/domain/lifecycle.py
@@ -1,12 +1,13 @@
-from typing import List, Optional, Callable
+from typing import List, Optional
 from pydantic import BaseModel
 from ..models import IssueStatus, IssueStage, IssueSolution, current_time
 from .models import Issue
 
+
 class Transition(BaseModel):
     name: str
-    from_status: Optional[IssueStatus] = None # None means any
-    from_stage: Optional[IssueStage] = None # None means any
+    from_status: Optional[IssueStatus] = None  # None means any
+    from_stage: Optional[IssueStage] = None  # None means any
     to_status: IssueStatus
     to_stage: Optional[IssueStage] = None
     required_solution: Optional[IssueSolution] = None
@@ -19,6 +20,7 @@ class Transition(BaseModel):
             return False
         return True
 
+
 class TransitionService:
     def __init__(self):
         self.transitions: List[Transition] = [
@@ -28,15 +30,15 @@ class TransitionService:
                 from_status=IssueStatus.OPEN,
                 to_status=IssueStatus.BACKLOG,
                 to_stage=IssueStage.FREEZED,
-                description="Move open issue to backlog"
+                description="Move open issue to backlog",
             ),
             # Backlog -> Open
             Transition(
                 name="activate",
                 from_status=IssueStatus.BACKLOG,
                 to_status=IssueStatus.OPEN,
-                to_stage=IssueStage.DRAFT, # Reset to draft?
-                description="Restore issue from backlog"
+                to_stage=IssueStage.DRAFT,  # Reset to draft?
+                description="Restore issue from backlog",
             ),
             # Open (Draft) -> Open (Doing)
             Transition(
@@ -45,7 +47,7 @@
                 from_stage=IssueStage.DRAFT,
                 to_status=IssueStatus.OPEN,
                 to_stage=IssueStage.DOING,
-                description="Start working on the issue"
+                description="Start working on the issue",
             ),
             # Open (Doing) -> Open (Review)
             Transition(
@@ -54,16 +56,16 @@
                 from_stage=IssueStage.DOING,
                 to_status=IssueStatus.OPEN,
                 to_stage=IssueStage.REVIEW,
-                description="Submit for review"
+                description="Submit for review",
             ),
-            # Open (Review) -> Open (Doing) - reject
+            # Open (Review) -> Open (Doing) - reject
             Transition(
                 name="reject",
                 from_status=IssueStatus.OPEN,
                 from_stage=IssueStage.REVIEW,
                 to_status=IssueStatus.OPEN,
                 to_stage=IssueStage.DOING,
-                description="Reject review and return to doing"
+                description="Reject review and return to doing",
             ),
             # Open (Review) -> Closed (Implemented)
             Transition(
@@ -73,7 +75,7 @@
                 to_status=IssueStatus.CLOSED,
                 to_stage=IssueStage.DONE,
                 required_solution=IssueSolution.IMPLEMENTED,
-                description="Accept and close issue"
+                description="Accept and close issue",
             ),
             # Direct Close (Cancel, Wontfix, Duplicate)
             Transition(
@@ -81,14 +83,14 @@
                 to_status=IssueStatus.CLOSED,
                 to_stage=IssueStage.DONE,
                 required_solution=IssueSolution.CANCELLED,
-                description="Cancel the issue"
+                description="Cancel the issue",
             ),
-            Transition(
+            Transition(
                 name="wontfix",
                 to_status=IssueStatus.CLOSED,
                 to_stage=IssueStage.DONE,
                 required_solution=IssueSolution.WONTFIX,
-                description="Mark as wontfix"
+                description="Mark as wontfix",
             ),
         ]
 
@@ -103,9 +105,11 @@
                 if t.is_allowed(issue):
                     valid_transition = t
                     break
-
+
         if not valid_transition:
-            raise ValueError(f"Transition '{transition_name}' is not allowed for current state.")
+            raise ValueError(
+                f"Transition '{transition_name}' is not allowed for current state."
+            )
 
         # Apply changes
         issue.frontmatter.status = valid_transition.to_status
@@ -113,14 +117,20 @@
             issue.frontmatter.stage = valid_transition.to_stage
         if valid_transition.required_solution:
             issue.frontmatter.solution = valid_transition.required_solution
-
+
         issue.frontmatter.updated_at = current_time()
-
+
         # Logic for closed_at, opened_at etc.
-        if valid_transition.to_status == IssueStatus.CLOSED and issue.frontmatter.closed_at is None:
+        if (
+            valid_transition.to_status == IssueStatus.CLOSED
+            and issue.frontmatter.closed_at is None
+        ):
             issue.frontmatter.closed_at = current_time()
-
-        if valid_transition.to_status == IssueStatus.OPEN and issue.frontmatter.opened_at is None:
-            issue.frontmatter.opened_at = current_time()
-
+
+        if (
+            valid_transition.to_status == IssueStatus.OPEN
+            and issue.frontmatter.opened_at is None
+        ):
+            issue.frontmatter.opened_at = current_time()
+
         return issue
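
A rough, hedged sketch of how the transition table above might be driven: TransitionService, the "activate" and "reject" transition names, and the ValueError raised for a disallowed transition all appear in the hunks, while the name of the applying method (`apply` below), the IssueType value "feature", and the concrete frontmatter values are assumptions made only so the example is self-contained.

# Hedged usage sketch for the transition table shown above. Assumptions (not in
# the diff): the applying method is called `apply`, IssueType accepts "feature",
# and IssueFrontmatter accepts explicit created_at/updated_at datetimes.
from datetime import datetime

from monoco.features.issue.domain.lifecycle import TransitionService
from monoco.features.issue.domain.models import Issue, IssueBody, IssueFrontmatter
from monoco.features.issue.models import IssueStage, IssueStatus

issue = Issue(
    frontmatter=IssueFrontmatter(
        id="ISSUE-0001",                 # illustrative id
        type="feature",                  # assumed IssueType value
        status=IssueStatus.BACKLOG,
        stage=IssueStage.FREEZED,
        title="Example issue",
        created_at=datetime.now(),
        updated_at=datetime.now(),
    ),
    body=IssueBody(),
)

service = TransitionService()

# "activate" is guarded by from_status=BACKLOG and moves the issue to OPEN/DRAFT,
# stamping opened_at the first time the issue becomes open.
issue = service.apply(issue, "activate")   # assumed method name
assert issue.status == IssueStatus.OPEN

# "reject" requires status=OPEN and stage=REVIEW, so on a DRAFT issue the
# service raises the ValueError shown in the diff.
try:
    service.apply(issue, "reject")
except ValueError as exc:
    print(exc)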
monoco/features/issue/domain/models.py
@@ -1,22 +1,33 @@
 from typing import List, Optional, Any, Dict
 from pydantic import BaseModel, Field, model_validator
 from datetime import datetime
-from ..models import IssueType, IssueStatus, IssueStage, IssueSolution, IssueIsolation, IssueID, current_time
-from monoco.core.lsp import Range, Position
+from ..models import (
+    IssueType,
+    IssueStatus,
+    IssueStage,
+    IssueSolution,
+    IssueIsolation,
+    current_time,
+)
+from monoco.core.lsp import Range
+
 
 class Span(BaseModel):
     """
     Represents a fine-grained location inside a ContentBlock.
     """
+
     type: str # 'wikilink', 'issue_id', 'checkbox', 'yaml_key', 'plain_text'
     range: Range
     content: str
     metadata: Dict[str, Any] = Field(default_factory=dict)
 
+
 class ContentBlock(BaseModel):
     """
     Represents a block of content in the markdown body.
     """
+
     type: str # e.g., 'heading', 'task_list', 'paragraph', 'empty'
     content: str
     line_start: int
@@ -27,30 +38,35 @@ class ContentBlock(BaseModel):
     def to_string(self) -> str:
         return self.content
 
+
 from enum import Enum
 
+
 class TaskState(str, Enum):
     TODO = " "
     DONE = "x"
     DOING = "-"
     CANCELLED = "+"
 
+
 class TaskItem(ContentBlock):
-    type: str = "task_item" # override type
+    type: str = "task_item"  # override type
     state: TaskState = TaskState.TODO
     level: int = 0
    parent_index: Optional[int] = None
-
+
     @property
     def is_completed(self) -> bool:
         return self.state in [TaskState.DONE, TaskState.CANCELLED]
 
+
 class IssueBody(BaseModel):
     """
     Represents the parsed body of the issue.
     """
+
     blocks: List[ContentBlock] = Field(default_factory=list)
-
+
     def to_markdown(self) -> str:
         return "\n".join(b.to_string() for b in self.blocks)
 
@@ -60,21 +76,30 @@ class IssueBody(BaseModel):
 
     @property
     def tasks(self) -> List[TaskItem]:
-        return [b for b in self.blocks if isinstance(b, TaskItem) or (isinstance(b, ContentBlock) and b.type == 'task_item')]
-
+        return [
+            b
+            for b in self.blocks
+            if isinstance(b, TaskItem)
+            or (isinstance(b, ContentBlock) and b.type == "task_item")
+        ]
+
     @property
     def progress(self) -> str:
         tasks = self.tasks
         if not tasks:
             return "0/0"
-        completed = len([t for t in tasks if isinstance(t, TaskItem) and t.is_completed])
+        completed = len(
+            [t for t in tasks if isinstance(t, TaskItem) and t.is_completed]
+        )
         return f"{completed}/{len(tasks)}"
 
+
 class IssueFrontmatter(BaseModel):
     """
     Represents the YAML frontmatter of the issue.
     Contains metadata and validation logic.
     """
+
     id: str
     uid: Optional[str] = None
     type: IssueType
@@ -91,10 +116,10 @@
     tags: List[str] = Field(default_factory=list)
     solution: Optional[IssueSolution] = None
     isolation: Optional[IssueIsolation] = None
-
+
     model_config = {"extra": "allow"}
 
-    @model_validator(mode='before')
+    @model_validator(mode="before")
     @classmethod
     def normalize_fields(cls, v: Any) -> Any:
         # Reusing normalization logic from original model or keeping it clean here
@@ -105,10 +130,12 @@
             v["status"] = v["status"].lower()
         return v
 
+
 class Issue(BaseModel):
     """
     The Aggregate Root for an Issue in the Domain Layer.
     """
+
     path: Optional[str] = None
     frontmatter: IssueFrontmatter
     body: IssueBody
@@ -116,55 +143,61 @@ class Issue(BaseModel):
     @property
     def id(self) -> str:
         return self.frontmatter.id
-
+
     @property
     def status(self) -> IssueStatus:
         return self.frontmatter.status
-
+
     def to_file_content(self) -> str:
         """
         Reconstruct the full file content.
         """
         import yaml
-
+
         # Dump frontmatter
         # Dump frontmatter with explicit field handling
         # We want to keep certain fields even if empty to serve as prompts
-        data = self.frontmatter.model_dump(mode='json')
-
+        data = self.frontmatter.model_dump(mode="json")
+
         # Explicit ordering and key retention
         # We construct a new dict to control order and presence
         ordered_dump = {}
-
+
         # 1. Identity
-        ordered_dump['id'] = data['id']
-        if data.get('uid'): ordered_dump['uid'] = data['uid']
-
+        ordered_dump["id"] = data["id"]
+        if data.get("uid"):
+            ordered_dump["uid"] = data["uid"]
+
         # 2. Classifier
-        ordered_dump['type'] = data['type']
-        ordered_dump['status'] = data['status']
-        if data.get('stage'): ordered_dump['stage'] = data['stage']
-
+        ordered_dump["type"] = data["type"]
+        ordered_dump["status"] = data["status"]
+        if data.get("stage"):
+            ordered_dump["stage"] = data["stage"]
+
         # 3. Content
-        ordered_dump['title'] = data['title']
-
+        ordered_dump["title"] = data["title"]
+
         # 4. Dates (Always keep created/updated, others if exist)
-        ordered_dump['created_at'] = data['created_at']
-        if data.get('opened_at'): ordered_dump['opened_at'] = data['opened_at']
-        ordered_dump['updated_at'] = data['updated_at']
-        if data.get('closed_at'): ordered_dump['closed_at'] = data['closed_at']
-
+        ordered_dump["created_at"] = data["created_at"]
+        if data.get("opened_at"):
+            ordered_dump["opened_at"] = data["opened_at"]
+        ordered_dump["updated_at"] = data["updated_at"]
+        if data.get("closed_at"):
+            ordered_dump["closed_at"] = data["closed_at"]
+
         # 5. Graph (Always include to prompt usage)
-        ordered_dump['parent'] = data.get('parent') # Allow null
-        ordered_dump['dependencies'] = data.get('dependencies', [])
-        ordered_dump['related'] = data.get('related', [])
-        ordered_dump['tags'] = data.get('tags', [])
-
+        ordered_dump["parent"] = data.get("parent")  # Allow null
+        ordered_dump["dependencies"] = data.get("dependencies", [])
+        ordered_dump["related"] = data.get("related", [])
+        ordered_dump["tags"] = data.get("tags", [])
+
         # 6. Lifecycle (Optional)
-        if data.get('solution'): ordered_dump['solution'] = data['solution']
-        if data.get('isolation'): ordered_dump['isolation'] = data['isolation']
+        if data.get("solution"):
+            ordered_dump["solution"] = data["solution"]
+        if data.get("isolation"):
+            ordered_dump["isolation"] = data["isolation"]
 
         fm_str = yaml.dump(ordered_dump, sort_keys=False, allow_unicode=True).strip()
         body_str = self.body.to_markdown()
-
+
         return f"---\n{fm_str}\n---\n\n{body_str}"
monoco/features/issue/domain/parser.py
@@ -1,9 +1,10 @@
 import yaml
 import re
-from typing import List, Optional, Tuple, Any
+from typing import List, Optional, Tuple
 from .models import Issue, IssueFrontmatter, IssueBody, ContentBlock, Span
 from monoco.core.lsp import Range, Position
 
+
 class MarkdownParser:
     """
     Parses markdown content into Domain Models.
@@ -17,10 +18,10 @@ class MarkdownParser:
     @staticmethod
     def parse(content: str, path: Optional[str] = None) -> Issue:
         lines = content.splitlines()
-
+
         # 1. Parse Frontmatter
         frontmatter_dict, body_start_line = MarkdownParser._extract_frontmatter(lines)
-
+
         # 2. Create Frontmatter Object
         # Handle cases where frontmatter might be empty or invalid
         if not frontmatter_dict:
@@ -34,8 +35,10 @@
         # 3. Parse Body
         body_lines = lines[body_start_line:]
         # Adjust line numbers relative to the original file
-        blocks = MarkdownParser._parse_blocks(body_lines, start_line_offset=body_start_line)
-
+        blocks = MarkdownParser._parse_blocks(
+            body_lines, start_line_offset=body_start_line
+        )
+
         body = IssueBody(blocks=blocks)
 
         return Issue(path=path, frontmatter=frontmatter, body=body)
@@ -47,7 +50,7 @@
         """
         if not lines or lines[0].strip() != "---":
             return {}, 0
-
+
         fm_lines = []
         i = 1
         while i < len(lines):
@@ -56,8 +59,8 @@
                 return yaml.safe_load("\n".join(fm_lines)), i + 1
             fm_lines.append(line)
             i += 1
-
-        return {}, 0 # malformed
+
+        return {}, 0  # malformed
 
     @staticmethod
     def _parse_blocks(lines: List[str], start_line_offset: int) -> List[ContentBlock]:
@@ -65,7 +68,7 @@
         current_block_lines = []
         current_block_type = "paragraph"
         current_start_line = start_line_offset
-
+
         def flush_block():
             nonlocal current_block_lines, current_start_line
             if current_block_lines:
@@ -74,31 +77,33 @@
                     type=current_block_type,
                     content=content,
                     line_start=current_start_line,
-                    line_end=current_start_line + len(current_block_lines)
+                    line_end=current_start_line + len(current_block_lines),
+                )
+                block.spans = MarkdownParser._parse_spans(
+                    current_block_lines, current_start_line
                 )
-                block.spans = MarkdownParser._parse_spans(current_block_lines, current_start_line)
                 blocks.append(block)
                 current_block_lines = []
 
         for i, line in enumerate(lines):
             abs_line_idx = start_line_offset + i
-
+
             # Simple heuristic for block detection
             # 1. Heading
             if re.match(r"^#{1,6}\s", line):
                 flush_block()
-
+
                 # Add heading as its own block
                 block = ContentBlock(
                     type="heading",
                     content=line,
                     line_start=abs_line_idx,
-                    line_end=abs_line_idx + 1
+                    line_end=abs_line_idx + 1,
                 )
                 block.spans = MarkdownParser._parse_spans([line], abs_line_idx)
                 blocks.append(block)
                 current_start_line = abs_line_idx + 1
-                current_block_type = "paragraph" # reset
+                current_block_type = "paragraph"  # reset
                 continue
 
             # 2. Task List Item
@@ -109,24 +114,25 @@
 
                 indent_str = task_match.group(1)
                 state_char = task_match.group(2).lower()
-
+
                 # Calculate level (assuming 2 spaces per level)
-                level = len(indent_str) // 2
-
+                level = len(indent_str) // 2
+
                 # Determine state
                 from .models import TaskState, TaskItem
+
                 state_map = {
                     " ": TaskState.TODO,
                     "x": TaskState.DONE,
                     "-": TaskState.DOING, # Legacy
                     "/": TaskState.DOING, # New Standard
-                    "+": TaskState.CANCELLED, # Legacy
-                    "~": TaskState.CANCELLED # New Standard
+                    "+": TaskState.CANCELLED,  # Legacy
+                    "~": TaskState.CANCELLED,  # New Standard
                 }
-
+
                 # Fallback for 'X' -> 'x'
-                if state_char not in state_map and state_char == 'x':
-                    state_char = 'x'
+                if state_char not in state_map and state_char == "x":
+                    state_char = "x"
 
                 block = TaskItem(
                     content=line,
@@ -134,7 +140,7 @@
                     line_end=abs_line_idx + 1,
                     state=state_map.get(state_char, TaskState.TODO),
                     level=level,
-                    metadata={"checked": state_char in ['x', '+']}
+                    metadata={"checked": state_char in ["x", "+"]},
                 )
                 block.spans = MarkdownParser._parse_spans([line], abs_line_idx)
                 blocks.append(block)
@@ -144,24 +150,26 @@
 
             # 3. Empty lines (separators)
             if not line.strip():
-                flush_block()
-
-                blocks.append(ContentBlock(
-                    type="empty",
-                    content="",
-                    line_start=abs_line_idx,
-                    line_end=abs_line_idx + 1
-                ))
-                current_start_line = abs_line_idx + 1
-                current_block_type = "paragraph"
-                continue
+                flush_block()
+
+                blocks.append(
+                    ContentBlock(
+                        type="empty",
+                        content="",
+                        line_start=abs_line_idx,
+                        line_end=abs_line_idx + 1,
+                    )
+                )
+                current_start_line = abs_line_idx + 1
+                current_block_type = "paragraph"
+                continue
 
             # Default: accumulate lines into paragraph
             if not current_block_lines:
                 current_start_line = abs_line_idx
-
+
             current_block_lines.append(line)
-
+
         # Flush remaining
         flush_block()
 
@@ -175,49 +183,64 @@
         spans = []
         for i, line in enumerate(lines):
             abs_line_idx = line_offset + i
-
+
             # 1. Parse Checkboxes (only at start of line)
             checkbox_match = re.match(r"^(\s*-\s*\[)([ xX\-\+~/])(\])", line)
             if checkbox_match:
                 start_char = len(checkbox_match.group(1))
                 end_char = start_char + 1
-                spans.append(Span(
-                    type="checkbox",
-                    range=Range(
-                        start=Position(line=abs_line_idx, character=start_char),
-                        end=Position(line=abs_line_idx, character=end_char)
-                    ),
-                    content=checkbox_match.group(2),
-                    metadata={"state": checkbox_match.group(2)}
-                ))
+                spans.append(
+                    Span(
+                        type="checkbox",
+                        range=Range(
+                            start=Position(line=abs_line_idx, character=start_char),
+                            end=Position(line=abs_line_idx, character=end_char),
+                        ),
+                        content=checkbox_match.group(2),
+                        metadata={"state": checkbox_match.group(2)},
+                    )
+                )
 
             # 2. Parse Wikilinks
             for match in re.finditer(MarkdownParser.WIKILINK_PATTERN, line):
-                spans.append(Span(
-                    type="wikilink",
-                    range=Range(
-                        start=Position(line=abs_line_idx, character=match.start()),
-                        end=Position(line=abs_line_idx, character=match.end())
-                    ),
-                    content=match.group(0),
-                    metadata={"issue_id": match.group(1)}
-                ))
+                spans.append(
+                    Span(
+                        type="wikilink",
+                        range=Range(
+                            start=Position(line=abs_line_idx, character=match.start()),
+                            end=Position(line=abs_line_idx, character=match.end()),
+                        ),
+                        content=match.group(0),
+                        metadata={"issue_id": match.group(1)},
+                    )
+                )
 
             # 3. Parse Raw Issue IDs (not inside wikilinks)
             # We use a simple exclusion logic: if a match is inside a wikilink, skip it.
-            wikilink_ranges = [(s.range.start.character, s.range.end.character) for s in spans if s.type == "wikilink" and s.range.start.line == abs_line_idx]
-
+            wikilink_ranges = [
+                (s.range.start.character, s.range.end.character)
+                for s in spans
+                if s.type == "wikilink" and s.range.start.line == abs_line_idx
+            ]
+
             for match in re.finditer(MarkdownParser.ISSUE_ID_PATTERN, line):
-                is_inside = any(r[0] <= match.start() and match.end() <= r[1] for r in wikilink_ranges)
+                is_inside = any(
+                    r[0] <= match.start() and match.end() <= r[1]
+                    for r in wikilink_ranges
+                )
                 if not is_inside:
-                    spans.append(Span(
-                        type="issue_id",
-                        range=Range(
-                            start=Position(line=abs_line_idx, character=match.start()),
-                            end=Position(line=abs_line_idx, character=match.end())
-                        ),
-                        content=match.group(0),
-                        metadata={"issue_id": match.group(1)}
-                    ))
-
+                    spans.append(
+                        Span(
+                            type="issue_id",
+                            range=Range(
+                                start=Position(
+                                    line=abs_line_idx, character=match.start()
+                                ),
+                                end=Position(line=abs_line_idx, character=match.end()),
+                            ),
+                            content=match.group(0),
+                            metadata={"issue_id": match.group(1)},
+                        )
+                    )
+
         return spans
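
And a hedged round-trip sketch for the parser diff above: MarkdownParser.parse, issue.id, issue.body.progress and Issue.to_file_content are all visible in these hunks, whereas the frontmatter values below (the "feature" type, the id format, the timestamps) are assumptions chosen only to satisfy the required fields.

# Hedged round-trip sketch; the frontmatter values below are assumptions.
from monoco.features.issue.domain.parser import MarkdownParser

content = """---
id: ISSUE-0001
type: feature
status: open
title: Example issue
created_at: 2025-01-01T00:00:00
updated_at: 2025-01-01T00:00:00
---

## Tasks

- [x] parse frontmatter
- [ ] parse body
"""

issue = MarkdownParser.parse(content, path="ISSUE-0001.md")
print(issue.id)                 # "ISSUE-0001"
print(issue.body.progress)      # "1/2", counting the checked task as completed
print(issue.to_file_content())  # re-serialized frontmatter + body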