taskflow-git 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- taskflow/__init__.py +3 -0
- taskflow/archive.py +135 -0
- taskflow/cli.py +550 -0
- taskflow/config.py +195 -0
- taskflow/reports.py +284 -0
- taskflow/setup_cmd.py +305 -0
- taskflow/tasklib.py +451 -0
- taskflow_git-0.3.0.dist-info/METADATA +448 -0
- taskflow_git-0.3.0.dist-info/RECORD +11 -0
- taskflow_git-0.3.0.dist-info/WHEEL +4 -0
- taskflow_git-0.3.0.dist-info/entry_points.txt +2 -0
taskflow/tasklib.py
ADDED
|
@@ -0,0 +1,451 @@
|
|
|
1
|
+
"""
|
|
2
|
+
tasklib.py — shared parsing and file mutation.
|
|
3
|
+
|
|
4
|
+
All the regex, section parsing, fuzzy matching, and done.md writing
|
|
5
|
+
lives here. Nothing in this module knows about git or the CLI — it just
|
|
6
|
+
reads and writes backlog files.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import re
|
|
12
|
+
from datetime import date, datetime, timedelta
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from typing import Optional
|
|
15
|
+
|
|
16
|
+
# category headings are ###, with an optional leading emoji.
# group 1 is the heading text; the optional non-capturing emoji group is
# consumed first when present, so group 1 comes back without the emoji —
# NOTE(review): callers still run strip_emoji() on it as a safety net.
CATEGORY_RE = re.compile(r"^###\s+(?:[\U0001F300-\U0001FFFE\u2600-\u26FF\u2700-\u27BF]\s*)?(.*\S.*?)\s*$")

# phase headings are ## (later.md only) — task operations skip these.
# "###" lines cannot match: the char after "##" must be whitespace here.
PHASE_RE = re.compile(r"^##\s+(.*\S.*?)\s*$")

# week headings in done.md; group 1 is the ISO date (YYYY-MM-DD)
WEEK_HEADING_RE = re.compile(r"^##\s+Week of\s+(\d{4}-\d{2}-\d{2})\s*$")

# section divider: a line that is exactly "---" (trailing spaces allowed)
DIVIDER_RE = re.compile(r"^---\s*$")

# plain bullets — * or - with optional indent, no checkboxes.
# group 1 = leading indent, group 2 = bullet marker + space, group 3 = task text
TASK_RE = re.compile(r"^(\s*)([*-]\s+)(.*\S.*?)\s*$")
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
# ---------------------------------------------------------------------------
|
|
32
|
+
# Text helpers
|
|
33
|
+
# ---------------------------------------------------------------------------
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def normalize(text: str) -> str:
    """Return *text* lowercased with every whitespace run collapsed to one space.

    This is the canonical form used whenever two task lines are compared.
    """
    collapsed = " ".join(text.split())
    return collapsed.lower()
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def strip_emoji(text: str) -> str:
    """Drop a single leading emoji (plus any spaces after it) from a heading, then strip."""
    leading_emoji = r"^[\U0001F300-\U0001FFFE\u2600-\u26FF\u2700-\u27BF]\s*"
    without_prefix = re.sub(leading_emoji, "", text)
    return without_prefix.strip()
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def fuzzy_match(query: str, candidate: str) -> bool:
    """Whether *query* occurs inside *candidate* after both are normalized."""
    needle = normalize(query)
    haystack = normalize(candidate)
    return haystack.find(needle) != -1
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
# ---------------------------------------------------------------------------
|
|
52
|
+
# Line classifiers
|
|
53
|
+
# ---------------------------------------------------------------------------
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def is_category(line: str) -> bool:
    """True when *line* is a ### category heading."""
    return bool(CATEGORY_RE.match(line))
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def is_phase(line: str) -> bool:
    """True when *line* is a ## phase heading (never a ### category)."""
    # category headings are excluded explicitly, belt-and-braces with the regex
    if is_category(line):
        return False
    return PHASE_RE.match(line) is not None
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def is_divider(line: str) -> bool:
    """True when *line* is a --- section divider."""
    return bool(DIVIDER_RE.match(line))
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def task_match(line: str) -> Optional[re.Match]:
    """Match *line* against the bullet-task pattern; None when it isn't a task."""
    match = TASK_RE.match(line)
    return match
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def task_indent(line: str) -> Optional[int]:
    """Width of a task line's leading indent, or None for non-task lines."""
    match = task_match(line)
    if match is None:
        return None
    return len(match.group(1))
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def task_text(line: str) -> Optional[str]:
    """The bullet's text with indent and marker removed, or None for non-task lines."""
    match = task_match(line)
    if match is None:
        return None
    return match.group(3)
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
# ---------------------------------------------------------------------------
|
|
84
|
+
# Section parsing
|
|
85
|
+
# ---------------------------------------------------------------------------
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def parse_sections(lines: list[str]) -> list[dict]:
    """
    Parse a backlog file into category sections.

    Each section dict:
        heading      — canonical name (emoji stripped)
        heading_raw  — heading text as captured by CATEGORY_RE
                       (NOTE(review): the regex consumes a leading emoji,
                       so this is likely already emoji-free — confirm)
        start        — line index of the ### heading
        end          — line index of the --- divider (or the section's last line)
        tasks        — list of (line_idx, task_text, raw_line)

    Phase headings (##) are skipped — they're structure, not sections.
    """
    sections = []
    i = 0
    n = len(lines)

    while i < n:
        # ## phase headings are structural only — step over them
        if is_phase(lines[i]):
            i += 1
            continue

        m = CATEGORY_RE.match(lines[i])
        if not m:
            i += 1
            continue

        # found a ### heading — scan forward collecting its tasks
        heading_raw = m.group(1)
        heading = strip_emoji(heading_raw).strip()
        start = i
        j = i + 1
        tasks = []

        while j < n:
            # any new heading or a divider closes the current section
            if is_category(lines[j]) or is_phase(lines[j]) or is_divider(lines[j]):
                break
            txt = task_text(lines[j])
            if txt is not None:
                tasks.append((j, txt, lines[j]))
            j += 1

        # "end" is the divider line when one closed the scan, else the last
        # line inside the section (j stopped at a heading or at EOF)
        end = j if (j < n and is_divider(lines[j])) else j - 1

        sections.append(
            {
                "start": start,
                "end": end,
                "heading": heading,
                "heading_raw": heading_raw,
                "tasks": tasks,
            }
        )

        # resume after the divider, or directly on the line that broke the scan
        i = j + 1 if (j < n and is_divider(lines[j])) else j

    return sections
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def find_duplicates(sections: list[dict]) -> list[tuple[str, int, int]]:
    """Report duplicated task text across all sections.

    Returns one (normalized_key, first_line_1based, later_line_1based) tuple
    per repeat of an already-seen task.
    """
    first_seen: dict[str, int] = {}
    duplicates: list[tuple[str, int, int]] = []
    for sec in sections:
        for line_no, text, _raw in sec["tasks"]:
            key = normalize(text)
            earlier = first_seen.get(key)
            if earlier is None:
                first_seen[key] = line_no
            else:
                duplicates.append((key, earlier + 1, line_no + 1))
    return duplicates
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
def find_task(sections: list[dict], query: str, src_path: Path) -> tuple[dict, int, str, str]:
    """
    Locate exactly one task whose text fuzzily matches *query*.

    Returns (section, line_idx, task_text, raw_line) for the single hit;
    raises click.UsageError when nothing matches or the query is ambiguous.
    """
    import click

    hits = [
        (sec, idx, text, raw)
        for sec in sections
        for idx, text, raw in sec["tasks"]
        if fuzzy_match(query, text)
    ]

    if len(hits) == 1:
        return hits[0]

    if not hits:
        raise click.UsageError(f"No task matching '{query}' in {src_path.name}")

    listing = "\n".join(f"  line {idx + 1} [{sec['heading']}] {txt}" for sec, idx, txt, _ in hits)
    raise click.UsageError(f"Multiple tasks match '{query}' — be more specific:\n{listing}")
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
# ---------------------------------------------------------------------------
|
|
184
|
+
# Task block (parent + children)
|
|
185
|
+
# ---------------------------------------------------------------------------
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def find_task_block(lines: list[str], section_end: int, start_idx: int) -> tuple[int, int]:
    """
    Compute the half-open range [start, end) covering a task plus its subtree.

    A child is any task line indented deeper than the parent; non-task lines
    encountered before the subtree closes ride along with it. The scan never
    crosses a heading or divider and never passes *section_end*.

    Raises ValueError when *start_idx* does not point at a task line.
    """
    parent_indent = task_indent(lines[start_idx])
    if parent_indent is None:
        raise ValueError(f"Line {start_idx} is not a task line")

    cursor = start_idx + 1
    while cursor < section_end:
        current = lines[cursor]
        if is_category(current) or is_phase(current) or is_divider(current):
            break
        depth = task_indent(current)
        # a task at the parent's level (or shallower) ends the subtree
        if depth is not None and depth <= parent_indent:
            break
        cursor += 1

    return start_idx, cursor
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
# ---------------------------------------------------------------------------
|
|
212
|
+
# Blank line handling and serialisation
|
|
213
|
+
# ---------------------------------------------------------------------------
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
def collapse_blank_lines(lines: list[str]) -> list[str]:
    """Squash every run of consecutive blank lines down to a single blank line."""
    result: list[str] = []
    for line in lines:
        is_blank = not line.strip()
        # skip a blank that would directly follow another blank
        if is_blank and result and not result[-1].strip():
            continue
        result.append(line)
    return result
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
def ensure_blank_before_dividers(lines: list[str]) -> list[str]:
    """Guarantee that every --- divider has a blank line directly before it."""
    padded: list[str] = []
    for line in lines:
        needs_gap = (
            DIVIDER_RE.match(line) is not None
            and bool(padded)
            and padded[-1].strip() != ""
        )
        if needs_gap:
            padded.append("")
        padded.append(line)
    return padded
|
|
237
|
+
|
|
238
|
+
|
|
239
|
+
def serialize_lines(lines: list[str], path: Path) -> None:
    """Normalise blank lines, then write *lines* to *path* with one trailing newline."""
    cleaned = ensure_blank_before_dividers(collapse_blank_lines(lines))
    text = "\n".join(cleaned).rstrip() + "\n"
    path.write_text(text, encoding="utf-8")
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
# ---------------------------------------------------------------------------
|
|
246
|
+
# Empty section removal
|
|
247
|
+
# ---------------------------------------------------------------------------
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
def remove_empty_sections(lines: list[str]) -> list[str]:
    """Drop every category section that no longer contains any tasks.

    An empty section is removed together with its divider (its "end" line)
    and the blank lines hugging it on either side; survivors are re-joined
    and double blanks collapsed.
    """
    sections = parse_sections(lines)
    remove_ranges = []

    for section in sections:
        if not section["tasks"]:
            start = section["start"]
            end = section["end"]  # inclusive: heading .. divider (or last line)
            # widen the range over blank lines after the section…
            while end + 1 < len(lines) and lines[end + 1].strip() == "":
                end += 1
            # …and over blank lines before the heading
            while start - 1 >= 0 and lines[start - 1].strip() == "":
                start -= 1
            remove_ranges.append((start, end))

    if not remove_ranges:
        return collapse_blank_lines(lines)

    # copy everything outside the (sorted, possibly touching) removal ranges
    result = []
    idx = 0
    for start, end in sorted(remove_ranges):
        if idx < start:
            result.extend(lines[idx:start])
        idx = end + 1
    if idx < len(lines):
        result.extend(lines[idx:])

    return collapse_blank_lines(result)
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
# ---------------------------------------------------------------------------
|
|
280
|
+
# Destination insertion
|
|
281
|
+
# ---------------------------------------------------------------------------
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
def insert_into_destination(
    dst_lines: list[str],
    moved_block: list[str],
    category: str,
    category_raw: str,
) -> list[str]:
    """
    Insert moved_block at the end of the matching category section in
    dst_lines, creating a new section (heading + divider) when none matches.

    *category* is the emoji-stripped name used for matching; *category_raw*
    is written verbatim when a new heading has to be created.
    """
    dst_sections = parse_sections(dst_lines)
    inserted = False

    for dsec in dst_sections:
        if dsec["heading"] != category:
            continue

        insert_at = dsec["end"]
        # BUG FIX: "end" is the --- divider only when the section has one.
        # Otherwise it is the section's LAST line, and inserting *at* it would
        # splice the moved block in before the final task — insert after it.
        if not (insert_at < len(dst_lines) and is_divider(dst_lines[insert_at])):
            insert_at += 1

        # if the line just above the insertion point is blank, step before it
        # so new tasks don't end up after the section's trailing gap
        if insert_at > 0 and dst_lines[insert_at - 1].strip() == "":
            insert_at -= 1

        # blank separator only when the preceding task has subtasks
        preceding = dst_lines[insert_at - 1] if insert_at > 0 else ""
        if preceding.strip() != "" and preceding.startswith(" "):
            dst_lines[insert_at:insert_at] = [""] + moved_block
        else:
            dst_lines[insert_at:insert_at] = moved_block

        inserted = True
        break

    if not inserted:
        # no matching section — append a fresh one at the end of the file
        if dst_lines and dst_lines[-1].strip() != "":
            dst_lines.append("")
        dst_lines.append(f"### {category_raw}")
        dst_lines.extend(moved_block)
        dst_lines.append("---")

    return collapse_blank_lines(dst_lines)
|
|
324
|
+
|
|
325
|
+
|
|
326
|
+
# ---------------------------------------------------------------------------
|
|
327
|
+
# Done log
|
|
328
|
+
# ---------------------------------------------------------------------------
|
|
329
|
+
|
|
330
|
+
|
|
331
|
+
def latest_week_heading_date(lines: list[str]) -> Optional[date]:
    """Scan for "## Week of YYYY-MM-DD" headings and return the newest date, or None."""
    newest: Optional[date] = None
    for line in lines:
        heading = WEEK_HEADING_RE.match(line)
        if heading is None:
            continue
        try:
            candidate = date.fromisoformat(heading.group(1))
        except ValueError:
            continue  # heading carries a malformed date — ignore it
        if newest is None or candidate > newest:
            newest = candidate
    return newest
|
|
344
|
+
|
|
345
|
+
|
|
346
|
+
def append_done(done_path: Path, category: str, task_text_str: str) -> None:
    """
    Append a timestamped done entry to *done_path*.

    A fresh "## Week of <today>" heading is inserted first when no heading
    exists yet, or when the most recent one is at least 7 days old. The
    heading date is today — weeks are relative to when work started, not a
    fixed Mon/Sun calendar boundary.
    """
    done_path.parent.mkdir(parents=True, exist_ok=True)
    if done_path.exists():
        entries = done_path.read_text(encoding="utf-8").splitlines()
    else:
        entries = []

    today = date.today()
    newest_heading = latest_week_heading_date(entries)
    heading_is_stale = newest_heading is not None and (today - newest_heading) >= timedelta(days=7)

    if newest_heading is None or heading_is_stale:
        if entries and entries[-1].strip() != "":
            entries.append("")
        entries.append(f"## Week of {today.isoformat()}")
        entries.append("")

    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entries.append(f"[{stamp}] done: ({category}) - {task_text_str}")
    serialize_lines(entries, done_path)
|
|
371
|
+
|
|
372
|
+
|
|
373
|
+
# ---------------------------------------------------------------------------
|
|
374
|
+
# Task move
|
|
375
|
+
# ---------------------------------------------------------------------------
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
def move_task(src_path: Path, dst_path: Path, query: str) -> tuple[str, str]:
    """
    Move one task (with its indented children) from *src_path* to *dst_path*,
    keeping it under the same category heading.

    Returns (category, matched_task_text) on success; raises click.UsageError
    on any problem (missing source, duplicates, no/ambiguous match).
    """
    import click

    if not src_path.exists():
        raise click.UsageError(f"Source file does not exist: {src_path}")

    src_lines = src_path.read_text(encoding="utf-8").splitlines()
    src_sections = parse_sections(src_lines)

    duplicates = find_duplicates(src_sections)
    if duplicates:
        detail = "\n".join(f"  lines {l1} and {l2}: {k}" for k, l1, l2 in duplicates)
        raise click.UsageError(f"Duplicate tasks in {src_path.name} — fix before continuing:\n{detail}")

    section, line_idx, matched_text, _raw = find_task(src_sections, query, src_path)
    category = section["heading"]
    category_raw = section["heading_raw"]

    block_start, block_end = find_task_block(src_lines, section["end"], line_idx)
    moved_block = src_lines[block_start:block_end]

    # drop trailing blanks from the block so insertion doesn't double-space
    while moved_block and not moved_block[-1].strip():
        moved_block.pop()

    del src_lines[block_start:block_end]
    serialize_lines(remove_empty_sections(src_lines), src_path)

    dst_lines = dst_path.read_text(encoding="utf-8").splitlines() if dst_path.exists() else []
    serialize_lines(insert_into_destination(dst_lines, moved_block, category, category_raw), dst_path)

    return category, matched_text
|
|
417
|
+
|
|
418
|
+
|
|
419
|
+
def complete_task(src_path: Path, done_path: Path, query: str) -> tuple[str, str]:
    """
    Mark a task done: remove it (and its children) from *src_path* and log
    it to *done_path* with a timestamp.

    Returns (category, matched_task_text) on success; raises click.UsageError
    on any problem (missing source, duplicates, no/ambiguous match).
    """
    import click

    if not src_path.exists():
        raise click.UsageError(f"Source file does not exist: {src_path}")

    src_lines = src_path.read_text(encoding="utf-8").splitlines()
    src_sections = parse_sections(src_lines)

    duplicates = find_duplicates(src_sections)
    if duplicates:
        detail = "\n".join(f"  lines {l1} and {l2}: {k}" for k, l1, l2 in duplicates)
        raise click.UsageError(f"Duplicate tasks in {src_path.name} — fix before continuing:\n{detail}")

    section, line_idx, matched_text, _raw = find_task(src_sections, query, src_path)
    category = section["heading"]

    block_start, block_end = find_task_block(src_lines, section["end"], line_idx)
    del src_lines[block_start:block_end]

    # clear any blank lines orphaned where the block used to be
    while block_start < len(src_lines) and not src_lines[block_start].strip():
        del src_lines[block_start]

    serialize_lines(remove_empty_sections(src_lines), src_path)
    append_done(done_path, category, matched_text)

    return category, matched_text
|