taskflow-git 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- taskflow/__init__.py +3 -0
- taskflow/archive.py +135 -0
- taskflow/cli.py +550 -0
- taskflow/config.py +195 -0
- taskflow/reports.py +284 -0
- taskflow/setup_cmd.py +305 -0
- taskflow/tasklib.py +451 -0
- taskflow_git-0.3.0.dist-info/METADATA +448 -0
- taskflow_git-0.3.0.dist-info/RECORD +11 -0
- taskflow_git-0.3.0.dist-info/WHEEL +4 -0
- taskflow_git-0.3.0.dist-info/entry_points.txt +2 -0
taskflow/config.py
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
"""
|
|
2
|
+
config.py — project root discovery and .taskflow.yml loading.
|
|
3
|
+
|
|
4
|
+
Everything resolves relative to wherever .taskflow.yml lives. Walk up
|
|
5
|
+
from cwd until we find it, or fail clearly. This is the only place that
|
|
6
|
+
knows about the config structure — everything else asks this module.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Optional
|
|
14
|
+
|
|
15
|
+
import yaml
|
|
16
|
+
|
|
17
|
+
# name of the marker file whose containing directory is the project root
CONFIG_FILE = ".taskflow.yml"

# defaults used when states section is missing or partially specified
# (each state maps to a backlog markdown file plus a one-character display icon)
STATE_DEFAULTS: dict[str, dict] = {
    "now": {"file": "backlog/0-now.md", "icon": "▶"},
    "blocked": {"file": "backlog/1-blocked.md", "icon": "⊘"},
    "paused": {"file": "backlog/2-paused.md", "icon": "⏸"},
    "next": {"file": "backlog/3-next.md", "icon": "◈"},
    "later": {"file": "backlog/4-later.md", "icon": "◇"},
    "done": {"file": "backlog/done.md", "icon": "✓"},
}

# the order transitions are defined matters for help text and validation
# command name -> (from_state, to_state, verb)
# NOTE(review): the verb always mirrors the key here — presumably consumed
# by the CLI layer; confirm before relying on the third element.
WORKFLOW_TRANSITIONS: dict[str, tuple[str, str, str]] = {
    "promote": ("later", "next", "promote"),
    "start": ("next", "now", "start"),
    "block": ("now", "blocked", "block"),
    "unblock": ("blocked", "now", "unblock"),
    "pause": ("now", "paused", "pause"),
    "unpause": ("paused", "now", "unpause"),
    "backlog": ("now", "next", "backlog"),
}
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class TaskflowConfig:
    """
    Loaded config for a taskflow project. All path resolution goes through here
    so nothing else has to think about absolute vs relative.
    """

    def __init__(self, root: Path, data: dict) -> None:
        # root: directory containing .taskflow.yml; data: parsed YAML mapping
        self.root = root
        self._data = data

    # --- state resolution ---

    def state(self, name: str) -> dict:
        """Return merged config for a state — user values over defaults."""
        user = self._data.get("states", {}).get(name, {})
        default = STATE_DEFAULTS.get(name, {})
        return {
            # falsy user values ("" / None) fall back to the default too
            "file": user.get("file") or default.get("file", f"backlog/{name}.md"),
            "icon": user.get("icon") or default.get("icon", ""),
        }

    def state_path(self, name: str) -> Path:
        """Absolute path for a state file."""
        f = self.state(name)["file"]
        p = Path(f)
        # absolute path in config wins; relative resolves from root
        return p if p.is_absolute() else self.root / p

    def state_icon(self, name: str) -> str:
        """Icon string for a state ("" when none is configured)."""
        return self.state(name).get("icon", "")

    # --- categories ---

    @property
    def categories(self) -> list[dict]:
        """Raw category dicts from config ([] when the section is missing)."""
        return self._data.get("categories", [])

    def category_names(self) -> list[str]:
        """Canonical category names, in config order."""
        return [c["name"] for c in self.categories]

    def category_icon(self, name: str) -> str:
        """Icon for a category, matched case-insensitively; "" if unknown."""
        for cat in self.categories:
            if cat["name"].lower() == name.lower():
                return cat.get("icon", "")
        return ""

    def fuzzy_category(self, query: str) -> Optional[str]:
        """
        Case-insensitive substring match against category names and aliases.
        Returns the canonical name if exactly one match, None otherwise.
        Callers are responsible for surfacing ambiguity errors to the user.
        """
        q = query.strip().lower()
        matches = [c["name"] for c in self.categories if q in c["name"].lower()]
        if len(matches) == 1:
            return matches[0]
        # check aliases too — useful when migrating old category names.
        # BUGFIX: dedupe by canonical name, so a query that matches TWO
        # aliases of the SAME category still counts as one unambiguous hit
        # (the old code appended the name once per matching alias and then
        # reported ambiguity).
        if not matches:
            for cat in self.categories:
                if cat["name"] in matches:
                    continue
                if any(q in alias.lower() for alias in cat.get("aliases", [])):
                    matches.append(cat["name"])
        return matches[0] if len(matches) == 1 else None

    def category_aliases(self) -> dict[str, str]:
        """Flat alias → canonical name mapping for week-plan and reports."""
        out: dict[str, str] = {}
        for cat in self.categories:
            name = cat["name"]
            # the canonical name maps to itself so lookups need one dict only
            out[name] = name
            for alias in cat.get("aliases", []):
                out[alias] = name
        return out

    # --- phases ---

    @property
    def phases(self) -> list[dict]:
        """Raw phase dicts from config ([] when the section is missing)."""
        return self._data.get("phases", [])

    # --- settings ---

    @property
    def repo_name(self) -> str:
        """Repository display name; defaults to "taskflow"."""
        return self._data.get("settings", {}).get("repo_name", "taskflow")

    @property
    def weekly_plan_dir(self) -> Path:
        """Directory for weekly plan files (default: changelog/weekly)."""
        d = self._data.get("settings", {}).get("weekly_plan_dir", "changelog/weekly")
        p = Path(d)
        return p if p.is_absolute() else self.root / p

    @property
    def done_weeks(self) -> int:
        """How many weeks to keep in done.md before archiving older ones."""
        # int() so a YAML string value ("4") still works
        return int(self._data.get("settings", {}).get("done_weeks", 4))

    @property
    def archive_path(self) -> Path:
        """Where archived week files live. Defaults next to the done file."""
        default_archive = str(self.state_path("done").parent / "archive")
        d = self._data.get("settings", {}).get("archive_path", default_archive)
        p = Path(d)
        return p if p.is_absolute() else self.root / p

    # --- raw access for anything not covered above ---

    def get(self, key: str, default=None):
        """Raw top-level config access for keys without a dedicated accessor."""
        return self._data.get(key, default)
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def find_root(start: Optional[Path] = None) -> Optional[Path]:
    """
    Walk up from start (default cwd) looking for .taskflow.yml.
    Returns the directory containing it, or None if not found.
    """
    here = Path(start or os.getcwd()).resolve()
    # check the starting directory itself, then every ancestor up to /
    for candidate in [here, *here.parents]:
        if (candidate / CONFIG_FILE).exists():
            return candidate
    return None
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def load_config(root: Optional[Path] = None) -> TaskflowConfig:
    """
    Load config from root, or discover root from cwd if not given.
    Raises click.UsageError if no config found — callers get a clean error.
    """
    # local import keeps click out of this module's import-time dependencies
    import click

    if root is None:
        root = find_root()
    if root is None:
        raise click.UsageError("No .taskflow.yml found. Run `taskflow init` to set up a project.")

    config_path = root / CONFIG_FILE
    try:
        # an empty file parses to None — normalize to {}
        data = yaml.safe_load(config_path.read_text(encoding="utf-8")) or {}
    except Exception as e:
        # covers both unreadable files and malformed YAML
        raise click.UsageError(f"Could not read {config_path}: {e}")

    return TaskflowConfig(root, data)
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def load_config_or_none(root: Optional[Path] = None) -> Optional[TaskflowConfig]:
    """Same as load_config but returns None instead of raising."""
    try:
        cfg = load_config(root)
    except Exception:
        # deliberately broad: any load failure means "no usable config"
        cfg = None
    return cfg
|
taskflow/reports.py
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
1
|
+
"""
|
|
2
|
+
reports.py — pipeline and progress reports.
|
|
3
|
+
|
|
4
|
+
Both reports read from the backlog files and produce either a terminal
|
|
5
|
+
table or JSON. State file paths come from config so nothing here is
|
|
6
|
+
hardcoded. Category ordering follows the config, extras are alphabetical,
|
|
7
|
+
uncategorized is always last.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import json
|
|
13
|
+
import re
|
|
14
|
+
from collections import defaultdict
|
|
15
|
+
from datetime import date
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import Optional
|
|
18
|
+
|
|
19
|
+
from taskflow.config import TaskflowConfig
|
|
20
|
+
from taskflow.tasklib import WEEK_HEADING_RE
|
|
21
|
+
|
|
22
|
+
# "### <optional emoji> Name" heading — group(1) is the name without the icon
CATEGORY_RE = re.compile(r"^###\s+(?:[\U0001F300-\U0001FFFE\u2600-\u26FF\u2700-\u27BF]\s*)?(.*\S.*?)\s*$")
# any "## ..." heading EXCEPT the "## Week of ..." week markers
PHASE_RE = re.compile(r"^##\s+(?!Week of)(.*\S.*?)\s*$")
# horizontal rule — used below to reset the current category
DIVIDER_RE = re.compile(r"^---\s*$")
# completed-task log line: "[<timestamp>] done: (<category>) - <text>"
DONE_RE = re.compile(r"^\[[\d\s:\-]+\]\s+done:\s+\(([^)]+)\)\s+-\s+.+$")
# a markdown bullet ("-" or "*"); the regex itself tolerates leading whitespace
TASK_RE = re.compile(r"^\s*[*-]\s+\S")

# bucket label for tasks that appear before any category heading
UNCATEGORIZED = "Uncategorized"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _strip_emoji(name: str) -> str:
|
|
32
|
+
return re.sub(r"^[\U0001F300-\U0001FFFE\u2600-\u26FF\u2700-\u27BF]\s*", "", name).strip()
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def count_tasks_by_category(path: Path) -> dict[str, int]:
    """Count top-level (non-indented) tasks per category in a backlog file.

    Walks the file line by line, tracking the current "###" category heading;
    "---" dividers reset the category and "##" phase headings are skipped.
    Tasks seen before any heading count under UNCATEGORIZED.
    Returns {} when the file does not exist.
    """
    if not path.exists():
        return {}

    counts: dict[str, int] = defaultdict(int)
    current_cat: Optional[str] = None

    for line in path.read_text(encoding="utf-8").splitlines():
        if PHASE_RE.match(line):
            continue
        m = CATEGORY_RE.match(line)
        if m:
            current_cat = _strip_emoji(m.group(1))
            continue
        if DIVIDER_RE.match(line):
            current_cat = None
            continue
        # only count top-level tasks — subtasks start with whitespace.
        # BUGFIX: check the first character for ANY whitespace instead of
        # startswith(" "), so tab-indented subtasks are not miscounted as
        # top-level tasks.
        if TASK_RE.match(line) and not line[:1].isspace():
            counts[current_cat or UNCATEGORIZED] += 1

    return dict(counts)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def parse_done_by_week(done_path: Path) -> list[tuple[str, dict[str, int]]]:
    """
    Parse done.md into (week_date_str, {category: count}) tuples, newest first.
    The week date string comes from each "## Week of YYYY-MM-DD" heading;
    done entries before the first heading are ignored.
    """
    if not done_path.exists():
        return []

    weeks: list[tuple[str, dict[str, int]]] = []
    week: Optional[str] = None
    tally: dict[str, int] = defaultdict(int)

    def flush() -> None:
        # record the week being accumulated, if any
        if week is not None:
            weeks.append((week, dict(tally)))

    for raw in done_path.read_text(encoding="utf-8").splitlines():
        heading = WEEK_HEADING_RE.match(raw)
        if heading:
            flush()
            week = heading.group(1)
            tally = defaultdict(int)
            continue

        entry = DONE_RE.match(raw)
        if entry and week is not None:
            tally[_strip_emoji(entry.group(1).strip())] += 1

    flush()

    # newest first — ISO dates sort correctly as strings
    weeks.sort(key=lambda item: item[0], reverse=True)
    return weeks
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def ordered_categories(
    backlog_counts: dict[str, dict[str, int]],
    week_counts: list[tuple[str, dict[str, int]]],
    config: TaskflowConfig,
) -> list[str]:
    """
    Category order: config order first, then alphabetical extras, uncategorized last.
    """
    # every per-category count mapping we were handed, in one flat list
    all_count_maps = list(backlog_counts.values()) + [c for _, c in week_counts]

    # config order wins (dict.fromkeys dedupes while keeping first occurrence)
    from_config = list(dict.fromkeys(config.category_names()))
    known = set(from_config)

    # anything in the files that isn't in config — alphabetical
    extras = sorted(
        {cat for counts in all_count_maps for cat in counts
         if cat != UNCATEGORIZED and cat not in known}
    )

    result = from_config + extras

    # uncategorized always last, only if present
    if any(UNCATEGORIZED in counts for counts in all_count_maps):
        result.append(UNCATEGORIZED)

    return result
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
# ---------------------------------------------------------------------------
|
|
130
|
+
# Table renderer
|
|
131
|
+
# ---------------------------------------------------------------------------
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def _pad(s: str, width: int, align: str = "left") -> str:
|
|
135
|
+
s = str(s)
|
|
136
|
+
return s.rjust(width) if align == "right" else s.ljust(width)
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def render_table(headers: list[str], rows: list[list[str]], col_aligns: Optional[list[str]] = None) -> str:
    """
    Render an ASCII box table. Column widths fit the widest cell; rows shorter
    than the header count are padded with empty cells, extra cells are dropped.
    Headers are always left-aligned; data cells follow col_aligns.
    """
    ncols = len(headers)
    aligns = col_aligns or ["left"] * ncols

    # widest cell per column, seeded by the header text
    widths = [len(h) for h in headers]
    for row in rows:
        for idx in range(min(len(row), ncols)):
            widths[idx] = max(widths[idx], len(str(row[idx])))

    def border() -> str:
        return "+" + "+".join("-" * (w + 2) for w in widths) + "+"

    def fmt_row(values: list[str], use_aligns: bool) -> str:
        cells = []
        for idx in range(ncols):
            value = values[idx] if idx < len(values) else ""
            align = (aligns[idx] if idx < len(aligns) else "left") if use_aligns else "left"
            cells.append(f" {_pad(value, widths[idx], align)} ")
        return "|" + "|".join(cells) + "|"

    out = [border(), fmt_row(list(headers), False), border()]
    for row in rows:
        out.append(fmt_row(row, True))
    out.append(border())
    return "\n".join(out)
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def _fmt_week(d: str) -> str:
|
|
163
|
+
try:
|
|
164
|
+
dt = date.fromisoformat(d)
|
|
165
|
+
return dt.strftime("%b %-d")
|
|
166
|
+
except Exception:
|
|
167
|
+
return d
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def _cat_label(name: str, config: TaskflowConfig) -> str:
|
|
171
|
+
icon = config.category_icon(name)
|
|
172
|
+
return f"{icon} {name}".strip() if icon else name
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
# ---------------------------------------------------------------------------
|
|
176
|
+
# progress report
|
|
177
|
+
# ---------------------------------------------------------------------------
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
def report_progress(config: TaskflowConfig, as_json: bool = False, max_weeks: int = 5) -> str:
    """Progress report: current "now" counts next to per-week completion counts.

    Reads the "now" and "done" state files through the config, keeps at most
    max_weeks of history (newest first), and renders either a JSON document
    or a boxed terminal table.
    """
    active = count_tasks_by_category(config.state_path("now"))
    history = parse_done_by_week(config.state_path("done"))[:max_weeks]
    cats = ordered_categories({"now": active}, history, config)

    if as_json:
        json_rows = []
        for cat in cats:
            entry: dict = {"category": cat, "now": active.get(cat, 0)}
            # one key per week heading, in newest-first order
            entry.update({wd: counts.get(cat, 0) for wd, counts in history})
            json_rows.append(entry)
        return json.dumps(
            {
                "report": "progress",
                "columns": ["now"] + [wd for wd, _ in history],
                "rows": json_rows,
            },
            indent=2,
        )

    icon_now = config.state_icon("now")
    icon_done = config.state_icon("done")

    headers = ["Category", f"{icon_now} Now".strip()]
    headers += [f"{icon_done} {_fmt_week(wd)}".strip() for wd, _ in history]
    aligns = ["left"] + ["right"] * (1 + len(history))

    table_rows = []
    for cat in cats:
        cells = [_cat_label(cat, config), str(active.get(cat, 0))]
        cells += [str(counts.get(cat, 0)) for _, counts in history]
        table_rows.append(cells)

    table = render_table(headers, table_rows, aligns)
    return f"\n progress — now vs. completed by week\n\n{table}\n"
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
# ---------------------------------------------------------------------------
|
|
209
|
+
# pipeline report
|
|
210
|
+
# ---------------------------------------------------------------------------
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
def report_pipeline(config: TaskflowConfig, as_json: bool = False) -> str:
    """Pipeline report: per-category counts across every workflow state.

    Reads all five backlog state files plus done.md through the config,
    then renders either a JSON document or a boxed terminal table with one
    column per state and a final column for the newest week in done.md.
    """
    later_counts = count_tasks_by_category(config.state_path("later"))
    next_counts = count_tasks_by_category(config.state_path("next"))
    paused_counts = count_tasks_by_category(config.state_path("paused"))
    blocked_counts = count_tasks_by_category(config.state_path("blocked"))
    now_counts = count_tasks_by_category(config.state_path("now"))
    done_by_week = parse_done_by_week(config.state_path("done"))

    # parse_done_by_week returns newest first, so [0] is the current week
    this_week_counts = done_by_week[0][1] if done_by_week else {}
    this_week_date = done_by_week[0][0] if done_by_week else None

    all_counts = {
        "later": later_counts,
        "next": next_counts,
        "paused": paused_counts,
        "blocked": blocked_counts,
        "now": now_counts,
        "this_week": this_week_counts,
    }
    # week_counts arg is [] because this_week is already in all_counts above
    categories = ordered_categories(all_counts, [], config)

    if as_json:
        return json.dumps(
            {
                "report": "pipeline",
                "columns": ["later", "next", "paused", "blocked", "now", "this_week"],
                "this_week_heading": this_week_date,
                "rows": [
                    {
                        "category": cat,
                        "later": later_counts.get(cat, 0),
                        "next": next_counts.get(cat, 0),
                        "paused": paused_counts.get(cat, 0),
                        "blocked": blocked_counts.get(cat, 0),
                        "now": now_counts.get(cat, 0),
                        "this_week": this_week_counts.get(cat, 0),
                    }
                    for cat in categories
                ],
            },
            indent=2,
        )

    # state icon with a fallback glyph when the config provides none
    def si(key: str, fallback: str) -> str:
        return config.state_icon(key) or fallback

    # last column shows the current week's date when done.md has one
    week_label = f"{si('done', '✓')} {_fmt_week(this_week_date)}".strip() if this_week_date else "This Week"

    headers = [
        "Category",
        f"{si('later', '◇')} Later".strip(),
        f"{si('next', '◈')} Next".strip(),
        f"{si('paused', '⏸')} Paused".strip(),
        f"{si('blocked', '⊘')} Blocked".strip(),
        f"{si('now', '▶')} Now".strip(),
        week_label,
    ]
    col_aligns = ["left"] + ["right"] * 6
    rows = [
        [
            _cat_label(cat, config),
            str(later_counts.get(cat, 0)),
            str(next_counts.get(cat, 0)),
            str(paused_counts.get(cat, 0)),
            str(blocked_counts.get(cat, 0)),
            str(now_counts.get(cat, 0)),
            str(this_week_counts.get(cat, 0)),
        ]
        for cat in categories
    ]

    return f"\n pipeline — work in flight and completed this week\n\n{render_table(headers, rows, col_aligns)}\n"
|