tklr-dgraham 0.0.0rc22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tklr/__init__.py +0 -0
- tklr/cli/main.py +528 -0
- tklr/cli/migrate_etm_to_tklr.py +764 -0
- tklr/common.py +1296 -0
- tklr/controller.py +3635 -0
- tklr/item.py +4014 -0
- tklr/list_colors.py +234 -0
- tklr/model.py +4548 -0
- tklr/shared.py +739 -0
- tklr/sounds/alert.mp3 +0 -0
- tklr/tklr_env.py +493 -0
- tklr/use_system.py +64 -0
- tklr/versioning.py +21 -0
- tklr/view.py +3503 -0
- tklr/view_textual.css +296 -0
- tklr_dgraham-0.0.0rc22.dist-info/METADATA +814 -0
- tklr_dgraham-0.0.0rc22.dist-info/RECORD +20 -0
- tklr_dgraham-0.0.0rc22.dist-info/WHEEL +5 -0
- tklr_dgraham-0.0.0rc22.dist-info/entry_points.txt +2 -0
- tklr_dgraham-0.0.0rc22.dist-info/top_level.txt +1 -0
tklr/controller.py
ADDED
@@ -0,0 +1,3635 @@
from __future__ import annotations
from packaging.version import parse as parse_version
from importlib.metadata import version
from functools import lru_cache

# TODO: Keep the display part - the model part will be in model.py
from datetime import datetime, timedelta, date, timezone

# from logging import log
# from sre_compile import dis  # unused; looks like an accidental IDE auto-import
from rich.console import Console
from rich.table import Table
from rich.box import HEAVY_EDGE
from rich import style
from rich.columns import Columns
from rich.console import Group, group
from rich.panel import Panel
from rich.layout import Layout
from rich import print as rprint
import re
import inspect
from rich.theme import Theme
from rich import box
from rich.text import Text
from typing import List, Tuple, Optional, Dict, Any, Set
from bisect import bisect_left, bisect_right
from typing import Iterator, Callable

import string
import shutil
import subprocess
import shlex
import textwrap


import json
from typing import Literal
from .item import Item
from .model import DatabaseManager, UrgencyComputer
from .model import _fmt_naive
from .list_colors import css_named_colors
from .versioning import get_version

from collections import defaultdict
from dataclasses import dataclass, field
from pathlib import Path
from zoneinfo import ZoneInfo
from dateutil.rrule import rrulestr
from dateutil import tz

# import sqlite3
from .shared import (
    TYPE_TO_COLOR,
    REPEATING,
    log_msg,
    bug_msg,
    _to_local_naive,
    HRS_MINS,
    # ALERT_COMMANDS,
    dt_as_utc_timestamp,
    format_time_range,
    format_timedelta,
    datetime_from_timestamp,
    format_datetime,
    datetime_in_words,
    truncate_string,
    parse,
    fmt_local_compact,
    parse_local_compact,
    fmt_utc_z,
    # fmt_user,
    parse_utc_z,
)
from tklr.tklr_env import TklrEnvironment
from tklr.view import ChildBinRow, ReminderRow


VERSION = get_version()

ISO_Z = "%Y%m%dT%H%MZ"

type_color = css_named_colors["goldenrod"]
at_color = css_named_colors["goldenrod"]
am_color = css_named_colors["goldenrod"]
# type_color = css_named_colors["burlywood"]
# at_color = css_named_colors["burlywood"]
# am_color = css_named_colors["burlywood"]
label_color = css_named_colors["lightskyblue"]

# The overall background color of the app is #2e2e2e - set in view_textual.css
CORNSILK = "#FFF8DC"
DARK_GRAY = "#A9A9A9"
DARK_GREY = "#A9A9A9"  # same as DARK_GRAY
DARK_OLIVEGREEN = "#556B2F"
DARK_ORANGE = "#FF8C00"
DARK_SALMON = "#E9967A"
GOLD = "#FFD700"
GOLDENROD = "#DAA520"
KHAKI = "#F0E68C"
LAWN_GREEN = "#7CFC00"
LEMON_CHIFFON = "#FFFACD"
LIGHT_CORAL = "#F08080"
LIGHT_SKY_BLUE = "#87CEFA"
LIME_GREEN = "#32CD32"
ORANGE_RED = "#FF4500"
PALE_GREEN = "#98FB98"
PEACHPUFF = "#FFDAB9"
SALMON = "#FA8072"
SANDY_BROWN = "#F4A460"
SEA_GREEN = "#2E8B57"
SLATE_GREY = "#708090"
TOMATO = "#FF6347"

# Colors for UI elements
DAY_COLOR = LEMON_CHIFFON
FRAME_COLOR = KHAKI
HEADER_COLOR = LIGHT_SKY_BLUE
DIM_COLOR = DARK_GRAY
ALLDAY_COLOR = SANDY_BROWN
EVENT_COLOR = LIME_GREEN
NOTE_COLOR = DARK_SALMON
PASSED_EVENT = DARK_OLIVEGREEN
ACTIVE_EVENT = LAWN_GREEN
TASK_COLOR = LIGHT_SKY_BLUE
AVAILABLE_COLOR = LIGHT_SKY_BLUE
WAITING_COLOR = SLATE_GREY
FINISHED_COLOR = DARK_GREY
GOAL_COLOR = GOLDENROD
CHORE_COLOR = KHAKI
PASTDUE_COLOR = DARK_ORANGE
NOTICE_COLOR = GOLD
DRAFT_COLOR = ORANGE_RED
TODAY_COLOR = TOMATO
SELECTED_BACKGROUND = "#566573"
MATCH_COLOR = TOMATO
TITLE_COLOR = CORNSILK
# BUSY_COLOR = "#9acd32"  # shadowed by the reassignment on the next line
BUSY_COLOR = "#adff2f"
CONF_COLOR = TOMATO
BUSY_FRAME_COLOR = "#5d5d5d"

# This one appears to be a Rich/Textual style string
SELECTED_COLOR = "bold yellow"
# SLOT_HOURS = [0, 4, 8, 12, 16, 20, 24]
SLOT_HOURS = [0, 6, 12, 18, 24]
SLOT_MINUTES = [x * 60 for x in SLOT_HOURS]
BUSY = "■"  # U+25A0 this will be busy_bar busy and conflict character
FREE = "□"  # U+25A1 this will be busy_bar free character
ADAY = "━"  # U+2501 for all day events
NOTICE = "⋙"

SELECTED_COLOR = "yellow"  # overrides the "bold yellow" assignment above
# SELECTED_COLOR = "bold yellow"

HEADER_COLOR = LEMON_CHIFFON  # overrides the LIGHT_SKY_BLUE assignment above
HEADER_STYLE = f"bold {LEMON_CHIFFON}"
FIELD_COLOR = LIGHT_SKY_BLUE

ONEDAY = timedelta(days=1)
ONEWK = 7 * ONEDAY
alpha = [x for x in string.ascii_lowercase]

# TYPE_TO_COLOR = {
#     "*": EVENT_COLOR,  # event
#     "~": AVAILABLE_COLOR,  # available task
#     "x": FINISHED_COLOR,  # finished task
#     "^": AVAILABLE_COLOR,  # available task
#     "+": WAITING_COLOR,  # waiting task
#     "%": NOTE_COLOR,  # note
#     "<": PASTDUE_COLOR,  # past due task
#     ">": NOTICE_COLOR,  # begin
#     "!": GOAL_COLOR,  # draft
#     "?": DRAFT_COLOR,  # draft
# }
#


def _ensure_tokens_list(value):
    """Return a list[dict] for tokens whether DB returned JSON str or already-parsed list."""
    if value is None:
        return []
    if isinstance(value, (list, tuple)):
        return list(value)
    if isinstance(value, (bytes, bytearray)):
        value = value.decode("utf-8")
    if isinstance(value, str):
        return json.loads(value)
    # last resort: try to coerce
    return list(value)


# Stop at end-of-line or the start of another token-ish thing (@, &, +, %, - ...)
RE_BIN = re.compile(r"@b\s+([^\s].*?)\s*(?=$|[@&+%-])", re.IGNORECASE)


def extract_bin_slashpath(line: str) -> str | None:
    """
    Example:
        "Pick up pastry @b Lille\\France\\places @t 9a" -> "Lille\\France\\places"
    """
    m = RE_BIN.search(line or "")
    return m.group(1) if m else None


def format_tokens(tokens, width, highlight=True):
    if isinstance(tokens, str):
        try:
            tokens = json.loads(tokens)
        except Exception:
            pass

    output_lines = []
    current_line = ""

    def strip_rich(s: str) -> str:
        return re.sub(r"\[[^\]]+\]", "", s)

    def apply_highlight(line: str) -> str:
        if not highlight:
            return strip_rich(line)
        color = {"@": at_color, "&": am_color}
        return re.sub(
            r"(^|(?<=\s))([@&]\S\s)",
            lambda m: m.group(1)
            + f"[{color[m.group(2)[0]]}]{m.group(2)}[/{color[m.group(2)[0]]}]",
            line,
        )

    for t in tokens:
        token_text = (t.get("token") or "").rstrip("\n")
        ttype = t.get("t")
        k = t.get("k") or t.get("key")

        # ✅ PRESERVE itemtype char as the start of the line
        if ttype == "itemtype":
            if current_line:
                output_lines.append(current_line)
            current_line = token_text  # start new line with '*', '-', '~', '^', etc.
            continue

        # @d blocks: own paragraph, preserve newlines/indent
        if ttype == "@" and k == "d":
            if current_line:
                output_lines.append(current_line)
                current_line = ""
            # output_lines.append("")
            for line in token_text.splitlines():
                indent = len(line) - len(line.lstrip(" "))
                wrapped = textwrap.wrap(
                    line, width=width, subsequent_indent=" " * indent
                ) or [""]
                output_lines.extend(wrapped)
            # output_lines.append("")
            continue

        # optional special-case for @~
        if ttype == "@" and k == "~":
            # if current_line:
            output_lines.append(current_line)
            current_line = " "
            # if token_text:
            #     output_lines.append(token_text)
            # continue

        # normal tokens
        if not token_text:
            continue
        if current_line and len(current_line) + 1 + len(token_text) > width:
            output_lines.append(current_line)
            current_line = token_text
        else:
            current_line = current_line + " " + token_text

    if current_line:
        output_lines.append(current_line)

    return "\n".join(apply_highlight(line) for line in output_lines)


def wrap_preserve_newlines(text, width=70, initial_indent="", subsequent_indent=""):
    lines = text.splitlines()  # preserve \n boundaries
    wrapped_lines = [
        subline
        for line in lines
        for subline in textwrap.wrap(
            line,
            width=width,
            initial_indent=initial_indent,
            subsequent_indent=subsequent_indent,
        )
        or [""]
    ]
    return wrapped_lines
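
# Illustrative example (not in the original source): blank lines survive as
# empty strings instead of being swallowed by textwrap:
#     >>> wrap_preserve_newlines("alpha beta\n\ngamma", width=5)
#     ['alpha', 'beta', '', 'gamma']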


def format_rruleset_for_details(
    rruleset: str, width: int, subsequent_indent: int = 11
) -> str:
    """
    Wrap RDATE/EXDATE value lists on commas to fit `width`.
    Continuation lines are indented two spaces.
    When a wrap occurs, the comma stays at the end of the line.
    """

    def wrap_value_line(header: str, values_csv: str) -> list[str]:
        # indent = " " * (len(header) + 2)  # for colon and space
        indent = " " * 2
        tokens = [t.strip() for t in values_csv.split(",") if t.strip()]
        out_lines: list[str] = []
        cur = header  # start with e.g. "RDATE:"

        for i, tok in enumerate(tokens):
            sep = "," if i < len(tokens) - 1 else ""  # last token → no comma
            candidate = f"{cur}{tok}{sep}"

            if len(candidate) <= width:
                cur = candidate + " "
            else:
                # flush current line before adding token
                out_lines.append(cur.rstrip())
                cur = f"{indent}{tok}{sep} "
        if cur.strip():
            out_lines.append(cur.rstrip())
        return out_lines

    out: list[str] = []
    for line in (rruleset or "").splitlines():
        if ":" in line:
            prop, value = line.split(":", 1)
            prop_up = prop.upper()
            if prop_up.startswith("RDATE") or prop_up.startswith("EXDATE"):
                out.extend(wrap_value_line(f"{prop_up}:", value.strip()))
                continue
        out.append(line)
    # prepend = " " * (len("rruleset: ")) + "\n"
    bug_msg(f"{out = }")
    return "\n ".join(out)


def format_hours_mins(dt: datetime, mode: Literal["24", "12"]) -> str:
    """
    Format a datetime object as hours and minutes.
    """
    if dt.minute > 0:
        fmt = {
            "24": "%H:%M",
            "12": "%-I:%M%p",
        }
    else:
        fmt = {
            "24": "%H:%M",
            "12": "%-I%p",
        }

    if mode == "12":
        return dt.strftime(fmt[mode]).lower().rstrip("m")
    return f"{dt.strftime(fmt[mode])}"


def format_date_range(start_dt: datetime, end_dt: datetime):
    """
    Format a date range, taking care not to repeat the month or year
    unless the range spans two months or years.
    """
    same_year = start_dt.year == end_dt.year
    same_month = start_dt.month == end_dt.month
    # same_day = start_dt.day == end_dt.day
    if same_year and same_month:
        return f"{start_dt.strftime('%b %-d')} - {end_dt.strftime('%-d, %Y')}"
    elif same_year and not same_month:
        return f"{start_dt.strftime('%b %-d')} - {end_dt.strftime('%b %-d, %Y')}"
    else:
        return f"{start_dt.strftime('%b %-d, %Y')} - {end_dt.strftime('%b %-d, %Y')}"


def format_iso_week(monday_date: datetime) -> str:
    """
    Format an ISO week string, taking care not to repeat the month name
    unless the week spans two months.

    Args:
        monday_date (datetime): The date of the Monday of the week.

    Returns:
        str: Formatted string like 'Apr 17 - 23, 2023 #16'.
    """
    start_dt = monday_date.date()
    end_dt = start_dt + timedelta(days=6)
    iso_yr, iso_wk, _ = start_dt.isocalendar()
    yr_wk = f"{iso_yr} #{iso_wk}"
    same_month = start_dt.month == end_dt.month
    # same_day = start_dt.day == end_dt.day
    if same_month:
        return f"{start_dt.strftime('%b %-d')} - {end_dt.strftime('%-d')}, {yr_wk}"
    else:
        return f"{start_dt.strftime('%b %-d')} - {end_dt.strftime('%b %-d')}, {yr_wk}"


def get_previous_yrwk(year, week):
    """
    Get the previous (year, week) from an ISO calendar (year, week).
    """
    # Convert the ISO year and week to a Monday date
    monday_date = datetime.strptime(f"{year} {week} 1", "%G %V %u")
    # Subtract 1 week
    previous_monday = monday_date - timedelta(weeks=1)
    # Get the ISO year and week of the new date
    return previous_monday.isocalendar()[:2]


def get_next_yrwk(year, week):
    """
    Get the next (year, week) from an ISO calendar (year, week).
    """
    # Convert the ISO year and week to a Monday date
    monday_date = datetime.strptime(f"{year} {week} 1", "%G %V %u")
    # Add 1 week
    next_monday = monday_date + timedelta(weeks=1)
    # Get the ISO year and week of the new date
    return next_monday.isocalendar()[:2]
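
# Illustrative examples (not in the original source); ISO year boundaries are
# handled by strptime's %G/%V directives:
#     get_previous_yrwk(2024, 1)  -> (2023, 52)
#     get_next_yrwk(2023, 52)     -> (2024, 1)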


def calculate_4_week_start():
    """
    Calculate the starting date of the 4-week period, starting on a Monday.
    """
    today = datetime.now()
    iso_year, iso_week, iso_weekday = today.isocalendar()
    start_of_week = today - timedelta(days=iso_weekday - 1)
    weeks_into_cycle = (iso_week - 1) % 4
    return start_of_week - timedelta(weeks=weeks_into_cycle)
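
# Illustrative trace (not in the original source): if today falls in ISO week 7,
# weeks_into_cycle = (7 - 1) % 4 = 2, so the function returns the Monday of
# week 5, two weeks before the Monday of the current week.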


def decimal_to_base26(decimal_num):
    """
    Convert a decimal number to its equivalent base-26 string.

    Args:
        decimal_num (int): The decimal number to convert.

    Returns:
        str: The base-26 representation where 'a' = 0, 'b' = 1, ..., 'z' = 25.
    """
    if decimal_num < 0:
        raise ValueError("Decimal number must be non-negative.")

    if decimal_num == 0:
        return "a"  # Special case for zero

    base26 = ""
    while decimal_num > 0:
        digit = decimal_num % 26
        base26 = chr(digit + ord("a")) + base26  # Map digit to 'a'-'z'
        decimal_num //= 26

    return base26


def base26_to_decimal(tag: str) -> int:
    """Decode 'a'..'z' (a=0) for any length."""
    total = 0
    for ch in tag:
        total = total * 26 + (ord(ch) - ord("a"))
    return total


def indx_to_tag(indx: int, fill: int = 1):
    """
    Convert an index to a base-26 tag.
    """
    return decimal_to_base26(indx).rjust(fill, "a")
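
# Illustrative examples (not in the original source):
#     decimal_to_base26(0)    -> 'a'
#     decimal_to_base26(25)   -> 'z'
#     decimal_to_base26(26)   -> 'ba'
#     indx_to_tag(1, fill=2)  -> 'ab'
#     base26_to_decimal('ab') -> 1   # leading 'a' padding decodes as zero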


def event_tuple_to_minutes(start_dt: datetime, end_dt: datetime) -> Tuple[int, int]:
    """
    Convert event start and end datetimes to minutes since midnight.

    Args:
        start_dt (datetime): Event start datetime.
        end_dt (datetime): Event end datetime.

    Returns:
        Tuple(int, int): Tuple of start and end minutes since midnight.
    """
    start_minutes = start_dt.hour * 60 + start_dt.minute
    end_minutes = end_dt.hour * 60 + end_dt.minute if end_dt else start_minutes
    return (start_minutes, end_minutes)


def get_busy_bar(events):
    """
    Determine slot states (0: free, 1: busy, 2: conflict) for a list of events
    and render them as Rich-markup busy-bar fragments.

    Args:
        events (List[Tuple[int, int]]): List of event tuples (start, end) in
            minutes since midnight; (0, 0) marks an all-day event. Slot
            boundaries come from the module-level SLOT_MINUTES.

    Returns:
        Tuple[str, str]: (aday_str, busy_str) where aday_str flags all-day
        events and busy_str renders each slot as free, busy, or conflicting.
    """
    # Initialize slot usage as empty lists
    L = SLOT_MINUTES
    slot_events = [[] for _ in range(len(L) - 1)]
    allday = 0

    for b, e in events:
        # Find the start and end slots for the current event

        if b == 0 and e == 0:
            allday += 1
        if e == b and not allday:
            continue

        start_slot = bisect_left(L, b) - 1
        end_slot = bisect_left(L, e) - 1

        # Track the event in each affected slot
        for i in range(start_slot, min(len(slot_events), end_slot + 1)):
            if L[i + 1] > b and L[i] < e:  # Ensure overlap with the slot
                slot_events[i].append((b, e))

    # Determine the state of each slot
    slots_state = []
    for i, events_in_slot in enumerate(slot_events):
        if not events_in_slot:
            # No events in the slot
            slots_state.append(0)
        elif len(events_in_slot) == 1:
            # Only one event in the slot, so it's busy but not conflicting
            slots_state.append(1)
        else:
            # Check for overlaps to determine if there's a conflict
            events_in_slot.sort()  # Sort events by start time
            conflict = False
            for j in range(len(events_in_slot) - 1):
                _, end1 = events_in_slot[j]
                start2, _ = events_in_slot[j + 1]
                if start2 < end1:  # Overlap detected
                    conflict = True
                    break
            slots_state.append(2 if conflict else 1)

    busy_bar = ["_" for _ in range(len(slots_state))]
    have_busy = False
    for i in range(len(slots_state)):
        if slots_state[i] == 0:
            busy_bar[i] = f"[dim]{FREE}[/dim]"
        elif slots_state[i] == 1:
            have_busy = True
            busy_bar[i] = f"[{BUSY_COLOR}]{BUSY}[/{BUSY_COLOR}]"
        else:
            have_busy = True
            busy_bar[i] = f"[{CONF_COLOR}]{BUSY}[/{CONF_COLOR}]"

    # return slots_state, "".join(busy_bar)
    busy_str = (
        f"\n[{BUSY_FRAME_COLOR}]{''.join(busy_bar)}[/{BUSY_FRAME_COLOR}]"
        if have_busy
        else "\n"
    )

    aday_str = f"[{BUSY_COLOR}]{ADAY}[/{BUSY_COLOR}]" if allday > 0 else ""

    return aday_str, busy_str
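
# Illustrative example (not in the original source): with SLOT_HOURS
# [0, 6, 12, 18, 24], two overlapping morning events
#     get_busy_bar([(9 * 60, 10 * 60), (9 * 60 + 30, 11 * 60)])
# both land in the 06:00-12:00 slot and overlap, so that slot renders as a
# conflict (CONF_COLOR) while the other slots render as free; aday_str is
# empty because neither event is the all-day sentinel (0, 0).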


def ordinal(n: int) -> str:
    """Return ordinal representation of an integer (1 -> 1st)."""
    if 10 <= n % 100 <= 20:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return f"{n}{suffix}"


def set_anniversary(subject: str, start: date, instance: date, freq: str) -> str:
    """
    Replace {XXX} in subject with ordinal count of periods since start.
    freq ∈ {'y','m','w','d'}.
    """
    has_xxx = "{XXX}" in subject
    # bug_msg(f"set_anniversary {subject = }, {has_xxx = }")
    if not has_xxx:
        return subject

    if isinstance(start, datetime):
        start = start.date()
    if isinstance(instance, datetime):
        instance = instance.date()

    diff = instance - start
    if freq == "y":
        n = instance.year - start.year
    elif freq == "m":
        n = (instance.year - start.year) * 12 + (instance.month - start.month)
    elif freq == "w":
        n = diff.days // 7
    else:  # 'd'
        n = diff.days

    # n = max(n, 0) + 1  # treat first instance as "1st"
    n = max(n, 0)  # clamp negative offsets to zero

    new_subject = subject.replace("{XXX}", ordinal(n))
    # log_msg(f"{subject = }, {new_subject = }")
    return new_subject
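
# Illustrative example (not in the original source):
#     set_anniversary("Her {XXX} birthday", date(1990, 5, 1), date(2025, 5, 1), "y")
#         -> 'Her 35th birthday'
# At instance == start the count is 0, so the subject would read '0th'.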


# A page is (rows, tag_map)
#   rows: list[str] ready to render (header + content)
#   tag_map: { 'a': ('bin', bin_id) | ('reminder', (record_id, job_id)) }
Page = Tuple[List[str], Dict[str, Tuple[str, object]]]


def page_tagger(
    items: List[dict], page_size: int = 26
) -> List[Tuple[List[str], Dict[str, Tuple[int, int | None, int | None, Any]]]]:
    """
    Split 'items' into pages. Each item is a dict:
        { "record_id": int | None, "job_id": int | None, "text": str, ... }

    Returns a list of pages. Each page is a tuple:
        (
            page_rows: list[str],
            page_tag_map: dict[str -> (record_id, job_id|None, datetime_id|None, instance_ts|None)]
        )

    Rules:
    - Only record rows (record_id != None) receive single-letter tags 'a'..'z'.
    - Exactly `page_size` records are tagged per page (except the last page).
    - Headers (record_id is None) are kept in order.
    - If a header's block of records spans pages, the header is duplicated at the
      start of the next page with " (continued)" appended.
    """
    pages: List[Tuple[List[str], Dict[str, Tuple[int, int | None, int | None, Any]]]] = []

    page_rows: List[str] = []
    tag_map: Dict[str, Tuple[int, int | None, int | None, Any]] = {}
    tag_counter = 0  # number of record-tags on current page
    last_header_text = None  # text of the most recent header seen (if any)

    def finalize_page(new_page_rows=None):
        """Close out the current page and start a fresh one optionally seeded with
        new_page_rows (e.g., duplicated header)."""
        nonlocal page_rows, tag_map, tag_counter
        pages.append((page_rows, tag_map))
        page_rows = new_page_rows[:] if new_page_rows else []
        tag_map = {}
        tag_counter = 0

    for item in items:
        if not isinstance(item, dict):
            # bug_msg(f"error: {item} is not a dict")
            continue

        # header row
        if item.get("record_id") is None:
            hdr_text = item.get("text", "")
            last_header_text = hdr_text
            page_rows.append(hdr_text)
            # headers do not affect tag_counter
            continue

        # record row (taggable)
        if tag_counter >= page_size:
            # If we have a last_header_text, duplicate it at top of next page with continued.
            if last_header_text:
                continued_header = f"{last_header_text} (continued)"
                finalize_page(new_page_rows=[continued_header])
            else:
                finalize_page()

        tag = chr(ord("a") + tag_counter)

        # NEW: include datetime_id (or None) in the tag map
        record_id = item["record_id"]
        job_id = item.get("job_id", None)
        datetime_id = item.get("datetime_id", None)
        instance_ts = item.get("instance_ts", None)

        tag_map[tag] = (record_id, job_id, datetime_id, instance_ts)
        # bug_msg(f"{tag_map = }")

        # Display text unchanged
        page_rows.append(f" [dim]{tag}[/dim] {item.get('text', '')}")
        tag_counter += 1

    if page_rows or tag_map:
        pages.append((page_rows, tag_map))

    return pages
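
# Illustrative sketch (not in the original source): a header row interleaves
# with tagged record rows, and each tag maps to the 4-tuple
# (record_id, job_id, datetime_id, instance_ts):
#     >>> pages = page_tagger(
#     ...     [
#     ...         {"record_id": None, "text": "Today"},
#     ...         {"record_id": 7, "text": "water plants"},
#     ...     ]
#     ... )
#     >>> pages[0][1]
#     {'a': (7, None, None, None)}
# pages[0][0] holds 'Today' followed by the record row prefixed with its
# dimmed 'a' tag.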


@dataclass(frozen=True)
class _BackupInfo:
    path: Path
    day: date
    mtime: float


_BACKUP_RE = re.compile(r"^(\d{4})-(\d{2})-(\d{2})\.db$")


class Controller:
    def __init__(self, database_path: str, env: TklrEnvironment, reset: bool = False):
        # Initialize the database manager
        self.db_manager = DatabaseManager(database_path, env, reset=reset)

        self.tag_to_id = {}  # Maps tag numbers to event IDs
        self.list_tag_to_id: dict[str, dict[str, object]] = {}

        self.yrwk_to_pages = {}  # Maps (iso_year, iso_week) to week description
        self.rownum_to_yrwk = {}  # Maps row numbers to (iso_year, iso_week)
        self.start_date = calculate_4_week_start()
        self.selected_week = tuple(datetime.now().isocalendar()[:2])
        self.env = env
        self.AMPM = env.config.ui.ampm
        self._last_details_meta = None
        # self.afill_by_view: dict[str, int] = {}  # e.g. {"events": 1, "tasks": 2}
        # self.afill_by_week: dict[Tuple[int, int], int] = {}

        for view in ["next", "last", "find", "events", "tasks", "alerts"]:
            self.list_tag_to_id.setdefault(view, {})
        self.week_tag_to_id: dict[Tuple[int, int], dict[str, object]] = {}
        self.width = shutil.get_terminal_size()[0] - 2
        # self.afill = 1
        self._agenda_dirty = False
        self.ampm = False
        self.timefmt = "%H:%M"
        self.dayfirst = False
        self.yearfirst = True
        self.datefmt = "%Y-%m-%d"
        if self.env:
            self.ampm = self.env.config.ui.ampm
            self.timefmt = "%-I:%M%p" if self.ampm else "%H:%M"
            self.dayfirst = self.env.config.ui.dayfirst
            self.yearfirst = self.env.config.ui.yearfirst
            self.history_weight = self.env.config.ui.history_weight
        _yr = "%Y"
        _dm = "%d-%m" if self.dayfirst else "%m-%d"
        self.datefmt = f"{_yr}-{_dm}" if self.yearfirst else f"{_dm}-{_yr}"
        self.datetimefmt = f"{self.datefmt} {self.timefmt}"

    def fmt_user(self, dt: date | datetime) -> str:
        """
        User friendly formatting for dates and datetimes using env settings
        for ampm, yearfirst, dayfirst and two_digit year.
        """
        # Simple user-facing formatter; tweak to match your prefs
        if isinstance(dt, datetime):
            d = dt
            if d.tzinfo == tz.UTC and not getattr(self, "final", False):
                d = d.astimezone()
            return d.strftime(self.datetimefmt)
        if isinstance(dt, date):
            return dt.strftime(self.datefmt)
        raise ValueError(f"Error: {dt} must either be a date or datetime")
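
    # Illustrative examples (not in the original source), using the defaults
    # set in __init__ (yearfirst, 24-hour clock):
    #     self.fmt_user(datetime(2025, 4, 3, 14, 30)) -> '2025-04-03 14:30'
    #     self.fmt_user(date(2025, 4, 3))             -> '2025-04-03'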

    @property
    def root_id(self) -> int:
        """Return the id of the root bin, creating it if necessary."""
        self.db_manager.ensure_system_bins()
        self.db_manager.cursor.execute("SELECT id FROM Bins WHERE name = 'root'")
        row = self.db_manager.cursor.fetchone()
        if not row:
            raise RuntimeError(
                "Root bin not found — database not initialized correctly."
            )
        return row[0]

    def format_datetime(self, fmt_dt: str) -> str:
        return format_datetime(fmt_dt, self.AMPM)

    def datetime_in_words(self, fmt_dt: str) -> str:
        return datetime_in_words(fmt_dt, self.AMPM)

    def make_item(self, entry_str: str, final: bool = False) -> "Item":
        return Item(entry_str, final=final)  # or config=self.env.load_config()

    def add_item(self, item: Item) -> int:
        if item.itemtype in "~^x" and item.has_f:
            bug_msg(
                f"{item.itemtype = } {item.has_f = } {item.itemtype in '~^x' and item.has_f = }"
            )

        record_id = self.db_manager.add_item(item)

        if item.completions:
            bug_msg(f"{item.completions = }")
            self.db_manager.add_completion(record_id, item.completions)

        return record_id

    def apply_textual_edit(
        self,
        record_id: int,
        edit_fn: Callable[[str], str],
    ) -> bool:
        """
        Load the entry text for record_id, apply edit_fn(text) -> new_text,
        reparse/finalize, and save back to the same record.

        Returns True on success, False if parsing/finalizing fails.
        """
        # 1) Get current entry text for the whole record
        raw = self.get_entry_from_record(record_id)
        if not raw:
            return False

        new_raw = edit_fn(raw)
        if not new_raw or new_raw.strip() == raw.strip():
            # Nothing changed; treat as no-op
            return False

        from tklr.item import Item  # or your actual import

        # 2) Parse as a final Item
        item = Item(new_raw, controller=self)
        item.final = True
        item.parse_input(new_raw)

        if not getattr(item, "parse_ok", False):
            # You might want a log_msg here
            return False

        # 3) Finalize (jobs, rrules, etc.)
        item.finalize_record()

        if not getattr(item, "parse_ok", False):
            return False

        # 4) Save back into the same record (and regen DateTimes, Alerts, etc.)
        self.db_manager.save_record(item, record_id=record_id)
        # 🔁 NEW: record completion if one was produced
        completion = getattr(item, "completions", None)
        if completion:
            self.db_manager.add_completion(record_id, completion)

        return True
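
    # Illustrative usage (not in the original source), mirroring schedule_new
    # below: append a token to the raw entry and let the record be re-parsed,
    # finalized, and saved in one step.
    #     controller.apply_textual_edit(
    #         record_id,
    #         lambda text: text.rstrip() + " @+ 2025-04-10 9:00",
    #     )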

    # NOTE: shadowed by the later _instance_to_rdate_key definition below,
    # which returns a local-naive compact key; coming later in the class body,
    # that definition is the one bound on the class.
    def _instance_to_rdate_key(self, instance) -> str:
        """
        Convert an instance (string or datetime) into the canonical UTC-Z key
        used in @+ / @- tokens and RDATE/EXDATE, e.g. '20251119T133000Z'.
        """
        # If you already have a datetime, use it; otherwise parse your TEXT form.
        if isinstance(instance, datetime):
            dt = instance
        else:
            # Your existing helper that knows how to parse DateTimes table TEXT
            dt = parse(instance)

        # Make sure it’s timezone-aware; assume local zone if naive.
        if dt.tzinfo is None:
            dt = dt.astimezone()

        # dt_utc = dt.astimezone(tz.UTC)
        # return dt_utc.strftime("%Y%m%dT%H%MZ")
        return fmt_utc_z(dt)

    def apply_token_edit(
        self,
        record_id: int,
        edit_tokens_fn: Callable[[list[dict]], bool],
    ) -> bool:
        """
        Load tokens from Records.tokens for `record_id`, let `edit_tokens_fn`
        mutate them in place, then rebuild the entry string, re-parse/finalize
        via Item, and save back to the same record.

        Returns True if a change was applied and saved, False otherwise.
        """
        rec = self.db_manager.get_record_as_dictionary(record_id)
        if not rec:
            return False

        tokens_json = rec.get("tokens") or "[]"
        try:
            tokens: list[dict] = json.loads(tokens_json)
        except Exception as e:
            log_msg(f"apply_token_edit: bad tokens JSON for {record_id=}: {e}")
            return False

        # Let the caller mutate `tokens`; it should return True iff something changed.
        changed = edit_tokens_fn(tokens)
        if not changed:
            return False

        # Rebuild entry text from tokens.
        entry = " ".join(t.get("token", "").strip() for t in tokens if t.get("token"))
        if not entry.strip():
            # Don’t blow away the record with an empty line by accident.
            return False

        # Re-parse + finalize using Item so rruleset / jobs / flags / etc. stay consistent.
        item = Item(entry, controller=self)
        item.final = True
        item.parse_input(entry)
        if not getattr(item, "parse_ok", False):
            log_msg(f"apply_token_edit: parse failed for {record_id=}")
            return False

        item.finalize_record()
        if not getattr(item, "parse_ok", False):
            log_msg(f"apply_token_edit: finalize failed for {record_id=}")
            return False

        # This will also rebuild the tokens column from the new Item state.
        self.db_manager.save_record(item, record_id=record_id)

        # 🔁 NEW: record completion if one was produced
        # ("completions" matches add_item and apply_textual_edit; the original
        # read the singular "completion" here, which Item does not appear to set)
        completion = getattr(item, "completions", None)
        if completion:
            self.db_manager.add_completion(record_id, completion)

        return True
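
    # Illustrative usage (not in the original source): a token-level callback
    # that drops every @- exclusion token and reports whether anything changed.
    #     def drop_exclusions(tokens: list[dict]) -> bool:
    #         before = len(tokens)
    #         tokens[:] = [
    #             t for t in tokens if not (t.get("t") == "@" and t.get("k") == "-")
    #         ]
    #         return len(tokens) != before
    #
    #     controller.apply_token_edit(record_id, drop_exclusions)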

    def _dt_local_naive(self, dt: datetime) -> datetime:
        """Ensure a local-naive datetime for comparison."""
        if dt.tzinfo is None:
            return dt
        return dt.astimezone(tz.tzlocal()).replace(tzinfo=None)

    def _instance_local_from_text(self, text: str) -> datetime:
        """
        Convert a DateTimes TEXT (like 'YYYYMMDD', 'YYYYMMDDTHHMMSS', etc.)
        into a local-naive datetime using your existing parse helper.
        """
        dt = parse(text)  # you already have this
        return self._dt_local_naive(dt)

    def _is_s_plus_no_r(self, tokens: list[dict]) -> bool:
        has_s = any(t.get("t") == "@" and t.get("k") == "s" for t in tokens)
        has_plus = any(t.get("t") == "@" and t.get("k") == "+" for t in tokens)
        has_r = any(t.get("t") == "@" and t.get("k") == "r" for t in tokens)
        return has_s and has_plus and not has_r

    def _adjust_s_plus_from_rruleset(
        self,
        tokens: list[dict],
        rruleset: str,
        instance_text: str,
        mode: str,  # "one" or "this_and_future"
    ) -> bool:
        """
        Special-case handler for the pattern: @s + @+ but no @r.

        - rruleset: the record's rruleset string (RDATE-only in this pattern)
        - instance_text: the DateTimes.start_datetime TEXT of the chosen instance
        - mode:
            "one"             -> delete just this instance
            "this_and_future" -> delete this and all subsequent instances

        Returns True if tokens were modified.
        """
        if not rruleset:
            return False

        try:
            rule = rrulestr(rruleset)
        except Exception:
            return False

        occs = list(rule)
        if not occs:
            return False

        # Canonical local-naive for all instances
        from dateutil import tz

        def to_local_naive(dt: datetime) -> datetime:
            if dt.tzinfo is None:
                return dt
            return dt.astimezone(tz.tzlocal()).replace(tzinfo=None)

        instances_local = [to_local_naive(d) for d in occs]

        inst_local = self._instance_local_from_text(instance_text)

        # bug_msg(
        #     f"{inst_local = }, {instances_local = }, {inst_local in instances_local = }"
        # )
        if mode == "one":
            survivors = [d for d in instances_local if d != inst_local]
        elif mode == "this_and_future":
            survivors = [d for d in instances_local if d < inst_local]
        else:
            return False

        # If nothing left, clear @s/@+ schedule from tokens
        if not survivors:
            tokens[:] = [
                t
                for t in tokens
                if not (t.get("t") == "@" and t.get("k") in {"s", "+"})
            ]
            return True

        survivors.sort()
        new_s = survivors[0]
        plus_list = survivors[1:]

        # Drop existing @s/@+ tokens
        base = [
            t for t in tokens if not (t.get("t") == "@" and t.get("k") in {"s", "+"})
        ]

        # New @s
        base.append(
            {
                "token": f"@s {self.fmt_user(new_s)}",
                "t": "@",
                "k": "s",
            }
        )

        # New @+ if extras exist
        if plus_list:
            plus_str = ", ".join(self.fmt_user(d) for d in plus_list)
            base.append(
                {
                    "token": f"@+ {plus_str}",
                    "t": "@",
                    "k": "+",
                }
            )

        tokens[:] = base
        return True
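
    # Illustrative trace (not in the original source): with instances
    # [d1, d2, d3] and mode "one" targeting d2, survivors = [d1, d3], so the
    # rebuilt tokens carry "@s <d1>" and "@+ <d3>". With mode "this_and_future"
    # targeting d2, survivors = [d1] and no @+ token is emitted at all.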

    def _instance_is_from_rdate(self, rruleset_str: str, instance_dt: datetime) -> bool:
        """
        Check if a given instance datetime comes from an RDATE in the rruleset.

        Args:
            rruleset_str: The rruleset string from the database
            instance_dt: The instance datetime (already parsed, in UTC if aware)

        Returns:
            True if the instance is from an RDATE, False if from RRULE
        """
        if not rruleset_str:
            return False

        # Parse rruleset to extract RDATEs
        rdates = []
        for line in rruleset_str.splitlines():
            line = line.strip()
            if line.startswith("RDATE"):
                # Extract datetime values from RDATE line
                # Format: RDATE:20251106T1900Z or RDATE:20251106T1900Z,20251113T0200Z
                if ":" in line:
                    dates_part = line.split(":", 1)[1]
                    # Split by comma for multiple dates
                    for dt_str in dates_part.split(","):
                        dt_str = dt_str.strip()
                        if dt_str:
                            try:
                                # Parse the UTC datetime
                                if dt_str.endswith("Z"):
                                    # Aware UTC: YYYYMMDDTHHMMZ
                                    dt = datetime.strptime(dt_str[:-1], "%Y%m%dT%H%M")
                                    dt = dt.replace(tzinfo=timezone.utc)
                                elif "T" in dt_str:
                                    # Naive datetime: YYYYMMDDTHHMM
                                    dt = datetime.strptime(dt_str, "%Y%m%dT%H%M")
                                else:
                                    # Date only: YYYYMMDD
                                    dt = datetime.strptime(dt_str, "%Y%m%d")
                                rdates.append(dt)
                            except Exception:
                                continue

        # Convert instance_dt to UTC if aware, or leave naive
        if instance_dt.tzinfo is not None:
            instance_utc = instance_dt.astimezone(timezone.utc).replace(tzinfo=None)
        else:
            instance_utc = instance_dt.replace(tzinfo=None)

        # Check if instance matches any RDATE (compare without timezone for simplicity)
        for rdate in rdates:
            rdate_naive = rdate.replace(tzinfo=None) if rdate.tzinfo else rdate
            # Compare with minute precision (ignore seconds)
            if (instance_utc.year == rdate_naive.year and
                    instance_utc.month == rdate_naive.month and
                    instance_utc.day == rdate_naive.day and
                    instance_utc.hour == rdate_naive.hour and
                    instance_utc.minute == rdate_naive.minute):
                return True

        return False

    def _advance_s_to_next_rrule_instance(self, record_id: int, second_instance_text: str) -> bool:
        """
        Update @s to point to the second instance (advancing past the first RRULE instance).

        Args:
            record_id: The record ID
            second_instance_text: The compact local-naive datetime string of the second instance

        Returns:
            True if successful, False otherwise
        """
        try:
            # Parse the second instance
            second_dt = parse(second_instance_text)

            # Convert to local naive for user display
            from dateutil import tz
            if second_dt.tzinfo is not None:
                second_local = second_dt.astimezone(tz.tzlocal()).replace(tzinfo=None)
            else:
                second_local = second_dt

            # Format for user
            new_s_stamp = self.fmt_user(second_local)

            def edit_tokens(tokens: list[dict]) -> bool:
                # Find and update @s token
                for tok in tokens:
                    if tok.get("t") == "@" and tok.get("k") == "s":
                        tok["token"] = f"@s {new_s_stamp}"
                        return True
                return False

            return self.apply_token_edit(record_id, edit_tokens)

        except Exception as e:
            bug_msg(f"Error advancing @s: {e}")
            return False

    # NOTE: this later definition replaces the UTC-Z variant of the same name
    # defined above; only this local-naive version is bound on the class.
    def _instance_to_rdate_key(self, instance_text: str) -> str:
        """
        Normalize a DateTimes TEXT value into the key format used in RDATE/EXDATE.

        - Date-only -> 'YYYYMMDD'
        - Datetime  -> 'YYYYMMDDTHHMM' (local-naive, no 'Z')
        """
        s = (instance_text or "").strip()
        if not s:
            raise ValueError("empty instance_text")

        # Fast path: already compact date-only 'YYYYMMDD'
        if len(s) == 8 and s.isdigit():
            return s

        # Use your custom parse() helper (respects yearfirst/dayfirst)
        dt = parse(s)  # from your helpers module

        if isinstance(dt, date) and not isinstance(dt, datetime):
            # Pure date -> 'YYYYMMDD'
            return dt.strftime("%Y%m%d")

        if isinstance(dt, datetime):
            # Drop seconds if present, match your RDATE minute granularity
            return dt.strftime("%Y%m%dT%H%M")

        # Fallback (shouldn't normally happen)
        raise ValueError(f"Cannot normalize instance_text {instance_text!r}")

    def _remove_instance_from_plus_tokens(
        self, tokens: list[dict], instance_text: str
    ) -> bool:
        """
        Remove the given instance from any @+ tokens by matching the normalized
        key from _instance_to_rdate_key. Returns True if something was removed.
        """
        target = self._instance_to_rdate_key(instance_text)

        removed = False
        new_tokens: list[dict] = []

        for tok in tokens:
            if tok.get("t") == "@" and tok.get("k") == "+":
                raw = tok.get("token", "")
                body = raw[2:].strip() if raw.startswith("@+") else raw.strip()
                parts = [p.strip() for p in body.split(",") if p.strip()]
                if not parts:
                    continue

                filtered = [p for p in parts if p != target]
                if len(filtered) != len(parts):
                    removed = True

                if filtered:
                    new_tok = dict(tok)
                    new_tok["token"] = "@+ " + ", ".join(filtered)
                    new_tokens.append(new_tok)
                else:
                    # @+ now empty → drop the token entirely
                    continue
            else:
                new_tokens.append(tok)

        tokens[:] = new_tokens
        return removed
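
    # Illustrative example (not in the original source), using the compact keys
    # produced by _instance_to_rdate_key:
    #     before: {"token": "@+ 20251119T1330, 20251126T1330", "t": "@", "k": "+"}
    #     after removing "20251119T1330":
    #             {"token": "@+ 20251126T1330", "t": "@", "k": "+"}
    # Removing the last surviving entry drops the @+ token entirely.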

    def _add_finish_to_job(self, record_id: int, job_id: int, stamp: str) -> bool:
        """
        Insert or update an &f token for the given job_id on a project record.

        - job_id is the 1-based index of @~ tokens in the token list.
        - We locate the N-th @~ token, then:
            * if that job already has an &f token in its &-cluster, we replace it
            * otherwise we append a new &f <stamp> at the end of that cluster

        Returns True if any change was made; False if job_id not found.
        """

        def edit_tokens(tokens: List[Dict]) -> bool:
            job_index = 0

            i = 0
            while i < len(tokens):
                tok = tokens[i]

                # Look for @~ job tokens
                if tok.get("t") == "@" and tok.get("k") == "~":
                    job_index += 1

                    if job_index == job_id:
                        # We are at the job_id-th job's @~ token.
                        # Walk forward through its &-cluster.
                        j = i + 1
                        f_index = None

                        while j < len(tokens) and tokens[j].get("t") == "&":
                            if tokens[j].get("k") == "f":
                                f_index = j
                            j += 1

                        if f_index is not None:
                            # Update existing &f
                            tokens[f_index]["token"] = f"&f {stamp}"
                        else:
                            # Insert new &f at the end of the job's &-cluster
                            tokens.insert(
                                j,
                                {
                                    "token": f"&f {stamp}",
                                    "t": "&",
                                    "k": "f",
                                },
                            )

                        return True  # we made a change

                i += 1

            # job_id > number of jobs: nothing changed
            return False

        return self.apply_token_edit(record_id, edit_tokens)
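
    # Illustrative sketch (not in the original source): finishing job 2 of a
    # project with two "@~" jobs appends an &f token to the second job's
    # &-cluster:
    #     _add_finish_to_job(rid, 2, "2025-04-03 14:30")
    # inserts {"token": "&f 2025-04-03 14:30", "t": "&", "k": "f"} right after
    # the second @~ token's existing & tokens, or replaces any &f already there.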

    def finish_task(self, record_id: int, job_id: int | None, when: datetime) -> bool:
        """
        Mark a task (or project job) finished at `when`.

        - Job (job_id not None): add or update &f <stamp> on that job.
        - Plain task with 0 or 1 upcoming instances: consume any remaining
          instance and append @f <stamp>.
        - Plain task with 2+ instances: consume only the next one — removing
          it from @+ if it came from an RDATE, otherwise advancing @s to the
          following RRULE instance.
        """
        stamp = self.fmt_user(when)

        # ---- Case 1: project job ----
        if job_id is not None:
            return self._add_finish_to_job(record_id, job_id, stamp)

        # ---- Case 2: plain task (no job_id) ----
        upcoming = self.db_manager.get_next_start_datetimes_for_record(record_id) or []
        bug_msg(f"{upcoming = }")

        # Case 2a: No instances or only 1 instance → append @f
        if len(upcoming) <= 1:
            if upcoming:
                instance_text = upcoming[0]
                self.delete_instance(record_id, instance_text)

            def edit_with_finish(text: str) -> str:
                return text.rstrip() + f" @f {stamp}"

            return self.apply_textual_edit(record_id, edit_with_finish)

        # Case 2b: 2+ instances → handle based on whether first is RDATE or RRULE
        first_instance_text = upcoming[0]
        second_instance_text = upcoming[1] if len(upcoming) > 1 else None

        # Get the record to access rruleset
        rec = self.db_manager.get_record_as_dictionary(record_id)
        if not rec:
            return False

        rruleset_str = rec.get("rruleset") or ""
        if not rruleset_str:
            # No rruleset, just delete first instance
            return self.delete_instance(record_id, first_instance_text)

        # Parse the first instance to get UTC datetime
        try:
            first_dt = parse(first_instance_text)
        except Exception:
            return False

        # Check if first instance comes from RDATE
        is_from_rdate = self._instance_is_from_rdate(rruleset_str, first_dt)

        if is_from_rdate:
            # First instance is from @+ (RDATE) → remove it from @+
            return self.delete_instance(record_id, first_instance_text)
        else:
            # First instance is from @r (RRULE) → update @s to second instance
            if not second_instance_text:
                # Safety: shouldn't happen, but handle gracefully
                return self.delete_instance(record_id, first_instance_text)

            return self._advance_s_to_next_rrule_instance(record_id, second_instance_text)
|
|
1436
|
+
|
|
1437
|
+
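    # Hedged summary of the plain-task branches above:
    #   0 upcoming instances   -> append "@f <stamp>" to the entry text
    #   1 upcoming instance    -> consume it via delete_instance, then append @f
    #   2+ and first is RDATE  -> drop just that @+ entry
    #   2+ and first is RRULE  -> advance @s to the second upcoming instance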
    def schedule_new(self, record_id: int, job_id: int | None, when: datetime) -> bool:
        stamp = self.fmt_user(when)

        def edit(text: str) -> str:
            return text.rstrip() + f" @+ {stamp}"

        return self.apply_textual_edit(record_id, edit)

    def reschedule_instance(
        self,
        record_id: int,
        old_instance_text: str,
        new_when: datetime,
    ) -> bool:
        new_stamp = self.fmt_user(new_when)

        def edit(text: str) -> str:
            # Add @- old_instance and @+ new_instance
            return text.rstrip() + f" @- {old_instance_text} @+ {new_stamp}"

        return self.apply_textual_edit(record_id, edit)

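    # Hedged example of the textual edit above (entry text and stamp format
    # are illustrative): rescheduling an instance from 2025-06-01 to 2025-06-02
    # turns
    #   "~ water plants @s 2025-06-01 9:00am @r d"
    # into
    #   "~ water plants @s 2025-06-01 9:00am @r d @- 2025-06-01 9:00am @+ 2025-06-02 9:00am"
    # i.e. the old instance is excluded with @- and the replacement added with @+.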
    # def delete_instance(
    #     self,
    #     record_id: int,
    #     instance_text: str,
    # ) -> bool:
    #     """
    #     For a single instance:
    #
    #     Special case:
    #     - If the record uses @s + @+ with no @r, we:
    #       * Compute the full instance list from rruleset.
    #       * Drop just this instance.
    #       * Rebuild @s and @+ from the survivors.
    #
    #     General case:
    #     - If the instance appears in an @+ list, remove it from that list.
    #     - Otherwise, append an @- <instance_text> exclusion token (in entry format).
    #     """
    #
    #     rec = self.db_manager.get_record_as_dictionary(record_id)
    #     if not rec:
    #         return False
    #
    #     rruleset = rec.get("rruleset") or ""
    #
    #     def edit_tokens(tokens: list[dict]) -> bool:
    #         # 1) Special case: @s + @+ but no @r
    #         if self._is_s_plus_no_r(tokens) and rruleset:
    #             changed = self._adjust_s_plus_from_rruleset(
    #                 tokens,
    #                 rruleset=rruleset,
    #                 instance_text=instance_text,
    #                 mode="one",
    #             )
    #             if changed:
    #                 return True
    #             # fall through to general path if nothing changed for some reason
    #
    #         changed = False
    #
    #         # 2) General path: try to remove from @+ using UTC-Z
    #         removed = self._remove_instance_from_plus_tokens(tokens, instance_text)
    #         changed = changed or removed
    #
    #         # 3) If not present in @+, fall back to @- <entry-style-datetime>
    #         if not removed:
    #             inst_dt = parse(instance_text)
    #             entry_style = self.fmt_user(inst_dt)
    #             tokens.append(
    #                 {
    #                     "token": f"@- {entry_style}",
    #                     "t": "@",
    #                     "k": "-",
    #                 }
    #             )
    #             changed = True
    #
    #         return changed
    #
    #     return self.apply_token_edit(record_id, edit_tokens)
    #
    # def delete_this_and_future(
    #     self,
    #     record_id: int,
    #     instance_text: str,
    # ) -> bool:
    #     """
    #     instance_text is the TEXT of the selected instance's start_datetime.
    #
    #     Special case (@s + @+ with no @r):
    #     - Use rruleset to get the full instance list.
    #     - Remove this instance and all subsequent ones.
    #     - Rebuild @s and @+ from survivors (or clear schedule if none).
    #
    #     General case:
    #     - Remove this instance from @+ if present.
    #     - Append &u <cutoff_stamp> where cutoff_stamp is (instance_dt - 1s)
    #       in entry format.
    #     """
    #
    #     rec = self.db_manager.get_record_as_dictionary(record_id)
    #     if not rec:
    #         return False
    #
    #     rruleset = rec.get("rruleset") or ""
    #
    #     inst_dt = parse(instance_text)
    #     cutoff = inst_dt - timedelta(seconds=1)
    #     cutoff_stamp = self.fmt_user(cutoff)
    #
    #     def edit_tokens(tokens: list[dict]) -> bool:
    #         # 1) Special case: @s + @+ but no @r
    #         if self._is_s_plus_no_r(tokens) and rruleset:
    #             changed = self._adjust_s_plus_from_rruleset(
    #                 tokens,
    #                 rruleset=rruleset,
    #                 instance_text=instance_text,
    #                 mode="this_and_future",
    #             )
    #             if changed:
    #                 return True
    #             # fall through to general path if nothing changed
    #
    #         changed = False
    #
    #         # 2) General path: clean explicit @+ for this instance (UTC-Z)
    #         removed = self._remove_instance_from_plus_tokens(tokens, instance_text)
    #         changed = changed or removed
    #
    #         # 3) Always append &u cutoff for this-and-future semantics
    #         tokens.append(
    #             {
    #                 "token": f"&u {cutoff_stamp}",
    #                 "t": "&",
    #                 "k": "u",
    #             }
    #         )
    #         changed = True
    #
    #         return changed
    #
    #     return self.apply_token_edit(record_id, edit_tokens)

    def _is_in_plus_list(self, tokens: list[dict], dt: datetime) -> bool:
        """
        Return True if dt (local-naive) matches one of the entries in any @+ token.
        """
        local_dt = _to_local_naive(dt)
        fmt_str = local_dt.strftime("%Y%m%dT%H%M")
        for tok in tokens:
            if tok.get("k") == "+":
                body = tok["token"][2:].strip()
                for part in body.split(","):
                    part = part.strip()
                    try:
                        part_dt = parse(part)
                    except Exception:
                        continue
                    if _to_local_naive(part_dt).strftime("%Y%m%dT%H%M") == fmt_str:
                        return True
        return False

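    # Matching above is at minute resolution: both sides are normalized with
    # _to_local_naive(...).strftime("%Y%m%dT%H%M") before comparison, so an
    # @+ entry stored as "20250601T0900" matches a dt of 2025-06-01 09:00
    # regardless of seconds or of how the entry was originally written
    # (illustrative values; accepted input forms depend on parse()).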
    def delete_instance(self, record_id: int, instance_text: str) -> bool:
        """
        Delete a specific instance:
        - If instance comes from @+ list, remove it from that list.
        - Otherwise append @- for that instance.
        """

        def edit_tokens(tokens: list[dict]) -> bool:
            try:
                inst_dt = parse(instance_text)
            except Exception:
                return False
            inst_local = _to_local_naive(inst_dt)

            if self._is_in_plus_list(tokens, inst_dt):
                # remove from @+
                tok_local_str = inst_local.strftime("%Y%m%dT%H%M")
                return self._remove_instance_from_plus_tokens(tokens, tok_local_str)
            else:
                # append exclusion
                tok_local_str = inst_local.strftime("%Y%m%dT%H%M")
                tokens.append({"token": f"@- {tok_local_str}", "t": "@", "k": "-"})
                return True

        return self.apply_token_edit(record_id, edit_tokens)

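    # Hedged example: with instance_text parsing to 2025-06-01 09:00 local,
    # either the matching "20250601T0900" entry is removed from an @+ token,
    # or this exclusion is appended to the token list:
    #   {"token": "@- 20250601T0900", "t": "@", "k": "-"}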
    def delete_this_and_future(self, record_id: int, instance_text: str) -> bool:
        """
        Delete this instance and all subsequent ones:
        - If the instance is in @+ list, remove it.
        - Always append &u cutoff (instance minus 1 second).
        """
        try:
            dt = parse(instance_text)
        except Exception:
            return False
        inst_local = _to_local_naive(dt)
        cutoff = inst_local - timedelta(seconds=1)
        cutoff_stamp = cutoff.strftime("%Y%m%dT%H%M")

        def edit_tokens(tokens: list[dict]) -> bool:
            changed = False
            if self._is_in_plus_list(tokens, dt):
                tok_local_str = inst_local.strftime("%Y%m%dT%H%M")
                removed = self._remove_instance_from_plus_tokens(tokens, tok_local_str)
                changed = changed or removed
            tokens.append({"token": f"&u {cutoff_stamp}", "t": "&", "k": "u"})
            return True

        return self.apply_token_edit(record_id, edit_tokens)

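    # Hedged example of the cutoff arithmetic above: an instance at
    # 2025-06-01 09:00 gives cutoff = 08:59:59, which "%Y%m%dT%H%M" truncates
    # to "20250601T0859", so the appended token is
    #   {"token": "&u 20250601T0859", "t": "&", "k": "u"}
    # bounding the recurrence just before the deleted instance.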
    def delete_record(self, record_id: int) -> None:
        # For jobs you may eventually allow “delete just this job”
        # but right now delete whole reminder:
        self.db_manager.delete_item(record_id)

    def apply_anniversary_if_needed(
        self, record_id: int, subject: str, instance: datetime
    ) -> str:
        """
        If this record is a recurring event with a {XXX} placeholder,
        replace it with the ordinal number of this instance.
        """
        if "{XXX}" not in subject:
            return subject

        row = self.db_manager.get_record(record_id)
        if not row:
            return subject

        # The rruleset text is column 4 (based on your tuple)
        rruleset = row[4]
        if not rruleset:
            return subject

        # --- Extract DTSTART and FREQ ---
        start_dt = None
        freq = None

        for line in rruleset.splitlines():
            if line.startswith("DTSTART"):
                # Handles both VALUE=DATE and VALUE=DATETIME
                if ":" in line:
                    val = line.split(":")[1].strip()
                    try:
                        if "T" in val:
                            start_dt = datetime.strptime(val, "%Y%m%dT%H%M%S")
                        else:
                            start_dt = datetime.strptime(val, "%Y%m%d")
                    except Exception:
                        pass
            elif line.startswith("RRULE"):
                # look for FREQ=YEARLY etc.
                parts = line.split(":")[-1].split(";")
                for p in parts:
                    if p.startswith("FREQ="):
                        freq_val = p.split("=")[1].strip().lower()
                        freq = {
                            "daily": "d",
                            "weekly": "w",
                            "monthly": "m",
                            "yearly": "y",
                        }.get(freq_val)
                        break

        if not start_dt or not freq:
            return subject

        # --- Compute ordinal replacement ---
        return set_anniversary(subject, start_dt, instance, freq)

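    # Hedged example of the extraction above: given rruleset text like
    #   DTSTART:19600601T090000
    #   RRULE:FREQ=YEARLY
    # the loop yields start_dt = datetime(1960, 6, 1, 9, 0) and freq = "y",
    # and a subject such as "Sam's {XXX} birthday" is handed to
    # set_anniversary() to substitute this instance's ordinal for {XXX}.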
    def apply_flags(self, record_id: int, subject: str) -> str:
        """
        Append any flags from Records.flags (e.g. 𝕒𝕘𝕠𝕣) to the given subject.
        """
        row = self.db_manager.get_record_as_dictionary(record_id)
        if not row:
            return subject

        # Guard on the raw value: an f-string like f" {row.get('flags')}" is
        # always truthy, so the emptiness test must come first.
        raw_flags = row.get("flags") or ""
        flags = f" {raw_flags}" if raw_flags else ""
        # log_msg(f"{row = }, {flags = }")
        if not flags:
            return subject

        return subject + flags

    def get_name_to_binpath(self) -> Dict[str, str]:
        # leaf_lower -> "Leaf/Parent/.../Root"
        return self.db_manager.bin_cache.name_to_binpath()

    # def get_tag_iterator(self, view: str, count: int) -> Iterator[str]:
    #     if view not in self.afill_by_view:
    #         self.set_afill([None] * count, view)
    #     fill = self.afill_by_view[view]
    #     for i in range(count):
    #         yield indx_to_tag(i, fill)

    # --- replace your set_afill with this per-view version ---
    # def set_afill(self, details: list, view: str):
    #     n = len(details)
    #     fill = 1 if n <= 26 else 2 if n <= 26 * 26 else 3
    #     log_msg(f"{view = }, {n = }, {fill = }, {details = }")
    #     self.afill_by_view[view] = fill

    def add_tag(
        self, view: str, indx: int, record_id: int, *, job_id: int | None = None
    ):
        """Produce the next tag (with the pre-chosen width) and register it."""
        fill = self.afill_by_view[view]
        tag = indx_to_tag(indx, fill)  # uses your existing function
        tag_fmt = f" [dim]{tag}[/dim] "
        self.list_tag_to_id.setdefault(view, {})[tag] = {
            "record_id": record_id,
            "job_id": job_id,
        }
        return tag_fmt, indx + 1

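    # Hedged usage sketch: assuming indx_to_tag(0, 1) yields "a" and
    # indx_to_tag(1, 1) yields "b" (the a..z / aa..zz scheme implied by the
    # commented set_afill above), two successive calls register
    #   list_tag_to_id["agenda"]["a"] = {"record_id": 7, "job_id": None}
    #   list_tag_to_id["agenda"]["b"] = {"record_id": 9, "job_id": 2}
    # with the returned index threaded into the next call:
    #   tag_fmt, indx = self.add_tag("agenda", 0, 7)
    #   tag_fmt, indx = self.add_tag("agenda", indx, 9, job_id=2)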
    # def set_week_afill(self, details: list, yr_wk: Tuple[int, int]):
    #     n = len(details)
    #     fill = 1 if n <= 26 else 2 if n <= 26 * 26 else 3
    #     log_msg(f"{yr_wk = }, {n = }, {fill = }")
    #     # self.afill_by_week[yr_wk] = fill

    def add_week_tag(
        self,
        yr_wk: Tuple[int, int],
        indx: int,
        record_id: int,
        job_id: int | None = None,
    ):
        """Produce the next tag (with the pre-chosen width) and register it."""
        fill = self.afill_by_week[yr_wk]
        tag = indx_to_tag(indx, fill)  # uses your existing function
        tag_fmt = f" [dim]{tag}[/dim] "
        self.week_tag_to_id.setdefault(yr_wk, {})[tag] = {
            "record_id": record_id,
            "job_id": job_id,
        }
        return tag_fmt, indx + 1

    def mark_agenda_dirty(self) -> None:
        self._agenda_dirty = True

    def consume_agenda_dirty(self) -> bool:
        was_dirty = self._agenda_dirty
        self._agenda_dirty = False
        return was_dirty

    def toggle_pin(self, record_id: int) -> bool:
        self.db_manager.toggle_pinned(record_id)
        self.mark_agenda_dirty()  # ← mark dirty every time
        return self.db_manager.is_pinned(record_id)

    def get_last_details_meta(self):
        return self._last_details_meta

    def toggle_pinned(self, record_id: int):
        self.db_manager.toggle_pinned(record_id)
        log_msg(f"{record_id = }, {self.db_manager.is_pinned(record_id) = }")
        return self.db_manager.is_pinned(record_id)

    def get_entry(self, record_id, job_id=None, instance=None):
        lines = []
        result = self.db_manager.get_tokens(record_id)
        # log_msg(f"{result = }")

        tokens, rruleset, created, modified = result[0]

        entry = format_tokens(tokens, self.width)
        entry = f"[bold {type_color}]{entry[0]}[/bold {type_color}]{entry[1:]}"

        log_msg(f"{rruleset = }")
        # rruleset = f"\n{11 * ' '}".join(rruleset.splitlines())

        instance_line = (
            f"[{label_color}]instance:[/{label_color}] {instance}" if instance else ""
        )
        rr_line = ""
        if rruleset:
            formatted_rr = format_rruleset_for_details(
                rruleset, width=self.width - 10, subsequent_indent=9
            )
            rr_line = f"[{label_color}]rruleset:[/{label_color}] {formatted_rr}"

        job = (
            f" [{label_color}]job_id:[/{label_color}] [bold]{job_id}[/bold]"
            if job_id
            else ""
        )
        lines.extend(
            [
                entry,
                " ",
                instance_line,
                rr_line,
                f"[{label_color}]id/cr/md:[/{label_color}] {record_id}{job} / {created} / {modified}",
            ]
        )

        return lines

    def update_record_from_item(self, item) -> None:
        self.cursor.execute(
            """
            UPDATE Records
            SET itemtype=?, subject=?, description=?, rruleset=?, timezone=?,
                extent=?, alerts=?, notice=?, context=?, jobs=?, tags=?,
                priority=?, tokens=?, modified=?
            WHERE id=?
            """,
            (
                item.itemtype,
                item.subject,
                item.description,
                item.rruleset,
                item.timezone or "",
                item.extent or "",
                json.dumps(item.alerts or []),
                item.notice or "",
                item.context or "",
                json.dumps(item.jobs or None),
                ";".join(item.tags or []),
                item.p or "",
                json.dumps(item.tokens),
                # use an aware UTC timestamp; naive utcnow() is deprecated and
                # its .timestamp() would be interpreted as local time
                datetime.now(timezone.utc).timestamp(),
                item.id,
            ),
        )
        self.conn.commit()

    def get_record_core(self, record_id: int) -> dict:
        row = self.db_manager.get_record(record_id)
        if not row:
            return {
                "id": record_id,
                "itemtype": "",
                "subject": "",
                "rruleset": None,
                "record": None,
            }
        # tuple layout per your schema
        return {
            "id": record_id,
            "itemtype": row[1],
            "subject": row[2],
            "rruleset": row[4],
            "record": row,
        }

    def get_details_for_record(
        self,
        record_id: int,
        job_id: int | None = None,
        datetime_id: int | None = None,
        instance_ts: str | None = None,
    ):
        """
        Return list: [title, '', ... lines ...] same as process_tag would.
        Use the same internal logic as process_tag but accept ids directly.
        """
        # If you have a general helper that returns fields for a record, reuse it.
        # Here we replicate the important parts used by process_tag()
        core = self.get_record_core(record_id) or {}
        itemtype = core.get("itemtype") or ""
        rruleset = core.get("rruleset") or ""
        all_prereqs = core.get("all_prereqs") or ""

        instance_line = (
            f"\n[{label_color}]instance:[/{label_color}] {instance_ts}"
            if instance_ts
            else ""
        )

        subject = core.get("subject") or "(untitled)"
        if job_id is not None:
            try:
                js = self.db_manager.get_job_display_subject(record_id, job_id)
                if js:
                    subject = js
            except Exception:
                pass

        try:
            pinned_now = (
                self.db_manager.is_task_pinned(record_id) if itemtype == "~" else False
            )
        except Exception:
            pinned_now = False

        fields = [
            "",
        ] + self.get_entry(record_id, job_id, instance_ts)

        _dts = self.db_manager.get_next_start_datetimes_for_record(record_id)
        first, second = (_dts + [None, None])[:2]
        log_msg(f"setting meta {first = }, {second = }")

        title = f"[bold]{subject:^{self.width}}[/bold]"

        meta = {
            "record_id": record_id,
            "job_id": job_id,
            "itemtype": itemtype,
            "subject": subject,
            "rruleset": rruleset,
            "first": first,
            "second": second,
            "datetime_id": datetime_id,
            "instance_ts": instance_ts,
            "all_prereqs": all_prereqs,
            "pinned": bool(pinned_now),
            "record": self.db_manager.get_record(record_id),
        }
        self._last_details_meta = meta
        bug_msg(f"{meta['first'] = }, {meta['second'] = }, {meta['instance_ts'] = }")

        # return [title, ""] + fields
        return title, fields, meta

    def get_record(self, record_id):
        return self.db_manager.get_record(record_id)

    def get_all_records(self):
        return self.db_manager.get_all()

    def delete_record(self, record_id):
        self.db_manager.delete_record(record_id)

    def sync_jobs(self, record_id, jobs_list):
        self.db_manager.sync_jobs_from_record(record_id, jobs_list)

    def get_jobs(self, record_id):
        return self.db_manager.get_jobs_for_record(record_id)

    def get_job(self, record_id):
        return self.db_manager.get_jobs_for_record(record_id)

    def record_count(self):
        return self.db_manager.count_records()

    def populate_alerts(self):
        self.db_manager.populate_alerts()

    def populate_notice(self):
        self.db_manager.populate_notice()

    def refresh_alerts(self):
        self.db_manager.populate_alerts()

    def refresh_tags(self):
        self.db_manager.populate_tags()

    def execute_alert(self, command: str):
        """
        Execute the given alert command using subprocess.

        Args:
            command (str): The command string to execute.
        """
        if not command:
            print("❌ Error: No command provided to execute.")
            return

        try:
            # ✅ Use shlex.split() to safely parse the command
            subprocess.run(shlex.split(command), check=True)
            print(f"✅ Successfully executed: {command}")
        except subprocess.CalledProcessError as e:
            print(f"❌ Error executing command: {command}\n{e}")
        except FileNotFoundError:
            print(f"❌ Command not found: {command}")
        except Exception as e:
            print(f"❌ Unexpected error: {e}")

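    # shlex.split() keeps quoted arguments intact, e.g.
    #   shlex.split('notify-send "tklr alert" "meeting in 5 minutes"')
    #   -> ['notify-send', 'tklr alert', 'meeting in 5 minutes']
    # so subprocess.run() receives a proper argv list and no shell is invoked
    # (the example command is illustrative, not a tklr default).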
    def execute_due_alerts(self):
        records = self.db_manager.get_due_alerts()
        # log_msg(f"{records = }")
        # rows: (alert_id, record_id, trigger_datetime, start_datetime,
        #        alert_name, alert_command)
        for record in records:
            (
                alert_id,
                record_id,
                trigger_datetime,
                start_datetime,
                alert_name,
                alert_command,
            ) = record
            log_msg(
                f"Executing alert {alert_name = }, {alert_command = }, {trigger_datetime = }"
            )
            self.execute_alert(alert_command)
            # need command to execute command with arguments
            self.db_manager.mark_alert_executed(alert_id)

    def get_due_alerts(self, now: datetime) -> List[list]:
        due = []
        records = self.db_manager.get_due_alerts()
        for record in records:
            (
                alert_id,
                record_id,
                trigger_datetime,
                start_datetime,
                alert_name,
                alert_command,
            ) = record
            due.append([alert_id, alert_name, alert_command])
            log_msg(f"{due[-1] = }")
        return due

    def get_active_alerts(self, width: int = 70):
        # now_fmt = datetime.now().strftime("%A, %B %-d %H:%M:%S")
        alerts = self.db_manager.get_active_alerts()
        log_msg(f"{alerts = }")
        title = "Remaining alerts for today"
        if not alerts:
            header = f"[{HEADER_COLOR}] none remaining [/{HEADER_COLOR}]"
            return [], header

        now = datetime.now()

        trigger_width = 7 if self.AMPM else 8
        start_width = 7 if self.AMPM else 6
        alert_width = trigger_width + 3
        name_width = width - 35
        header = f"[bold][dim]{'tag':^3}[/dim] {'alert':^{alert_width}} {'for':^{start_width}} {'subject':<{name_width}}[/bold]"

        rows = []
        log_msg(f"processing {len(alerts)} alerts")

        for alert in alerts:
            log_msg(f"Alert: {alert = }")
            # alert_id, record_id, record_name, start_dt, td, command
            (
                alert_id,
                record_id,
                record_name,
                trigger_datetime,
                start_datetime,
                alert_name,
                alert_command,
            ) = alert
            if now > datetime_from_timestamp(trigger_datetime):
                log_msg("skipping - already passed")
                continue
            # tag_fmt, indx = self.add_tag("alerts", indx, record_id)
            trtime = self.format_datetime(trigger_datetime)
            sttime = self.format_datetime(start_datetime)
            subject = truncate_string(record_name, name_width)
            text = (
                f"[{SALMON}] {alert_name} {trtime:<{trigger_width}}[/{SALMON}][{PALE_GREEN}] → {sttime:<{start_width}}[/{PALE_GREEN}] "
                + f" [{AVAILABLE_COLOR}]{subject:<{name_width}}[/{AVAILABLE_COLOR}]"
            )
            rows.append({"record_id": record_id, "job_id": None, "text": text})
        pages = page_tagger(rows)
        log_msg(f"{header = }\n{rows = }\n{pages = }")
        return pages, header

    def get_table_and_list(self, start_date: datetime, selected_week: tuple[int, int]):
        year, week = selected_week

        try:
            extended = self.db_manager.ensure_week_generated_with_topup(
                year, week, cushion=6, topup_threshold=2
            )
            if extended:
                log_msg(
                    f"[weeks] extended/generated around {year}-W{week:02d} (+cushion)"
                )
        except Exception as e:
            log_msg(f"[weeks] ensure_week_generated_with_topup error: {e}")

        year_week = f"{year:04d}-{week:02d}"
        busy_bits = self.db_manager.get_busy_bits_for_week(year_week)
        busy_bar = self._format_busy_bar(busy_bits)

        start_dt = datetime.strptime(f"{year} {week} 1", "%G %V %u")
        # end_dt = start_dt + timedelta(weeks=1)
        details = self.get_week_details(selected_week)

        title = format_iso_week(start_dt)
        return title, busy_bar, details

    def _format_busy_bar(
        self,
        bits: list[int],
        *,
        busy_color: str = "green",
        conflict_color: str = "red",
        allday_color: str = "yellow",
    ) -> str:
        """
        Render 35 busy bits (7×[1 all-day + 4×6h blocks])
        as a compact single-row week bar with color markup.

        Layout:
            | Mon | Tue | Wed | Thu | Fri | Sat | Sun |
            |■██▓▓|     |▓███ | ... |

        Encoding:
            0 = free → " "
            1 = busy → colored block
            2 = conflict → colored block
            (first of 5 per day is the all-day bit → colored "■" if set)
        """
        DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
        assert len(bits) == 35, "expected 35 bits (7×5)"

        # --- Header line
        header = "│".join(f" {d:^3} " for d in DAYS)
        lines = [f"│{header}│"]

        # --- Busy row
        day_segments = []
        for day in range(7):
            start = day * 5
            all_day_bit = bits[start]
            block_bits = bits[start + 1 : start + 5]

            # --- all-day symbol
            if all_day_bit:
                all_day_char = f"[{allday_color}]■[/{allday_color}]"
            else:
                all_day_char = " "

            # --- 4×6h blocks
            blocks = ""
            for b in block_bits:
                if b == 1:
                    blocks += f"[{busy_color}]█[/{busy_color}]"
                elif b == 2:
                    blocks += f"[{conflict_color}]▓[/{conflict_color}]"
                else:
                    blocks += " "

            day_segments.append(all_day_char + blocks)

        lines.append(f"│{'│'.join(day_segments)}│")
        return "\n".join(lines)

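    # Hedged sketch of the 35-bit encoding above: each day contributes
    #   [all_day, 00-06, 06-12, 12-18, 18-24]
    # so bits[0:5] == [1, 0, 1, 2, 0] renders Monday's cell as "■ █▓ ":
    # the all-day marker set, a free early block, a busy morning block, a
    # conflicted afternoon block, and a free evening block.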
    def get_week_details(self, yr_wk):
        """
        Fetch and format rows for a specific week.
        """
        # log_msg(f"Getting rows for week {yr_wk}")
        today = datetime.now()
        tomorrow = today + ONEDAY
        today_year, today_week, today_weekday = today.isocalendar()
        tomorrow_year, tomorrow_week, tomorrow_day = tomorrow.isocalendar()

        self.selected_week = yr_wk

        start_datetime = datetime.strptime(f"{yr_wk[0]} {yr_wk[1]} 1", "%G %V %u")
        end_datetime = start_datetime + timedelta(weeks=1)
        events = self.db_manager.get_events_for_period(start_datetime, end_datetime)

        # log_msg(f"from get_events_for_period:\n{events = }")
        this_week = format_date_range(start_datetime, end_datetime - ONEDAY)
        # terminal_width = shutil.get_terminal_size().columns

        header = f"{this_week} #{yr_wk[1]} ({len(events)})"
        rows = []

        # self.set_week_afill(events, yr_wk)

        if not events:
            rows.append(
                {
                    "record_id": None,
                    "job_id": None,
                    "datetime_id": None,
                    "instance_ts": yr_wk[0],
                    "text": f" [{HEADER_COLOR}]Nothing scheduled for this week[/{HEADER_COLOR}]",
                }
            )
            pages = page_tagger(rows)
            return pages

        weekday_to_events = {}
        for i in range(7):
            this_day = (start_datetime + timedelta(days=i)).date()
            weekday_to_events[this_day] = []

        # for start_ts, end_ts, itemtype, subject, id, job_id in events:
        for dt_id, start_ts, end_ts, itemtype, subject, id, job_id in events:
            # bug_msg(f"{itemtype = }, {subject = }, {dt_id = }, {id = }, {job_id = }")
            start_dt = datetime_from_timestamp(start_ts)
            end_dt = datetime_from_timestamp(end_ts)
            if itemtype == "*":  # event
                # 🪄 new line: replace {XXX} with ordinal instance
                subject = self.apply_anniversary_if_needed(id, subject, start_dt)
            # log_msg(
            #     f"Week rows {itemtype = }, {subject = }, {start_dt = }, {end_dt = }"
            # )
            status = "available"

            if start_dt == end_dt:
                # if start_dt.hour == 0 and start_dt.minute == 0 and start_dt.second == 0:
                if start_dt.hour == 0 and start_dt.minute == 0:
                    # start_end = f"{str('~'):^11}"
                    start_end = ""
                elif start_dt.hour == 23 and start_dt.minute == 59:
                    start_end = ""
                else:
                    start_end = f"{format_time_range(start_dt, end_dt, self.AMPM)}"
            else:
                start_end = f"{format_time_range(start_dt, end_dt, self.AMPM)}"

            type_color = TYPE_TO_COLOR[itemtype]
            escaped_start_end = (
                f"[not bold]{start_end} [/not bold]" if start_end else ""
            )

            if job_id:
                job = self.db_manager.get_job_dict(id, job_id)
                status = job.get("status", "available")
                subject = job.get("display_subject", subject)
                itemtype = "~"
                if status != "available":
                    type_color = WAITING_COLOR

            # 👉 NEW: append flags from Records.flags
            old_subject = subject
            subject = self.apply_flags(id, subject)
            # bug_msg(f"{old_subject = }, {subject = }")

            row = {
                "record_id": id,
                "job_id": job_id,
                "datetime_id": dt_id,
                "instance_ts": start_ts,
                "text": f"[{type_color}]{itemtype} {escaped_start_end}{subject}[/{type_color}]",
            }
            weekday_to_events.setdefault(start_dt.date(), []).append(row)
            # bug_msg(f"job row: {row = }")

        for day, events in weekday_to_events.items():
            # TODO: today, tomorrow here
            iso_year, iso_week, weekday = day.isocalendar()
            today = (
                iso_year == today_year
                and iso_week == today_week
                and weekday == today_weekday
            )
            tomorrow = (
                iso_year == tomorrow_year
                and iso_week == tomorrow_week
                and weekday == tomorrow_day
            )
            flag = " (today)" if today else " (tomorrow)" if tomorrow else ""
            if events:
                rows.append(
                    {
                        "record_id": None,
                        "job_id": None,
                        # day headers are not taggable: use None rather than
                        # the stale dt_id/start_ts left over from the loop above
                        "datetime_id": None,
                        "instance_ts": None,
                        "text": f"[bold][{HEADER_COLOR}]{day.strftime('%a, %b %-d')}{flag}[/{HEADER_COLOR}][/bold]",
                    }
                )
                for event in events:
                    rows.append(event)
        pages = page_tagger(rows)
        self.yrwk_to_pages[yr_wk] = pages
        # log_msg(f"{len(pages) = }, {pages[0] = }, {pages[-1] = }")
        return pages

    def get_busy_bits_for_week(self, selected_week: tuple[int, int]) -> list[int]:
        """Convert (year, week) tuple to 'YYYY-WW' and delegate to model."""
        year, week = selected_week
        year_week = f"{year:04d}-{week:02d}"
        return self.db_manager.get_busy_bits_for_week(year_week)

    def get_next(self):
        """
        Fetch and format description for the next instances.
        """
        events = self.db_manager.get_next_instances()
        header = f"Next Instances ({len(events)})"

        if not events:
            return [], header

        year_to_events = {}

        for dt_id, id, job_id, subject, description, itemtype, start_ts in events:
            start_dt = datetime_from_timestamp(start_ts)
            subject = self.apply_anniversary_if_needed(id, subject, start_dt)
            if job_id is not None:
                try:
                    js = self.db_manager.get_job_display_subject(id, job_id)
                    if js:  # only override if present/non-empty
                        subject = js
                    # log_msg(f"{subject = }")
                except Exception as e:
                    # fail-safe: keep the record subject
                    log_msg(f"{e = }")
                    pass

            # 👉 NEW: append flags from Records.flags
            subject = self.apply_flags(id, subject)

            monthday = start_dt.strftime("%-m-%d")
            start_end = f"{monthday:>2} {format_hours_mins(start_dt, HRS_MINS)}"
            type_color = TYPE_TO_COLOR[itemtype]
            escaped_start_end = f"[not bold]{start_end}[/not bold]"
            item = {
                "record_id": id,
                "job_id": job_id,
                "datetime_id": dt_id,
                "instance_ts": start_ts,
                "text": f"[{type_color}]{itemtype} {escaped_start_end} {subject}[/{type_color}]",
            }
            # yr_mnth_to_events.setdefault(start_dt.strftime("%B %Y"), []).append(row)
            year_to_events.setdefault(start_dt.strftime("%b %Y"), []).append(item)

        # self.list_tag_to_id.setdefault("next", {})
        # indx = 0
        """
        rows: a list of dicts each with either
        - { 'record_id': int, 'text': str }   (a taggable record row)
        - { 'record_id': None, 'text': str }  (a non-taggable header row)
        page_size: number of taggable rows per page
        """

        rows = []
        for ym, events in year_to_events.items():
            if events:
                rows.append(
                    {
                        "dt_id": None,
                        "record_id": None,
                        "job_id": None,
                        "datetime_id": None,
                        "instance_ts": None,
                        "text": f"[not bold][{HEADER_COLOR}]{ym}[/{HEADER_COLOR}][/not bold]",
                    }
                )
                for event in events:
                    rows.append(event)

        # build 'rows' as a list of dicts with record_id and text
        pages = page_tagger(rows)
        # bug_msg(f"{pages = }")
        return pages, header

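    # Hedged sketch of the row shape handed to page_tagger() above (values are
    # illustrative): month headers carry record_id None and are not taggable,
    # while item rows carry the ids needed to resolve a selection, e.g.
    #   [{"record_id": None, ..., "text": "Jun 2025"},
    #    {"record_id": 7, "job_id": None, "datetime_id": 12,
    #     "instance_ts": 1751360400, "text": "* 6-01 9:00a coffee"}]
    # page_tagger is assumed to assign letter tags to the taggable rows and
    # split the result into pages.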
    def get_last(self):
        """
        Fetch and format description for the last instances.
        """
        events = self.db_manager.get_last_instances()
        header = f"Last instances ({len(events)})"
        # description = [f"[not bold][{HEADER_COLOR}]{header}[/{HEADER_COLOR}][/not bold]"]

        if not events:
            return [], header

        # use a, ..., z if len(events) <= 26 else use aa, ..., zz
        year_to_events = {}

        for dt_id, id, job_id, subject, description, itemtype, start_ts in events:
            start_dt = datetime_from_timestamp(start_ts)
            subject = self.apply_anniversary_if_needed(id, subject, start_dt)
            # log_msg(f"Week description {subject = }, {start_dt = }, {end_dt = }")
            if job_id is not None:
                try:
                    js = self.db_manager.get_job_display_subject(id, job_id)
                    if js:  # only override if present/non-empty
                        subject = js
                    # bug_msg(f"{subject = }")
                except Exception as e:
                    # fail-safe: keep the record subject
                    # bug_msg(f"{e = }")
                    pass

            # 👉 NEW: append flags from Records.flags
            subject = self.apply_flags(id, subject)

            monthday = start_dt.strftime("%-m-%d")
            start_end = f"{monthday:>2} {format_hours_mins(start_dt, HRS_MINS)}"
            type_color = TYPE_TO_COLOR[itemtype]
            escaped_start_end = f"[not bold]{start_end}[/not bold]"
            item = {
                "dt_id": dt_id,
                "record_id": id,
                "job_id": job_id,
                "instance_ts": start_ts,
                "text": f"[{type_color}]{itemtype} {escaped_start_end} {subject}[/{type_color}]",
            }
            year_to_events.setdefault(start_dt.strftime("%b %Y"), []).append(item)

        rows = []
        for ym, events in year_to_events.items():
            if events:
                rows.append(
                    {
                        "record_id": None,
                        "job_id": None,
                        "text": f"[not bold][{HEADER_COLOR}]{ym}[/{HEADER_COLOR}][/not bold]",
                    }
                )
                for event in events:
                    rows.append(event)
        pages = page_tagger(rows)
        # bug_msg(f"{pages = }")
        return pages, header

    def find_records(self, search_str: str):
        """
        Fetch and format records whose text matches search_str.
        """
        search_str = search_str.strip()
        events = self.db_manager.find_records(search_str)

        matching = (
            f'containing a match for "[{SELECTED_COLOR}]{search_str}[/{SELECTED_COLOR}]" '
            if search_str
            else "matching anything"
        )

        header = f"Items ({len(events)})\n {matching}"

        if not events:
            return [], header

        rows = []

        for record_id, subject, _, itemtype, last_ts, next_ts in events:
            subject = f"{truncate_string(subject, 32):<34}"
            # 👉 NEW: append flags from Records.flags
            subject = self.apply_flags(record_id, subject)
            last_dt = (
                datetime_from_timestamp(last_ts).strftime("%y-%m-%d %H:%M")
                if last_ts
                else "~"
            )
            last_fmt = f"{last_dt:^14}"
            next_dt = (
                datetime_from_timestamp(next_ts).strftime("%y-%m-%d %H:%M")
                if next_ts
                else "~"
            )
            next_fmt = f"{next_dt:^14}"
            type_color = TYPE_TO_COLOR[itemtype]
            escaped_last = f"[not bold]{last_fmt}[/not bold]"
            escaped_next = f"[not bold]{next_fmt}[/not bold]"
            rows.append(
                {
                    "record_id": record_id,
                    "job_id": None,
                    "text": f"[{type_color}]{itemtype} {subject} {escaped_next}[/{type_color}]",
                }
            )
        pages = page_tagger(rows)
        # bug_msg(f"{pages = }")
        return pages, header

    def group_events_by_date_and_time(self, events):
        """
        Groups only scheduled '*' events by date and time.

        Args:
            events (List[Tuple]):
                List of (dt_id, start_ts, end_ts, itemtype, subject, record_id, job_id)

        Returns:
            Dict[date, List[Tuple[time, Tuple]]]:
                Dict mapping date to list of (start_time, event) tuples
        """
        grouped = defaultdict(list)

        for dt_id, start_ts, end_ts, itemtype, subject, record_id, job_id in events:
            # log_msg(f"{start_ts = }, {end_ts = }, {subject = }")
            if itemtype != "*":
                continue  # Only events

            start_dt = datetime_from_timestamp(start_ts)
            grouped[start_dt.date()].append(
                (start_dt.time(), (dt_id, start_ts, end_ts, subject, record_id, job_id))
            )

        # Sort each day's events by time
        for date in grouped:
            grouped[date].sort(key=lambda x: x[0])

        return dict(grouped)

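    # Hedged example of the grouping above: two '*' rows on 2025-06-01 come
    # back keyed by that date and sorted by start time,
    #   {date(2025, 6, 1): [(time(9, 0), (...)), (time(13, 30), (...))]}
    # while non-'*' rows (tasks, notes) are filtered out entirely.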
    def get_completions(self):
        """
        Fetch and format recent completions for a Completions view.

        Returns:
            pages, header

            pages has the same shape as get_next:
            [ (page_rows: list[str], page_tag_map: dict[str, (record_id, job_id)]) ]
        """
        records = self.db_manager.get_all_completions()
        header = f"Completions ({len(records)})"

        if not records:
            return [], header

        # Group by month-year of completion (e.g. "Nov 2025")
        year_to_events: dict[str, list[dict]] = defaultdict(list)

        for (
            record_id,
            subject,
            description,
            itemtype,
            due_dt,  # may be None
            completed_dt,  # datetime
        ) in records:
            # apply flags 𝕣/𝕠/𝕒/𝕘 (from Records.flags)
            subject = self.apply_flags(record_id, subject or "(untitled)")
            completed_dt = completed_dt.astimezone()
            due_dt = due_dt.astimezone() if due_dt else None

            # display: " 5 14:30" style like get_next
            monthday = completed_dt.strftime("%-m-%d")
            time_part = format_hours_mins(completed_dt, HRS_MINS)
            when_str = f"{monthday:>2} {time_part}"

            type_color = TYPE_TO_COLOR.get(itemtype, "white")
            when_frag = f"[not bold]{when_str}[/not bold]"

            item = {
                "record_id": record_id,
                "job_id": None,  # no per-job completions yet
                "datetime_id": None,  # keeping keys parallel with other views
                "instance_ts": due_dt.strftime("%Y%m%dT%H%M") if due_dt else "none",
                "text": f"[{type_color}]{itemtype} {when_frag} {subject}[/{type_color}]",
            }

            ym = completed_dt.strftime("%b %Y")
            year_to_events[ym].append(item)

        # Flatten to rows (month headers + items), then page-tagger
        rows: list[dict] = []
        for ym, events in year_to_events.items():
            if events:
                rows.append(
                    {
                        "dt_id": None,
                        "record_id": None,
                        "job_id": None,
                        "datetime_id": None,
                        "instance_ts": None,
                        "text": f"[not bold][{HEADER_COLOR}]{ym}[/{HEADER_COLOR}][/not bold]",
                    }
                )
                rows.extend(events)

        pages = page_tagger(rows)
        return pages, header

    def get_record_completions(self, record_id: int, width: int = 70):
        """
        Fetch and format completion history for a given record.
        """
        completions = self.db_manager.get_completions(record_id)
        header = "Completion history"
        results = [header]

        if not completions:
            results.append(f" [{HEADER_COLOR}]no completions recorded[/{HEADER_COLOR}]")
            return results

        # Column widths similar to alerts
        completed_width = 14  # space for "YYYY-MM-DD HH:MM"
        due_width = 14
        name_width = width - (3 + 3 + completed_width + due_width + 6)

        results.append(
            f"[bold][dim]{'tag':^3}[/dim] "
            f"{'completed':^{completed_width}} "
            f"{'due':^{due_width}} "
            f"{'subject':<{name_width}}[/bold]"
        )

        # self.set_afill(completions, "record_completions")
        self.list_tag_to_id.setdefault("record_completions", {})
        indx = 0

        for (
            record_id,
            subject,
            description,
            itemtype,
            due_ts,
            completed_ts,
        ) in completions:
            completed_dt = datetime_from_timestamp(completed_ts)
            completed_str = self.format_datetime(completed_dt, short=True)

            due_str = (
                self.format_datetime(datetime_from_timestamp(due_ts), short=True)
                if due_ts
                else "-"
            )
            subj_fmt = truncate_string(subject, name_width)

            tag_fmt, indx = self.add_tag("record_completions", indx, record_id)

            row = " ".join(
                [
                    f"{tag_fmt}",
                    f"[{SALMON}]{completed_str:<{completed_width}}[/{SALMON}]",
                    f"[{PALE_GREEN}]{due_str:<{due_width}}[/{PALE_GREEN}]",
                    f"[{AVAILABLE_COLOR}]{subj_fmt:<{name_width}}[/{AVAILABLE_COLOR}]",
                ]
            )
            results.append(row)

        return results

    def get_agenda(self, now: datetime | None = None):
        """Return paged agenda rows (events then tasks) and a header."""
        # avoid a datetime.now() default argument: it would be evaluated once
        # at definition time and then frozen
        now = now or datetime.now()
        header = "Agenda - Events and Tasks"
        divider = [
            {"record_id": None, "job_id": None, "text": " "},
        ]
        events_by_date = self.get_agenda_events(now)
        tasks_by_urgency = self.get_agenda_tasks()
        events_and_tasks = events_by_date + divider + tasks_by_urgency
        pages = page_tagger(events_and_tasks)
        # bug_msg(f"{pages = }")
        return pages, header

    def get_agenda_events(self, now: datetime | None = None):
        """
        Returns rows (date headers + event dicts) for up to three days.
        Rules:
        • Pick the first 3 days that have events.
        • Also include TODAY if it has notice/drafts even with no events.
        • If nothing to display at all, return [].
        """
        # evaluate "now" at call time, not at definition time
        now = now or datetime.now()
        notice_records = (
            self.db_manager.get_notice_for_events()
        )  # (record_id, days_remaining, subject)
        draft_records = self.db_manager.get_drafts()  # (record_id, subject)

        today_dt = now.replace(hour=0, minute=0, second=0, microsecond=0)
        today = today_dt.date()
        now_ts = _fmt_naive(now)

        # Pull events for the next couple of weeks (or whatever window you prefer)
        window_start = today_dt
        window_end = today_dt + timedelta(days=14)
        events = self.db_manager.get_events_for_period(
            _to_local_naive(window_start), _to_local_naive(window_end)
        )
        # events rows: (start_ts, end_ts, itemtype, subject, record_id)

        grouped_by_date = self.group_events_by_date_and_time(
            events
        )  # {date: [(time_key, (start_ts, end_ts, subject, record_id)), ...]}

        # 1) Determine the first three dates with events
        event_dates_sorted = sorted(grouped_by_date.keys())
        allowed_dates: list[date] = []
        for d in event_dates_sorted:
            allowed_dates.append(d)
            if len(allowed_dates) == 3:
                break

        # 2) If today has notice/draft items, include it even if it has no events
        has_today_meta = bool(notice_records or draft_records)
        if has_today_meta and today not in allowed_dates:
            # Prepend today; keep max three days
            allowed_dates = [today] + allowed_dates
            # De-dupe while preserving order
            seen = set()
            deduped = []
            for d in allowed_dates:
                if d not in seen:
                    seen.add(d)
                    deduped.append(d)
            allowed_dates = deduped[:3]  # cap to 3

        # 3) If nothing at all to show, bail early
        nothing_to_show = (not allowed_dates) and (not has_today_meta)
        if nothing_to_show:
            return []

        # 4) Build events_by_date only for allowed dates
        events_by_date: dict[date, list[dict]] = {}

        for d in allowed_dates:
            entries = grouped_by_date.get(d, [])
            for _, (dt_id, start_ts, end_ts, subject, record_id, job_id) in entries:
                subject = self.apply_flags(record_id, subject)
                end_ts = end_ts or start_ts
                label = format_time_range(start_ts, end_ts, self.AMPM).strip()
                if end_ts.endswith("T000000"):
                    color = ALLDAY_COLOR
                elif end_ts <= now_ts and end_ts != start_ts:
                    color = PASSED_EVENT
                elif start_ts <= now_ts:
                    color = ACTIVE_EVENT
                else:
                    color = EVENT_COLOR
                label_fmt = f"{label} " if label else ""
                events_by_date.setdefault(d, []).append(
                    {
                        "record_id": record_id,
                        "job_id": None,
                        "datetime_id": dt_id,
                        "instance_ts": start_ts,
                        "text": f"[{color}]{label_fmt}{subject}[/{color}]",
                    }
                )

        # 5) If TODAY is in allowed_dates (either because it had events or we added it)
        #    attach notice + draft markers even if it had no events
        if today in allowed_dates:
            if notice_records:
                for record_id, days_remaining, subject in notice_records:
                    events_by_date.setdefault(today, []).append(
                        {
                            "record_id": record_id,
                            "job_id": None,
                            # notice markers have no instance of their own; do
                            # not reuse dt_id/start_ts left over from the loop above
                            "datetime_id": None,
                            "instance_ts": None,
                            "text": f"[{NOTICE_COLOR}]+{days_remaining}d {subject} [/{NOTICE_COLOR}]",
                        }
                    )
            if draft_records:
                for record_id, subject in draft_records:
                    events_by_date.setdefault(today, []).append(
                        {
                            "record_id": record_id,
                            "job_id": None,
                            "datetime_id": None,
                            "instance_ts": None,
                            "text": f"[{DRAFT_COLOR}] ? {subject}[/{DRAFT_COLOR}]",
                        }
                    )

        # 6) Tagging and indexing
        total_items = sum(len(v) for v in events_by_date.values())
        if total_items == 0:
            # Edge case: allowed_dates may exist but nothing actually added
            # (shouldn't happen, but safe-guard); keep the return type a list
            return []

        # self.set_afill(range(total_items), "events")
        # self.afill_by_view["events"] = self.afill
        # self.list_tag_to_id.setdefault("events", {})

        rows = []
        for d, events in sorted(events_by_date.items()):
            if events:
                rows.append(
                    {
                        "record_id": None,
                        "job_id": None,
                        "datetime_id": None,
                        "instance_ts": None,
                        "text": f"[not bold][{HEADER_COLOR}]{d.strftime('%a %b %-d')}[/{HEADER_COLOR}][/not bold]",
                    }
                )
                for event in events:
                    rows.append(event)

        return rows

    # def get_agenda_tasks(self):
    #     """
    #     Returns list of (urgency_str_or_pin, color, tag_fmt, colored_subject)
    #     Suitable for the Agenda Tasks pane.
    #     """
    #     tasks_by_urgency = []
    #
    #     # Use the JOIN with Pinned so pins persist across restarts
    #     urgency_records = self.db_manager.get_urgency()
    #     # rows: (record_id, job_id, subject, urgency, color, status, weights, pinned_int)
    #
    #     # self.set_afill(urgency_records, "tasks")
    #     # log_msg(f"urgency_records {self.afill_by_view = }, {len(urgency_records) = }")
    #     # indx = 0
    #     # self.list_tag_to_id.setdefault("tasks", {})
    #
    #     # Agenda tasks (has job_id)
    #     header = f"Tasks ({len(urgency_records)})"
    #     rows = [
    #         {"record_id": None, "job_id": None, "text": header},
    #     ]
    #     for (
    #         record_id,
    #         job_id,
    #         subject,
    #         urgency,
    #         color,
    #         status,
    #         weights,
    #         pinned,
    #     ) in urgency_records:
    #         # log_msg(f"collecting tasks {record_id = }, {job_id = }, {subject = }")
    #         # tag_fmt, indx = self.add_tag("tasks", indx, record_id, job_id=job_id)
    #         urgency_str = (
    #             "📌" if pinned else f"[{color}]{int(round(urgency * 100)):>2}[/{color}]"
    #         )
    #         rows.append(
    #             {
    #                 "record_id": record_id,
    #                 "job_id": job_id,
    #                 "text": f"[{TASK_COLOR}]{urgency_str} {self.apply_flags(record_id, subject)}[/{TASK_COLOR}]",
    #             }
    #         )
    #
    #     return rows

    def get_agenda_tasks(self):
        """
        Returns rows suitable for the Agenda Tasks pane.

        Each row is a dict:
            {
                "record_id": int | None,
                "job_id": int | None,
                "datetime_id": int | None,
                "instance_ts": str | None,
                "text": str,
            }
        """
        # Use the JOIN with Pinned so pins persist across restarts
        urgency_records = self.db_manager.get_urgency()
        # rows now:
        # (record_id, job_id, subject, urgency, color, status, weights,
        #  pinned_int, datetime_id, instance_ts)

        header = f"Tasks ({len(urgency_records)})"
        rows = [
            {
                "record_id": None,
                "job_id": None,
                "datetime_id": None,
                "instance_ts": None,
                "text": header,
            },
        ]

        for (
            record_id,
            job_id,
            subject,
            urgency,
            color,
            status,
            weights,
            pinned,
            datetime_id,
            instance_ts,
        ) in urgency_records:
            urgency_str = (
                "📌" if pinned else f"[{color}]{int(round(urgency * 100)):>2}[/{color}]"
            )

            rows.append(
                {
                    "record_id": record_id,
                    "job_id": job_id,
                    "datetime_id": datetime_id,  # 👈 earliest DateTimes.id, or None
                    "instance_ts": instance_ts,  # 👈 earliest start_datetime TEXT, or None
                    "text": f"[{TASK_COLOR}]{urgency_str} {self.apply_flags(record_id, subject)}[/{TASK_COLOR}]",
                }
            )

        return rows

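    # Usage sketch (illustrative only; `controller` and `pane` are hypothetical
    # names for a Controller instance and a list widget):
    #
    #     rows = controller.get_agenda_tasks()
    #     for row in rows:
    #         # the header row has record_id=None; task rows carry ids so the
    #         # view can dispatch selections back to the record/job
    #         pane.append(row["text"])
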
    def get_entry_from_record(self, record_id: int) -> str:
        """
        Rebuild the raw entry string for a record from its stored tokens,
        formatted to the current display width.
        """
        result = self.db_manager.get_tokens(record_id)
        tokens, rruleset, created, modified = result[0]
        entry = format_tokens(tokens, self.width, False)

        return entry

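    # Example (illustrative; the entry text is hypothetical): for a record
    # stored from the entry "* dentist @s 2025-10-02 9am",
    # get_entry_from_record(record_id) returns that entry text reflowed to
    # self.width, suitable for pre-filling an edit screen.
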
    def finish_from_details(
        self, record_id: int, job_id: int | None, completed_dt: datetime
    ) -> dict:
        """
        1) Load record -> Item
        2) Call item.finish_without_exdate(...)
        3) Persist Item
        4) Insert Completions row
        5) If fully finished, remove from Urgency/DateTimes
        6) Return summary dict
        """
        row = self.db_manager.get_record(record_id)
        if not row:
            raise ValueError(f"No record found for id {record_id}")

        # Records row has columns 0..16; column 13 holds the tokens JSON
        tokens_value = row[13]
        tokens = tokens_value
        if isinstance(tokens_value, str):
            try:
                tokens = json.loads(tokens_value)
            except Exception:
                # already a list or malformed — best effort
                pass
        if not isinstance(tokens, list):
            raise ValueError("Structured tokens not available/invalid for this record.")

        entry_str = "".join(tok.get("token", "") for tok in tokens).strip()

        # Build/parse the Item
        # item = Item(entry_str)
        item = self.make_item(entry_str)
        if not getattr(item, "parse_ok", True):
            # Some Item versions set parse_ok/parse_message; if not, skip this guard.
            raise ValueError(getattr(item, "parse_message", "Item.parse failed"))

        # Remember subject fallback so we never null it on update
        existing_subject = row[2]
        if not item.subject:
            item.subject = existing_subject

        # 2) Let Item do all the schedule math (no EXDATE path as requested)
        fin = item.finish_without_exdate(
            completed_dt=completed_dt,
            record_id=record_id,
            job_id=job_id,
        )
        due_ts_used = getattr(fin, "due_ts_used", None)
        finished_final = getattr(fin, "finished_final", False)

        # 3) Persist the mutated Item
        self.db_manager.update_item(record_id, item)

        # 4) Insert completion (NULL due is allowed for one-shots)
        self.db_manager.insert_completion(
            record_id=record_id,
            due_ts=due_ts_used,
            completed_ts=int(completed_dt.timestamp()),
        )

        # 5) If final, purge from derived tables so it vanishes from lists
        if finished_final:
            try:
                self.db_manager.cursor.execute(
                    "DELETE FROM Urgency WHERE record_id=?", (record_id,)
                )
                self.db_manager.cursor.execute(
                    "DELETE FROM DateTimes WHERE record_id=?", (record_id,)
                )
                self.db_manager.conn.commit()
            except Exception:
                pass

        # Optional: recompute derivations; DetailsScreen also calls refresh, but safe here
        try:
            self.db_manager.populate_dependent_tables()
        except Exception:
            pass

        return {
            "record_id": record_id,
            "final": finished_final,
            "due_ts": due_ts_used,
            "completed_ts": int(completed_dt.timestamp()),
            "new_rruleset": item.rruleset or "",
        }

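    # Usage sketch (illustrative; `controller` is a hypothetical Controller
    # instance):
    #
    #     summary = controller.finish_from_details(
    #         record_id=42, job_id=None, completed_dt=datetime.now()
    #     )
    #     if summary["final"]:
    #         ...  # done for good; the record has left Urgency/DateTimes
    #     else:
    #         ...  # summary["new_rruleset"] describes the remaining schedule
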
    def get_bin_name(self, bin_id: int) -> str:
        return self.db_manager.get_bin_name(bin_id)

    def get_parent_bin(self, bin_id: int) -> dict | None:
        return self.db_manager.get_parent_bin(bin_id)

    def get_subbins(self, bin_id: int) -> list[dict]:
        return self.db_manager.get_subbins(bin_id)

    # def get_reminders(self, bin_id: int) -> list[dict]:
    #     return self.db_manager.get_reminders_in_bin(bin_id)

    # def _bin_name(self, bin_id: int) -> str:
    #     self.db_manager.cursor.execute("SELECT name FROM Bins WHERE id=?", (bin_id,))
    #     row = self.db_manager.cursor.fetchone()
    #     return row[0] if row else f"bin:{bin_id}"

    # def _is_root(self, bin_id: int) -> bool:
    #     # adjust if your root id differs
    #     return bin_id == getattr(self, "root_id", 0)

    # @lru_cache(maxsize=2048)
    # def _bin_name(self, bin_id: int) -> str:
    #     if self._is_root(bin_id):
    #         # choose what you want to display for root
    #         return "root"  # or "" if you prefer no label
    #     cur = self.db_manager.cursor
    #     cur.execute("SELECT name FROM Bins WHERE id=?", (bin_id,))
    #     row = cur.fetchone()
    #     return row[0] if row and row[0] else f"bin:{bin_id}"
    #
    # def _parent_bin_id(self, bin_id: int) -> Optional[int]:
    #     # Root has NULL parent
    #     self.db_manager.cursor.execute(
    #         "SELECT container_id FROM BinLinks WHERE bin_id=? LIMIT 1", (bin_id,)
    #     )
    #     row = self.db_manager.cursor.fetchone()
    #     return row[0] if row and row[0] is not None else None
    #
    # def _bin_path_ids(self, bin_id: int) -> List[int]:
    #     """Return path of bin ids from root→...→bin_id, but EXCLUDING root."""
    #     path: List[int] = []
    #     cur = bin_id
    #     while cur is not None:
    #         parent = self._parent_bin_id(cur)
    #         path.append(cur)
    #         cur = parent
    #     path.reverse()
    #     # Exclude root if it exists and is first
    #     if path and self._bin_name(path[0]).lower() == "root":
    #         path = path[1:]
    #     return path

    # def bin_tagger(self, bin_id: int, page_size: int = 26) -> List[Page]:
    #     """
    #     Build pages for a single Bin view.
    #
    #     Path (excluding 'root') is shown as the first row on every page.
    #     - Path segments are tagged a.., but the LAST segment (the current bin) is NOT tagged.
    #     - On every page, content letters start after the header letters, so if header used a..c,
    #       content begins at 'd' on each page.
    #     - Only taggable rows (bins + reminders) count toward page_size.
    #
    #     Returns: list[ (rows: list[str], tag_map: dict[str, ('bin' | 'record', target)]) ]
    #     - target is bin_id for 'bin', or (record_id, job_id|None) for 'record'.
    #     """
    #
    #     # ---------- helpers ----------
    #     def _is_root(bid: int) -> bool:
    #         # Adjust if you use a different root id
    #         return bid == getattr(self, "root_id", 0)
    #
    #     @lru_cache(maxsize=4096)
    #     def _bin_name(bid: int) -> str:
    #         if _is_root(bid):
    #             return "root"
    #         cur = self.db_manager.cursor
    #         cur.execute("SELECT name FROM Bins WHERE id=?", (bid,))
    #         row = cur.fetchone()
    #         return row[0] if row and row[0] else f"bin:{bid}"
    #
    #     def _bin_path_ids(bid: int) -> List[int]:
    #         """Return ancestor path including current bin, excluding root."""
    #         ids: List[int] = []
    #         cur = self.db_manager.cursor
    #         b = bid
    #         while b is not None and not _is_root(b):
    #             ids.append(b)
    #             cur.execute(
    #                 "SELECT container_id FROM BinLinks WHERE bin_id = ? LIMIT 1", (b,)
    #             )
    #             row = cur.fetchone()
    #             b = row[0] if row else None
    #         ids.reverse()
    #         return ids
    #
    #     def _pretty_child_name(parent_name: str, child_name: str) -> str:
    #         """
    #         Trim exactly 'parent:' from the front of a child name.
    #         This avoids accidental trims when a child merely starts with the same characters.
    #         Examples:
    #             parent='2025', child='2025:10' -> '10'
    #             parent='people', child='people:S' -> 'S'
    #             parent='2025', child='202510' -> '202510' (unchanged)
    #             parent='2025', child='2025x' -> '2025x' (unchanged)
    #         """
    #         if not parent_name:
    #             return child_name
    #         prefix = f"{parent_name}:"
    #         if child_name.startswith(prefix):
    #             suffix = child_name[len(prefix) :]
    #             return suffix or child_name  # never return empty string
    #         return child_name
    #
    #     def _format_path_header(
    #         path_ids: List[int], continued: bool
    #     ) -> Tuple[str, Dict[str, Tuple[str, int]], int]:
    #         """
    #         Build the header text and its tag_map.
    #         Tag all but the last path segment (so the current bin is untagged).
    #         Returns: (header_text, header_tagmap, header_letters_count)
    #         """
    #         tag_map: Dict[str, Tuple[str, int]] = {}
    #         segs: List[str] = []
    #         if not path_ids:
    #             header_text = ".."
    #             return (
    #                 (header_text + (" [i](continued)[/i]" if continued else "")),
    #                 tag_map,
    #                 0,
    #             )
    #
    #         # how many path letters to tag (exclude current bin)
    #         taggable = max(0, len(path_ids) - 1)
    #         header_letters = min(taggable, 26)
    #
    #         for i, bid in enumerate(path_ids):
    #             name = _bin_name(bid)
    #             if i < header_letters:  # tagged ancestor
    #                 tag = chr(ord("a") + i)
    #                 tag_map[tag] = ("bin", bid)
    #                 segs.append(f"[dim]{tag}[/dim] {name}")
    #             elif i == len(path_ids) - 1:  # current bin (untagged)
    #                 segs.append(f"[bold red]{name}[/bold red]")
    #             else:  # very deep path overflow (unlikely)
    #                 segs.append(f"[bold yellow]{name}[/bold yellow]")
    #
    #         header = " / ".join(segs) if segs else ".."
    #         if continued:
    #             header += " [i](continued)[/i]"
    #         return header, tag_map, header_letters
    #
    #     # ---------- gather data ----------
    #     path_ids = _bin_path_ids(bin_id)  # excludes root, includes current bin
    #     current_name = "" if _is_root(bin_id) else _bin_name(bin_id)
    #
    #     subbins = self.db_manager.get_subbins(bin_id)  # [{id,name,subbins,reminders}]
    #     reminders = self.db_manager.get_reminders_in_bin(
    #         bin_id
    #     )  # [{id,subject,itemtype}]
    #
    #     # Prepare content rows (bins then reminders), sorted
    #     bin_rows: List[Tuple[str, Any, str]] = []
    #     for b in sorted(subbins, key=lambda x: x["name"].lower()):
    #         disp = _pretty_child_name(current_name, b["name"])
    #         bin_rows.append(
    #             (
    #                 "bin",
    #                 b["id"],
    #                 f"[bold yellow]{disp}[/bold yellow] [dim]({b['subbins']}/{b['reminders']})[/dim]",
    #             )
    #         )
    #
    #     rec_rows: List[Tuple[str, Any, str]] = []
    #
    #     for r in sorted(reminders, key=lambda x: x["subject"].lower()):
    #         log_msg(f"bins {r = }")
    #         color = TYPE_TO_COLOR.get(r.get("itemtype", ""), "white")
    #         old_subject = r["subject"]
    #         subject = self.apply_flags(r["id"], r["subject"])
    #         log_msg(f"bins {old_subject = }, {subject = }")
    #         rec_rows.append(
    #             (
    #                 "record",
    #                 (r["id"], None),
    #                 f"[{color}]{r.get('itemtype', '')} {subject}[/{color}]",
    #             )
    #         )
    #
    #     all_rows: List[Tuple[str, Any, str]] = bin_rows + rec_rows
    #
    #     # ---------- paging ----------
    #     pages: List[Page] = []
    #     idx = 0
    #     first = True
    #
    #     # header (first page) + how many letters consumed by header
    #     first_header_text, first_hdr_map, header_letters = _format_path_header(
    #         path_ids, continued=False
    #     )
    #     content_capacity = max(0, page_size - header_letters)
    #
    #     while first or idx < len(all_rows):
    #         if first:
    #             header_text, hdr_map = first_header_text, dict(first_hdr_map)
    #         else:
    #             # repeated header with (continued)
    #             header_text, hdr_map, _ = _format_path_header(path_ids, continued=True)
    #
    #         rows_out: List[str] = [header_text]
    #         tag_map: Dict[str, Tuple[str, Any]] = dict(hdr_map)
    #
    #         if content_capacity == 0:
    #             # Deep path; show header-only page to avoid infinite loop
    #             pages.append((rows_out, tag_map))
    #             break
    #
    #         tagged = 0
    #         next_letter_idx = (
    #             header_letters  # content starts after header letters every page
    #         )
    #         while idx < len(all_rows) and tagged < content_capacity:
    #             kind, payload, text = all_rows[idx]
    #             idx += 1
    #             tag = chr(ord("a") + next_letter_idx)
    #             if kind == "bin":
    #                 tag_map[tag] = ("bin", payload)
    #             else:
    #                 tag_map[tag] = ("record", payload)  # (record_id, job_id)
    #             rows_out.append(f"  [dim]{tag}[/dim] {text}")
    #             tagged += 1
    #             next_letter_idx += 1
    #
    #         pages.append((rows_out, tag_map))
    #         first = False
    #
    #     return pages

    # def get_bin_pages(self, bin_id: int):
    #     """Public API the view will call."""
    #     pages = self.bin_tagger(bin_id)
    #     # Title: path text without tags, e.g. "Activities / Travel". If no path => "root".
    #     path_ids = self._bin_path_ids(bin_id)
    #     # title = " / ".join(self._bin_name(b) for b in path_ids) or ".."
    #     title = "Bins"
    #     return pages, title

    def get_record_details(self, record_id: int) -> str:
        """Fetch record details formatted for the details pane."""
        record = self.db_manager.get_record(record_id)
        if not record:
            return "[red]No details found[/red]"

        subject = record[2]
        desc = record[3] or ""
        itemtype = record[1]
        return f"[bold]{itemtype}[/bold] {subject}\n\n{desc}"

    # controller.py (inside class Controller)

    # --- Backup helpers ---------------------------------------------------------
    def _db_path_from_self(self) -> Path:
        """
        Resolve the path of the live DB from Controller/DatabaseManager.
        Adjust the attribute names if yours differ.
        """
        # Common patterns; pick whichever exists in your DB manager:
        for attr in ("db_path", "database_path", "path"):
            p = getattr(self.db_manager, attr, None)
            if p:
                return Path(p)
        # Fallback if you also store it on the controller:
        if hasattr(self, "db_path"):
            return Path(self.db_path)
        raise RuntimeError(
            "Couldn't resolve database path from Controller / db_manager."
        )

    def _parse_backup_name(self, p: Path) -> Optional[date]:
        m = _BACKUP_RE.match(p.name)
        if not m:
            return None
        y, mth, d = map(int, m.groups())
        return date(y, mth, d)

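    # Note: _BACKUP_RE is defined elsewhere in this module. Given the
    # "{date.isoformat()}.db" naming used below, it presumably matches names
    # like "2025-10-02.db" with year/month/day as groups; a plausible sketch
    # (an assumption, not the released definition) would be:
    #
    #     _BACKUP_RE = re.compile(r"^(\d{4})-(\d{2})-(\d{2})\.db$")
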
    def _find_backups(self, dir_path: Path) -> List[_BackupInfo]:
        out: List[_BackupInfo] = []
        if not dir_path.exists():
            return out
        for p in dir_path.iterdir():
            if not p.is_file():
                continue
            d = self._parse_backup_name(p)
            if d is None:
                continue
            try:
                st = p.stat()
            except FileNotFoundError:
                continue
            out.append(_BackupInfo(path=p, day=d, mtime=st.st_mtime))
        out.sort(key=lambda bi: (bi.day, bi.mtime), reverse=True)
        return out

    # def _sqlite_backup(self, src_db: Path, dest_db: Path) -> None:
    #     """Use SQLite's backup API for a consistent snapshot."""
    #     dest_tmp = dest_db.with_suffix(dest_db.suffix + ".tmp")
    #     dest_db.parent.mkdir(parents=True, exist_ok=True)
    #     with sqlite3.connect(str(src_db)) as src, sqlite3.connect(str(dest_tmp)) as dst:
    #         src.backup(dst, pages=0)  # full backup
    #         # Safety on the destination file only:
    #         dst.execute("PRAGMA wal_checkpoint(TRUNCATE);")
    #         dst.execute("VACUUM;")
    #         dst.commit()
    #     try:
    #         shutil.copystat(src_db, dest_tmp)
    #     except Exception:
    #         pass
    #     dest_tmp.replace(dest_db)

    def _should_snapshot(self, db_path: Path, backups: List[_BackupInfo]) -> bool:
        try:
            db_mtime = db_path.stat().st_mtime
        except FileNotFoundError:
            return False
        latest_backup_mtime = max((b.mtime for b in backups), default=0.0)
        return db_mtime > latest_backup_mtime

    def _select_retention(
        self, backups: List[_BackupInfo], today_local: date
    ) -> Set[Path]:
        """
        Keep at most 5:
        newest overall, newest >=3d, >=7d, >=14d, >=28d (by calendar day).
        """
        keep: Set[Path] = set()
        if not backups:
            return keep

        newest = max(backups, key=lambda b: (b.day, b.mtime))
        keep.add(newest.path)

        for days in (3, 7, 14, 28):
            cutoff = today_local - timedelta(days=days)
            cands = [b for b in backups if b.day <= cutoff]
            if cands:
                chosen = max(cands, key=lambda b: (b.day, b.mtime))
                keep.add(chosen.path)
        return keep

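    # Worked example (illustrative): with today 2025-10-02 and daily backups
    # from 2025-09-01 through 2025-10-01, the retention set is
    #
    #     newest overall -> 2025-10-01
    #     newest >= 3d   -> 2025-09-29
    #     newest >= 7d   -> 2025-09-25
    #     newest >= 14d  -> 2025-09-18
    #     newest >= 28d  -> 2025-09-04
    #
    # so at most five files survive pruning; everything else is removed.
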
    # --- Public API --------------------------------------------------------------
    def rotate_daily_backups(self) -> tuple[Optional[Path], list[Path], list[Path]]:
        """
        Snapshot the live DB (named for yesterday, local time) when it has
        changed since the newest backup, then prune to the retention set.
        Returns (created, kept, removed).
        """
        # Where is the live DB?
        db_path: Path = Path(
            self.db_manager.db_path
        ).resolve()  # ensure DatabaseManager exposes .db_path
        backup_dir: Path = db_path.parent / "backups"
        backup_dir.mkdir(parents=True, exist_ok=True)

        tz_name = getattr(getattr(self, "env", None), "timezone", "America/New_York")
        tzinfo = ZoneInfo(tz_name)

        now = datetime.now(tzinfo)
        today = now.date()
        yesterday = today - timedelta(days=1)

        backups = self._find_backups(backup_dir)

        created: Optional[Path] = None
        if self._should_snapshot(db_path, backups):
            target = backup_dir / f"{yesterday.isoformat()}.db"
            self.db_manager.backup_to(target)
            created = target
            backups = self._find_backups(backup_dir)  # refresh

        keep = self._select_retention(backups, today_local=today)
        kept = sorted(keep)
        removed: List[Path] = []
        for b in backups:
            if b.path not in keep:
                removed.append(b.path)
                try:
                    b.path.unlink()
                except FileNotFoundError:
                    pass

        return created, kept, removed

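    # Usage sketch (illustrative; names are hypothetical):
    #
    #     created, kept, removed = controller.rotate_daily_backups()
    #     if created:
    #         log_msg(f"backup written: {created}")
    #     log_msg(f"keeping {len(kept)}, pruned {len(removed)}")
    #
    # Intended to be cheap enough to call once at startup: it snapshots only
    # when the live DB is newer than the newest backup.
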
    ###VVV new for tagged bin tree

    def get_root_bin_id(self) -> int:
        # Reuse your existing, tested anchor
        return self.db_manager.ensure_root_exists()

    def _make_crumb(self, bin_id: int | None):
        """Return [(id, name), ...] from root to current."""
        if bin_id is None:
            rid = self.db_manager.ensure_root_exists()
            return [(rid, "root")]
        # climb using your get_parent_bin
        chain = []
        cur = bin_id
        while cur is not None:
            name = self.db_manager.get_bin_name(cur)
            chain.append((cur, name))
            parent = self.db_manager.get_parent_bin(cur)  # {'id','name'} or None
            cur = parent["id"] if parent else None
        return list(reversed(chain)) or [(self.db_manager.ensure_root_exists(), "root")]

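    # Example (illustrative; ids and names are hypothetical): for a bin nested
    # as root > projects > tklr, _make_crumb(tklr_id) returns
    #
    #     [(root_id, "root"), (projects_id, "projects"), (tklr_id, "tklr")]
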
    def get_bin_summary(self, bin_id: int | None, *, filter_text: str | None = None):
        """
        Returns:
            children  -> [ChildBinRow]
            reminders -> [ReminderRow]
            crumb     -> [(id, name), ...]
        Uses ONLY DatabaseManager public methods.
        """
        # 1) children (uses your counts + sort)
        raw_children = self.db_manager.get_subbins(
            bin_id if bin_id is not None else self.get_root_bin_id()
        )
        # shape: {"id", "name", "subbins", "reminders"}
        children = [
            ChildBinRow(
                bin_id=c["id"],
                name=c["name"],
                child_ct=c["subbins"],
                rem_ct=c["reminders"],
            )
            for c in raw_children
        ]

        # Custom ordering of children based on config.bin_orders
        root_name = self.get_bin_name(
            bin_id if bin_id is not None else self.get_root_bin_id()
        )
        order_list = self.env.config.bin_orders.get(root_name, [])
        if order_list:

            def _child_sort_key(c: ChildBinRow):
                try:
                    return (0, order_list.index(c.name))
                except ValueError:
                    return (1, c.name.lower())

            children.sort(key=_child_sort_key)
        else:
            children.sort(key=lambda c: c.name.lower())

        # 2) reminders (linked via ReminderLinks)
        raw_reminders = self.db_manager.get_reminders_in_bin(
            bin_id if bin_id is not None else self.get_root_bin_id()
        )

        reminders = [
            ReminderRow(
                record_id=r["id"],
                subject=self.apply_flags(r["id"], r["subject"]),
                # subject=r["subject"],
                itemtype=r["itemtype"],
            )
            for r in raw_reminders
        ]

        # 3) apply filter (controller-level; no new SQL)
        if filter_text:
            f = filter_text.casefold()
            children = [c for c in children if f in c.name.casefold()]
            reminders = [r for r in reminders if f in r.subject.casefold()]

        # 4) crumb
        crumb = self._make_crumb(
            bin_id if bin_id is not None else self.get_root_bin_id()
        )
        return children, reminders, crumb

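    # Usage sketch (illustrative; `controller` is hypothetical):
    #
    #     children, reminders, crumb = controller.get_bin_summary(
    #         None, filter_text="tax"
    #     )
    #     # children: ChildBinRow items whose names contain "tax"
    #     # reminders: ReminderRow items whose subjects contain "tax"
    #     # crumb: [(bin_id, name), ...] from root to the current bin
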
    # def get_reminder_details(self, record_id: int) -> str:
    #     # Minimal, safe detail using your existing schema
    #     row = self.db_manager.cursor.execute(
    #         "SELECT subject, itemtype FROM Records WHERE id=?",
    #         (record_id,),
    #     ).fetchone()
    #     if not row:
    #         return "[b]Unknown reminder[/b]"
    #     old_subject, itemtype = row
    #     subject = self.apply_flags(record_id, old_subject)
    #     log_msg(f"bins new {old_subject = }, {subject = }")
    #     return f"[b]{subject}[/b]\n[dim]type:[/dim] {itemtype or '—'}"

    def get_descendant_tree(self, bin_id: int) -> list[tuple[int, str, int]]:
        """
        Return a pre-order flattened list of (bin_id, name, depth)
        for the bins-only subtree rooted at `bin_id`.
        Uses DatabaseManager.get_subbins(), but applies custom sorting
        from config.bin_orders.
        """
        out: list[tuple[int, str, int]] = []

        def walk(current_id: int, depth: int) -> None:
            parent_name = self.db_manager.get_bin_name(current_id)
            order_list = self.env.config.bin_orders.get(parent_name)
            sorted_children = self.db_manager.get_subbins(
                current_id, custom_order=order_list
            )

            for ch in sorted_children:
                out.append((ch["id"], ch["name"], depth + 1))
                walk(ch["id"], depth + 1)

        root_name = self.db_manager.get_bin_name(bin_id)
        out.append((bin_id, root_name, 0))
        walk(bin_id, 0)
        return out

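    # Example (illustrative; ids and names are hypothetical): for a subtree
    # rooted at "projects" with children "home" and "work",
    # get_descendant_tree(projects_id) yields
    #
    #     [(projects_id, "projects", 0),
    #      (home_id, "home", 1),
    #      (work_id, "work", 1)]
    #
    # in pre-order, so a child's descendants precede its next sibling.
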
    def get_tag_groups(self) -> dict[str, list[dict]]:
        """
        Return a mapping: tag -> list of Records rows for that tag.
        """
        cur = self.db_manager.conn.cursor()
        cur.execute(
            """
            SELECT H.tag, R.*
            FROM Hashtags H
            JOIN Records R ON H.record_id = R.id
            ORDER BY H.tag, R.id
            """
        )

        columns = [col[0] for col in cur.description]

        tag_groups: dict[str, list[dict]] = {}

        for row in cur.fetchall():
            row_dict = dict(zip(columns, row))
            tag = row_dict.pop("tag")
            tag_groups.setdefault(tag, []).append(row_dict)

        return tag_groups

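    # Example (illustrative): with two hashtags the mapping looks like
    #
    #     {
    #         "errands": [{"id": 7, "subject": "pick up meds", ...}],
    #         "family":  [{"id": 3, ...}, {"id": 9, ...}],
    #     }
    #
    # where each value is the full Records row as a dict, minus the "tag" key.
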
    def get_tag_view(self):
        """
        Build paged rows for the Tag view.

        Returns:
            pages: list[list[dict]]  # from page_tagger
            header: str              # e.g. "Tags (N)"
        """
        tag_groups = self.get_tag_groups()

        rows: list[dict] = []

        # Sort tags alphabetically (you can tweak this later)
        for tag in sorted(tag_groups.keys(), key=str.lower):
            records = tag_groups[tag]
            if not records:
                continue

            # Header row for the tag
            rows.append(
                {
                    "record_id": None,
                    "job_id": None,
                    "text": f"[bold][{HEADER_COLOR}]{tag}[/{HEADER_COLOR}][/bold]",
                }
            )

            # One row per record under this tag
            for rec in records:
                rid = rec["id"]
                subj = rec.get("subject") or ""
                flags = rec.get("flags") or ""
                # subject + flags
                display = subj + flags

                rows.append(
                    {
                        "record_id": rid,
                        "job_id": None,
                        "text": f"{display}",
                    }
                )

        if not rows:
            header = "Tags (0)"
            return page_tagger(
                [
                    {
                        "record_id": None,
                        "job_id": None,
                        "text": f"[{HEADER_COLOR}]No tags found[/{HEADER_COLOR}]",
                    }
                ]
            ), header

        pages = page_tagger(rows)
        title = f"Tags ({len(tag_groups)})"
        return pages, title
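
    # Usage sketch (illustrative; `controller` and `screen` are hypothetical):
    #
    #     pages, title = controller.get_tag_view()
    #     screen.set_title(title)       # e.g. "Tags (4)"
    #     screen.show_page(pages[0])    # rows already carry Rich markup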