tklr-dgraham 0.0.0rc22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tklr/model.py ADDED
@@ -0,0 +1,4548 @@
1
+ from __future__ import annotations
2
+ import os
3
+ import sqlite3
4
+ import json
6
+ from datetime import date, datetime, time, timedelta
7
+ from dateutil.rrule import rrulestr
9
+
10
+ from typing import List, Tuple, Optional, Dict, Any, Set, Iterable
11
+ from rich import print
12
+ from tklr.tklr_env import TklrEnvironment
13
+ from dateutil import tz
14
+
15
+ # from dateutil.tz import gettz
16
+ # import math
17
+ import numpy as np
18
+ from pathlib import Path
19
+ from dataclasses import dataclass, field
20
+
21
+ import shutil
22
+
23
+ # from textwrap import indent
24
+ from rich.console import Console
25
+ from rich.text import Text
26
+
27
+
28
+ from .shared import (
29
+ HRS_MINS,
30
+ log_msg,
31
+ bug_msg,
32
+ parse,
33
+ format_datetime,
34
+ _to_local_naive,
35
+ datetime_from_timestamp,
36
+ duration_in_words,
37
+ datetime_in_words,
38
+ fmt_local_compact,
39
+ parse_local_compact,
40
+ fmt_utc_z,
41
+ parse_utc_z,
42
+ fmt_user,
43
+ get_anchor,
44
+ )
45
+
46
+ import re
47
+ from .item import Item
48
+ from collections import defaultdict, deque
49
+
50
+ TAG_RE = re.compile(r"(?<!\w)#([A-Za-z0-9]+)")
51
+
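+ # Example (editor's sketch): TAG_RE matches '#' only at a word boundary, so
+ # fragments like 'C#' or 'a#b' are not tagged.
+ # >>> TAG_RE.findall("call #Bob about #tklr2")
+ # ['Bob', 'tklr2']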
52
+
53
+ anniversary_regex = re.compile(r"!(\d{4})!")
54
+
55
+ BIN_ROOTS = {
56
+ "activities",
57
+ "journal",
58
+ "library",
59
+ "people",
60
+ "places",
61
+ "projects",
62
+ "seedbed",
63
+ "tags",
64
+ "unlinked",
65
+ }
66
+
67
+ BIN_PATHS = [
68
+ ["books", "library"],
69
+ ["movies", "library"],
70
+ ["series", "library"],
71
+ ["poetry", "library"],
72
+ ["quotations", "library"],
73
+ ["seed", "seedbed"],
74
+ ["germination", "seedbed"],
75
+ ["seedling", "seedbed"],
76
+ ["growing", "seedbed"],
77
+ ["flowering", "seedbed"],
78
+ ]
79
+
80
+
81
+ def regexp(pattern, value):
82
+ try:
83
+ return re.search(pattern, value) is not None
84
+ except TypeError:
85
+ return False # Handle None values gracefully
86
+
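+ # Usage note (editor's sketch): once registered via
+ # conn.create_function("REGEXP", 2, regexp), SQLite evaluates
+ # "x REGEXP y" as REGEXP(y, x), so queries such as
+ #   SELECT id FROM Records WHERE subject REGEXP '^Call'
+ # work, and NULL subjects simply return no match.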
87
+
88
+ def utc_now_string():
89
+ """Return current UTC time as 'YYYYMMDDTHHMMSS'."""
90
+ return datetime.now(tz.UTC).strftime("%Y%m%dT%H%MZ")
91
+
92
+
93
+ def utc_now_to_seconds():
94
+ # utcnow() is naive, so .timestamp() would interpret it as local time;
+ # use an aware UTC datetime instead.
+ return round(datetime.now(tz.UTC).timestamp())
95
+
96
+
97
+ def is_date(obj):
98
+ return isinstance(obj, date) and not isinstance(obj, datetime)
99
+
100
+
101
+ DATE_FMT = "%Y%m%d"
102
+ DT_FMT = "%Y%m%dT%H%M"
103
+
104
+
105
+ def _fmt_date(d: date) -> str:
106
+ return d.strftime(DATE_FMT)
107
+
108
+
109
+ def _fmt_naive(dt: datetime) -> str:
110
+ return dt.strftime(DT_FMT)
111
+
112
+
113
+ def _fmt_utc(dt_aware_utc: datetime) -> str:
114
+ return dt_aware_utc.astimezone(tz.UTC).strftime(DT_FMT) + "Z"
115
+
116
+
125
+
126
+
127
+ def _to_key(dt: datetime) -> str:
128
+ """Naive-local datetime -> 'YYYYMMDDTHHMMSS' string key."""
129
+ return dt.strftime("%Y%m%dT%H%M")
130
+
131
+
132
+ def _today_key() -> str:
133
+ """'YYYYMMDDTHHMMSS' for now in local time, used for lexicographic comparisons."""
134
+ return datetime.now().strftime("%Y%m%dT%H%M")
135
+
136
+
137
+ def _split_span_local_days(
138
+ start_local: datetime, end_local: datetime
139
+ ) -> list[tuple[datetime, datetime]]:
140
+ """
141
+ Split a local-naive span into same-day segments.
142
+ Inclusive start, inclusive end per segment.
143
+ """
144
+ if end_local <= start_local:
145
+ return [(start_local, end_local)]
146
+
147
+ segs: list[tuple[datetime, datetime]] = []
148
+ cur_start = start_local
149
+
150
+ while cur_start.date() < end_local.date():
151
+ day_end = datetime.combine(cur_start.date(), time(23, 59, 59))
152
+ segs.append((cur_start, day_end))
153
+ next_day_start = datetime.combine(
154
+ cur_start.date() + timedelta(days=1), time(0, 0, 0)
155
+ )
156
+ cur_start = next_day_start
157
+
158
+ segs.append((cur_start, end_local))
159
+ return segs
160
+
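+ # Example (editor's sketch): a span that crosses midnight is split at the
+ # day boundary, with an inclusive 23:59:59 end on the first segment.
+ # >>> _split_span_local_days(datetime(2025, 6, 1, 22, 0), datetime(2025, 6, 2, 1, 0))
+ # [(datetime(2025, 6, 1, 22, 0), datetime(2025, 6, 1, 23, 59, 59)),
+ #  (datetime(2025, 6, 2, 0, 0), datetime(2025, 6, 2, 1, 0))]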
161
+
162
+ def td_str_to_td(duration_str: str) -> timedelta:
163
+ """Convert a duration string like '1h30m20s' into a timedelta."""
164
+ duration_str = duration_str.strip()
165
+ sign = "+"
166
+ if duration_str[0] in ["+", "-"]:
167
+ sign = duration_str[0]
168
+ duration_str = duration_str[1:]
169
+
170
+ pattern = r"(?:(\d+)w)?(?:(\d+)d)?(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?"
171
+ match = re.fullmatch(pattern, duration_str.strip())
172
+ if not match:
173
+ raise ValueError(f"Invalid duration format: '{duration_str}'")
174
+ weeks, days, hours, minutes, seconds = [int(x) if x else 0 for x in match.groups()]
175
+ if sign == "-":
176
+ return -timedelta(
177
+ weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds
178
+ )
179
+ else:
180
+ return timedelta(
181
+ weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds
182
+ )
183
+
184
+
185
+ def td_str_to_seconds(duration_str: str) -> int:
186
+ """Convert a duration string like '1h30m20s' into a timedelta."""
187
+ duration_str = duration_str.strip()
188
+ if not duration_str:
189
+ return 0
190
+ sign = "+"
191
+ if duration_str[0] in ["+", "-"]:
192
+ sign = duration_str[0]
193
+ duration_str = duration_str[1:]
194
+
195
+ pattern = r"(?:(\d+)w)?(?:(\d+)d)?(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?"
196
+ match = re.fullmatch(pattern, duration_str.strip())
197
+ if not match:
198
+ raise ValueError(f"Invalid duration format: '{duration_str}'")
199
+ weeks, days, hours, minutes, seconds = [int(x) if x else 0 for x in match.groups()]
200
+
201
+ # log_msg(f"{weeks = }, {days = }, {hours = }, {minutes = }, {seconds = }")
202
+
203
+ if sign == "-":
204
+ return -(weeks * 604800 + days * 86400 + hours * 3600 + minutes * 60 + seconds)
205
+ else:
206
+ return weeks * 604800 + days * 86400 + hours * 3600 + minutes * 60 + seconds
207
+
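+ # Example (editor's sketch): both duration parsers accept an optional leading
+ # sign and any subset of w/d/h/m/s components.
+ # >>> td_str_to_td("-1d2h")
+ # timedelta(days=-2, seconds=79200)    # i.e. -(1 day + 2 hours)
+ # >>> td_str_to_seconds("1h30m20s")
+ # 5420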
208
+
209
+ def dt_str_to_seconds(datetime_str: str) -> int | None:
210
+ """Convert a datetime string like '20250601T090000' into a datetime object."""
211
+ if not datetime_str:
212
+ return None
213
+ if "T" not in datetime_str:
214
+ datetime_str += "T000000"
215
+ try:
216
+ return round(datetime.strptime(datetime_str[:13], "%Y%m%dT%H%M").timestamp())
217
+
218
+ except ValueError:
219
+ return round(
220
+ datetime.strptime(datetime_str.rstrip("Z"), "%Y%m%dT0000").timestamp()
221
+ ) # Allow date-only
222
+
223
+
224
+ def dt_to_dtstr(dt_obj: datetime) -> str:
225
+ """Convert a datetime object to 'YYYYMMDDTHHMM' format."""
226
+ if is_date(dt_obj):
227
+ return dt_obj.strftime("%Y%m%d")
228
+ return dt_obj.strftime("%Y%m%dT%H%M")
229
+
230
+
231
+ def td_to_tdstr(td_obj: timedelta) -> str:
232
+ """Convert a timedelta object to a compact string like '1h30m20s'."""
233
+ total = int(td_obj.total_seconds())
234
+ if total == 0:
235
+ return "0s"
236
+
237
+ w, remainder = divmod(total, 604800)
238
+
239
+ d, remainder = divmod(remainder, 86400)
240
+
241
+ h, remainder = divmod(remainder, 3600)
242
+
243
+ m, s = divmod(remainder, 60)
244
+
245
+ parts = []
246
+ if w:
247
+ parts.append(f"{d}w")
248
+ if d:
249
+ parts.append(f"{d}d")
250
+ if h:
251
+ parts.append(f"{h}h")
252
+ if m:
253
+ parts.append(f"{m}m")
254
+ if s:
255
+ parts.append(f"{s}s")
256
+
257
+ return "".join(parts)
258
+
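+ # Example (editor's sketch): round-trips with td_str_to_td.
+ # >>> td_to_tdstr(timedelta(days=9, minutes=5))
+ # '1w2d5m'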
259
+
260
+ # If you already have these helpers elsewhere, import and reuse them.
261
+ def _fmt_compact_local_naive(dt: datetime) -> str:
262
+ """Return local-naive 'YYYYMMDD' or 'YYYYMMDDTHHMMSS'."""
263
+ if dt.tzinfo is not None:
264
+ dt = dt.astimezone(tz.tzlocal()).replace(tzinfo=None)
265
+ if dt.hour == 0 and dt.minute == 0 and dt.second == 0:
266
+ return dt.strftime("%Y%m%d")
267
+ return dt.strftime("%Y%m%dT%H%M")
268
+
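+ # Example (editor's sketch): midnight collapses to a date-only stamp.
+ # >>> _fmt_compact_local_naive(datetime(2025, 6, 1))
+ # '20250601'
+ # >>> _fmt_compact_local_naive(datetime(2025, 6, 1, 9, 30))
+ # '20250601T0930'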
269
+
270
+ def _shift_from_parent(parent_dt: datetime, seconds: int) -> datetime:
271
+ """
272
+ Positive seconds = '&s 5d' means 5 days BEFORE parent => subtract.
273
+ Negative seconds => AFTER parent => add.
274
+ """
275
+ return parent_dt - timedelta(seconds=seconds)
276
+
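+ # Example (editor's sketch): a positive offset like '&s 5d' lands BEFORE the parent.
+ # >>> _shift_from_parent(datetime(2025, 6, 10), td_str_to_seconds("5d"))
+ # datetime(2025, 6, 5, 0, 0)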
277
+
278
+ def _parse_jobs_json(jobs_json: str | None) -> list[dict]:
279
+ """
280
+ Parse your jobs list. Expects a list of dicts like:
281
+ {"~": "create plan", "s": "1w", "e": "1h", "i": 1, "status": "...", ...}
282
+ Returns a normalized list with keys: job_id, offset_str, extent_str, status, display_subject.
283
+ """
284
+ if not jobs_json:
285
+ return []
286
+ try:
287
+ data = json.loads(jobs_json)
288
+ except Exception:
289
+ return []
290
+
291
+ jobs = []
292
+ if isinstance(data, list):
293
+ for j in data:
294
+ if isinstance(j, dict):
295
+ # log_msg(f"json jobs: {j = }")
296
+ jobs.append(
297
+ {
298
+ "job_id": j.get("id"),
299
+ "offset_str": (j.get("s") or "").strip(),
300
+ "extent_str": (j.get("e") or "").strip(),
301
+ "status": (j.get("status") or "").strip().lower(),
302
+ "display_subject": (j.get("display_subject") or "").strip(),
303
+ }
304
+ )
305
+ return jobs
306
+
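+ # Example (editor's sketch, assuming the 'id' key shown in the docstring):
+ # >>> _parse_jobs_json('[{"~": "plan", "s": "1w", "e": "1h", "id": 1, "status": "Next"}]')
+ # [{'job_id': 1, 'offset_str': '1w', 'extent_str': '1h', 'status': 'next',
+ #   'display_subject': ''}]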
307
+
308
+ # 6-hour windows within a day (local-naive)
309
+ WINDOWS = [
310
+ (0, 6), # bit 1: 00:00 - 06:00
311
+ (6, 12), # bit 2: 06:00 - 12:00
312
+ (12, 18), # bit 3: 12:00 - 18:00
313
+ (18, 24), # bit 4: 18:00 - 24:00
314
+ ]
315
+
316
+
317
+ def bits_to_int(bitstring: str) -> int:
318
+ """'0000101...' → integer."""
319
+ return int(bitstring, 2)
320
+
321
+
322
+ def int_to_bits(value: int) -> str:
323
+ """Integer → 35-bit '010...'."""
324
+ return format(value, "035b")
325
+
326
+
327
+ def or_aggregate(values: list[int]) -> int:
328
+ """Bitwise OR aggregate."""
329
+ acc = 0
330
+ for v in values:
331
+ acc |= v
332
+ return acc
333
+
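+ # Example (editor's sketch): these helpers round-trip 35-slot busy masks.
+ # >>> bits_to_int("101")
+ # 5
+ # >>> or_aggregate([bits_to_int("101"), bits_to_int("010")])
+ # 7
+ # >>> int_to_bits(7)[-3:]
+ # '111'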
334
+
335
+ def _parse_local_naive(ts: str) -> datetime:
336
+ # "YYYYmmddTHHMM" → naive local datetime
337
+ return datetime.strptime(ts, "%Y%m%dT%H%M")
338
+
339
+
340
+ def _iso_year_week(d: datetime) -> str:
341
+ y, w, _ = d.isocalendar()
342
+ return f"{y:04d}-{w:02d}"
343
+
344
+
345
+ def fine_busy_bits_for_event(
346
+ start_str: str, end_str: str | None
347
+ ) -> dict[str, np.ndarray]:
348
+ """
349
+ Return dict of {year_week: 679-slot uint8 array}
350
+ (7 days × (1 all-day + 96 fifteen-minute blocks))
351
+ """
352
+ start = parse(start_str)
353
+
354
+ # --- handle end rules ---
355
+ end = parse(end_str) if end_str else None
356
+
357
+ # if end is None and (start.hour != 0 or start.minute != 0):
358
+ if end is None or not isinstance(start, datetime):
359
+ # zero-extent event: contributes nothing
360
+ return {}
361
+
362
+ slot_minutes = 15
363
+ slots_per_day = 96
364
+ slots_per_week = 7 * (1 + slots_per_day) # 679
365
+ weeks: dict[str, np.ndarray] = {}
366
+
367
+ def yw_key(dt: datetime) -> str:
368
+ y, w, _ = dt.isocalendar()
369
+ return f"{y:04d}-{w:02d}"
370
+
371
+ cur = start
372
+ busy_count = 0
373
+ while True:
374
+ yw = yw_key(cur)
375
+ if yw not in weeks:
376
+ weeks[yw] = np.zeros(slots_per_week, dtype=np.uint8)
377
+
378
+ day_index = cur.weekday() # Mon=0
379
+ base = day_index * (1 + slots_per_day)
380
+
381
+ if end is None:
382
+ # all-day flag only (note: unreachable while the early return above rejects end is None)
383
+ weeks[yw][base] = 1
384
+ else:
385
+ day_start = datetime.combine(cur.date(), datetime.min.time())
386
+ day_end = datetime.combine(cur.date(), datetime.max.time())
387
+ s = max(start, day_start)
388
+ e = min(end, day_end)
389
+
390
+ s_idx = (s.hour * 60 + s.minute) // slot_minutes
391
+ e_idx = (e.hour * 60 + e.minute) // slot_minutes
392
+ # log_msg(f"{s_idx = }, {e_idx = }, {e_idx - s_idx = } ")
393
+ weeks[yw][base + 1 + s_idx : base + 1 + e_idx + 1] = 1
394
+ busy_count += np.count_nonzero(weeks[yw])
395
+
396
+ if end is None or cur.date() >= end.date():
397
+ break
398
+ cur += timedelta(days=1)
399
+ # log_msg(f"{start_str = }, {end_str = }, {busy_count = }")
400
+ return weeks
401
+
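+ # Example (editor's sketch, assuming `parse` from .shared accepts compact
+ # 'YYYYMMDDTHHMM' stamps): a one-hour event on Monday 2025-06-02 lands in
+ # ISO week '2025-23' and marks five 15-minute slots (the end slot is inclusive).
+ # >>> weeks = fine_busy_bits_for_event("20250602T0900", "20250602T1000")
+ # >>> list(weeks), int(weeks["2025-23"].sum())
+ # (['2025-23'], 5)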
402
+
403
+ def _reduce_to_35_slots(arr: np.ndarray) -> np.ndarray:
404
+ """
405
+ Convert 679 fine bits (7 × (1 + 96)) into 35 coarse slots
406
+ (7 × [1 all-day + 4 × 6-hour blocks]).
407
+ """
408
+ days = 7
409
+ allday_bits = arr.reshape(days, 97)[:, 0]
410
+ quarters = arr.reshape(days, 97)[:, 1:] # 7×96
411
+
412
+ coarse = np.zeros((days, 5), dtype=np.uint8)
413
+
414
+ for d in range(days):
415
+ # all-day stays as-is
416
+ coarse[d, 0] = allday_bits[d]
417
+
418
+ # 4 six-hour ranges
419
+ for i in range(4):
420
+ start = i * 24 # 6h = 24 × 15min
421
+ end = start + 24
422
+ chunk = quarters[d, start:end]
423
+ if np.any(chunk == 2):
424
+ coarse[d, i + 1] = 2
425
+ elif np.any(chunk == 1):
426
+ coarse[d, i + 1] = 1
427
+ else:
428
+ coarse[d, i + 1] = 0
429
+
430
+ return coarse.flatten()
431
+
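+ # Example (editor's sketch): one busy quarter at Monday 09:00 falls in the
+ # 06:00-12:00 block, i.e. coarse slot 2 (day 0: all-day slot 0, block index 1).
+ # >>> fine = np.zeros(679, dtype=np.uint8)
+ # >>> fine[1 + 36] = 1          # Monday, quarter 36 == 09:00-09:15
+ # >>> _reduce_to_35_slots(fine)[2]
+ # 1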
432
+
433
+ class SafeDict(dict):
434
+ def __missing__(self, key):
435
+ # Return a placeholder or empty string
436
+ return f"{{{key}}}"
437
+
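+ # Example (editor's sketch): unresolved placeholders survive formatting
+ # instead of raising KeyError.
+ # >>> "Hi {name}, see {url}".format_map(SafeDict(name="Ann"))
+ # 'Hi Ann, see {url}'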
438
+
439
+ @dataclass
440
+ class BinPathConfig:
441
+ allow_reparent: bool = True
442
+ standard_roots: Set[str] = field(
443
+ default_factory=lambda: BIN_ROOTS
444
+ ) # anchored at root
445
+ standard_paths: List[List[str]] = field(default_factory=lambda: BIN_PATHS)
446
+
447
+
448
+ class BinPathProcessor:
449
+ def __init__(self, model, cfg: Optional[BinPathConfig] = None):
450
+ """
451
+ model: your Model instance (ensure_system_bins, ensure_root_children, move_bin, etc.)
452
+ """
453
+ self.m = model
454
+ self.cfg = cfg or BinPathConfig()
455
+ # Ensure system bins + standard roots exist at startup
456
+ self.m.ensure_system_bins()
457
+ if self.cfg.standard_roots:
458
+ self.m.ensure_root_children(sorted(self.cfg.standard_roots)) # idempotent
459
+ # NEW: ensure standard child paths exist + are correctly anchored
460
+ for parts in self.cfg.standard_paths or []:
461
+ try:
462
+ # parts: ["leaf", "parent", "grandparent", ...]
463
+ # apply_parts ensures/repairs hierarchy without touching records
464
+ _norm, _log, _leaf_id = self.apply_parts(parts)
465
+ # You could log _log somewhere if desired
466
+ except Exception as e:
467
+ # Fail soft: don’t break startup if one path is weird
468
+ print(f"[binpaths] error applying standard path {parts!r}: {e}")
469
+
470
+ @staticmethod
471
+ def canon(name: str) -> str:
472
+ return (name or "").strip()
473
+
474
+ def _is_unlinked(self, bin_id: int) -> bool:
475
+ """
476
+ Unlinked if no parent row in BinLinks OR parent is the explicit 'unlinked' bin.
477
+ """
478
+ parent = self.m.get_parent_bin(bin_id) # {'id','name'} or None
479
+ if parent is None:
480
+ return True
481
+ return self.canon(parent["name"]) == "unlinked"
482
+
483
+ def _ensure_standard_root_anchor(self, name: str) -> None:
484
+ """
485
+ Ensure standard roots exist directly under root.
486
+ """
487
+ self.m.ensure_root_children([name]) # puts child under root if missing
488
+
489
+ # --- New: operate on already-split parts instead of parsing a string ---
490
+
491
+ def apply_parts(self, parts: List[str]) -> Tuple[str, List[str], int]:
492
+ """
493
+ Process a bin path given as parts, e.g. ["lille","france","places"].
494
+ Interpretation: parts[0] is the leaf, following are ancestors (nearest first).
495
+ Returns: (normalized_token '@b <leaf>', log, leaf_bin_id)
496
+ """
497
+ log: List[str] = []
498
+
499
+ parts = [p for p in (parts or []) if (p or "").strip()]
500
+ if not parts:
501
+ raise ValueError("Empty @b parts")
502
+
503
+ leaf_name = self.canon(parts[0])
504
+ ancestors = [self.canon(p) for p in parts[1:]] # nearest first
505
+ log.append(f"Parsed leaf='{leaf_name}', ancestors={ancestors!r}")
506
+
507
+ # Ensure system bins present
508
+ root_id, unlinked_id = self.m.ensure_system_bins()
509
+
510
+ # Ensure leaf exists
511
+ leaf_id = self.m.ensure_bin_exists(leaf_name)
512
+ normalized = f"@b {leaf_name}"
513
+
514
+ # No ancestors case
515
+ if not ancestors:
516
+ if not self._is_unlinked(leaf_id):
517
+ log.append("Leaf already linked (not under 'unlinked'); no changes.")
518
+ return normalized, log, leaf_id
519
+ self._attach_if_missing(leaf_name, "unlinked", log)
520
+ log.append("Leaf had no parent; placed under 'unlinked'.")
521
+ return normalized, log, leaf_id
522
+
523
+ # Walk up the chain: leaf -> parent -> grandparent...
524
+ child_name = leaf_name
525
+ for anc in ancestors:
526
+ if anc in self.cfg.standard_roots:
527
+ self._ensure_standard_root_anchor(anc)
528
+ self._attach_if_missing(child_name, anc, log)
529
+ child_name = anc
530
+
531
+ top = ancestors[-1]
532
+ if top in self.cfg.standard_roots:
533
+ log.append(f"Ensured standard root '{top}' is anchored under root.")
534
+ return normalized, log, leaf_id
535
+
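+ # Example (editor's sketch): parts are leaf-first, nearest ancestor next.
+ # binproc.apply_parts(["lille", "france", "places"]) ensures the chain
+ # places -> france -> lille exists (anchoring the standard root 'places'
+ # under root) and returns ('@b lille', <log lines>, <leaf bin id>).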
536
+ def _attach_if_missing(
537
+ self, child_name: str, parent_name: str, log: List[str]
538
+ ) -> None:
539
+ """
540
+ Attach child under parent if not already so; reparenting via move_bin (cycle-safe).
541
+ """
542
+ try:
543
+ child_id = self.m.ensure_bin_exists(child_name)
544
+ parent_id = self.m.ensure_bin_exists(parent_name)
545
+
546
+ parent = self.m.get_parent_bin(child_id)
547
+ if parent and self.canon(parent["name"]) == self.canon(parent_name):
548
+ log.append(f"'{child_name}' already under '{parent_name}'.")
549
+ return
550
+
551
+ if (
552
+ (not self.cfg.allow_reparent)
553
+ and parent
554
+ and self.canon(parent["name"]) != self.canon(parent_name)
555
+ ):
556
+ log.append(
557
+ f"Skipped reparenting '{child_name}' (existing parent='{parent['name']}') "
558
+ f"-> requested '{parent_name}' (allow_reparent=False)"
559
+ )
560
+ return
561
+
562
+ ok = self.m.move_bin(child_name, parent_name)
563
+ log.append(
564
+ f"{'Attached' if ok else 'Failed to attach'} '{child_name}' under '{parent_name}'."
565
+ )
566
+ except Exception as e:
567
+ log.append(f"Error attaching '{child_name}' -> '{parent_name}': {e}")
568
+
569
+ # Convenience wrappers for your controller:
570
+
571
+ def assign_record_via_parts(
572
+ self, record_id: int, parts: List[str]
573
+ ) -> Tuple[str, List[str], int]:
574
+ """
575
+ Ensure/repair hierarchy for {parts} and link the record to the leaf.
576
+ """
577
+ normalized, log, leaf_id = self.apply_parts(parts)
578
+ self.m.link_record_to_bin(record_id, leaf_id) # idempotent
579
+ log.append(
580
+ f"Linked record {record_id} → bin {leaf_id} ('{self.m.get_bin_name(leaf_id)}')."
581
+ )
582
+ return normalized, log, leaf_id
583
+
584
+ def assign_record_many(
585
+ self, record_id: int, list_of_parts: List[List[str]]
586
+ ) -> Tuple[List[str], List[str], List[int]]:
587
+ """
588
+ Process multiple bin paths for a single record.
589
+ Returns: (normalized_tokens, combined_log, leaf_ids)
590
+ """
591
+ norm_tokens: List[str] = []
592
+ combined_log: List[str] = []
593
+ leaf_ids: List[int] = []
594
+
595
+ # De-duplicate exact paths to avoid redundant work
596
+ seen = set()
597
+ for parts in list_of_parts or []:
598
+ key = tuple(self.canon(p) for p in parts if (p or "").strip())
599
+ if not key or key in seen:
600
+ continue
601
+ seen.add(key)
602
+
603
+ norm, log, leaf_id = self.assign_record_via_parts(record_id, list(key))
604
+ norm_tokens.append(norm)
605
+ combined_log.extend(log)
606
+ leaf_ids.append(leaf_id)
607
+
608
+ return norm_tokens, combined_log, leaf_ids
609
+
610
+
611
+ # bin_cache.py
612
+ def _rev_path_for(
613
+ bid: int, name: Dict[int, str], parent: Dict[int, Optional[int]]
614
+ ) -> str:
615
+ parts: List[str] = []
616
+ cur = bid
617
+ while cur is not None:
618
+ parts.append(name[cur])
619
+ cur = parent.get(cur)
620
+ return "/".join(parts) # leaf → ... → root
621
+
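+ # Example (editor's sketch): paths read leaf-first.
+ # >>> name = {1: "places", 2: "france", 3: "lille"}
+ # >>> parent = {1: None, 2: 1, 3: 2}
+ # >>> _rev_path_for(3, name, parent)
+ # 'lille/france/places'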
622
+
623
+ class BinCache:
624
+ """
625
+ Incremental cache for bins/links with a simple public API:
626
+
627
+ - name_to_binpath(): Dict[str, str] # { leaf_lower: "Leaf/Parent/.../Root" }
628
+
629
+ Update methods you call from your existing model helpers:
630
+
631
+ - on_create(bid, name, parent_id)
632
+ - on_rename(bid, new_name)
633
+ - on_link(bid, parent_id) # (re)parent; also used by move
634
+ - on_unlink(bid) # set parent to None
635
+ - on_delete(bid) # delete a bin and its subtree
636
+ """
637
+
638
+ def __init__(self, conn: sqlite3.Connection):
639
+ self.conn = conn
640
+ self.name: Dict[int, str] = {}
641
+ self.parent: Dict[int, Optional[int]] = {}
642
+ self.children: Dict[Optional[int], Set[int]] = defaultdict(set)
643
+ self.rev_path: Dict[int, str] = {}
644
+ self._name_to_binpath: Dict[str, str] = {}
645
+ self._load_all()
646
+
647
+ # ---------- initial build ----------
648
+
649
+ def _load_all(self) -> None:
650
+ rows = self.conn.execute("""
651
+ SELECT b.id, b.name, bl.container_id
652
+ FROM Bins b
653
+ LEFT JOIN BinLinks bl ON bl.bin_id = b.id
654
+ """).fetchall()
655
+
656
+ self.name.clear()
657
+ self.parent.clear()
658
+ self.children.clear()
659
+ for bid, nm, par in rows:
660
+ self.name[bid] = nm
661
+ self.parent[bid] = par
662
+ self.children[par].add(bid)
663
+
664
+ # compute reversed (leaf→root) paths
665
+ self.rev_path = {
666
+ bid: _rev_path_for(bid, self.name, self.parent) for bid in self.name
667
+ }
668
+ self._rebuild_name_dict()
669
+ # log_msg(f"{self.name_to_binpath() = }")
670
+
671
+ def _rebuild_name_dict(self) -> None:
672
+ self._name_to_binpath = {
673
+ nm.lower(): self.rev_path[bid] for bid, nm in self.name.items()
674
+ }
675
+
676
+ # ---------- subtree utilities ----------
677
+
678
+ def _iter_subtree(self, root_id: int) -> Iterable[int]:
679
+ q = deque([root_id])
680
+ while q:
681
+ x = q.popleft()
682
+ yield x
683
+ for c in self.children.get(x, ()):
684
+ q.append(c)
685
+
686
+ def _refresh_paths_for_subtree(self, root_id: int) -> None:
687
+ # recompute rev_path for root and descendants; update name_to_binpath values
688
+ for bid in self._iter_subtree(root_id):
689
+ self.rev_path[bid] = _rev_path_for(bid, self.name, self.parent)
690
+ for bid in self._iter_subtree(root_id):
691
+ self._name_to_binpath[self.name[bid].lower()] = self.rev_path[bid]
692
+
693
+ # ---------- mutations you call ----------
694
+
695
+ def on_create(self, bid: int, nm: str, parent_id: Optional[int]) -> None:
696
+ self.name[bid] = nm
697
+ self.parent[bid] = parent_id
698
+ self.children[parent_id].add(bid)
699
+ self.rev_path[bid] = _rev_path_for(bid, self.name, self.parent)
700
+ self._name_to_binpath[nm.lower()] = self.rev_path[bid]
701
+
702
+ def on_rename(self, bid: int, new_name: str) -> None:
703
+ old = self.name[bid]
704
+ if old.lower() != new_name.lower():
705
+ self._name_to_binpath.pop(old.lower(), None)
706
+ self.name[bid] = new_name
707
+ self._refresh_paths_for_subtree(bid)
708
+
709
+ def on_link(self, bid: int, new_parent_id: Optional[int]) -> None:
710
+ old_parent = self.parent.get(bid)
711
+ if old_parent == new_parent_id:
712
+ # nothing changed
713
+ return
714
+ if old_parent in self.children:
715
+ self.children[old_parent].discard(bid)
716
+ self.children[new_parent_id].add(bid)
717
+ self.parent[bid] = new_parent_id
718
+ self._refresh_paths_for_subtree(bid)
719
+
720
+ def on_unlink(self, bid: int) -> None:
721
+ old_parent = self.parent.get(bid)
722
+ if old_parent in self.children:
723
+ self.children[old_parent].discard(bid)
724
+ self.parent[bid] = None
725
+ self._refresh_paths_for_subtree(bid)
726
+
727
+ def on_delete(self, bid: int) -> None:
728
+ # remove whole subtree
729
+ to_rm = list(self._iter_subtree(bid))
730
+ par = self.parent.get(bid)
731
+ if par in self.children:
732
+ self.children[par].discard(bid)
733
+ for x in to_rm:
734
+ self._name_to_binpath.pop(self.name[x].lower(), None)
735
+ # detach from parent/children maps
736
+ p = self.parent.get(x)
737
+ if p in self.children:
738
+ self.children[p].discard(x)
739
+ self.children.pop(x, None)
740
+ self.parent.pop(x, None)
741
+ self.rev_path.pop(x, None)
742
+ self.name.pop(x, None)
743
+
744
+ # ---------- query ----------
745
+
746
+ def name_to_binpath(self) -> Dict[str, str]:
747
+ return self._name_to_binpath
748
+
749
+
750
+ class UrgencyComputer:
751
+ def __init__(self, env: TklrEnvironment):
752
+ self.env = env
753
+ self.urgency = env.config.urgency
754
+
755
+ self.MIN_URGENCY = self.urgency.colors.min_urgency
756
+ self.MIN_HEX_COLOR = self.urgency.colors.min_hex_color
757
+ self.MAX_HEX_COLOR = self.urgency.colors.max_hex_color
758
+ self.STEPS = self.urgency.colors.steps
759
+ self.BUCKETS = self.get_urgency_color_buckets()
760
+
761
+ def hex_to_rgb(self, hex_color: str) -> Tuple[int, int, int]:
762
+ hex_color = hex_color.lstrip("#")
763
+ return tuple(int(hex_color[i : i + 2], 16) for i in (0, 2, 4))
764
+
765
+ def rgb_to_hex(self, rgb: Tuple[int, int, int]) -> str:
766
+ return "#{:02x}{:02x}{:02x}".format(*rgb)
767
+
768
+ def get_urgency_color_buckets(self) -> List[str]:
769
+ neg_rgb = self.hex_to_rgb(self.MIN_HEX_COLOR)
770
+ max_rgb = self.hex_to_rgb(self.MAX_HEX_COLOR)
771
+
772
+ buckets = []
773
+ for i in range(self.STEPS):
774
+ t = i / (self.STEPS - 1)
775
+ rgb = tuple(
776
+ round(neg + t * (maxc - neg)) for neg, maxc in zip(neg_rgb, max_rgb)
777
+ )
778
+ buckets.append(self.rgb_to_hex(rgb))
779
+ return buckets
780
+
781
+ def urgency_to_bucket_color(self, urgency: float) -> str:
782
+ if urgency <= self.MIN_URGENCY:
783
+ return self.MIN_HEX_COLOR
784
+ if urgency >= 1.0:
785
+ return self.MAX_HEX_COLOR
786
+
787
+ i = min(
788
+ int((urgency - self.MIN_URGENCY) * len(self.BUCKETS)), len(self.BUCKETS) - 1
789
+ )
790
+ return self.BUCKETS[i]
791
+
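+ # Example (editor's sketch, assuming min_hex_color='#0000ff',
+ # max_hex_color='#ff0000', steps=3): get_urgency_color_buckets() linearly
+ # interpolates each RGB channel, giving ['#0000ff', '#800080', '#ff0000'];
+ # urgency_to_bucket_color then indexes into those buckets, clamping at the
+ # configured endpoints.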
792
+ def compute_partitioned_urgency(self, weights: dict[str, float]) -> float:
793
+ """
794
+ Compute urgency from signed weights:
795
+ - Positive weights push urgency up
796
+ - Negative weights pull urgency down
797
+ - Equal weights → urgency = 0
798
+
799
+ Returns:
800
+ urgency ∈ [-1.0, 1.0]
801
+ """
802
+ Wp = 0.0 + sum(w for w in weights.values() if w > 0)
803
+
804
+ Wn = 0.0 + sum(abs(w) for w in weights.values() if w < 0)
805
+
806
+ urgency = (Wp - Wn) / (2 + Wn + Wp)
807
+ # log_msg(f"{Wp = }, {Wn = }, {Wp - Wn = }, {Wp + Wn = }, {urgency = }")
808
+ return urgency
809
+
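+ # Worked example (editor's sketch): weights {'due': 1.0, 'priority': 0.5,
+ # 'age': -0.5} give Wp = 1.5, Wn = 0.5, so
+ # urgency = (1.5 - 0.5) / (2 + 0.5 + 1.5) = 0.25.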
810
+ def urgency_due(self, due_seconds: int, now_seconds: int) -> float:
811
+ """
812
+ This function calculates the urgency contribution for a task based
813
+ on its due datetime relative to the current datetime and returns
814
+ a float value between 0.0 when (now <= due - interval) and max when
815
+ (now >= due).
816
+ """
817
+ due_max = self.urgency.due.max
818
+ interval = self.urgency.due.interval
819
+ if due_seconds and due_max and interval:
820
+ interval_seconds = td_str_to_seconds(interval)
821
+ # log_msg(f"{due_max = }, {interval = }, {interval_seconds = }")
822
+ return max(
823
+ 0.0,
824
+ min(
825
+ due_max,
826
+ # 0.0 at (due - interval), rising to due_max at due
+ due_max * (1.0 - (due_seconds - now_seconds) / interval_seconds),
827
+ ),
828
+ )
829
+ return 0.0
830
+
831
+ def urgency_pastdue(self, due_seconds: int, now_seconds: int) -> float:
832
+ """
833
+ This function calculates the urgency contribution for a task based
834
+ on its due datetime relative to the current datetime and returns
835
+ a float value between 0.0 when (now <= due) and max when
836
+ (now >= due + interval).
837
+ """
838
+
839
+ pastdue_max = self.urgency.pastdue.max
840
+ interval = self.urgency.pastdue.interval
841
+ if due_seconds and pastdue_max and interval:
842
+ interval_seconds = td_str_to_seconds(interval)
843
+ return max(
844
+ 0.0,
845
+ min(
846
+ pastdue_max,
847
+ pastdue_max * (now_seconds - due_seconds) / interval_seconds,
848
+ ),
849
+ )
850
+ return 0.0
851
+
852
+ def urgency_recent(self, modified_seconds: int, now_seconds: int) -> float:
853
+ """
854
+ Calculate the 'recent' urgency contribution: recent_max immediately
+ after the item is modified, falling linearly to 0.0 as
+ (now - modified) approaches recent.interval.
860
+ """
861
+ recent_contribution = 0.0
862
+ recent_interval = self.urgency.recent.interval
863
+ recent_max = self.urgency.recent.max
864
+ # log_msg(f"{recent_interval = }")
865
+ if recent_max and recent_interval:
866
+ recent_interval_seconds = td_str_to_seconds(recent_interval)
867
+ recent_contribution = max(
868
+ 0.0,
869
+ min(
870
+ recent_max,
871
+ recent_max
872
+ * (1 - (now_seconds - modified_seconds) / recent_interval_seconds),
873
+ ),
874
+ )
875
+ # log_msg(f"computed {recent_contribution = }")
876
+ return recent_contribution
877
+
878
+ def urgency_age(self, modified_seconds: int, now_seconds: int) -> float:
879
+ """
880
+ Calculate the 'age' urgency contribution: 0.0 immediately after the
+ item is modified, rising linearly to age_max as (now - modified)
+ approaches age.interval.
886
+ """
887
+ age_contribution = 0
888
+ age_interval = self.urgency.age.interval
889
+ age_max = self.urgency.age.max
890
+ # log_msg(f"{age_interval = }")
891
+ if age_max and age_interval:
892
+ age_interval_seconds = td_str_to_seconds(age_interval)
893
+ age_contribution = max(
894
+ 0.0,
895
+ min(
896
+ age_max,
897
+ age_max * (now_seconds - modified_seconds) / age_interval_seconds,
898
+ ),
899
+ )
900
+ # log_msg(f"computed {age_contribution = }")
901
+ return age_contribution
902
+
903
+ def urgency_priority(self, priority_level: int) -> float:
904
+ priority = self.urgency.priority.root.get(str(priority_level), 0.0)
905
+ # log_msg(f"computed {priority = }")
906
+ return priority
907
+
908
+ def urgency_extent(self, extent_seconds: int) -> float:
909
+ extent_max = 1.0
910
+ extent_interval = td_str_to_seconds(self.urgency.extent.interval)
911
+ extent = max(
912
+ 0.0, min(extent_max, extent_max * extent_seconds / extent_interval)
913
+ )
914
+ # log_msg(f"{extent_seconds = }, {extent = }")
915
+ return extent
916
+
917
+ def urgency_blocking(self, num_blocking: int) -> float:
918
+ blocking = 0.0
919
+ if num_blocking:
920
+ blocking_max = self.urgency.blocking.max
921
+ blocking_count = self.urgency.blocking.count
922
+ if blocking_max and blocking_count:
923
+ blocking = max(
924
+ 0.0, min(blocking_max, blocking_max * num_blocking / blocking_count)
925
+ )
926
+ # log_msg(f"computed {blocking = }")
927
+ return blocking
928
+
929
+ def urgency_tags(self, num_tags: int) -> float:
930
+ tags = 0.0
931
+ tags_max = self.urgency.tags.max
932
+ tags_count = self.urgency.tags.count
933
+ if tags_max and tags_count:
934
+ tags = max(0.0, min(tags_max, tags_max * num_tags / tags_count))
935
+ # log_msg(f"computed {tags = }")
936
+ return tags
937
+
938
+ def urgency_description(self, has_description: bool) -> float:
939
+ description_max = self.urgency.description.max
940
+ description = 0.0
941
+ if has_description and description_max:
942
+ description = description_max
943
+ # log_msg(f"computed {description = }")
944
+ return description
945
+
946
+ def urgency_project(self, has_project: bool) -> float:
947
+ project_max = self.urgency.project.max
948
+ project = 0.0
949
+ if has_project and project_max:
950
+ project = project_max
951
+ # log_msg(f"computed {project = }")
952
+ return project
953
+
954
+ def from_args_and_weights(self, **kwargs):
955
+ if bool(kwargs.get("pinned", False)):
956
+ return 1.0, self.urgency_to_bucket_color(1.0), {}
957
+ weights = {
958
+ "due": self.urgency_due(kwargs.get("due"), kwargs["now"]),
959
+ "pastdue": self.urgency_pastdue(kwargs.get("due"), kwargs["now"]),
960
+ "age": self.urgency_age(kwargs["modified"], kwargs["now"]),
961
+ "recent": self.urgency_recent(kwargs["modified"], kwargs["now"]),
962
+ "priority": self.urgency_priority(kwargs.get("priority_level")),
963
+ "extent": self.urgency_extent(kwargs["extent"]),
964
+ "blocking": self.urgency_blocking(kwargs.get("blocking", 0.0)),
965
+ "tags": self.urgency_tags(kwargs.get("tags", 0)),
966
+ "description": self.urgency_description(kwargs.get("description", False)),
967
+ "project": 1.0 if bool(kwargs.get("jobs", False)) else 0.0,
968
+ }
969
+ if bool(kwargs.get("pinned", False)):
970
+ urgency = 1.0
971
+ # log_msg("pinned, ignoring weights, returning urgency 1.0")
972
+ else:
973
+ urgency = self.compute_partitioned_urgency(weights)
974
+ # log_msg(f"{weights = }\n returning {urgency = }")
975
+ return urgency, self.urgency_to_bucket_color(urgency), weights
976
+
977
+
978
+ class DatabaseManager:
979
+ def __init__(self, db_path: str, env: TklrEnvironment, reset: bool = False):
980
+ self.db_path = db_path
981
+ self.env = env
982
+ self.AMPM = env.config.ui.ampm
983
+ self.ALERTS = env.config.alerts
984
+ self.urgency = self.env.config.urgency
985
+
986
+ if reset and os.path.exists(self.db_path):
987
+ os.remove(self.db_path)
988
+
989
+ self.conn = sqlite3.connect(self.db_path)
990
+ self.cursor = self.conn.cursor()
991
+ self.conn.create_function("REGEXP", 2, regexp)
992
+ self.conn.create_function("REGEXP", 2, regexp)
993
+ self.setup_database()
994
+ self.compute_urgency = UrgencyComputer(env)
995
+ self.binproc = BinPathProcessor(
996
+ self,
997
+ BinPathConfig(
998
+ allow_reparent=True, # or False if you want conservative behavior
999
+ standard_roots=BIN_ROOTS, # <— same set, all lowercase
1000
+ ),
1001
+ )
1002
+ self.bin_cache = BinCache(self.conn)
1003
+ # bug_msg(f"{self.bin_cache.name_to_binpath() = }")
1004
+ self.populate_dependent_tables()
1005
+
1006
+ def format_datetime(self, fmt_dt: str) -> str:
1007
+ return format_datetime(fmt_dt, self.AMPM)
1008
+
1009
+ def datetime_in_words(self, fmt_dt: str) -> str:
1010
+ return datetime_in_words(fmt_dt, self.AMPM)
1011
+
1012
+ def setup_database(self):
1013
+ """
1014
+ Create (if missing) all tables and indexes for tklr.
1015
+
1016
+ Simplified tags model:
1017
+ - Hashtags are parsed from subject/description into the Hashtags table.
1018
+ - No separate Tags / RecordTags tables.
1019
+
1020
+ Other notes:
1021
+ - Timestamps are stored as TEXT in UTC (e.g., 'YYYYMMDDTHHMMZ') unless otherwise noted.
1022
+ - DateTimes.start/end are local-naive TEXT ('YYYYMMDD' or 'YYYYMMDDTHHMM').
1023
+ """
1024
+ # FK safety
1025
+ self.cursor.execute("PRAGMA foreign_keys = ON")
1026
+
1027
+ # --- Optional cleanup of old tag tables (safe if they don't exist) ---
1028
+ self.cursor.execute("DROP TABLE IF EXISTS RecordTags;")
1029
+ self.cursor.execute("DROP TABLE IF EXISTS Tags;")
1030
+
1031
+ # ---------------- Records ----------------
1032
+ self.cursor.execute("""
1033
+ CREATE TABLE IF NOT EXISTS Records (
1034
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1035
+ itemtype TEXT, -- '*','~','^','%','?','+','x'
1036
+ subject TEXT,
1037
+ description TEXT,
1038
+ rruleset TEXT, -- serialized ruleset
1039
+ timezone TEXT, -- TZ name or 'float'
1040
+ extent TEXT, -- optional JSON or text
1041
+ alerts TEXT, -- JSON
1042
+ notice TEXT,
1043
+ context TEXT,
1044
+ jobs TEXT, -- JSON
1045
+ flags TEXT, -- compact flags (e.g. 𝕒𝕘𝕠𝕣)
1046
+ priority INTEGER CHECK (priority IN (1,2,3,4,5)),
1047
+ tokens TEXT, -- JSON text (parsed tokens)
1048
+ processed INTEGER, -- 0/1
1049
+ created TEXT, -- 'YYYYMMDDTHHMMZ' UTC
1050
+ modified TEXT -- 'YYYYMMDDTHHMMZ' UTC
1051
+ );
1052
+ """)
1053
+
1054
+ # ---------------- Pinned ----------------
1055
+ self.cursor.execute("""
1056
+ CREATE TABLE IF NOT EXISTS Pinned (
1057
+ record_id INTEGER PRIMARY KEY,
1058
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1059
+ );
1060
+ """)
1061
+ self.cursor.execute("""
1062
+ CREATE INDEX IF NOT EXISTS idx_pinned_record
1063
+ ON Pinned(record_id);
1064
+ """)
1065
+
1066
+ # ---------------- Urgency (NO pinned column) ----------------
1067
+ self.cursor.execute("""
1068
+ CREATE TABLE IF NOT EXISTS Urgency (
1069
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1070
+ record_id INTEGER NOT NULL, -- References Records.id
1071
+ job_id INTEGER, -- NULL if not part of a project
1072
+ subject TEXT NOT NULL,
1073
+ urgency REAL NOT NULL,
1074
+ color TEXT, -- optional precomputed color
1075
+ status TEXT NOT NULL, -- "next","waiting","scheduled",…
1076
+ weights TEXT, -- JSON of component weights (optional)
1077
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1078
+ );
1079
+ """)
1080
+ self.cursor.execute("""
1081
+ CREATE INDEX IF NOT EXISTS idx_urgency_record
1082
+ ON Urgency(record_id);
1083
+ """)
1084
+ self.cursor.execute("""
1085
+ CREATE INDEX IF NOT EXISTS idx_urgency_urgency
1086
+ ON Urgency(urgency DESC);
1087
+ """)
1088
+
1089
+ # ---------------- Completions ----------------
1090
+ self.cursor.execute("""
1091
+ CREATE TABLE IF NOT EXISTS Completions (
1092
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1093
+ record_id INTEGER NOT NULL,
1094
+ completed TEXT NOT NULL, -- UTC-aware: "YYYYMMDDTHHMMZ"
1095
+ due TEXT, -- optional UTC-aware: "YYYYMMDDTHHMMZ"
1096
+ FOREIGN KEY(record_id) REFERENCES Records(id) ON DELETE CASCADE
1097
+ );
1098
+ """)
1099
+ self.cursor.execute("""
1100
+ CREATE INDEX IF NOT EXISTS idx_completions_record_id
1101
+ ON Completions(record_id);
1102
+ """)
1103
+ self.cursor.execute("""
1104
+ CREATE INDEX IF NOT EXISTS idx_completions_completed
1105
+ ON Completions(completed);
1106
+ """)
1107
+ self.cursor.execute("""
1108
+ CREATE INDEX IF NOT EXISTS idx_completions_record_due
1109
+ ON Completions(record_id, due);
1110
+ """)
1111
+
1112
+ # ---------------- DateTimes ----------------
1113
+ # self.cursor.execute("""
1114
+ # CREATE TABLE IF NOT EXISTS DateTimes (
1115
+ # record_id INTEGER NOT NULL,
1116
+ # job_id INTEGER, -- nullable; link to specific job if any
1117
+ # start_datetime TEXT NOT NULL, -- 'YYYYMMDD' or 'YYYYMMDDTHHMMSS' (local-naive)
1118
+ # end_datetime TEXT, -- NULL if instantaneous; same formats as start
1119
+ # FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1120
+ # );
1121
+ # """)
1122
+
1123
+ self.cursor.execute("""
1124
+ CREATE TABLE IF NOT EXISTS DateTimes (
1125
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1126
+ record_id INTEGER NOT NULL,
1127
+ job_id INTEGER, -- nullable; link to specific job if any
1128
+ start_datetime TEXT NOT NULL, -- 'YYYYMMDD' or 'YYYYMMDDTHHMM' (local-naive)
1129
+ end_datetime TEXT, -- NULL if instantaneous; same formats as start
1130
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1131
+ );
1132
+ """)
1133
+
1134
+ # enforce uniqueness across (record_id, job_id, start, end)
1135
+ self.cursor.execute("""
1136
+ CREATE UNIQUE INDEX IF NOT EXISTS idx_datetimes_unique
1137
+ ON DateTimes(
1138
+ record_id,
1139
+ COALESCE(job_id, -1),
1140
+ start_datetime,
1141
+ COALESCE(end_datetime, '')
1142
+ );
1143
+ """)
1144
+ # range query helper
1145
+ self.cursor.execute("""
1146
+ CREATE INDEX IF NOT EXISTS idx_datetimes_start
1147
+ ON DateTimes(start_datetime);
1148
+ """)
1149
+
1150
+ # ---------------- GeneratedWeeks (cache of week ranges) ----------------
1151
+ self.cursor.execute("""
1152
+ CREATE TABLE IF NOT EXISTS GeneratedWeeks (
1153
+ start_year INTEGER,
1154
+ start_week INTEGER,
1155
+ end_year INTEGER,
1156
+ end_week INTEGER
1157
+ );
1158
+ """)
1159
+
1160
+ # ---------------- Alerts ----------------
1161
+ self.cursor.execute("""
1162
+ CREATE TABLE IF NOT EXISTS Alerts (
1163
+ alert_id INTEGER PRIMARY KEY AUTOINCREMENT,
1164
+ record_id INTEGER NOT NULL,
1165
+ record_name TEXT NOT NULL,
1166
+ trigger_datetime TEXT NOT NULL, -- 'YYYYMMDDTHHMM' (local-naive)
1167
+ start_datetime TEXT NOT NULL, -- 'YYYYMMDD' or 'YYYYMMDDTHHMM' (local-naive)
1168
+ alert_name TEXT NOT NULL,
1169
+ alert_command TEXT NOT NULL,
1170
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1171
+ );
1172
+ """)
1173
+ # Prevent duplicates: one alert per (record, start, name, trigger)
1174
+ self.cursor.execute("""
1175
+ CREATE UNIQUE INDEX IF NOT EXISTS idx_alerts_unique
1176
+ ON Alerts(record_id, start_datetime, alert_name, COALESCE(trigger_datetime,''));
1177
+ """)
1178
+ # Helpful for “what’s due now”
1179
+ self.cursor.execute("""
1180
+ CREATE INDEX IF NOT EXISTS idx_alerts_trigger
1181
+ ON Alerts(trigger_datetime);
1182
+ """)
1183
+
1184
+ # ---------------- Notice (days remaining notices) ----------------
1185
+ self.cursor.execute("""
1186
+ CREATE TABLE IF NOT EXISTS Notice (
1187
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1188
+ record_id INTEGER NOT NULL,
1189
+ days_remaining INTEGER NOT NULL,
1190
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1191
+ );
1192
+ """)
1193
+
1194
+ # ---------------- Bins & Links ----------------
1195
+ self.cursor.execute("PRAGMA foreign_keys = ON;")
1196
+
1197
+ self.cursor.execute("""
1198
+ CREATE TABLE IF NOT EXISTS Bins (
1199
+ id INTEGER PRIMARY KEY,
1200
+ name TEXT NOT NULL CHECK (length(trim(name)) > 0)
1201
+ );
1202
+ """)
1203
+
1204
+ self.cursor.execute("""
1205
+ CREATE UNIQUE INDEX IF NOT EXISTS uq_bins_name_nocase
1206
+ ON Bins(name COLLATE NOCASE);
1207
+ """)
1208
+
1209
+ self.cursor.execute("""
1210
+ CREATE TABLE IF NOT EXISTS BinLinks (
1211
+ bin_id INTEGER NOT NULL,
1212
+ container_id INTEGER,
1213
+ FOREIGN KEY (bin_id) REFERENCES Bins(id) ON DELETE CASCADE,
1214
+ FOREIGN KEY (container_id) REFERENCES Bins(id) ON DELETE SET NULL,
1215
+ UNIQUE(bin_id)
1216
+ );
1217
+ """)
1218
+
1219
+ self.cursor.execute("""
1220
+ CREATE INDEX IF NOT EXISTS idx_binlinks_container
1221
+ ON BinLinks(container_id);
1222
+ """)
1223
+
1224
+ self.cursor.execute("""
1225
+ CREATE TABLE IF NOT EXISTS ReminderLinks (
1226
+ reminder_id INTEGER NOT NULL,
1227
+ bin_id INTEGER NOT NULL,
1228
+ FOREIGN KEY (reminder_id) REFERENCES Records(id) ON DELETE CASCADE,
1229
+ FOREIGN KEY (bin_id) REFERENCES Bins(id) ON DELETE CASCADE,
1230
+ UNIQUE(reminder_id, bin_id)
1231
+ );
1232
+ """)
1233
+
1234
+ self.cursor.execute("""
1235
+ CREATE INDEX IF NOT EXISTS idx_reminderlinks_bin
1236
+ ON ReminderLinks(bin_id);
1237
+ """)
1238
+
1239
+ self.cursor.execute("""
1240
+ CREATE INDEX IF NOT EXISTS idx_reminderlinks_reminder
1241
+ ON ReminderLinks(reminder_id);
1242
+ """)
1243
+
1244
+ self.cursor.execute("""
1245
+ CREATE TABLE IF NOT EXISTS Hashtags (
1246
+ tag TEXT NOT NULL,
1247
+ record_id INTEGER NOT NULL,
1248
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1249
+ );
1250
+ """)
1251
+
1252
+ self.cursor.execute("""
1253
+ CREATE INDEX IF NOT EXISTS idx_hashtags_tag ON Hashtags(tag);
1254
+ """)
1255
+
1256
+ self.cursor.execute("""
1257
+ CREATE INDEX IF NOT EXISTS idx_hashtags_record ON Hashtags(record_id);
1258
+ """)
1259
+
1260
+ # ---------------- Busy tables (unchanged) ----------------
1261
+ self.setup_busy_tables()
1262
+ # Seed default top-level bins (idempotent)
1263
+
1264
+ self.ensure_root_children(sorted(BIN_ROOTS))
1265
+
1266
+ self.conn.commit()
1267
+ self.cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
1268
+ tables = [row[0] for row in self.cursor.fetchall()]
1269
+ tables.sort()
1270
+ # bug_msg(f"Tables after setup_database in {tables = }")
1271
+
1272
+ def setup_busy_tables(self):
1273
+ """
1274
+ Create / reset busy cache tables and triggers.
1275
+
1276
+ Design:
1277
+ - BusyWeeksFromDateTimes: per (record_id, year_week) cache of fine-grained busybits (BLOB, 679 slots).
1278
+ FK references Records(id) — not DateTimes — since we aggregate per record/week.
1279
+ - BusyWeeks: per year_week aggregated ternary bits (TEXT, 35 chars).
1280
+ - BusyUpdateQueue: queue of record_ids to recompute.
1281
+
1282
+ Triggers enqueue record_id on any insert/update/delete in DateTimes.
1283
+ """
1284
+
1285
+ # Make schema idempotent and remove any old incompatible objects.
1286
+ self.cursor.execute("PRAGMA foreign_keys=ON")
1287
+
1288
+ # Drop old triggers (names must match what you used previously)
1289
+ self.cursor.execute("DROP TRIGGER IF EXISTS trig_busy_insert")
1290
+ self.cursor.execute("DROP TRIGGER IF EXISTS trig_busy_update")
1291
+ self.cursor.execute("DROP TRIGGER IF EXISTS trig_busy_delete")
1292
+ self.cursor.execute("DROP TRIGGER IF EXISTS trig_busy_records_delete")
1293
+
1294
+ # Drop old tables if they exist (to get rid of the bad FK)
1295
+ self.cursor.execute("DROP TABLE IF EXISTS BusyWeeksFromDateTimes")
1296
+ self.cursor.execute("DROP TABLE IF EXISTS BusyWeeks")
1297
+ self.cursor.execute("DROP TABLE IF EXISTS BusyUpdateQueue")
1298
+
1299
+ # Recreate BusyWeeks (aggregate per week)
1300
+ self.cursor.execute("""
1301
+ CREATE TABLE IF NOT EXISTS BusyWeeks (
1302
+ year_week TEXT PRIMARY KEY,
1303
+ busybits TEXT NOT NULL -- 35-char string of '0','1','2'
1304
+ );
1305
+ """)
1306
+
1307
+ # Recreate BusyWeeksFromDateTimes (per record/week)
1308
+ # PRIMARY KEY enforces one row per (record, week)
1309
+ # FK to Records(id) so deletes of records cascade cleanly
1310
+ self.cursor.execute("""
1311
+ CREATE TABLE IF NOT EXISTS BusyWeeksFromDateTimes (
1312
+ record_id INTEGER NOT NULL,
1313
+ year_week TEXT NOT NULL,
1314
+ busybits BLOB NOT NULL, -- 679 slots (7 × (1 all-day + 96 15-min blocks))
1315
+ PRIMARY KEY (record_id, year_week),
1316
+ FOREIGN KEY(record_id) REFERENCES Records(id) ON DELETE CASCADE
1317
+ );
1318
+ """)
1319
+
1320
+ # Update queue for incremental recomputation
1321
+ self.cursor.execute("""
1322
+ CREATE TABLE IF NOT EXISTS BusyUpdateQueue (
1323
+ record_id INTEGER PRIMARY KEY
1324
+ );
1325
+ """)
1326
+
1327
+ # Triggers on DateTimes to enqueue affected record
1328
+ self.cursor.execute("""
1329
+ CREATE TRIGGER IF NOT EXISTS trig_busy_insert
1330
+ AFTER INSERT ON DateTimes
1331
+ BEGIN
1332
+ INSERT OR IGNORE INTO BusyUpdateQueue(record_id)
1333
+ VALUES (NEW.record_id);
1334
+ END;
1335
+ """)
1336
+
1337
+ self.cursor.execute("""
1338
+ CREATE TRIGGER IF NOT EXISTS trig_busy_update
1339
+ AFTER UPDATE ON DateTimes
1340
+ BEGIN
1341
+ INSERT OR IGNORE INTO BusyUpdateQueue(record_id)
1342
+ VALUES (NEW.record_id);
1343
+ END;
1344
+ """)
1345
+
1346
+ self.cursor.execute("""
1347
+ CREATE TRIGGER IF NOT EXISTS trig_busy_delete
1348
+ AFTER DELETE ON DateTimes
1349
+ BEGIN
1350
+ INSERT OR IGNORE INTO BusyUpdateQueue(record_id)
1351
+ VALUES (OLD.record_id);
1352
+ END;
1353
+ """)
1354
+
1355
+ # If a record is deleted, clean any cache rows (cascades remove BusyWeeksFromDateTimes).
1356
+ # Also clear from the queue if present.
1357
+ self.cursor.execute("""
1358
+ CREATE TRIGGER IF NOT EXISTS trig_busy_records_delete
1359
+ AFTER DELETE ON Records
1360
+ BEGIN
1361
+ DELETE FROM BusyUpdateQueue WHERE record_id = OLD.id;
1362
+ -- BusyWeeksFromDateTimes rows are removed by FK ON DELETE CASCADE.
1363
+ END;
1364
+ """)
1365
+
1366
+ self.conn.commit()
1367
+
1368
+ def backup_to(self, dest_db: Path) -> Path:
1369
+ """
1370
+ Create a consistent SQLite snapshot of the current database at dest_db.
1371
+ Uses the live connection (self.conn) to copy committed state.
1372
+ Returns the final backup path.
1373
+ """
1374
+ dest_db = Path(dest_db)
1375
+ tmp = dest_db.with_suffix(dest_db.suffix + ".tmp")
1376
+ dest_db.parent.mkdir(parents=True, exist_ok=True)
1377
+
1378
+ # Ensure we copy a committed state
1379
+ self.conn.commit()
1380
+
1381
+ # Copy using SQLite's backup API
1382
+ with sqlite3.connect(str(tmp)) as dst:
1383
+ self.conn.backup(dst) # full backup
1384
+ # Tidy destination file only
1385
+ dst.execute("PRAGMA wal_checkpoint(TRUNCATE);")
1386
+ dst.execute("VACUUM;")
1387
+ dst.commit()
1388
+
1389
+ # Preserve timestamps/permissions from the source file if available
1390
+ try:
1391
+ # Adjust attribute name if your manager stores the DB path differently
1392
+ src_path = Path(
1393
+ getattr(
1394
+ self,
1395
+ "db_path",
1396
+ self.conn.execute("PRAGMA database_list").fetchone()[2],
1397
+ )
1398
+ )
1399
+ shutil.copystat(src_path, tmp)
1400
+ except Exception:
1401
+ pass
1402
+
1403
+ tmp.replace(dest_db)
1404
+ return dest_db
1405
+
1406
+ def populate_dependent_tables(self):
1407
+ """Populate all tables derived from current Records (Tags, DateTimes, Alerts, notice)."""
1408
+ # log_msg("populate dependent tables")
1409
+ yr, wk = datetime.now().isocalendar()[:2]
1410
+ # bug_msg(f"Generating weeks for 12 weeks starting from {yr} week number {wk}")
1411
+ self.extend_datetimes_for_weeks(yr, wk, 12)
1412
+ # self.populate_tags()
1413
+ # bug_msg("calling populate_alerts")
1414
+ self.populate_alerts()
1415
+ # bug_msg("calling populate_notice")
1416
+ self.populate_notice()
1417
+ # bug_msg("calling populate_busy_from_datetimes")
1418
+ self.populate_busy_from_datetimes()  # per-record/week source layer
1419
+ self.rebuild_busyweeks_from_source()  # aggregate into BusyWeeks
1420
+ self.populate_all_urgency()
1421
+ self.ensure_system_bins()
1422
+
1423
+ def _normalize_tags(self, tags) -> list[str]:
1424
+ """Return a sorted, de-duplicated, lowercased list of tag strings."""
1425
+ if tags is None:
1426
+ return []
1427
+ if isinstance(tags, str):
1428
+ parts = [p for p in re.split(r"[,\s]+", tags) if p]
1429
+ else:
1430
+ parts = list(tags)
1431
+ return sorted({p.strip().lower() for p in parts if p and p.strip()})
1432
+
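+ # Example (editor's sketch): accepts either a comma/space separated string
+ # or an iterable, and de-duplicates case-insensitively.
+ # >>> db._normalize_tags("Work, home WORK")
+ # ['home', 'work']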
1433
+ def _compute_flags(self, item) -> str:
1434
+ """
1435
+ Derive flags string from an Item:
1436
+ 𝕒 -> has alerts
1437
+ 𝕘 -> has goto (@g)
1438
+ 𝕠 -> has offset (@o)
1439
+ 𝕣 -> has repeat (@r or @+)
1440
+ """
1441
+ flags: list[str] = []
1442
+ tokens = getattr(item, "tokens", []) or []
1443
+
1444
+ # alerts: explicit @a or non-empty item.alerts
1445
+ has_alert = bool(item.alerts) or any(
1446
+ t.get("t") == "@" and t.get("k") == "a" for t in tokens
1447
+ )
1448
+ if has_alert:
1449
+ flags.append("𝕒")
1450
+
1451
+ # goto: @g
1452
+ if any(t.get("t") == "@" and t.get("k") == "g" for t in tokens):
1453
+ flags.append("𝕘")
1454
+
1455
+ # offset: @o
1456
+ if any(t.get("t") == "@" and t.get("k") == "o" for t in tokens):
1457
+ flags.append("𝕠")
1458
+
1459
+ # repeat: @r or @+
1460
+ if any(t.get("t") == "@" and t.get("k") in ("r", "+") for t in tokens):
1461
+ flags.append("𝕣")
1462
+
1463
+ return "".join(flags)
1464
+
1465
+ def _update_hashtags_for_record(
1466
+ self,
1467
+ record_id: int,
1468
+ subject: str | None,
1469
+ description: str | None,
1470
+ ) -> None:
1471
+ text = (subject or "") + "\n" + (description or "")
1472
+ tags = set(TAG_RE.findall(text))
1473
+ # if "#" in text:
1474
+ # bug_msg(f"has hash mark: {text = }, {tags = }")
1475
+
1476
+ self.cursor.execute("DELETE FROM Hashtags WHERE record_id = ?", (record_id,))
1477
+ for tag in tags:
1478
+ self.cursor.execute(
1479
+ "INSERT INTO Hashtags (tag, record_id) VALUES (?, ?)",
1480
+ (tag, record_id),
1481
+ )
1482
+
1483
+ def add_item(self, item: Item) -> int:
1484
+ flags = self._compute_flags(item)
1485
+ try:
1486
+ timestamp = utc_now_string()
1487
+ self.cursor.execute(
1488
+ """
1489
+ INSERT INTO Records (
1490
+ itemtype, subject, description, rruleset, timezone,
1491
+ extent, alerts, notice, context, jobs, flags, priority,
1492
+ tokens, processed, created, modified
1493
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
1494
+ """,
1495
+ (
1496
+ item.itemtype,
1497
+ item.subject,
1498
+ item.description,
1499
+ item.rruleset,
1500
+ item.tz_str,
1501
+ item.extent,
1502
+ json.dumps(item.alerts),
1503
+ item.notice,
1504
+ item.context,
1505
+ json.dumps(item.jobs),
1506
+ flags,
1507
+ item.priority,
1508
+ json.dumps(item.tokens),
1509
+ 0,
1510
+ timestamp,
1511
+ timestamp,
1512
+ ),
1513
+ )
1514
+ self.conn.commit()
1515
+
1516
+ record_id = self.cursor.lastrowid
1517
+ self.relink_bins_for_record(record_id, item)
1518
+ self._update_hashtags_for_record(record_id, item.subject, item.description)
1519
+ return record_id
1520
+
1521
+ except Exception as e:
1522
+ print(f"Error adding {item}: {e}")
1523
+ raise
1524
+
1525
+ def update_item(self, record_id: int, item: Item):
1526
+ try:
1527
+ fields, values = [], []
1528
+
1529
+ def set_field(name, value):
1530
+ if value is not None:
1531
+ fields.append(f"{name} = ?")
1532
+ values.append(value)
1533
+
1534
+ set_field("itemtype", item.itemtype)
1535
+ set_field("subject", item.subject)
1536
+ set_field("description", item.description)
1537
+ set_field("rruleset", item.rruleset)
1538
+ set_field("timezone", item.tz_str)
1539
+ set_field("extent", item.extent)
1540
+ set_field(
1541
+ "alerts", json.dumps(item.alerts) if item.alerts is not None else None
1542
+ )
1543
+ set_field("notice", item.notice)
1544
+ set_field("context", item.context)
1545
+ set_field("jobs", json.dumps(item.jobs) if item.jobs is not None else None)
1546
+ set_field("priority", item.priority)
1547
+ set_field(
1548
+ "tokens", json.dumps(item.tokens) if item.tokens is not None else None
1549
+ )
1550
+ set_field("processed", 0)
1551
+
1552
+ fields.append("modified = ?")
1553
+ values.append(utc_now_string())
1554
+ values.append(record_id)
1555
+
1556
+ sql = f"UPDATE Records SET {', '.join(fields)} WHERE id = ?"
1557
+
1558
+ self.cursor.execute(sql, values)
1559
+ self.conn.commit()
1560
+ self.relink_bins_for_record(record_id, item)
1561
+
1562
+ except Exception as e:
1563
+ print(f"Error updating record {record_id}: {e}")
1564
+ raise
1565
+
1633
+
1634
+ def save_record(self, item: Item, record_id: int | None = None) -> int:
1635
+ """Insert or update a record and refresh associated tables."""
1636
+ timestamp = utc_now_string()
1637
+ flags = self._compute_flags(item)
1638
+
1639
+ if record_id is None:
1640
+ # Insert new record
1641
+ self.cursor.execute(
1642
+ """
1643
+ INSERT INTO Records (
1644
+ itemtype, subject, description, rruleset, timezone,
1645
+ extent, alerts, notice, context, jobs,
1646
+ flags, priority, tokens, processed, created, modified
1647
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
1648
+ """,
1649
+ (
1650
+ item.itemtype,
1651
+ item.subject,
1652
+ item.description,
1653
+ item.rruleset,
1654
+ item.tz_str,
1655
+ item.extent,
1656
+ json.dumps(item.alerts),
1657
+ item.notice,
1658
+ item.context,
1659
+ json.dumps(item.jobs),
1660
+ flags,
1661
+ item.priority,
1662
+ json.dumps(item.tokens),
1663
+ 0,
1664
+ timestamp,
1665
+ timestamp,
1666
+ ),
1667
+ )
1668
+ record_id = self.cursor.lastrowid
1669
+ else:
1670
+ # Update existing record
1671
+ self.cursor.execute(
1672
+ """
1673
+ UPDATE Records
1674
+ SET itemtype = ?, subject = ?, description = ?, rruleset = ?, timezone = ?,
1675
+ extent = ?, alerts = ?, notice = ?, context = ?, jobs = ?,
1676
+ flags = ?, priority = ?, tokens = ?, processed = 0, modified = ?
1677
+ WHERE id = ?
1678
+ """,
1679
+ (
1680
+ item.itemtype,
1681
+ item.subject,
1682
+ item.description,
1683
+ item.rruleset,
1684
+ item.tz_str,
1685
+ item.extent,
1686
+ json.dumps(item.alerts),
1687
+ item.notice,
1688
+ item.context,
1689
+ json.dumps(item.jobs),
1690
+ flags,
1691
+ item.priority,
1692
+ json.dumps(item.tokens),
1693
+ timestamp,
1694
+ record_id,
1695
+ ),
1696
+ )
1697
+
1698
+ self.conn.commit()
1699
+
1700
+ # Dependent tables
1701
+ # bug_msg(f"save record for {record_id = }, {item.itemtype = }")
1702
+ self.relink_bins_for_record(record_id, item)
1703
+ self.generate_datetimes_for_record(record_id)
1704
+ self.populate_alerts_for_record(record_id)
1705
+ if item.notice:
1706
+ self.populate_notice_for_record(record_id)
1707
+ if item.itemtype in ["~", "^"]:
1708
+ # bug_msg("calling populate_urgency_from_record")
1709
+ self.populate_urgency_from_record(record_id)
1710
+
1711
+ # Hashtags: based on subject + description
1712
+ self._update_hashtags_for_record(record_id, item.subject, item.description)
1713
+
1714
+ self.conn.commit()
1715
+ return record_id
1716
+
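+ # Illustrative usage sketch (comment only, not executed; the entry string
+ # passed to Item() is a plausible example of tklr's entry syntax, and
+ # `model` names an instance of this class):
+ #
+ # item = Item("~ pay rent @s 20250201")
+ # rid = model.save_record(item) # insert -> returns new record id
+ # model.save_record(item, record_id=rid) # update the same row in place
+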
1717
+ # def get_record_tags(self, record_id: int) -> list[str]:
1718
+ # self.cursor.execute(
1719
+ # "SELECT COALESCE(tags,'[]') FROM Records WHERE id=?", (record_id,)
1720
+ # )
1721
+ # row = self.cursor.fetchone()
1722
+ # try:
1723
+ # return self._normalize_tags(json.loads(row[0])) if row and row[0] else []
1724
+ # except Exception:
1725
+ # return []
1726
+ #
1727
+ # def find_records_with_any_tags(self, tags: list[str]) -> list[tuple]:
1728
+ # want = set(self._normalize_tags(tags))
1729
+ # self.cursor.execute("SELECT id, subject, COALESCE(tags,'[]') FROM Records")
1730
+ # out = []
1731
+ # for rid, subj, tags_json in self.cursor.fetchall():
1732
+ # try:
1733
+ # have = (
1734
+ # set(self._normalize_tags(json.loads(tags_json)))
1735
+ # if tags_json
1736
+ # else set()
1737
+ # )
1738
+ # except Exception:
1739
+ # have = set()
1740
+ # if want & have:
1741
+ # out.append((rid, subj))
1742
+ # return out
1743
+
1744
+ def add_completion(
1745
+ self,
1746
+ record_id: int,
1747
+ completion: tuple[datetime, datetime | None],
1748
+ ) -> None:
1749
+ """Store a completion record as UTC-aware compact strings."""
1750
+ if completion is None:
1751
+ return
1752
+
1753
+ completed_dt, due_dt = completion
1754
+ self.cursor.execute(
1755
+ """
1756
+ INSERT INTO Completions (record_id, completed, due)
1757
+ VALUES (?, ?, ?)
1758
+ """,
1759
+ (
1760
+ record_id,
1761
+ _fmt_utc(completed_dt),
1762
+ _fmt_utc(due_dt) if due_dt else None,
1763
+ ),
1764
+ )
1765
+ self.conn.commit()
1766
+
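+ # Example (hypothetical values): a completion pairs an aware `completed`
+ # datetime with an optional `due` datetime; both are stored via _fmt_utc.
+ #
+ # done = datetime(2025, 1, 6, 14, 30, tzinfo=tz.UTC)
+ # model.add_completion(rid, (done, None)) # undated task: due is None
+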
1767
+ def get_completions(self, record_id: int):
1768
+ """
1769
+ Return all completions for a given record, sorted newest first.
1770
+
1771
+ Returns:
1772
+ [(record_id, subject, description, itemtype, due_dt, completed_dt)]
1773
+ """
1774
+ self.cursor.execute(
1775
+ """
1776
+ SELECT
1777
+ r.id,
1778
+ r.subject,
1779
+ r.description,
1780
+ r.itemtype,
1781
+ c.due,
1782
+ c.completed
1783
+ FROM Completions c
1784
+ JOIN Records r ON c.record_id = r.id
1785
+ WHERE r.id = ?
1786
+ ORDER BY c.completed DESC
1787
+ """,
1788
+ (record_id,),
1789
+ )
1790
+ rows = self.cursor.fetchall()
1791
+ return [
1792
+ (
1793
+ rid,
1794
+ subj,
1795
+ desc,
1796
+ itype,
1797
+ parse_utc_z(due) if due else None,
1798
+ parse_utc_z(comp),
1799
+ )
1800
+ for (rid, subj, desc, itype, due, comp) in rows
1801
+ ]
1802
+
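+ # Note: `due` and `completed` come back through parse_utc_z, so callers
+ # receive aware UTC datetimes rather than the stored compact strings.
+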
1803
+ def get_all_completions(self):
1804
+ """
1805
+ Return all completions across all records, newest first.
1806
+
1807
+ Rows:
1808
+ [(record_id, subject, description, itemtype, due_dt, completed_dt)]
1809
+ """
1810
+ self.cursor.execute(
1811
+ """
1812
+ SELECT
1813
+ r.id,
1814
+ r.subject,
1815
+ r.description,
1816
+ r.itemtype,
1817
+ c.due,
1818
+ c.completed
1819
+ FROM Completions c
1820
+ JOIN Records r ON c.record_id = r.id
1821
+ ORDER BY c.completed DESC
1822
+ """
1823
+ )
1824
+ rows = self.cursor.fetchall()
1825
+ return [
1826
+ (
1827
+ rid,
1828
+ subj,
1829
+ desc,
1830
+ itype,
1831
+ parse_utc_z(due) if due else None,
1832
+ parse_utc_z(comp),
1833
+ )
1834
+ for (rid, subj, desc, itype, due, comp) in rows
1835
+ ]
1836
+
1837
+ def touch_record(self, record_id: int):
1838
+ """
1839
+ Update the 'modified' timestamp for the given record to the current UTC time.
1840
+ """
1841
+ now = utc_now_string()
1842
+ self.cursor.execute(
1843
+ """
1844
+ UPDATE Records SET modified = ? WHERE id = ?
1845
+ """,
1846
+ (now, record_id),
1847
+ )
1848
+ self.conn.commit()
1849
+
1850
+ def toggle_pinned(self, record_id: int) -> None:
1851
+ self.cursor.execute("SELECT 1 FROM Pinned WHERE record_id=?", (record_id,))
1852
+ if self.cursor.fetchone():
1853
+ self.cursor.execute("DELETE FROM Pinned WHERE record_id=?", (record_id,))
1854
+ else:
1855
+ self.cursor.execute(
1856
+ "INSERT INTO Pinned(record_id) VALUES (?)", (record_id,)
1857
+ )
1858
+ self.conn.commit()
1859
+
1860
+ def is_pinned(self, record_id: int) -> bool:
1861
+ self.cursor.execute(
1862
+ "SELECT 1 FROM Pinned WHERE record_id=? LIMIT 1", (record_id,)
1863
+ )
1864
+ return self.cursor.fetchone() is not None
1865
+
1866
+ def get_due_alerts(self):
1867
+ """Retrieve alerts that need execution within the next 6 seconds."""
1868
+ # now = round(datetime.now().timestamp())
1869
+ now = datetime.now()
1870
+ now_minus = _fmt_naive(now - timedelta(seconds=2))
1871
+ now_plus = _fmt_naive(now + timedelta(seconds=5))
1872
+ # log_msg(f"{now_minus = }, {now_plus = }")
1873
+
1874
+ self.cursor.execute(
1875
+ """
1876
+ SELECT alert_id, record_id, trigger_datetime, start_datetime, alert_name, alert_command
1877
+ FROM Alerts
1878
+ WHERE (trigger_datetime) BETWEEN ? AND ?
1879
+ """,
1880
+ (now_minus, now_plus),
1881
+ )
1882
+
1883
+ return self.cursor.fetchall()
1884
+
1885
+ def get_active_alerts(self):
1886
+ """Retrieve alerts that will trigger on or after the current moment and before midnight."""
1887
+
1888
+ self.cursor.execute(
1889
+ """
1890
+ SELECT alert_id, record_id, record_name, trigger_datetime, start_datetime, alert_name, alert_command
1891
+ FROM Alerts
1892
+ ORDER BY trigger_datetime ASC
1893
+ """,
1894
+ )
1895
+
1896
+ alerts = self.cursor.fetchall()
1897
+ # bug_msg(f"{alerts = }")
1898
+
1899
+ if not alerts:
1900
+ return []
1901
+
1902
+ # each row: (alert_id, record_id, record_name, trigger_datetime,
+ # start_datetime, alert_name, alert_command)
+ return [list(alert) for alert in alerts]
1926
+
1927
+ def get_all_tasks(self) -> list[dict]:
1928
+ """
1929
+ Retrieve all task and project records from the database.
1930
+
1931
+ Returns:
1932
+ A list of dictionaries representing task and project records.
1933
+ """
1934
+ self.cursor.execute(
1935
+ """
1936
+ SELECT * FROM Records
1937
+ WHERE itemtype IN ('~', '^')
1938
+ ORDER BY id
1939
+ """
1940
+ )
1941
+ columns = [column[0] for column in self.cursor.description]
1942
+ return [dict(zip(columns, row)) for row in self.cursor.fetchall()]
1943
+
1944
+ def get_job_display_subject(self, record_id: int, job_id: int | None) -> str | None:
1945
+ """
1946
+ Return the display_subject for a given record_id + job_id pair.
1947
+ Falls back to None if not found or no display_subject is present.
1948
+ """
1949
+ if job_id is None:
1950
+ return None
1951
+
1952
+ self.cursor.execute("SELECT jobs FROM Records WHERE id=?", (record_id,))
1953
+ row = self.cursor.fetchone()
1954
+ if not row or not row[0]:
1955
+ return None
1956
+
1957
+ jobs = _parse_jobs_json(row[0])
1958
+ for job in jobs:
1959
+ # bug_msg(f"{job = }")
1960
+ if job.get("job_id") == job_id:
1961
+ return job.get("display_subject") or None
1962
+
1963
+ return None
1964
+
1965
+ def get_job_dict(self, record_id: int, job_id: int | None) -> dict | None:
1966
+ """
1967
+ Return the full job dictionary for the given record_id + job_id pair.
1968
+ Returns None if not found.
1969
+
1970
+ """
1971
+ # bug_msg(f"getting job_dict for {record_id = }, {job_id = }")
1972
+ if job_id is None:
1973
+ return None
1974
+
1975
+ self.cursor.execute("SELECT jobs FROM Records WHERE id=?", (record_id,))
1976
+ row = self.cursor.fetchone()
1977
+ if not row or not row[0]:
1978
+ return None
1979
+
1980
+ jobs = _parse_jobs_json(row[0])
1981
+ # bug_msg(f"{jobs = }")
1982
+ for job in jobs:
1983
+ if job.get("job_id") == job_id:
1984
+ return job # Return the full dictionary
1985
+
1986
+ # bug_msg(f"returning None for {record_id = }, {job_id = }")
1987
+ return None
1988
+
1989
+ def get_all_alerts(self):
1990
+ """Retrieve all stored alerts for debugging."""
1991
+ self.cursor.execute("""
1992
+ SELECT alert_id, record_id, record_name, trigger_datetime, start_datetime, alert_name, alert_command
1993
+ FROM Alerts
1994
+ ORDER BY trigger_datetime ASC
1995
+ """)
1996
+ alerts = self.cursor.fetchall()
1997
+
1998
+ if not alerts:
1999
+ return [
2000
+ "🔔 No alerts found.",
2001
+ ]
2002
+
2003
+ results = [
2004
+ "🔔 Current Alerts:",
2005
+ ]
2006
+ for alert in alerts:
2007
+ alert_id, record_id, record_name, trigger_text, start_text, alert_name, alert_command = alert
+ # trigger_datetime is stored as local-naive TEXT 'YYYYMMDDTHHMM'
+ formatted_time = datetime.strptime(trigger_text, "%Y%m%dT%H%M").strftime(
+ "%Y-%m-%d %H:%M"
+ )
+
+ results.append([alert_id, record_id, record_name, formatted_time, alert_command])
2014
+
2015
+ return results
2016
+
2017
+ def mark_alert_executed(self, alert_id):
2018
+ """Optional: Mark alert as executed to prevent duplicate execution."""
2019
+ self.cursor.execute(
2020
+ """
2021
+ DELETE FROM Alerts WHERE alert_id = ?
2022
+ """,
2023
+ (alert_id,),
2024
+ )
2025
+ self.conn.commit()
2026
+
2070
+ def create_alert(
2071
+ self,
2072
+ command_name,
2073
+ timedelta,
2074
+ start_datetime,
2075
+ record_id,
2076
+ record_name,
2077
+ record_description,
2078
+ record_location,
2079
+ ):
2080
+ if command_name == "n":
2081
+ alert_command_template = "{name} {when} at {start}"
2082
+ else:
2083
+ alert_command_template = self.ALERTS.get(command_name, "")
2084
+ if not alert_command_template:
2085
+ log_msg(f"❌ Alert command not found for '{command_name}'")
2086
+ return None
2087
+
2088
+ name = record_name
2089
+ description = record_description
2090
+ location = record_location
2091
+
2092
+ if timedelta > 0:
2093
+ when = f"in {duration_in_words(timedelta)}"
2094
+ elif timedelta == 0:
2095
+ when = "now"
2096
+ else:
2097
+ when = f"{duration_in_words(-timedelta)} ago"
2098
+
2099
+ start = format_datetime(start_datetime, HRS_MINS)
2100
+ start_words = datetime_in_words(start_datetime)
2101
+
2102
+ # Prepare dict of available fields
2103
+ field_values = {
2104
+ "name": name,
2105
+ "when": when,
2106
+ "start": start,
2107
+ "time": start_words,
2108
+ "description": description,
2109
+ "location": location,
2110
+ }
2111
+
2112
+ # Use SafeDict to avoid KeyError for missing placeholders
2113
+ formatted = None
2114
+ try:
2115
+ formatted = alert_command_template.format_map(SafeDict(field_values))
2116
+ except Exception as e:
2117
+ log_msg(f"❌ Alert formatting error for command '{command_name}': {e}")
2118
+ # Fall back to the raw template rather than re-raising
+ formatted = alert_command_template
2120
+
2121
+ # bug_msg(f"formatted alert: {formatted!r}")
2122
+ return formatted
2123
+
2124
+ def get_notice_for_today(self):
2125
+ self.cursor.execute("""
2126
+ SELECT notice.record_id, Records.itemtype, Records.subject, notice.days_remaining
2127
+ FROM notice
2128
+ JOIN Records ON notice.record_id = Records.id
2129
+ ORDER BY notice.days_remaining ASC
2130
+ """)
2131
+ return [
2132
+ (
2133
+ record_id,
2134
+ itemtype,
2135
+ subject,
2136
+ int(round(days_remaining)),
2137
+ )
2138
+ for (
2139
+ record_id,
2140
+ itemtype,
2141
+ subject,
2142
+ days_remaining,
2143
+ ) in self.cursor.fetchall()
2144
+ ]
2145
+
2146
+ def get_tokens(self, record_id: int):
2147
+ """
2148
+ Return a list of (tokens, rruleset, created, modified) tuples for the record,
+ with the tokens JSON decoded into a list of dictionaries.
+ Returns an empty list if the record is not found.
2150
+ """
2151
+ self.cursor.execute(
2152
+ "SELECT tokens, rruleset, created, modified FROM Records WHERE id = ?",
2153
+ (record_id,),
2154
+ )
2155
+ return [
2156
+ (
2157
+ # " ".join([t["token"] for t in json.loads(tokens)]),
2158
+ json.loads(tokens),
2159
+ rruleset,
2160
+ created,
2161
+ modified,
2162
+ )
2163
+ for (
2164
+ tokens,
2165
+ rruleset,
2166
+ created,
2167
+ modified,
2168
+ ) in self.cursor.fetchall()
2169
+ ]
2170
+
2171
+ def populate_alerts(self):
2172
+ """
2173
+ Populate the Alerts table for all records that have alerts defined.
2174
+ Inserts alerts that will trigger between now and local end-of-day.
2175
+ Uses TEXT datetimes ('YYYYMMDD' or 'YYYYMMDDTHHMM', local-naive).
2176
+ """
2177
+
2178
+ # --- small helpers for TEXT <-> datetime (local-naive) ---
2180
+
2181
+ def _parse_local_text_dt(s: str) -> datetime:
2182
+ """Parse 'YYYYMMDD' or 'YYYYMMDDTHHMMSS' (local-naive) into datetime."""
2183
+ s = (s or "").strip()
2184
+ if not s:
2185
+ raise ValueError("empty datetime text")
2186
+ if "T" in s:
2187
+ # datetime
2188
+ return datetime.strptime(s, "%Y%m%dT%H%M")
2189
+ else:
2190
+ # date-only -> treat as midnight local
2191
+ return datetime.strptime(s, "%Y%m%d")
2192
+
2193
+ def _to_text_dt(dt: datetime, is_date_only: bool = False) -> str:
2194
+ """
2195
+ Render datetime back to TEXT storage.
2196
+ If is_date_only=True, keep 'YYYYMMDD'; else use 'YYYYMMDDTHHMM'.
2197
+ """
2198
+ if is_date_only:
2199
+ return dt.strftime("%Y%m%d")
2200
+ return dt.strftime("%Y%m%dT%H%M")
2201
+
2202
+ def _is_date_only_text(s: str) -> bool:
2203
+ return "T" not in (s or "")
2204
+
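+ # e.g. _parse_local_text_dt("20250106T0930") -> datetime(2025, 1, 6, 9, 30)
+ # _parse_local_text_dt("20250106") -> datetime(2025, 1, 6, 0, 0)
+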
2205
+ # --- time window (local-naive) ---
2206
+ now = datetime.now()
2207
+ end_of_day = now.replace(hour=23, minute=59, second=59, microsecond=0)
2208
+
2209
+ # Targeted delete: remove alerts in [now, end_of_day] so we can repopulate without duplicates.
2210
+ self.cursor.execute(
2211
+ """
2212
+ DELETE FROM Alerts
2213
+ WHERE trigger_datetime >= ?
2214
+ AND trigger_datetime <= ?
2215
+ """,
2216
+ (now.strftime("%Y%m%dT%H%M"), end_of_day.strftime("%Y%m%dT%H%M")),
2217
+ )
2218
+ self.conn.commit()
2219
+
2220
+ # Find records that have alerts and at least one DateTimes row
2221
+ self.cursor.execute(
2222
+ """
2223
+ SELECT R.id, R.subject, R.description, R.context, R.alerts, D.start_datetime
2224
+ FROM Records R
2225
+ JOIN DateTimes D ON R.id = D.record_id
2226
+ WHERE R.alerts IS NOT NULL AND R.alerts != ''
2227
+ """
2228
+ )
2229
+ records = self.cursor.fetchall()
2230
+ if not records:
2231
+ print("🔔 No records with alerts found.")
2232
+ return
2233
+
2234
+ for (
2235
+ record_id,
2236
+ record_name,
2237
+ record_description,
2238
+ record_location,
2239
+ alerts_json,
2240
+ start_text,
2241
+ ) in records:
2242
+ # start_text is local-naive TEXT ('YYYYMMDD' or 'YYYYMMDDTHHMMSS')
2243
+ try:
2244
+ start_dt = _parse_local_text_dt(start_text)
2245
+ except Exception as e:
2246
+ # bad/malformed DateTimes row; skip gracefully
2247
+ print(
2248
+ f"⚠️ Skipping record {record_id}: invalid start_datetime {start_text!r}: {e}"
2249
+ )
2250
+ continue
2251
+
2252
+ is_date_only = _is_date_only_text(start_text)
2253
+
2254
+ try:
2255
+ alert_list = json.loads(alerts_json)
2256
+ if not isinstance(alert_list, list):
2257
+ continue
2258
+ except Exception:
2259
+ continue
2260
+
2261
+ for alert in alert_list:
2262
+ if ":" not in alert:
2263
+ continue # ignore malformed alerts like "10m" with no command
2264
+ time_part, command_part = alert.split(":", 1)
2265
+
2266
+ # support multiple lead times and multiple commands per line
2267
+ try:
2268
+ lead_secs_list = [
2269
+ td_str_to_seconds(t.strip()) for t in time_part.split(",")
2270
+ ]
2271
+ except Exception:
2272
+ continue
2273
+ commands = [
2274
+ cmd.strip() for cmd in command_part.split(",") if cmd.strip()
2275
+ ]
2276
+ if not commands:
2277
+ continue
2278
+
2279
+ # For date-only starts, we alert relative to midnight (00:00:00) of that day
2280
+ if is_date_only:
2281
+ effective_start_dt = start_dt.replace(
2282
+ hour=0, minute=0, second=0, microsecond=0
2283
+ )
2284
+ else:
2285
+ effective_start_dt = start_dt
2286
+
2287
+ for lead_secs in lead_secs_list:
2288
+ trigger_dt = effective_start_dt - timedelta(seconds=lead_secs)
2289
+
2290
+ # only alerts that trigger today between now and end_of_day
2291
+ if not (now <= trigger_dt <= end_of_day):
2292
+ continue
2293
+
2294
+ trigger_text = _to_text_dt(trigger_dt) # always 'YYYYMMDDTHHMM'
2295
+ start_store_text = _to_text_dt(
2296
+ effective_start_dt, is_date_only=is_date_only
2297
+ )
2298
+
2299
+ for alert_name in commands:
2300
+ # If you have a helper that *builds* the command string, call it;
2301
+ # otherwise keep your existing create_alert signature but pass TEXTs.
2302
+ alert_command = self.create_alert(
2303
+ alert_name,
2304
+ lead_secs,
2305
+ start_store_text, # now TEXT, not epoch
2306
+ record_id,
2307
+ record_name,
2308
+ record_description,
2309
+ record_location,
2310
+ )
2311
+
2312
+ if not alert_command:
2313
+ continue
2314
+
2315
+ # Unique index will prevent duplicates; OR IGNORE keeps this idempotent.
2316
+ self.cursor.execute(
2317
+ """
2318
+ INSERT OR IGNORE INTO Alerts
2319
+ (record_id, record_name, trigger_datetime, start_datetime, alert_name, alert_command)
2320
+ VALUES (?, ?, ?, ?, ?, ?)
2321
+ """,
2322
+ (
2323
+ record_id,
2324
+ record_name,
2325
+ trigger_text,
2326
+ start_store_text,
2327
+ alert_name,
2328
+ alert_command,
2329
+ ),
2330
+ )
2331
+
2332
+ self.conn.commit()
2333
+ log_msg("✅ Alerts table updated with today's relevant alerts.")
2334
+
2402
+ def populate_alerts_for_record(self, record_id: int):
2403
+ """
2404
+ Regenerate alerts for a specific record, for alerts that trigger today
2405
+ (local time), using the same TEXT-based semantics as populate_alerts().
2406
+ """
2407
+
2408
+ # --- small helpers (you can factor these out to avoid duplication) ---
2409
+ def _parse_local_text_dt(s: str) -> datetime:
2410
+ """Parse 'YYYYMMDD' or 'YYYYMMDDTHHMM' (local-naive) into datetime."""
2411
+ s = (s or "").strip()
2412
+ if not s:
2413
+ raise ValueError("empty datetime text")
2414
+ if "T" in s:
2415
+ return datetime.strptime(s, "%Y%m%dT%H%M")
2416
+ else:
2417
+ return datetime.strptime(s, "%Y%m%d")
2418
+
2419
+ def _to_text_dt(dt: datetime, is_date_only: bool = False) -> str:
2420
+ """Render datetime back to TEXT storage."""
2421
+ if is_date_only:
2422
+ return dt.strftime("%Y%m%d")
2423
+ return dt.strftime("%Y%m%dT%H%M")
2424
+
2425
+ def _is_date_only_text(s: str) -> bool:
2426
+ return "T" not in (s or "")
2427
+
2428
+ # --- time window (local-naive) ---
2429
+ now = datetime.now()
2430
+ end_of_day = now.replace(hour=23, minute=59, second=59, microsecond=0)
2431
+
2432
+ now_text = now.strftime("%Y%m%dT%H%M")
2433
+ eod_text = end_of_day.strftime("%Y%m%dT%H%M")
2434
+
2435
+ # Clear old alerts for this record in today's window
2436
+ self.cursor.execute(
2437
+ """
2438
+ DELETE FROM Alerts
2439
+ WHERE record_id = ?
2440
+ AND trigger_datetime >= ?
2441
+ AND trigger_datetime <= ?
2442
+ """,
2443
+ (record_id, now_text, eod_text),
2444
+ )
2445
+ self.conn.commit()
2446
+
2447
+ # Look up the record’s alert data and start datetimes
2448
+ self.cursor.execute(
2449
+ """
2450
+ SELECT R.id, R.subject, R.description, R.context, R.alerts, D.start_datetime
2451
+ FROM Records R
2452
+ JOIN DateTimes D ON R.id = D.record_id
2453
+ WHERE R.id = ?
2454
+ AND R.alerts IS NOT NULL
2455
+ AND R.alerts != ''
2456
+ """,
2457
+ (record_id,),
2458
+ )
2459
+ records = self.cursor.fetchall()
2460
+ if not records:
2461
+ # bug_msg(f"🔕 No alerts to populate for record {record_id}")
2462
+ return
2463
+
2464
+ for (
2465
+ rec_id,
2466
+ record_name,
2467
+ record_description,
2468
+ record_location,
2469
+ alerts_json,
2470
+ start_text,
2471
+ ) in records:
2472
+ try:
2473
+ start_dt = _parse_local_text_dt(start_text)
2474
+ except Exception as e:
2475
+ log_msg(
2476
+ f"⚠️ Skipping record {rec_id}: invalid start_datetime {start_text!r}: {e}"
2477
+ )
2478
+ continue
2479
+
2480
+ is_date_only = _is_date_only_text(start_text)
2481
+
2482
+ try:
2483
+ alert_list = json.loads(alerts_json)
2484
+ if not isinstance(alert_list, list):
2485
+ continue
2486
+ except Exception:
2487
+ continue
2488
+
2489
+ for alert in alert_list:
2490
+ if ":" not in alert:
2491
+ continue # malformed, e.g. "10m"
2492
+ time_part, command_part = alert.split(":", 1)
2493
+
2494
+ try:
2495
+ lead_secs_list = [
2496
+ td_str_to_seconds(t.strip()) for t in time_part.split(",")
2497
+ ]
2498
+ except Exception:
2499
+ continue
2500
+
2501
+ commands = [
2502
+ cmd.strip() for cmd in command_part.split(",") if cmd.strip()
2503
+ ]
2504
+ if not commands:
2505
+ continue
2506
+
2507
+ # For date-only starts, schedule relative to midnight of that day
2508
+ if is_date_only:
2509
+ effective_start_dt = start_dt.replace(
2510
+ hour=0, minute=0, second=0, microsecond=0
2511
+ )
2512
+ else:
2513
+ effective_start_dt = start_dt
2514
+
2515
+ for lead_secs in lead_secs_list:
2516
+ trigger_dt = effective_start_dt - timedelta(seconds=lead_secs)
2517
+
2518
+ # only alerts that trigger today between now and end_of_day
2519
+ if not (now <= trigger_dt <= end_of_day):
2520
+ continue
2521
+
2522
+ trigger_text = _to_text_dt(trigger_dt)
2523
+ start_store_text = _to_text_dt(
2524
+ effective_start_dt, is_date_only=is_date_only
2525
+ )
2526
+
2527
+ for alert_name in commands:
2528
+ alert_command = self.create_alert(
2529
+ alert_name,
2530
+ lead_secs,
2531
+ start_store_text, # TEXT, same as in populate_alerts()
2532
+ rec_id,
2533
+ record_name,
2534
+ record_description,
2535
+ record_location,
2536
+ )
2537
+ if not alert_command:
2538
+ continue
2539
+
2540
+ self.cursor.execute(
2541
+ """
2542
+ INSERT OR IGNORE INTO Alerts
2543
+ (record_id, record_name, trigger_datetime, start_datetime, alert_name, alert_command)
2544
+ VALUES (?, ?, ?, ?, ?, ?)
2545
+ """,
2546
+ (
2547
+ rec_id,
2548
+ record_name,
2549
+ trigger_text,
2550
+ start_store_text,
2551
+ alert_name,
2552
+ alert_command,
2553
+ ),
2554
+ )
2555
+
2556
+ self.conn.commit()
2557
+ # bug_msg(f"✅ Alerts updated for record {record_id}")
2558
+
2559
+ def get_generated_weeks_range(self) -> tuple[int, int, int, int] | None:
2560
+ row = self.cursor.execute(
2561
+ "SELECT start_year, start_week, end_year, end_week FROM GeneratedWeeks"
2562
+ ).fetchone()
2563
+ return tuple(row) if row else None
2564
+
2565
+ @staticmethod
2566
+ def _week_key(year: int, week: int) -> tuple[int, int]:
2567
+ return (year, week)
2568
+
2569
+ def is_week_in_generated(self, year: int, week: int) -> bool:
2570
+ rng = self.get_generated_weeks_range()
2571
+ if not rng:
2572
+ return False
2573
+ sy, sw, ey, ew = rng
2574
+ return (
2575
+ self._week_key(sy, sw)
2576
+ <= self._week_key(year, week)
2577
+ <= self._week_key(ey, ew)
2578
+ )
2579
+
2580
+ @staticmethod
2581
+ def _iso_date(year: int, week: int, weekday: int = 1) -> datetime:
2582
+ # ISO: %G (ISO year), %V (ISO week), %u (1..7, Monday=1)
2583
+ return datetime.strptime(f"{year} {week} {weekday}", "%G %V %u")
2584
+
2585
+ def _weeks_between(self, a: tuple[int, int], b: tuple[int, int]) -> int:
2586
+ da = self._iso_date(*a)
2587
+ db = self._iso_date(*b)
2588
+ return (db - da).days // 7
2589
+
2590
+ def ensure_week_generated_with_topup(
2591
+ self,
2592
+ year: int,
2593
+ week: int,
2594
+ cushion: int = 6,
2595
+ topup_threshold: int = 2,
2596
+ ) -> bool:
2597
+ """
2598
+ Ensure (year, week) exists in DateTimes.
2599
+ - If it's outside the cached range (earlier or later): extend to include it (+ cushion).
2600
+ - If it's inside but within `topup_threshold` weeks of either edge, extend a bit past that edge.
2601
+ Returns True if any extension was performed.
2602
+ """
2603
+ rng = self.get_generated_weeks_range()
2604
+
2605
+ # No range yet: seed it from requested week
2606
+ if not rng:
2607
+ self.extend_datetimes_for_weeks(year, week, cushion + 1)
2608
+ return True
2609
+
2610
+ sy, sw, ey, ew = rng
2611
+ wk_key = self._week_key(year, week)
2612
+
2613
+ # Outside range -> extend starting at requested week
2614
+ if wk_key < self._week_key(sy, sw) or wk_key > self._week_key(ey, ew):
2615
+ self.extend_datetimes_for_weeks(year, week, cushion + 1)
2616
+ return True
2617
+
2618
+ # Inside range: check “near left” edge
2619
+ if self._weeks_between((sy, sw), (year, week)) <= topup_threshold:
2620
+ earlier_start = self._iso_date(sy, sw) - timedelta(weeks=cushion)
2621
+ e_y, e_w = earlier_start.isocalendar()[:2]
2622
+ self.extend_datetimes_for_weeks(e_y, e_w, cushion + 1)
2623
+ return True
2624
+
2625
+ # Inside range: check “near right” edge
2626
+ if self._weeks_between((year, week), (ey, ew)) <= topup_threshold:
2627
+ start_after = self._iso_date(ey, ew) + timedelta(weeks=1)
2628
+ n_y, n_w = start_after.isocalendar()[:2]
2629
+ self.extend_datetimes_for_weeks(n_y, n_w, cushion)
2630
+ return True
2631
+
2632
+ return False
2633
+
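+ # Example: with cushion=6 and a cached range of 2025-W10..2025-W30,
+ # requesting 2025-W29 falls within topup_threshold of the right edge, so
+ # roughly six more weeks past W30 are generated; requesting 2025-W40 falls
+ # outside the range entirely and triggers generation starting at W40.
+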
2634
+ def extend_datetimes_for_weeks(self, start_year, start_week, weeks):
2635
+ """
2636
+ Extend the DateTimes table by generating data for the specified number of weeks
2637
+ starting from a given year and week.
2638
+
2639
+ Args:
2640
+ start_year (int): The starting year.
2641
+ start_week (int): The starting ISO week.
2642
+ weeks (int): Number of weeks to generate.
2643
+ """
2644
+ start = datetime.strptime(f"{start_year} {start_week} 1", "%G %V %u")
2645
+ end = start + timedelta(weeks=weeks)
2646
+
2647
+ start_year, start_week = start.isocalendar()[:2]
2648
+ end_year, end_week = end.isocalendar()[:2]
2649
+
2650
+ self.cursor.execute(
2651
+ "SELECT start_year, start_week, end_year, end_week FROM GeneratedWeeks"
2652
+ )
2653
+ cached_ranges = self.cursor.fetchall()
2654
+
2655
+ # Determine the full range that needs to be generated
2656
+ min_year = (
2657
+ min(cached_ranges, key=lambda x: x[0])[0] if cached_ranges else start_year
2658
+ )
2659
+ min_week = (
2660
+ min(cached_ranges, key=lambda x: x[1])[1] if cached_ranges else start_week
2661
+ )
2662
+ max_year = (
2663
+ max(cached_ranges, key=lambda x: x[2])[2] if cached_ranges else end_year
2664
+ )
2665
+ max_week = (
2666
+ max(cached_ranges, key=lambda x: x[3])[3] if cached_ranges else end_week
2667
+ )
2668
+
2669
+ # Expand the range to include gaps and requested period
2670
+ if start_year < min_year or (start_year == min_year and start_week < min_week):
2671
+ min_year, min_week = start_year, start_week
2672
+ if end_year > max_year or (end_year == max_year and end_week > max_week):
2673
+ max_year, max_week = end_year, end_week
2674
+
2675
+ first_day = datetime.strptime(f"{min_year} {min_week} 1", "%G %V %u")
2676
+ last_day = datetime.strptime(
2677
+ f"{max_year} {max_week} 1", "%G %V %u"
2678
+ ) + timedelta(days=6)
2679
+
2680
+ # Generate new datetimes for the extended range
2681
+ # bug_msg(f"generating datetimes for {first_day = } {last_day = }")
2682
+ self.generate_datetimes_for_period(first_day, last_day)
2683
+
2684
+ # Update the GeneratedWeeks table
2685
+ self.cursor.execute("DELETE FROM GeneratedWeeks") # Clear old entries
2686
+ self.cursor.execute(
2687
+ """
2688
+ INSERT INTO GeneratedWeeks (start_year, start_week, end_year, end_week)
2689
+ VALUES (?, ?, ?, ?)
2690
+ """,
2691
+ (min_year, min_week, max_year, max_week),
2692
+ )
2693
+
2694
+ self.conn.commit()
2695
+
2696
+ def generate_datetimes(self, rule_str, extent, start_date, end_date):
2697
+ """
2698
+ Generate occurrences for a given rruleset within the specified date range.
2699
+
2700
+ Args:
2701
+ rule_str (str): The rrule string defining the recurrence rule.
2702
+ extent (int): The duration of each occurrence in minutes.
2703
+ start_date (datetime): The start of the range.
2704
+ end_date (datetime): The end of the range.
2705
+
2706
+ Returns:
2707
+ List[Tuple[datetime, datetime]]: A list of (start_dt, end_dt) tuples.
2708
+ """
2709
+
2710
+ # bug_msg(
2711
+ # f"getting datetimes for {rule_str} between {start_date = } and {end_date = }"
2712
+ # )
2713
+ rule = rrulestr(rule_str, dtstart=start_date)
2714
+ occurrences = list(rule.between(start_date, end_date, inc=True))
2715
+ print(f"{rule_str = }\n{occurrences = }")
2716
+ extent = td_str_to_td(extent) if isinstance(extent, str) else extent
2717
+ # bug_msg(
2718
+ # f"Generating for {len(occurrences) = } between {start_date = } and {end_date = } with {extent = } for {rule_str = }."
2719
+ # )
2720
+
2721
+ # Create (start, end) pairs
2722
+ results = []
2723
+ for start_dt in occurrences:
2724
+ end_dt = start_dt + extent if extent else start_dt
2725
+ results.append((start_dt, end_dt))
2726
+
2727
+ return results
2728
+
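+ # Illustrative expansion: an "RRULE:FREQ=DAILY" rule starting Jan 6 09:00
+ # with extent "30m" yields (Jan 6 09:00, Jan 6 09:30), (Jan 7 09:00,
+ # Jan 7 09:30), ... one pair per occurrence inside [start_date, end_date].
+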
2729
+ def generate_datetimes_for_record(
2730
+ self,
2731
+ record_id: int,
2732
+ *,
2733
+ window: tuple[datetime, datetime] | None = None,
2734
+ clear_existing: bool = True,
2735
+ ) -> None:
2736
+ """
2737
+ Regenerate DateTimes rows for a single record.
2738
+
2739
+ Behavior:
2740
+ • If the record has jobs (project): generate rows for jobs ONLY (job_id set).
2741
+ • If the record has no jobs (event or single task): generate rows for the parent
2742
+ itself (job_id NULL).
2743
+ • Notes / unscheduled: nothing.
2744
+
2745
+ Infinite rules: constrained to `window` when provided.
2746
+ Finite rules: generated fully (window ignored).
2747
+ """
2748
+ # Fetch core fields including itemtype and jobs JSON
2749
+ self.cursor.execute(
2750
+ "SELECT itemtype, rruleset, extent, jobs, processed FROM Records WHERE id=?",
2751
+ (record_id,),
2752
+ )
2753
+ row = self.cursor.fetchone()
2754
+ if not row:
2755
+ log_msg(f"⚠️ No record found id={record_id}")
2756
+ return
2757
+
2758
+ itemtype, rruleset, record_extent, jobs_json, processed = row
2759
+ rule_str = (rruleset or "").replace("\\N", "\n").replace("\\n", "\n")
2760
+
2761
+ # Nothing to do without any schedule
2762
+ if not rule_str:
2763
+ return
2764
+
2765
+ # Optional: clear existing rows for this record
2766
+ if clear_existing:
2767
+ self.cursor.execute(
2768
+ "DELETE FROM DateTimes WHERE record_id = ?", (record_id,)
2769
+ )
2770
+
2771
+ # Parse jobs (if any)
2772
+ jobs = _parse_jobs_json(jobs_json)
2773
+ has_jobs = bool(jobs)
2774
+ # log_msg(f"{has_jobs = }, {jobs = }")
2775
+
2776
+ has_rrule = "RRULE" in rule_str
2777
+ is_finite = (not has_rrule) or ("COUNT=" in rule_str) or ("UNTIL=" in rule_str)
2778
+ is_aware = "Z" in rule_str
2779
+
2780
+ # Build parent recurrence iterator
2781
+ try:
2782
+ rule = rrulestr(rule_str)
2783
+ except Exception as e:
2784
+ log_msg(
2785
+ f"rrulestr failed for record {record_id}: {e}\n---\n{rule_str}\n---"
2786
+ )
2787
+ return
2788
+
2789
+ def _iter_parent_occurrences():
2790
+ if is_finite:
2792
+ anchor = get_anchor(is_aware)
2793
+
2794
+ try:
2795
+ cur = rule.after(anchor, inc=True)
2796
+ except TypeError:
2797
+ log_msg(
2798
+ f"exception processing {anchor = } with {is_aware = } in {record_id = }"
2799
+ )
2800
+ cur = None
2801
+
2802
+ while cur is not None:
2803
+ yield cur
2804
+ cur = rule.after(cur, inc=False)
2805
+ else:
2806
+ if window:
2807
+ lo, hi = window
2808
+ try:
2809
+ occs = rule.between(lo, hi, inc=True)
2810
+ except TypeError:
2811
+ if lo.tzinfo is None:
2812
+ lo = lo.replace(tzinfo=tz.UTC)
2813
+ if hi.tzinfo is None:
2814
+ hi = hi.replace(tzinfo=tz.UTC)
2815
+ occs = rule.between(lo, hi, inc=True)
2816
+ for cur in occs:
2817
+ yield cur
2818
+ else:
2819
+ # default horizon for infinite rules
2820
+ start = datetime.now()
2821
+ end = start + timedelta(weeks=12)
2822
+ try:
2823
+ occs = rule.between(start, end, inc=True)
2824
+ except TypeError:
2825
+ occs = rule.between(
2826
+ start.replace(tzinfo=tz.UTC),
2827
+ end.replace(tzinfo=tz.UTC),
2828
+ inc=True,
2829
+ )
2830
+ for cur in occs:
2831
+ yield cur
2832
+
2833
+ extent_sec_record = td_str_to_seconds(record_extent or "")
2834
+
2835
+ # ---- PATH A: Projects with jobs -> generate job rows only ----
2836
+ if has_jobs:
2837
+ # bug_msg(f"{record_id = } has jobs")
2838
+ for parent_dt in _iter_parent_occurrences():
2839
+ parent_local = _to_local_naive(
2840
+ parent_dt
2841
+ if isinstance(parent_dt, datetime)
2842
+ else datetime.combine(parent_dt, datetime.min.time())
2843
+ )
2844
+ for j in jobs:
2845
+ # bug_msg(f"job: {j = }")
2846
+ if j.get("status") == "finished":
2847
+ continue
2848
+ job_id = j.get("job_id")
2849
+ off_sec = td_str_to_seconds(j.get("offset_str") or "")
2850
+ job_start = _shift_from_parent(parent_local, off_sec)
2851
+ job_extent_sec = (
2852
+ td_str_to_seconds(j.get("extent_str") or "")
2853
+ or extent_sec_record
2854
+ )
2855
+
2856
+ if job_extent_sec:
2857
+ job_end = job_start + timedelta(seconds=job_extent_sec)
2858
+ try:
2859
+ # preferred: split across days if you have this helper
2860
+ for seg_start, seg_end in _split_span_local_days(
2861
+ job_start, job_end
2862
+ ):
2863
+ s_txt = _fmt_naive(seg_start)
2864
+ e_txt = (
2865
+ None
2866
+ if seg_end == seg_start
2867
+ else _fmt_naive(seg_end)
2868
+ )
2869
+ # bug_msg(
2870
+ # f"inserting job datetimes {s_txt = }, {e_txt = } for {record_id = }, {job_id = }"
2871
+ # )
2872
+ self.cursor.execute(
2873
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, ?, ?, ?)",
2874
+ (record_id, job_id, s_txt, e_txt),
2875
+ )
2876
+ # bug_msg("success")
2877
+ except NameError:
2878
+ # fallback: single row
2879
+ self.cursor.execute(
2880
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, ?, ?, ?)",
2881
+ (
2882
+ record_id,
2883
+ job_id,
2884
+ _fmt_naive(job_start),
2885
+ _fmt_naive(job_end),
2886
+ ),
2887
+ )
2888
+ except Exception as e:
2889
+ log_msg(f"error: {e}")
2890
+ else:
2891
+ self.cursor.execute(
2892
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, ?, ?, NULL)",
2893
+ (record_id, job_id, _fmt_naive(job_start)),
2894
+ )
2895
+
2896
+ # ---- PATH B: Events / single tasks (no jobs) -> generate parent rows ----
2897
+ else:
2898
+ for cur in _iter_parent_occurrences():
2899
+ # cur can be aware/naive datetime (or, rarely, date)
2900
+ if isinstance(cur, datetime):
2901
+ start_local = _to_local_naive(cur)
2902
+ else:
2903
+ start_local = (
2904
+ cur # date; treated as local-naive midnight by _fmt_naive
2905
+ )
2906
+
2907
+ if extent_sec_record:
2908
+ end_local = (
2909
+ start_local + timedelta(seconds=extent_sec_record)
2910
+ if isinstance(start_local, datetime)
2911
+ else datetime.combine(start_local, datetime.min.time())
2912
+ + timedelta(seconds=extent_sec_record)
2913
+ )
2914
+ try:
2915
+ for seg_start, seg_end in _split_span_local_days(
2916
+ start_local, end_local
2917
+ ):
2918
+ s_txt = _fmt_naive(seg_start)
2919
+ e_txt = (
2920
+ None if seg_end == seg_start else _fmt_naive(seg_end)
2921
+ )
2922
+ self.cursor.execute(
2923
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, NULL, ?, ?)",
2924
+ (record_id, s_txt, e_txt),
2925
+ )
2926
+ except NameError:
2927
+ self.cursor.execute(
2928
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, NULL, ?, ?)",
2929
+ (record_id, _fmt_naive(start_local), _fmt_naive(end_local)),
2930
+ )
2931
+ else:
2932
+ self.cursor.execute(
2933
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, NULL, ?, NULL)",
2934
+ (record_id, _fmt_naive(start_local)),
2935
+ )
2936
+
2937
+ # Mark finite as processed only when we generated full set (no window)
2938
+ if is_finite and not window:
2939
+ self.cursor.execute(
2940
+ "UPDATE Records SET processed = 1 WHERE id = ?", (record_id,)
2941
+ )
2942
+ self.conn.commit()
2943
+
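+ # Sketch of the two paths: a project whose jobs list includes, say,
+ # {"job_id": 1, "offset_str": "1d", "extent_str": "2h"} gets one DateTimes
+ # row per (occurrence, unfinished job), shifted from the parent occurrence
+ # by the job offset; an event or single task instead gets parent rows with
+ # job_id NULL.
+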
2944
+ def get_events_for_period(self, start_date: datetime, end_date: datetime):
2945
+ """
2946
+ Retrieve all events that occur or overlap within [start_date, end_date),
2947
+ ordered by start time.
2948
+
2949
+ Returns rows as:
2950
+ (start_datetime, end_datetime, itemtype, subject, record_id, job_id)
2951
+
2952
+ DateTimes table stores TEXT:
2953
+ - date-only: 'YYYYMMDD'
2954
+ - datetime: 'YYYYMMDDTHHMM'
2955
+ - end_datetime may be NULL (instantaneous)
2956
+
2957
+ Overlap rule:
2958
+ normalized_end >= period_start_key
2959
+ normalized_start < period_end_key
2960
+ """
2961
+ start_key = _to_key(start_date)
2962
+ end_key = _to_key(end_date)
2963
+
2964
+ sql = """
2965
+ SELECT
2966
+ dt.id,
2967
+ dt.start_datetime,
2968
+ dt.end_datetime,
2969
+ r.itemtype,
2970
+ r.subject,
2971
+ r.id,
2972
+ dt.job_id
2973
+ FROM DateTimes dt
2974
+ JOIN Records r ON dt.record_id = r.id
2975
+ WHERE
2976
+ -- normalized end >= period start
2977
+ (
2978
+ CASE
2979
+ WHEN dt.end_datetime IS NULL THEN
2980
+ CASE
2981
+ WHEN LENGTH(dt.start_datetime) = 8 THEN dt.start_datetime || 'T000000'
2982
+ ELSE dt.start_datetime
2983
+ END
2984
+ WHEN LENGTH(dt.end_datetime) = 8 THEN dt.end_datetime || 'T235959'
2985
+ ELSE dt.end_datetime
2986
+ END
2987
+ ) >= ?
2988
+ AND
2989
+ -- normalized start < period end
2990
+ (
2991
+ CASE
2992
+ WHEN LENGTH(dt.start_datetime) = 8 THEN dt.start_datetime || 'T000000'
2993
+ ELSE dt.start_datetime
2994
+ END
2995
+ ) < ?
2996
+ ORDER BY
2997
+ CASE
2998
+ WHEN LENGTH(dt.start_datetime) = 8 THEN dt.start_datetime || 'T000000'
2999
+ ELSE dt.start_datetime
3000
+ END
3001
+ """
3002
+ self.cursor.execute(sql, (start_key, end_key))
3003
+ return self.cursor.fetchall()
3004
+
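+ # Overlap example: a date-only row ('20250106', NULL) is normalized to the
+ # span 20250106T000000..20250106T235959, so it matches any query window
+ # that touches Jan 6 even though no clock time was stored.
+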
3005
+ def generate_datetimes_for_period(self, start_date: datetime, end_date: datetime):
3006
+ self.cursor.execute("SELECT id FROM Records")
3007
+ for (record_id,) in self.cursor.fetchall():
3008
+ self.generate_datetimes_for_record(
3009
+ record_id,
3010
+ window=(start_date, end_date),
3011
+ clear_existing=True,
3012
+ )
3013
+
3014
+ def get_notice_for_events(self):
3015
+ """
3016
+ Retrieve (record_id, days_remaining, subject) from notice joined with Records
3017
+ for events only (itemtype '*').
3018
+
3019
+ Returns:
3020
+ List[Tuple[int, int, str]]: A list of (record_id, days_remaining, subject)
3021
+ """
3022
+ self.cursor.execute(
3023
+ """
3024
+ SELECT n.record_id, n.days_remaining, r.subject
3025
+ FROM notice n
3026
+ JOIN Records r ON n.record_id = r.id
3027
+ WHERE r.itemtype = '*'
3028
+ ORDER BY n.days_remaining
3029
+ """
3030
+ )
3031
+ return self.cursor.fetchall()
3032
+
3033
+ def get_drafts(self):
3034
+ """
3035
+ Retrieve all draft records (itemtype '?') with their ID and subject.
3036
+
3037
+ Returns:
3038
+ List[Tuple[int, str]]: A list of (id, subject)
3039
+ """
3040
+ self.cursor.execute(
3041
+ """
3042
+ SELECT id, subject
3043
+ FROM Records
3044
+ WHERE itemtype = '?'
3045
+ ORDER BY id
3046
+ """
3047
+ )
3048
+ return self.cursor.fetchall()
3049
+
3077
+ def get_urgency(self):
3078
+ """
3079
+ Return tasks for the Agenda view, with pinned-first ordering.
3080
+
3081
+ Rows:
3082
+ (
3083
+ record_id,
3084
+ job_id,
3085
+ subject,
3086
+ urgency,
3087
+ color,
3088
+ status,
3089
+ weights,
3090
+ pinned_int,
3091
+ datetime_id, -- may be NULL
3092
+ instance_ts -- TEXT start_datetime or NULL
3093
+ )
3094
+ """
3095
+ self.cursor.execute(
3096
+ """
3097
+ WITH first_per_job AS (
3098
+ SELECT
3099
+ record_id,
3100
+ job_id,
3101
+ -- normalized start for correct ordering of date-only vs datetime
3102
+ MIN(
3103
+ CASE
3104
+ WHEN LENGTH(start_datetime) = 8
3105
+ THEN start_datetime || 'T000000'
3106
+ ELSE start_datetime
3107
+ END
3108
+ ) AS first_norm_start
3109
+ FROM DateTimes
3110
+ GROUP BY record_id, job_id
3111
+ ),
3112
+ first_dt AS (
3113
+ SELECT
3114
+ d.id,
3115
+ d.record_id,
3116
+ d.job_id,
3117
+ d.start_datetime
3118
+ FROM DateTimes d
3119
+ JOIN first_per_job fp
3120
+ ON d.record_id = fp.record_id
3121
+ AND COALESCE(d.job_id, -1) = COALESCE(fp.job_id, -1)
3122
+ AND CASE
3123
+ WHEN LENGTH(d.start_datetime) = 8
3124
+ THEN d.start_datetime || 'T000000'
3125
+ ELSE d.start_datetime
3126
+ END = fp.first_norm_start
3127
+ )
3128
+ SELECT
3129
+ u.record_id,
3130
+ u.job_id,
3131
+ u.subject,
3132
+ u.urgency,
3133
+ u.color,
3134
+ u.status,
3135
+ u.weights,
3136
+ CASE WHEN p.record_id IS NULL THEN 0 ELSE 1 END AS pinned,
3137
+ fd.id AS datetime_id,
3138
+ fd.start_datetime AS instance_ts
3139
+ FROM Urgency AS u
3140
+ JOIN Records AS r
3141
+ ON r.id = u.record_id
3142
+ LEFT JOIN Pinned AS p
3143
+ ON p.record_id = u.record_id
3144
+ LEFT JOIN first_dt AS fd
3145
+ ON fd.record_id = u.record_id
3146
+ AND COALESCE(fd.job_id, -1) = COALESCE(u.job_id, -1)
3147
+ WHERE r.itemtype != 'x'
3148
+ ORDER BY pinned DESC, u.urgency DESC, u.id ASC
3149
+ """
3150
+ )
3151
+ return self.cursor.fetchall()
3152
+
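+ # Note on COALESCE(job_id, -1): NULL = NULL is false in SQL, so rows with a
+ # NULL job_id would never match each other; -1 acts as a sentinel here on
+ # the assumption that no real job_id is negative.
+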
3153
+ def process_events(self, start_date, end_date):
3154
+ """
3155
+ Process events and split across days for display.
3156
+
3157
+ Args:
3158
+ start_date (datetime): The start of the period.
3159
+ end_date (datetime): The end of the period.
3160
+
3161
+ Returns:
3162
+ Dict[int, Dict[int, Dict[int, List[Tuple]]]]: Nested dictionary grouped by year, week, and weekday.
3163
+ """
3164
+
3165
+ # Retrieve all events for the specified period
3166
+ events = self.get_events_for_period(start_date, end_date)
3167
+ # Group events by ISO year, week, and weekday
3168
+ grouped_events = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
3169
+
+ for dt_id, start_ts, end_ts, itemtype, subject, rec_id, job_id in events:
+ start_dt = datetime_from_timestamp(start_ts)
+ end_dt = datetime_from_timestamp(end_ts)
3184
+
3185
+ iso_year, iso_week, iso_weekday = start_dt.isocalendar()
3186
+ grouped_events[iso_year][iso_week][iso_weekday].append((start_dt, end_dt))
3187
+
3188
+ return grouped_events
3189
+
3190
+ def populate_notice(self):
3191
+ """
3192
+ Populate the notice table for all records with valid notice entries.
3193
+ This clears existing entries and recomputes them from current record data.
3194
+ """
3195
+ self.cursor.execute("DELETE FROM Notice;")
3196
+ self.conn.commit()
3197
+
3198
+ # Fetch both record_id and notice value
3199
+ self.cursor.execute(
3200
+ "SELECT id, notice FROM Records WHERE notice IS NOT NULL AND notice != ''"
3201
+ )
3202
+ for record_id, notice in self.cursor.fetchall():
3203
+ self.populate_notice_for_record(record_id)
3204
+
3205
+ self.conn.commit()
3206
+
3207
+ def populate_notice_for_record(self, record_id: int):
3208
+ self.cursor.execute("SELECT notice FROM Records WHERE id = ?", (record_id,))
3209
+ row = self.cursor.fetchone()
3210
+ if not row or not row[0]:
3211
+ return # no notice for this record
3212
+ notice_str = row[0]
3213
+
3214
+ self.cursor.execute(
3215
+ "SELECT start_datetime FROM DateTimes WHERE record_id = ? ORDER BY start_datetime ASC",
3216
+ (record_id,),
3217
+ )
3218
+ occurrences = self.cursor.fetchall()
3219
+
3220
+ today = date.today()
3221
+ offset = td_str_to_td(notice_str)
3222
+
3223
+ for (start_ts,) in occurrences:
3224
+ scheduled_dt = datetime_from_timestamp(start_ts)
3225
+ notice_dt = scheduled_dt - offset
3226
+ if notice_dt.date() <= today < scheduled_dt.date():
3227
+ days_remaining = (scheduled_dt.date() - today).days
3228
+ self.cursor.execute(
3229
+ "INSERT INTO notice (record_id, days_remaining) VALUES (?, ?)",
3230
+ (record_id, days_remaining),
3231
+ )
3232
+ break # Only insert for the earliest qualifying instance
3233
+
3234
+ self.conn.commit()
3235
+
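+ # Example: a notice of "3d" on an instance starting Jan 9 inserts a row
+ # only while today is Jan 6..Jan 8 (notice_dt <= today < start), with
+ # days_remaining counting down 3, 2, 1.
+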
3236
+ def populate_busy_from_datetimes(self):
3237
+ """
3238
+ Build BusyWeeksFromDateTimes from DateTimes.
3239
+ For each (record_id, year_week) pair, accumulate busybits
3240
+ across all event segments — merging with np.maximum().
3241
+ """
3242
+ import numpy as np
3243
+
3244
+ log_msg("🧩 Rebuilding BusyWeeksFromDateTimes…")
3245
+ self.cursor.execute("DELETE FROM BusyWeeksFromDateTimes")
3246
+
3247
+ # Only include Records that are events (itemtype='*')
3248
+ self.cursor.execute("""
3249
+ SELECT dt.record_id, dt.start_datetime, dt.end_datetime
3250
+ FROM DateTimes AS dt
3251
+ JOIN Records AS r ON r.id = dt.record_id
3252
+ WHERE r.itemtype = '*'
3253
+ """)
3254
+ rows = self.cursor.fetchall()
3255
+ if not rows:
3256
+ print("⚠️ No event DateTimes entries found.")
3257
+ return
3258
+
3259
+ total_inserted = 0
3260
+ for record_id, start_str, end_str in rows:
3261
+ weeks = fine_busy_bits_for_event(start_str, end_str)
3262
+ for yw, arr in weeks.items():
3263
+ # ensure numpy array
3264
+ arr = np.asarray(arr, dtype=np.uint8)
3265
+
3266
+ # check if a row already exists for (record_id, week)
3267
+ self.cursor.execute(
3268
+ "SELECT busybits FROM BusyWeeksFromDateTimes WHERE record_id=? AND year_week=?",
3269
+ (record_id, yw),
3270
+ )
3271
+ row = self.cursor.fetchone()
3272
+ if row:
3273
+ existing = np.frombuffer(row[0], dtype=np.uint8)
3274
+ merged = np.maximum(existing, arr)
3275
+ else:
3276
+ merged = arr
3277
+
3278
+ # upsert
3279
+ self.cursor.execute(
3280
+ """
3281
+ INSERT INTO BusyWeeksFromDateTimes (record_id, year_week, busybits)
3282
+ VALUES (?, ?, ?)
3283
+ ON CONFLICT(record_id, year_week)
3284
+ DO UPDATE SET busybits = excluded.busybits
3285
+ """,
3286
+ (record_id, yw, merged.tobytes()),
3287
+ )
3288
+ total_inserted += 1
3289
+
3290
+ self.conn.commit()
3291
+ log_msg(f"✅ BusyWeeksFromDateTimes populated ({total_inserted} week-records).")
3292
+
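+ # Merge behavior: np.maximum([0, 1, 0], [1, 1, 0]) -> [1, 1, 0], so busy
+ # bits from overlapping segments in the same week OR together instead of
+ # the last segment overwriting earlier ones.
+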
3323
+ def get_last_instances(
3324
+ self,
3325
+ ) -> List[Tuple[int, int, int | None, str, str, str, str]]:
3326
+ """
3327
+ Retrieve the last instances of each record/job falling before today.
3328
+
3329
+ Returns:
3330
+ List of tuples:
3331
+ (
3332
+ datetime_id, # DateTimes.id
3333
+ record_id,
3334
+ job_id, # may be None
3335
+ subject,
3336
+ description,
3337
+ itemtype,
3338
+ instance_ts # TEXT 'YYYYMMDD' or 'YYYYMMDDTHHMM'
3339
+ )
3340
+ """
3341
+ today = datetime.now().strftime("%Y%m%dT%H%M")
3342
+
3343
+ self.cursor.execute(
3344
+ """
3345
+ WITH last_per_job AS (
3346
+ SELECT
3347
+ record_id,
3348
+ job_id,
3349
+ MAX(start_datetime) AS last_datetime
3350
+ FROM DateTimes
3351
+ WHERE start_datetime < ?
3352
+ GROUP BY record_id, job_id
3353
+ )
3354
+ SELECT
3355
+ d.id AS datetime_id,
3356
+ r.id AS record_id,
3357
+ d.job_id AS job_id,
3358
+ r.subject,
3359
+ r.description,
3360
+ r.itemtype,
3361
+ d.start_datetime AS instance_ts
3362
+ FROM last_per_job lp
3363
+ JOIN DateTimes d
3364
+ ON d.record_id = lp.record_id
3365
+ AND d.start_datetime = lp.last_datetime
3366
+ AND COALESCE(d.job_id, -1) = COALESCE(lp.job_id, -1)
3367
+ JOIN Records r
3368
+ ON r.id = d.record_id
3369
+ ORDER BY d.start_datetime DESC
3370
+ """,
3371
+ (today,),
3372
+ )
3373
+ return self.cursor.fetchall()
3374
+
3405
+ def get_next_instances(
3406
+ self,
3407
+ ) -> List[Tuple[int, int, int | None, str, str, str, str]]:
3408
+ """
3409
+ Retrieve the next instances of each record/job falling on or after today.
3410
+
3411
+ Returns:
3412
+ List of tuples:
3413
+ (
3414
+ datetime_id, # DateTimes.id
3415
+ record_id,
3416
+ job_id, # may be None
3417
+ subject,
3418
+ description,
3419
+ itemtype,
3420
+ instance_ts # TEXT 'YYYYMMDD' or 'YYYYMMDDTHHMM'
3421
+ )
3422
+ """
3423
+ today = datetime.now().strftime("%Y%m%dT%H%M")
3424
+
3425
+ self.cursor.execute(
3426
+ """
3427
+ WITH next_per_job AS (
3428
+ SELECT
3429
+ record_id,
3430
+ job_id,
3431
+ MIN(start_datetime) AS next_datetime
3432
+ FROM DateTimes
3433
+ WHERE start_datetime >= ?
3434
+ GROUP BY record_id, job_id
3435
+ )
3436
+ SELECT
3437
+ d.id AS datetime_id,
3438
+ r.id AS record_id,
3439
+ d.job_id AS job_id,
3440
+ r.subject,
3441
+ r.description,
3442
+ r.itemtype,
3443
+ d.start_datetime AS instance_ts
3444
+ FROM next_per_job np
3445
+ JOIN DateTimes d
3446
+ ON d.record_id = np.record_id
3447
+ AND d.start_datetime = np.next_datetime
3448
+ AND COALESCE(d.job_id, -1) = COALESCE(np.job_id, -1)
3449
+ JOIN Records r
3450
+ ON r.id = d.record_id
3451
+ ORDER BY d.start_datetime ASC
3452
+ """,
3453
+ (today,),
3454
+ )
3455
+ return self.cursor.fetchall()
3456
+
3457
+ def get_next_instance_for_record(
3458
+ self, record_id: int
3459
+ ) -> tuple[str, str | None] | None:
3460
+ """
3461
+ Return (start_datetime, end_datetime|NULL) as compact local-naive strings
3462
+ for the next instance of a single record, or None if none.
3463
+ """
3464
+ # start_datetime sorted ascending; end_datetime can be NULL
3465
+ self.cursor.execute(
3466
+ """
3467
+ SELECT start_datetime, end_datetime
3468
+ FROM DateTimes
3469
+ WHERE record_id = ?
3470
+ AND start_datetime >= ?
3471
+ ORDER BY start_datetime ASC
3472
+ LIMIT 1
3473
+ """,
3474
+ # now in compact local-naive format
3475
+ (record_id, _fmt_naive(datetime.now())),
3476
+ )
3477
+ row = self.cursor.fetchone()
3478
+ if row:
3479
+ return row[0], row[1]
3480
+ return None
3481
+
3482
+ def get_next_start_datetimes_for_record(
3483
+ self, record_id: int, job_id: int | None = None
3484
+ ) -> list[str]:
3485
+ """
3486
+ Return up to 2 upcoming start datetimes (as compact local-naive strings)
3487
+ for the given record (and optional job), sorted ascending.
3488
+ """
3489
+ sql = """
3490
+ SELECT start_datetime
3491
+ FROM DateTimes
3492
+ WHERE record_id = ?
3493
+ """
3494
+ # params = [record_id, _fmt_naive(datetime.now())]
3495
+ params = [
3496
+ record_id,
3497
+ ]
3498
+
3499
+ if job_id is not None:
3500
+ sql += " AND job_id = ?"
3501
+ params.append(job_id)
3502
+
3503
+ sql += " ORDER BY start_datetime ASC LIMIT 2"
3504
+
3505
+ self.cursor.execute(sql, params)
3506
+ return [row[0] for row in self.cursor.fetchall()]
3507
+
3508
+ def find_records(self, regex: str):
3509
+ regex_ci = f"(?i){regex}" # force case-insensitive
3510
+ today = datetime.now().strftime("%Y%m%dT%H%M")
3511
+ self.cursor.execute(
3512
+ """
3513
+ WITH
3514
+ LastInstances AS (
3515
+ SELECT record_id, MAX(start_datetime) AS last_datetime
3516
+ FROM DateTimes
3517
+ WHERE start_datetime < ?
3518
+ GROUP BY record_id
3519
+ ),
3520
+ NextInstances AS (
3521
+ SELECT record_id, MIN(start_datetime) AS next_datetime
3522
+ FROM DateTimes
3523
+ WHERE start_datetime >= ?
3524
+ GROUP BY record_id
3525
+ )
3526
+ SELECT r.id, r.subject, r.description, r.itemtype, li.last_datetime, ni.next_datetime
3527
+ FROM Records r
3528
+ LEFT JOIN LastInstances li ON r.id = li.record_id
3529
+ LEFT JOIN NextInstances ni ON r.id = ni.record_id
3530
+ WHERE r.subject REGEXP ? OR r.description REGEXP ?
3531
+ """,
3532
+ (today, today, regex_ci, regex_ci),
3533
+ )
3534
+ return self.cursor.fetchall()
3535
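+
+ # Note: SQLite has no built-in REGEXP operator; the query above only works
+ # if one has been registered on the connection, e.g. (sketch, using the
+ # module-level regexp() helper):
+ #
+ #     self.conn.create_function("REGEXP", 2, regexp)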
+
3536
+ # FIXME: should access record_id
3537
+ def update_tags_for_record(self, record_data):
3538
+ cur = self.conn.cursor()
3539
+ tags = record_data.pop("tags", [])
3540
+ record_data["tokens"] = json.dumps(record_data.get("tokens", []))
3541
+ record_data["jobs"] = json.dumps(record_data.get("jobs", []))
3542
+ if "id" in record_data:
3543
+ record_id = record_data["id"]
3544
+ columns = [k for k in record_data if k != "id"]
3545
+ assignments = ", ".join([f"{col} = ?" for col in columns])
3546
+ values = [record_data[col] for col in columns]
3547
+ values.append(record_id)
3548
+ cur.execute(f"UPDATE Records SET {assignments} WHERE id = ?", values)
3549
+ cur.execute("DELETE FROM RecordTags WHERE record_id = ?", (record_id,))
3550
+ else:
3551
+ columns = list(record_data.keys())
3552
+ values = [record_data[col] for col in columns]
3553
+ placeholders = ", ".join(["?"] * len(columns))
3554
+ cur.execute(
3555
+ f"INSERT INTO Records ({', '.join(columns)}) VALUES ({placeholders})",
3556
+ values,
3557
+ )
3558
+ record_id = cur.lastrowid
3559
+ for tag in tags:
3560
+ cur.execute("INSERT OR IGNORE INTO Tags (name) VALUES (?)", (tag,))
3561
+ cur.execute("SELECT id FROM Tags WHERE name = ?", (tag,))
3562
+ tag_id = cur.fetchone()[0]
3563
+ cur.execute(
3564
+ "INSERT INTO RecordTags (record_id, tag_id) VALUES (?, ?)",
3565
+ (record_id, tag_id),
3566
+ )
3567
+ self.conn.commit()
3568
+ return record_id
3569
+
3570
+ def get_tags_for_record(self, record_id):
3571
+ cur = self.conn.cursor()
3572
+ cur.execute(
3573
+ """
3574
+ SELECT Tags.name FROM Tags
3575
+ JOIN RecordTags ON Tags.id = RecordTags.tag_id
3576
+ WHERE RecordTags.record_id = ?
3577
+ """,
3578
+ (record_id,),
3579
+ )
3580
+ return [row[0] for row in cur.fetchall()]
3581
+
3582
+ def populate_urgency_from_record(self, record_id: int):
3583
+ record = self.get_record_as_dictionary(record_id)
3584
+ # bug_msg(f"updating urgency for {record_id = }, {record = }")
3585
+
3586
+ record_id = record["id"]
3587
+ itemtype = record["itemtype"]
3588
+ # log_msg(f"{record_id = }, {pinned = }, {record = }")
3589
+ modified_seconds = dt_str_to_seconds(record["modified"])
3590
+ extent_seconds = td_str_to_seconds(record.get("extent", "0m"))
3591
+ # notice_seconds will be 0 in the absence of notice
3592
+ notice_seconds = td_str_to_seconds(record.get("notice", "0m"))
3593
+ rruleset = record.get("rruleset", "")
3594
+ jobs = json.loads(record.get("jobs", "[]"))
3595
+ subject = record["subject"]
3596
+ # priority_map = self.env.config.urgency.priority.model_dump()
3597
+ priority_level = record.get("priority", None)
3598
+ # priority = priority_map.get(priority_level, 0)
3599
+ description = bool(record.get("description", ""))
3600
+
3601
+ if itemtype not in ["^", "~"]:
3602
+ # bug_msg(f"skipping urgency for {record = }")
3603
+ return
3604
+
3605
+ now_seconds = utc_now_to_seconds()
3606
+ pinned = self.is_pinned(record_id)
3607
+
3608
+ # Try to parse due from first RDATE in rruleset
3609
+ due_seconds = None
3610
+ if rruleset.startswith("RDATE:"):
3611
+ due_str = rruleset.split(":", 1)[1].split(",")[0]
3612
+ try:
3613
+ if "T" in due_str:
3614
+ dt = datetime.strptime(due_str.strip(), "%Y%m%dT%H%MZ").replace(tzinfo=tz.UTC)
3615
+ else:
3616
+ dt = datetime.strptime(due_str.strip(), "%Y%m%d")
3617
+ due_seconds = round(dt.timestamp())
3618
+ except Exception as e:
3619
+ log_msg(f"Invalid RDATE value: {due_str}\n{e}")
3620
+ if due_seconds and not notice_seconds:
3621
+ # With no @b notice, default notice to the due time itself so that
+ # a task with an @s due entry is never hidden (due - notice = 0 is never > now)
3623
+ notice_seconds = due_seconds
3624
+
3625
+ self.cursor.execute("DELETE FROM Urgency WHERE record_id = ?", (record_id,))
3626
+
3627
+ # Handle jobs if present
3628
+ if jobs:
3629
+ for job in jobs:
3630
+ status = job.get("status", "")
3631
+ if status != "available":
3632
+ continue
3633
+ job_id = job.get("id")
3634
+ subject = job.get("display_subject", subject)
3635
+
3636
+ job_due = due_seconds
3637
+ if job_due:
3638
+ b = td_str_to_seconds(job.get("b", "0m"))
3639
+ s = td_str_to_seconds(job.get("s", "0m"))
3640
+ if b:
3641
+ hide = job_due - b > now_seconds
3642
+ if hide:
3643
+ continue
3644
+ job_due += s
3645
+
3646
+ job_extent = td_str_to_seconds(job.get("e", "0m"))
3647
+ blocking = job.get("blocking") # assume already computed elsewhere
3648
+
3649
+ urgency, color, weights = self.compute_urgency.from_args_and_weights(
3650
+ now=now_seconds,
3651
+ modified=modified_seconds,
3652
+ due=job_due,
3653
+ extent=job_extent,
3654
+ priority_level=priority_level,
3655
+ blocking=blocking,
3656
+ description=description,
3657
+ jobs=True,
3658
+ pinned=pinned,
3659
+ )
3660
+
3661
+ self.cursor.execute(
3662
+ """
3663
+ INSERT INTO Urgency (record_id, job_id, subject, urgency, color, status, weights)
3664
+ VALUES (?, ?, ?, ?, ?, ?, ?)
3665
+ """,
3666
+ (
3667
+ record_id,
3668
+ job_id,
3669
+ subject,
3670
+ urgency,
3671
+ color,
3672
+ status,
3673
+ json.dumps(weights),
3674
+ ),
3675
+ )
3676
+
3677
+ else:
3678
+ hide = (
3679
+ due_seconds
3680
+ and notice_seconds
3681
+ and due_seconds - notice_seconds > now_seconds
3682
+ )
3683
+ if not hide:
3684
+ urgency, color, weights = self.compute_urgency.from_args_and_weights(
3685
+ now=now_seconds,
3686
+ modified=modified_seconds,
3687
+ due=due_seconds,
3688
+ extent=extent_seconds,
3689
+ priority_level=priority_level,
3690
+ description=description,
3691
+ jobs=False,
3692
+ pinned=pinned,
3693
+ )
3694
+
3695
+ self.cursor.execute(
3696
+ """
3697
+ INSERT INTO Urgency (record_id, job_id, subject, urgency, color, status, weights)
3698
+ VALUES (?, ?, ?, ?, ?, ?, ?)
3699
+ """,
3700
+ (
3701
+ record_id,
3702
+ None,
3703
+ subject,
3704
+ urgency,
3705
+ color,
3706
+ # record.get("status", "next"),
3707
+ "next",
3708
+ json.dumps(weights),
3709
+ ),
3710
+ )
3711
+
3712
+ self.conn.commit()
3713
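+
+ # Worked example of the hide window (illustrative numbers): with
+ # notice = 86_400 (one day), hide is True while due - 86_400 > now,
+ # so the task surfaces exactly one day before it is due.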
+
3714
+ def populate_all_urgency(self):
3715
+ self.cursor.execute("DELETE FROM Urgency")
3716
+ tasks = self.get_all_tasks()
3717
+ for task in tasks:
3718
+ # log_msg(f"adding to urgency: {task['itemtype'] = }, {task = }")
3719
+ self.populate_urgency_from_record(task)
3720
+ self.conn.commit()
3721
+
3722
+ def update_urgency(self, urgency_id: int):
3723
+ """
3724
+ Recalculate urgency score for a given entry using only fields in the Urgency table.
3725
+ """
3726
+ self.cursor.execute("SELECT urgency_id FROM ActiveUrgency WHERE id = 1")
3727
+ row = self.cursor.fetchone()
3728
+ active_id = row[0] if row else None
3729
+
3730
+ self.cursor.execute(
3731
+ """
3732
+ SELECT id, touched, status FROM Urgency WHERE id = ?
3733
+ """,
3734
+ (urgency_id,),
3735
+ )
3736
+ row = self.cursor.fetchone()
3737
+ if not row:
3738
+ return # skip nonexistent
3739
+
3740
+ urgency_id, touched_ts, status = row
3741
+ now_ts = int(datetime.now().timestamp())  # datetime.time (imported above) has no time()
3742
+
3743
+ # Example scoring
3744
+ age_days = (now_ts - touched_ts) / 86400 if touched_ts else 0
3745
+ active_bonus = 10.0 if urgency_id == active_id else 0.0
3746
+ status_weight = {
3747
+ "next": 5.0,
3748
+ "scheduled": 2.0,
3749
+ "waiting": -1.0,
3750
+ "someday": -5.0,
3751
+ }.get(status, 0.0)
3752
+
3753
+ score = age_days + active_bonus + status_weight
3754
+
3755
+ self.cursor.execute(
3756
+ """
3757
+ UPDATE Urgency SET urgency = ? WHERE id = ?
3758
+ """,
3759
+ (score, urgency_id),
3760
+ )
3761
+ self.conn.commit()
3762
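+
+ # Worked example: an entry touched 3 days ago that is the active entry
+ # and has status "next" scores 3.0 + 10.0 + 5.0 = 18.0.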
+
3763
+ def update_all_urgencies(self):
3764
+ self.cursor.execute("SELECT id FROM Urgency")
3765
+ for (urgency_id,) in self.cursor.fetchall():
3766
+ self.update_urgency(urgency_id)
3767
+
3768
+ def get_all(self):
3769
+ cur = self.conn.cursor()
3770
+ cur.execute("SELECT * FROM Records")
3771
+ return cur.fetchall()
3772
+
3773
+ def get_record(self, record_id):
3774
+ cur = self.conn.cursor()
3775
+ cur.execute("SELECT * FROM Records WHERE id = ?", (record_id,))
3776
+ return cur.fetchone()
3777
+
3778
+ def get_record_as_dictionary(self, record: int) -> dict | None:
3779
+ # bug_msg(f"get_record_as_dictionary called with {record = } ({type(record)=})")
3780
+ if isinstance(record, dict):
3781
+ return record
3782
+ cur = self.conn.cursor()
3783
+ cur.execute("SELECT * FROM Records WHERE id = ?", (record,))
3784
+ row = cur.fetchone()
3785
+ if row is None:
3786
+ return None
3787
+
3788
+ columns = [column[0] for column in cur.description]
3789
+ return dict(zip(columns, row))
3790
+
3791
+ def get_jobs_for_record(self, record_id):
3792
+ cur = self.conn.cursor()
3793
+ cur.execute("SELECT * FROM Records WHERE record_id = ?", (record_id,))
3794
+ return cur.fetchall()
3795
+
3796
+ def delete_record(self, record_id):
3797
+ cur = self.conn.cursor()
3798
+ cur.execute("DELETE FROM Records WHERE id = ?", (record_id,))
3799
+ self.conn.commit()
3800
+
3801
+ def count_records(self):
3802
+ cur = self.conn.cursor()
3803
+ cur.execute("SELECT COUNT(*) FROM Records")
3804
+ return cur.fetchone()[0]
3805
+
3806
+ def rebuild_busyweeks_from_source(self):
3807
+ """
3808
+ Aggregate all BusyWeeksFromDateTimes → BusyWeeks,
3809
+ collapsing to 35-slot weekly maps:
3810
+ (7 days × [1 all-day + 4 × 6-hour blocks]).
3811
+
3812
+ Ternary encoding:
3813
+ 0 = free
3814
+ 1 = busy
3815
+ 2 = conflict
3816
+ """
3817
+
3818
+ self.cursor.execute("SELECT DISTINCT year_week FROM BusyWeeksFromDateTimes")
3819
+ weeks = [row[0] for row in self.cursor.fetchall()]
3820
+ if not weeks:
3821
+ print("⚠️ No data to aggregate.")
3822
+ return
3823
+
3824
+ # bug_msg(f"Aggregating {len(weeks)} week(s)...")
3825
+
3826
+ for yw in weeks:
3827
+ # --- Gather all event arrays for this week
3828
+ self.cursor.execute(
3829
+ "SELECT busybits FROM BusyWeeksFromDateTimes WHERE year_week = ?",
3830
+ (yw,),
3831
+ )
3832
+ blobs = [
3833
+ np.frombuffer(row[0], dtype=np.uint8) for row in self.cursor.fetchall()
3834
+ ]
3835
+ if not blobs:
3836
+ continue
3837
+
3838
+ n = len(blobs[0])
3839
+ if any(arr.size != n for arr in blobs):
3840
+ print(f"⚠️ Skipping {yw}: inconsistent array sizes")
3841
+ continue
3842
+
3843
+ # Stack vertically -> shape (num_events, 679)
3844
+ stack = np.vstack(blobs)
3845
+
3846
+ # Count per slot
3847
+ counts = stack.sum(axis=0)
3848
+
3849
+ # Collapse fine bits into ternary (0 free / 1 busy / 2 conflict)
3850
+ merged = np.where(counts >= 2, 2, np.where(counts >= 1, 1, 0)).astype(
3851
+ np.uint8
3852
+ )
3853
+
3854
+ # Reduce 679 fine bits → 35 coarse blocks (7 × [1+4])
3855
+ merged = _reduce_to_35_slots(merged)
3856
+
3857
+ # Serialize as a compact string of ternary digits
3859
+
3860
+ bits_str = "".join(str(int(x)) for x in merged)
3861
+ self.cursor.execute(
3862
+ """
3863
+ INSERT INTO BusyWeeks (year_week, busybits)
3864
+ VALUES (?, ?)
3865
+ ON CONFLICT(year_week)
3866
+ DO UPDATE SET busybits = excluded.busybits
3867
+ """,
3868
+ (yw, bits_str),
3869
+ )
3870
+
3871
+ self.conn.commit()
3872
+ # bug_msg("✅ BusyWeeks aggregation complete.")
3873
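+
+ # Sketch of the ternary collapse used above: per-slot counts [0, 1, 3, 2]
+ # from the stacked event rows become [0, 1, 2, 2] -- any slot covered by
+ # two or more events is a conflict:
+ #
+ #     np.where(counts >= 2, 2, np.where(counts >= 1, 1, 0))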
+
3874
+ def show_busy_week(self, year_week: str):
3875
+ """
3876
+ Display the 7×96 busy/conflict map for a given ISO year-week.
+ Note: expects the fine 672-slot encoding; rows written by
+ rebuild_busyweeks_from_source use the coarser 35-slot form and will be
+ reported as an unexpected length.
3877
+
3878
+ Reads from BusyWeeks, decodes the blob, and prints 7 lines:
3879
+ - one per weekday (Mon → Sun)
3880
+ - each line shows 96 characters (15-min slots)
3881
+ 0 = free, 1 = busy, 2 = conflict
3882
+
3883
+ Example:
3884
+ Mon 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
3885
+ Tue 000000000000111100000000...
3886
+ ...
3887
+ """
3888
+ self.cursor.execute(
3889
+ "SELECT busybits FROM BusyWeeks WHERE year_week = ?",
3890
+ (year_week,),
3891
+ )
3892
+ row = self.cursor.fetchone()
3893
+ if not row:
3894
+ print(f"No BusyWeeks entry for {year_week}")
3895
+ return
3896
+
3897
+ # Decode the 672-slot array
3898
+ arr = np.frombuffer(row[0], dtype=np.uint8)
3899
+ if arr.size != 672:
3900
+ print(f"Unexpected busybits length: {arr.size}")
3901
+ return
3902
+
3903
+ # Split into 7 days × 96 slots
3904
+ days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
3905
+ slots_per_day = 96
3906
+
3907
+ print(f"🗓 Busy/conflict map for {year_week}\n")
3908
+ for i, day in enumerate(days):
3909
+ start = i * slots_per_day
3910
+ end = start + slots_per_day
3911
+ line = "".join(str(x) for x in arr[start:end])
3912
+ print(f"{day:<4}{line}")
3913
+
3914
+ def show_busy_week_pretty(self, year_week: str):
3915
+ """
3916
+ Display a 7×96 busy/conflict map for a given ISO year-week with color and hour markers.
3917
+ 0 = free, 1 = busy, 2 = conflict (colored red).
3918
+
3919
+ Uses 15-min resolution; 96 slots per day.
3920
+ """
3921
+ console = Console()
3922
+
3923
+ self.cursor.execute(
3924
+ "SELECT busybits FROM BusyWeeks WHERE year_week = ?",
3925
+ (year_week,),
3926
+ )
3927
+ row = self.cursor.fetchone()
3928
+ if not row:
3929
+ console.print(f"[red]No BusyWeeks entry for {year_week}[/red]")
3930
+ return
3931
+
3932
+ arr = np.frombuffer(row[0], dtype=np.uint8)
3933
+ if arr.size != 672:
3934
+ console.print(f"[red]Unexpected busybits length: {arr.size}[/red]")
3935
+ return
3936
+
3937
+ days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
3938
+ slots_per_day = 96 # 96 x 15min = 24h
3939
+ hours = [f"{h:02d}" for h in range(24)]
3940
+
3941
+ # Header row: hour markers
3942
+ header = " " # spacing before first hour
3943
+ for h in hours:
3944
+ header += h + " " * 3 # one char per 15 min slot
3945
+ console.print(f"[bold cyan]🗓 Busy/conflict map for {year_week}[/bold cyan]\n")
3946
+ console.print(header)
3947
+
3948
+ for i, day in enumerate(days):
3949
+ start = i * slots_per_day
3950
+ end = start + slots_per_day
3951
+ line_bits = arr[start:end]
3952
+
3953
+ text_line = Text()
3954
+ for bit in line_bits:
3955
+ if bit == 0:
3956
+ text_line.append("·", style="dim") # free
3957
+ elif bit == 1:
3958
+ text_line.append("█", style="yellow") # busy
3959
+ elif bit == 2:
3960
+ text_line.append("█", style="bold red") # conflict
3961
+
3962
+ console.print(f"{day:<4}{text_line}")
3963
+
3964
+ def get_busy_bits_for_week(self, year_week: str) -> list[int]:
3965
+ """
3966
+ Return a list of 35 ternary busy bits (0=free, 1=busy, 2=conflict)
3967
+ for the given ISO year-week string (e.g. '2025-41').
3968
+ """
3969
+ self.cursor.execute(
3970
+ "SELECT busybits FROM BusyWeeks WHERE year_week = ?", (year_week,)
3971
+ )
3972
+ row = self.cursor.fetchone()
3973
+ if not row:
3974
+ return [0] * 35
3975
+
3976
+ bits_str = row[0]
3977
+ if isinstance(bits_str, bytes):
3978
+ bits_str = bits_str.decode("utf-8")
3979
+
3980
+ bits = [int(ch) for ch in bits_str if ch in "012"]
3981
+ if len(bits) != 35:
3982
+ bits = (bits + [0] * 35)[:35]
3983
+ return bits
3984
+
3985
+ def move_bin(self, bin_name: str, new_parent_name: str) -> bool:
3986
+ """
3987
+ Move a bin under a new parent bin.
3988
+
3989
+ Example:
3990
+ move_bin("whatever", "journal")
3991
+
3992
+ Ensures both bins exist, removes any previous parent link,
3993
+ and inserts a new (bin_id → new_parent_id) link.
3994
+ Prevents cycles and self-parenting.
3995
+ """
3996
+ try:
3997
+ # Ensure the root/unlinked bins exist first
3998
+ root_id, unlinked_id = self.ensure_system_bins()
3999
+
4000
+ # Resolve both bin IDs (creating them if needed)
4001
+ bin_id = self.ensure_bin_exists(bin_name)
4002
+ new_parent_id = self.ensure_bin_exists(new_parent_name)
4003
+
4004
+ # ⚡ Efficiency check: prevent self-parenting before DB recursion
4005
+ if bin_id == new_parent_id:
4006
+ raise ValueError(f"Cannot move {bin_name!r} under itself.")
4007
+
4008
+ # 🌀 Recursive acyclicity check
4009
+ if self.is_descendant(bin_id, new_parent_id):
4010
+ raise ValueError(
4011
+ f"Cannot move {bin_name!r} under {new_parent_name!r}: "
4012
+ "would create a cycle."
4013
+ )
4014
+
4015
+ # Remove any existing parent link(s)
4016
+ self.cursor.execute("DELETE FROM BinLinks WHERE bin_id = ?", (bin_id,))
4017
+
4018
+ # Insert the new parent link
4019
+ self.cursor.execute(
4020
+ """
4021
+ INSERT OR REPLACE INTO BinLinks (bin_id, container_id)
4022
+ VALUES (?, ?)
4023
+ """,
4024
+ (bin_id, new_parent_id),
4025
+ )
4026
+
4027
+ self.conn.commit()
4028
+ print(f"[move_bin] Moved {bin_name!r} → {new_parent_name!r}")
4029
+ return True
4030
+
4031
+ except Exception as e:
4032
+ print(f"[move_bin] Error moving {bin_name!r} → {new_parent_name!r}: {e}")
4033
+ return False
4034
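+
+ # Usage sketch (bin names illustrative):
+ #
+ #     db.move_bin("quotations", "library")  # re-parent quotations under library
+ #     db.move_bin("library", "quotations")  # rejected: would create a cycle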
+
4035
+ def is_descendant(self, ancestor_id: int, candidate_id: int) -> bool:
4036
+ """
4037
+ Return True if candidate_id is a descendant of ancestor_id.
4038
+ """
4039
+ self.cursor.execute(
4040
+ """
4041
+ WITH RECURSIVE descendants(id) AS (
4042
+ SELECT bin_id FROM BinLinks WHERE container_id = ?
4043
+ UNION
4044
+ SELECT BinLinks.bin_id
4045
+ FROM BinLinks JOIN descendants ON BinLinks.container_id = descendants.id
4046
+ )
4047
+ SELECT 1 FROM descendants WHERE id = ? LIMIT 1
4048
+ """,
4049
+ (ancestor_id, candidate_id),
4050
+ )
4051
+ return self.cursor.fetchone() is not None
4052
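+
+ # Sketch: with links A -> B -> C (C inside B inside A), is_descendant(A, C)
+ # is True, so move_bin would refuse to place A under C and close the loop.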
+
4053
+ def ensure_bin_exists(self, name: str) -> int:
4054
+ disp = (name or "").strip()
4055
+ if not disp:
4056
+ raise ValueError("Bin name must be non-empty")
4057
+
4058
+ self.cursor.execute(
4059
+ "SELECT id FROM Bins WHERE name = ? COLLATE NOCASE", (disp,)
4060
+ )
4061
+ row = self.cursor.fetchone()
4062
+ if row:
4063
+ return row[0]
4064
+
4065
+ self.cursor.execute("INSERT INTO Bins (name) VALUES (?)", (disp,))
4066
+ self.conn.commit()
4067
+ bid = self.cursor.lastrowid
4068
+
4069
+ # 👇 cache: record the creation with unknown parent (None) for now
4070
+ if hasattr(self, "bin_cache"):
4071
+ self.bin_cache.on_create(bid, disp, None)
4072
+
4073
+ return bid
4074
+
4075
+ def ensure_bin_path(self, path: str) -> int:
+ """
+ Ensure the given bin path exists and return the final (leaf) bin_id.
+ Example:
+ "personal/quotations" will create:
+ - personal → root
+ - quotations → personal
+ If single-level, link under 'unlinked'.
+ """
4109
+ root_id, unlinked_id = self.ensure_system_bins()
4110
+ parts = [p.strip() for p in path.split("/") if p.strip()]
4111
+ if not parts:
4112
+ return root_id
4113
+
4114
+ parent_id = root_id
4115
+ if len(parts) == 1:
4116
+ parent_id = unlinked_id # single bin goes under 'unlinked'
4117
+
4118
+ for name in parts:
4119
+ bin_id = self.ensure_bin_exists(name)
4120
+ self.cursor.execute(
4121
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, ?)",
4122
+ (bin_id, parent_id),
4123
+ )
4124
+
4125
+ # 👇 cache: reflect the *actual* parent from DB after the insert/ignore
4126
+ if hasattr(self, "bin_cache"):
4127
+ self.cursor.execute(
4128
+ "SELECT container_id FROM BinLinks WHERE bin_id=?", (bin_id,)
4129
+ )
4130
+ row = self.cursor.fetchone()
4131
+ eff_parent = row[0] if row else None
4132
+ self.bin_cache.on_link(bin_id, eff_parent)
4133
+
4134
+ parent_id = bin_id
4135
+
4136
+ self.conn.commit()
4137
+ return parent_id
4138
+
4139
+ def ensure_system_bins(self) -> tuple[int, int]:
4140
+ root_id = self.ensure_bin_exists("root")
4141
+ unlinked_id = self.ensure_bin_exists("unlinked")
4142
+
4143
+ # link unlinked → root (if not already)
4144
+ self.cursor.execute(
4145
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, ?)",
4146
+ (unlinked_id, root_id),
4147
+ )
4148
+ # 👇 cache: reflect current effective parent
4149
+ if hasattr(self, "bin_cache"):
4150
+ self.bin_cache.on_link(unlinked_id, root_id)
4151
+
4152
+ # Ensure root has no parent (NULL)
4153
+ self.cursor.execute(
4154
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, NULL)",
4155
+ (root_id,),
4156
+ )
4157
+ # 👇 cache: reflect root’s parent = None
4158
+ if hasattr(self, "bin_cache"):
4159
+ self.bin_cache.on_link(root_id, None)
4160
+
4161
+ self.conn.commit()
4162
+ return root_id, unlinked_id
4163
+
4164
+ def link_record_to_bin_path(self, record_id: int, path: str) -> None:
4165
+ """
4166
+ Ensure the bin path exists and link the record to its leaf bin.
4167
+ Example:
4168
+ record_id = 42, path = "personal/quotations"
4169
+ → ensures bins, links 42 → quotations
4170
+ """
4171
+ leaf_bin_id = self.ensure_bin_path(path)
4172
+
4173
+ self.cursor.execute(
4174
+ """
4175
+ INSERT OR IGNORE INTO ReminderLinks (reminder_id, bin_id)
4176
+ VALUES (?, ?)
4177
+ """,
4178
+ (record_id, leaf_bin_id),
4179
+ )
4180
+ self.conn.commit()
4181
+
4182
+ # === Bin access helpers ===
4183
+ def get_bin_name(self, bin_id: int) -> str:
4184
+ """Return bin name by id."""
4185
+ self.cursor.execute("SELECT name FROM Bins WHERE id=?", (bin_id,))
4186
+ row = self.cursor.fetchone()
4187
+ return row[0] if row else f"[unknown #{bin_id}]"
4188
+
4189
+ def get_parent_bin(self, bin_id: int) -> dict | None:
4190
+ """Return parent bin as {'id': ..., 'name': ...} or None if root."""
4191
+ self.cursor.execute(
4192
+ """
4193
+ SELECT b2.id, b2.name
4194
+ FROM BinLinks bl
4195
+ JOIN Bins b2 ON bl.container_id = b2.id
4196
+ WHERE bl.bin_id = ?
4197
+ """,
4198
+ (bin_id,),
4199
+ )
4200
+ row = self.cursor.fetchone()
4201
+ return {"id": row[0], "name": row[1]} if row else None
4202
+
4203
+ # def get_subbins(self, bin_id: int) -> list[dict]:
4204
+ # """Return bins contained in this bin, with counts of subbins/reminders."""
4205
+ # self.cursor.execute(
4206
+ # """
4207
+ # SELECT b.id, b.name,
4208
+ # (SELECT COUNT(*) FROM BinLinks sub WHERE sub.container_id = b.id) AS subbins,
4209
+ # (SELECT COUNT(*) FROM ReminderLinks rl WHERE rl.bin_id = b.id) AS reminders
4210
+ # FROM BinLinks bl
4211
+ # JOIN Bins b ON bl.bin_id = b.id
4212
+ # WHERE bl.container_id = ?
4213
+ # ORDER BY b.name COLLATE NOCASE
4214
+ # """,
4215
+ # (bin_id,),
4216
+ # )
4217
+ # return [
4218
+ # {"id": row[0], "name": row[1], "subbins": row[2], "reminders": row[3]}
4219
+ # for row in self.cursor.fetchall()
4220
+ # ]
4221
+
4222
+ def get_subbins(
4223
+ self, bin_id: int, custom_order: list[str] | None = None
4224
+ ) -> list[dict]:
4225
+ """
4226
+ Return bins contained in this bin, with counts of subbins/reminders.
4227
+ If custom_order is provided (list of child names in order), place those
4228
+ first in that sequence, then any others alphabetically by name.
4229
+ """
4230
+ self.cursor.execute(
4231
+ """
4232
+ SELECT b.id, b.name,
4233
+ (SELECT COUNT(*) FROM BinLinks sub WHERE sub.container_id = b.id) AS subbins,
4234
+ (SELECT COUNT(*) FROM ReminderLinks rl WHERE rl.bin_id = b.id) AS reminders
4235
+ FROM BinLinks bl
4236
+ JOIN Bins b ON bl.bin_id = b.id
4237
+ WHERE bl.container_id = ?
4238
+ """,
4239
+ (bin_id,),
4240
+ )
4241
+ results = [
4242
+ {"id": row[0], "name": row[1], "subbins": row[2], "reminders": row[3]}
4243
+ for row in self.cursor.fetchall()
4244
+ ]
4245
+
4246
+ if custom_order:
4247
+
4248
+ def sort_key(ch):
4249
+ try:
4250
+ idx = custom_order.index(ch["name"])
4251
+ return (0, idx)
4252
+ except ValueError:
4253
+ return (1, ch["name"].lower())
4254
+
4255
+ return sorted(results, key=sort_key)
4256
+ else:
4257
+ return sorted(results, key=lambda ch: ch["name"].lower())
4258
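+
+ # Sketch: with children ["alpha", "beta", "inbox"] and custom_order=["inbox"],
+ # the result lists inbox first, then alpha and beta alphabetically.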
+
4259
+ # def apply_flags(self, record_id: int, subject: str) -> str:
4260
+ # """
4261
+ # Append any flags from Records.flags (e.g. 𝕒𝕘𝕠𝕣) to the given subject.
4262
+ # """
4263
+ # row = self.get_record_as_dictionary(record_id)
4264
+ # if not row:
4265
+ # return subject
4266
+ #
4267
+ # flags = f" {row.get('flags')}" or ""
4268
+ # log_msg(f"{row = }, {flags = }")
4269
+ # if not flags:
4270
+ # return subject
4271
+ #
4272
+ # return subject + flags
4273
+
4274
+ def get_reminders_in_bin(self, bin_id: int) -> list[dict]:
4275
+ """Return reminders linked to this bin."""
4276
+ self.cursor.execute(
4277
+ """
4278
+ SELECT r.id, r.subject, r.itemtype
4279
+ FROM ReminderLinks rl
4280
+ JOIN Records r ON rl.reminder_id = r.id
4281
+ WHERE rl.bin_id = ?
4282
+ ORDER BY r.subject COLLATE NOCASE
4283
+ """,
4284
+ (bin_id,),
4285
+ )
4286
+ return [
4287
+ {
4288
+ "id": row[0],
4289
+ # "subject": self.apply_flags(row[0], row[1]),
4290
+ "subject": row[1],
4291
+ "itemtype": row[2],
4292
+ }
4293
+ for row in self.cursor.fetchall()
4294
+ ]
4295
+
4296
+ # ---------- New, non-colliding helpers ----------
4297
+
4298
+ def ensure_root_exists(self) -> int:
4299
+ """Return id for 'root' (creating/anchoring it if needed)."""
4300
+ root_id, _ = self.ensure_system_bins()
4301
+ return root_id
4302
+
4303
+ def ensure_root_children(self, names: list[str]) -> dict[str, int]:
4304
+ """
4305
+ Ensure lowercased children live directly under root; returns {name: id}.
4306
+ Idempotent and corrects mis-parented roots.
4307
+ """
4308
+ root_id = self.ensure_root_exists()
4309
+ out: dict[str, int] = {}
4310
+ for name in names:
4311
+ nm = (name or "").strip().lower() # ← roots are canonical lowercase
4312
+ cid = self.ensure_bin_exists(nm)
4313
+
4314
+ parent = self.get_parent_bin(cid) # {'id','name'} or None
4315
+ if not parent or parent["name"].lower() != "root":
4316
+ self.move_bin(nm, "root") # cycle-safe re-anchor
4317
+
4318
+ self.cursor.execute(
4319
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, ?)",
4320
+ (cid, root_id),
4321
+ )
4322
+ out[nm] = cid
4323
+
4324
+ self.conn.commit()
4325
+ return out
4326
+
4327
+ def ensure_bin(
4328
+ self, name: str, parent_id: int | None = None, *, allow_reparent: bool = False
4329
+ ) -> int:
4330
+ nm = (name or "").strip()
4331
+ if not nm:
4332
+ raise ValueError("Bin name must be non-empty")
4333
+ bin_id = self.ensure_bin_exists(nm)
4334
+ if parent_id is None:
4335
+ parent_id = self.ensure_root_exists()
4336
+
4337
+ parent = self.get_parent_bin(bin_id)
4338
+ if parent is None:
4339
+ # no parent yet — just insert
4340
+ self.cursor.execute(
4341
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, ?)",
4342
+ (bin_id, parent_id),
4343
+ )
4344
+ self.conn.commit()
4345
+ else:
4346
+ # already has a parent
4347
+ if allow_reparent and parent["id"] != parent_id:
4348
+ # figure out parent's name for move_bin(); cheapest is to query it
4349
+ desired_parent_name = self.get_bin_name(parent_id)
4350
+ self.move_bin(nm, desired_parent_name)
4351
+
4352
+ return bin_id
4353
+
4354
+ def link_record_to_bin(self, record_id: int, bin_id: int) -> None:
4355
+ self.cursor.execute(
4356
+ "INSERT OR IGNORE INTO ReminderLinks(reminder_id, bin_id) VALUES (?, ?)",
4357
+ (record_id, bin_id),
4358
+ )
4359
+ self.conn.commit()
4360
+
4361
+ def unlink_record_from_bins(
4362
+ self, record_id: int, *, only_tag_bins: bool | None = None
4363
+ ) -> None:
4364
+ """
4365
+ only_tag_bins=None -> unlink ALL links for record_id
4366
+ only_tag_bins=True -> unlink only tags:*
4367
+ only_tag_bins=False -> unlink only non-tags
4368
+ """
4369
+ if only_tag_bins is None:
4370
+ self.cursor.execute(
4371
+ "DELETE FROM ReminderLinks WHERE reminder_id=?", (record_id,)
4372
+ )
4373
+ elif only_tag_bins is True:
4374
+ self.cursor.execute(
4375
+ """
4376
+ DELETE FROM ReminderLinks
4377
+ WHERE reminder_id=?
4378
+ AND bin_id IN (SELECT id FROM Bins WHERE name LIKE 'tags:%')
4379
+ """,
4380
+ (record_id,),
4381
+ )
4382
+ else:
4383
+ self.cursor.execute(
4384
+ """
4385
+ DELETE FROM ReminderLinks
4386
+ WHERE reminder_id=?
4387
+ AND bin_id NOT IN (SELECT id FROM Bins WHERE name LIKE 'tags:%')
4388
+ """,
4389
+ (record_id,),
4390
+ )
4391
+ self.conn.commit()
4392
+
4393
+ # ---- tokens → links glue (single source of truth) ----
4394
+
4395
+ def _tokens_list(self, tokens_obj) -> list[dict]:
4396
+ """Accept list or JSON string; normalize to list[dict]."""
4397
+ if tokens_obj is None:
4398
+ return []
4399
+ if isinstance(tokens_obj, str):
4400
+ try:
+ # json is imported at module level
+ return json.loads(tokens_obj) or []
4404
+ except Exception:
4405
+ return []
4406
+ return list(tokens_obj)
4407
+
4408
+ # def _extract_tag_and_bin_names(self, item) -> tuple[list[str], list[str]]:
4409
+ # """
4410
+ # Read '@t <name>' and '@b <name>' from item.tokens.
4411
+ # tokens are dicts; we rely on keys: t='@', k in {'t','b'}, token='@t blue'
4412
+ # """
4413
+ # tokens = self._tokens_list(getattr(item, "tokens", []))
4414
+ # tags: list[str] = []
4415
+ # bins: list[str] = []
4416
+ # for t in tokens:
4417
+ # if t.get("t") != "@":
4418
+ # continue
4419
+ # k = t.get("k")
4420
+ # raw = t.get("token", "")
4421
+ # value = ""
4422
+ # if isinstance(raw, str) and " " in raw:
4423
+ # value = raw.split(" ", 1)[1].strip()
4424
+ # if not value:
4425
+ # continue
4426
+ # if k == "t":
4427
+ # tags.append(value)
4428
+ # elif k == "b":
4429
+ # bins.append(value)
4430
+ # return tags, bins
4431
+
4432
+ def relink_bins_for_record(
4433
+ self, record_id: int, item, *, default_parent_name: str = "unlinked"
4434
+ ) -> None:
4435
+ """
4436
+ Rebuild ReminderLinks for bins only.
4437
+
4438
+ Behavior:
4439
+ - Always unlinks all existing bin links for this record.
4440
+ - Preferred input: item.bin_paths (list[list[str]]).
4441
+ - Fallback: simple '@b <leaf>' tokens via item.simple_bins (list[str]).
4442
+ - No tag handling — hashtags are now stored in Hashtags table separately.
4443
+ """
4444
+
4445
+ # Ensure required default parent exists (usually "unlinked").
4446
+ defaults = self.ensure_root_children([default_parent_name])
4447
+ default_parent_id = defaults[default_parent_name]
4448
+
4449
+ # -------- 1) Clear all existing bin links --------
4450
+ self.unlink_record_from_bins(record_id)
4451
+
4452
+ # -------- 2) Preferred: hierarchical bin paths --------
4453
+ bin_paths: list[list[str]] = getattr(item, "bin_paths", []) or []
4454
+ if bin_paths:
4455
+ # BinPathProcessor handles creation, normalization, parent fixes, linking.
4456
+ _norm_tokens, _log, _leaf_ids = self.binproc.assign_record_many(
4457
+ record_id, bin_paths
4458
+ )
4459
+ return # fully handled
4460
+
4461
+ # -------- 3) Fallback: simple '@b <leaf>' tokens --------
4462
+ simple_bins: list[str] = getattr(item, "simple_bins", []) or []
4463
+ for name in simple_bins:
4464
+ nm = name.strip()
4465
+ if not nm:
4466
+ continue
4467
+ bid = self.ensure_bin(nm, parent_id=default_parent_id)
4468
+ self.link_record_to_bin(record_id, bid)
4469
+
4470
+ # --- New helpers for tags treated as bins ---
4471
+ def get_root_bin_id(self) -> int:
4472
+ # Reuse your existing, tested anchor
4473
+ return self.ensure_root_exists()
4474
+
4475
+ def _make_crumb(self, bin_id: int | None):
4476
+ """Return [(id, name), ...] from root to current."""
4477
+ if bin_id is None:
4478
+ rid = self.ensure_root_exists()
4479
+ return [(rid, "root")]
4480
+ # climb using your get_parent_bin
4481
+ chain = []
4482
+ cur = bin_id
4483
+ while cur is not None:
4484
+ name = self.get_bin_name(cur)
4485
+ chain.append((cur, name))
4486
+ parent = self.get_parent_bin(cur) # {'id','name'} or None
4487
+ cur = parent["id"] if parent else None
4488
+ return list(reversed(chain)) or [(self.ensure_root_exists(), "root")]
4489
+
4490
+ def get_bin_summary(self, bin_id: int | None, *, filter_text: str | None = None):
4491
+ """
4492
+ Returns:
4493
+ children -> [ChildBinRow]
4494
+ reminders -> [ReminderRow]
4495
+ crumb -> [(id, name), ...]
4496
+ Uses only existing DatabaseManager public methods.
4497
+ """
4498
+ # 1) children (uses your counts + sort)
4499
+ raw_children = self.get_subbins(
4500
+ bin_id if bin_id is not None else self.get_root_bin_id()
4501
+ )
4502
+ # shape: {"id","name","subbins","reminders"}
4503
+ children = [
4504
+ ChildBinRow(
4505
+ bin_id=c["id"],
4506
+ name=c["name"],
4507
+ child_ct=c["subbins"],
4508
+ rem_ct=c["reminders"],
4509
+ )
4510
+ for c in raw_children
4511
+ ]
4512
+
4513
+ # 2) reminders (linked via ReminderLinks)
4514
+ raw_reminders = self.get_reminders_in_bin(
4515
+ bin_id if bin_id is not None else self.get_root_bin_id()
4516
+ )
4517
+ # shape: {"id","subject","itemtype"}
4518
+ reminders = [
4519
+ ReminderRow(
4520
+ record_id=r["id"],
4521
+ subject=r["subject"],
4522
+ # keep optional fields absent; view handles it
4523
+ )
4524
+ for r in raw_reminders
4525
+ ]
4526
+
4527
+ # 3) apply filter (controller-level; no new SQL)
4528
+ if filter_text:
4529
+ f = filter_text.casefold()
4530
+ children = [c for c in children if f in c.name.casefold()]
4531
+ reminders = [r for r in reminders if f in r.subject.casefold()]
4532
+
4533
+ # 4) crumb
4534
+ crumb = self._make_crumb(
4535
+ bin_id if bin_id is not None else self.get_root_bin_id()
4536
+ )
4537
+ return children, reminders, crumb
4538
+
4539
+ def get_reminder_details(self, record_id: int) -> str:
4540
+ # Minimal, safe detail using your existing schema
4541
+ row = self.cursor.execute(
4542
+ "SELECT subject, itemtype FROM Records WHERE id=?",
4543
+ (record_id,),
4544
+ ).fetchone()
4545
+ if not row:
4546
+ return "[b]Unknown reminder[/b]"
4547
+ subject, itemtype = row
4548
+ return f"[b]{subject}[/b]\n[dim]type:[/dim] {itemtype or '—'}"