tklr-dgraham 0.0.0rc11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of tklr-dgraham might be problematic.

tklr/model.py ADDED
@@ -0,0 +1,3973 @@
1
+ from __future__ import annotations
2
+ import os
3
+ import sqlite3
4
+ import json
5
+ from typing import Optional
6
+ from datetime import date, datetime, time, timedelta
7
+ from dateutil.rrule import rrulestr
8
+ from dateutil.parser import parse
9
+
10
+ from typing import List, Tuple, Optional, Dict, Any, Set, Iterable
11
+ from rich import print
12
+ from tklr.tklr_env import TklrEnvironment
13
+ from dateutil import tz
14
+ from dateutil.tz import gettz
15
+ import math
16
+ import numpy as np
17
+ from pathlib import Path
18
+ from dataclasses import dataclass, field
19
+
20
+ import shutil
21
+
22
+ # from textwrap import indent
23
+ from rich.console import Console
24
+ from rich.text import Text
25
+
26
+
27
+ from .shared import (
28
+ HRS_MINS,
29
+ log_msg,
30
+ format_datetime,
31
+ datetime_from_timestamp,
32
+ duration_in_words,
33
+ datetime_in_words,
34
+ fmt_local_compact,
35
+ parse_local_compact,
36
+ fmt_utc_z,
37
+ parse_utc_z,
38
+ get_anchor,
39
+ )
40
+
41
+ import re
42
+ from .item import Item
43
+ from collections import defaultdict, deque
44
+
45
+
46
+ anniversary_regex = re.compile(r"!(\d{4})!")
47
+
48
+ BIN_ROOTS = {
49
+ "activities",
50
+ "journal",
51
+ "library",
52
+ "people",
53
+ "places",
54
+ "projects",
55
+ "seedbed",
56
+ "tags",
57
+ "unlinked",
58
+ }
59
+
60
+ BIN_PATHS = [
61
+ ["books", "library"],
62
+ ["movies", "library"],
63
+ ["series", "library"],
64
+ ["poetry", "library"],
65
+ ["quotations", "library"],
66
+ ["tosser", "seedbed"],
67
+ ["seed", "seedbed"],
68
+ ["seedling", "seedbed"],
69
+ ["plant", "seedbed"],
70
+ ["keeper", "seedbed"],
71
+ ]
72
+
73
+
74
+ def regexp(pattern, value):
75
+ try:
76
+ return re.search(pattern, value) is not None
77
+ except TypeError:
78
+ return False # Handle None values gracefully
79
+
80
+
81
+ def utc_now_string():
82
+ """Return current UTC time as 'YYYYMMDDTHHMMSS'."""
83
+ return datetime.utcnow().strftime("%Y%m%dT%H%MZ")
84
+
85
+
86
+ def utc_now_to_seconds():
87
+ return round(datetime.now(tz.UTC).timestamp())
88
+
89
+
90
+ def is_date(obj):
91
+ return isinstance(obj, date) and not isinstance(obj, datetime)
92
+
93
+
94
+ DATE_FMT = "%Y%m%d"
95
+ DT_FMT = "%Y%m%dT%H%M"
96
+
97
+
98
+ def _fmt_date(d: date) -> str:
99
+ return d.strftime(DATE_FMT)
100
+
101
+
102
+ def _fmt_naive(dt: datetime) -> str:
103
+ return dt.strftime(DT_FMT)
104
+
105
+
106
+ def _fmt_utc(dt_aware_utc: datetime) -> str:
107
+ return dt_aware_utc.astimezone(tz.UTC).strftime(DT_FMT) + "Z"
108
+
109
+
110
+ def _to_local_naive(dt: datetime) -> datetime:
111
+ """
112
+ Convert aware -> local-naive; leave naive unchanged.
113
+ Assumes dt is datetime (not date).
114
+ """
115
+ if dt.tzinfo is not None:
116
+ dt = dt.astimezone(tz.tzlocal()).replace(tzinfo=None)
117
+ return dt
118
+
119
+
120
+ def _to_key(dt: datetime) -> str:
121
+ """Naive-local datetime -> 'YYYYMMDDTHHMMSS' string key."""
122
+ return dt.strftime("%Y%m%dT%H%M")
123
+
124
+
125
+ def _today_key() -> str:
126
+ """'YYYYMMDDTHHMMSS' for now in local time, used for lexicographic comparisons."""
127
+ return datetime.now().strftime("%Y%m%dT%H%M")
128
+
129
+
130
+ def _split_span_local_days(
131
+ start_local: datetime, end_local: datetime
132
+ ) -> list[tuple[datetime, datetime]]:
133
+ """
134
+ Split a local-naive span into same-day segments.
135
+ Inclusive start, inclusive end per segment.
136
+ """
137
+ if end_local <= start_local:
138
+ return [(start_local, end_local)]
139
+
140
+ segs: list[tuple[datetime, datetime]] = []
141
+ cur_start = start_local
142
+
143
+ while cur_start.date() < end_local.date():
144
+ day_end = datetime.combine(cur_start.date(), time(23, 59, 59))
145
+ segs.append((cur_start, day_end))
146
+ next_day_start = datetime.combine(
147
+ cur_start.date() + timedelta(days=1), time(0, 0, 0)
148
+ )
149
+ cur_start = next_day_start
150
+
151
+ segs.append((cur_start, end_local))
152
+ return segs
153
+
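+ # --- Editor's usage sketch (not in the original release) ---
+ # _split_span_local_days splits a local-naive span crossing midnight into
+ # per-day segments with inclusive endpoints, e.g.
+ # _split_span_local_days(datetime(2025, 6, 1, 22, 0), datetime(2025, 6, 2, 1, 0))
+ # -> [(2025-06-01 22:00, 2025-06-01 23:59:59), (2025-06-02 00:00, 2025-06-02 01:00)]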
154
+
155
+ def td_str_to_td(duration_str: str) -> timedelta:
156
+ """Convert a duration string like '1h30m20s' into a timedelta."""
157
+ duration_str = duration_str.strip()
158
+ sign = "+"
159
+ if duration_str and duration_str[0] in ["+", "-"]:
160
+ sign = duration_str[0]
161
+ duration_str = duration_str[1:]
162
+
163
+ pattern = r"(?:(\d+)w)?(?:(\d+)d)?(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?"
164
+ match = re.fullmatch(pattern, duration_str.strip())
165
+ if not match:
166
+ raise ValueError(f"Invalid duration format: '{duration_str}'")
167
+ weeks, days, hours, minutes, seconds = [int(x) if x else 0 for x in match.groups()]
168
+ if sign == "-":
169
+ return -timedelta(
170
+ weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds
171
+ )
172
+ else:
173
+ return timedelta(
174
+ weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds
175
+ )
176
+
177
+
178
+ def td_str_to_seconds(duration_str: str) -> int:
179
+ """Convert a duration string like '1h30m20s' into a timedelta."""
180
+ duration_str = duration_str.strip()
181
+ if not duration_str:
182
+ return 0
183
+ sign = "+"
184
+ if duration_str[0] in ["+", "-"]:
185
+ sign = duration_str[0]
186
+ duration_str = duration_str[1:]
187
+
188
+ pattern = r"(?:(\d+)w)?(?:(\d+)d)?(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?"
189
+ match = re.fullmatch(pattern, duration_str.strip())
190
+ if not match:
191
+ raise ValueError(f"Invalid duration format: '{duration_str}'")
192
+ weeks, days, hours, minutes, seconds = [int(x) if x else 0 for x in match.groups()]
193
+
194
+ # log_msg(f"{weeks = }, {days = }, {hours = }, {minutes = }, {seconds = }")
195
+
196
+ if sign == "-":
197
+ return -(weeks * 604800 + days * 86400 + hours * 3600 + minutes * 60 + seconds)
198
+ else:
199
+ return weeks * 604800 + days * 86400 + hours * 3600 + minutes * 60 + seconds
200
+
201
+
202
+ def dt_str_to_seconds(datetime_str: str) -> int | None:
203
+ """Convert a datetime string like '20250601T090000' into a datetime object."""
204
+ if not datetime_str:
205
+ return None
206
+ if "T" not in datetime_str:
207
+ datetime_str += "T000000"
208
+ try:
209
+ return round(datetime.strptime(datetime_str[:13], "%Y%m%dT%H%M").timestamp())
210
+
211
+ except ValueError:
212
+ return round(
213
+ datetime.strptime(datetime_str[:8], "%Y%m%d").timestamp()
214
+ ) # Allow date-only
215
+
216
+
217
+ def dt_to_dtstr(dt_obj: datetime) -> str:
218
+ """Convert a datetime object to 'YYYYMMDDTHHMM' format."""
219
+ if is_date:
220
+ return dt_obj.strftime("%Y%m%d")
221
+ return dt_obj.strftime("%Y%m%dT%H%M")
222
+
223
+
224
+ def td_to_tdstr(td_obj: timedelta) -> str:
225
+ """Convert a timedelta object to a compact string like '1h30m20s'."""
226
+ total = int(td_obj.total_seconds())
227
+ if total == 0:
228
+ return "0s"
229
+
230
+ w, remainder = divmod(total, 604800)
231
+
232
+ d, remainder = divmod(remainder, 86400)
233
+
234
+ h, remainder = divmod(remainder, 3600)
235
+
236
+ m, s = divmod(remainder, 60)
237
+
238
+ parts = []
239
+ if w:
240
+ parts.append(f"{d}w")
241
+ if d:
242
+ parts.append(f"{d}d")
243
+ if h:
244
+ parts.append(f"{h}h")
245
+ if m:
246
+ parts.append(f"{m}m")
247
+ if s:
248
+ parts.append(f"{s}s")
249
+
250
+ return "".join(parts)
251
+
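+ # --- Editor's usage sketch (not in the original release) ---
+ # The duration helpers round-trip compact strings, assuming the divmod
+ # fix above:
+ # td_str_to_td("1h30m") == timedelta(hours=1, minutes=30)
+ # td_str_to_seconds("-2d") == -172800
+ # td_to_tdstr(timedelta(days=2, minutes=5)) == "2d5m"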
252
+
253
+ # If you already have these helpers elsewhere, import and reuse them.
254
+ def _fmt_compact_local_naive(dt: datetime) -> str:
255
+ """Return local-naive 'YYYYMMDD' or 'YYYYMMDDTHHMMSS'."""
256
+ if dt.tzinfo is not None:
257
+ dt = dt.astimezone(tz.tzlocal()).replace(tzinfo=None)
258
+ if dt.hour == 0 and dt.minute == 0 and dt.second == 0:
259
+ return dt.strftime("%Y%m%d")
260
+ return dt.strftime("%Y%m%dT%H%M")
261
+
262
+
263
+ def _shift_from_parent(parent_dt: datetime, seconds: int) -> datetime:
264
+ """
265
+ Positive seconds = '&s 5d' means 5 days BEFORE parent => subtract.
266
+ Negative seconds => AFTER parent => add.
267
+ """
268
+ return parent_dt - timedelta(seconds=seconds)
269
+
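+ # --- Editor's usage sketch (not in the original release) ---
+ # The sign convention means a positive '&s 5d' offset is scheduled 5 days
+ # BEFORE the parent:
+ # _shift_from_parent(datetime(2025, 6, 10), td_str_to_seconds("5d"))
+ # -> datetime(2025, 6, 5, 0, 0)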
270
+
271
+ def _parse_jobs_json(jobs_json: str | None) -> list[dict]:
272
+ """
273
+ Parse your jobs list. Expects a list of dicts like:
274
+ {"~": "create plan", "s": "1w", "e": "1h", "i": 1, "status": "...", ...}
275
+ Returns a normalized list with keys: job_id, offset_str, extent_str, status.
276
+ """
277
+ if not jobs_json:
278
+ return []
279
+ try:
280
+ data = json.loads(jobs_json)
281
+ except Exception:
282
+ return []
283
+
284
+ jobs = []
285
+ if isinstance(data, list):
286
+ for j in data:
287
+ if isinstance(j, dict):
288
+ log_msg(f"json jobs: {j = }")
289
+ jobs.append(
290
+ {
291
+ "job_id": j.get("id"),
292
+ "offset_str": (j.get("s") or "").strip(),
293
+ "extent_str": (j.get("e") or "").strip(),
294
+ "status": (j.get("status") or "").strip().lower(),
295
+ "display_subject": (j.get("display_subject") or "").strip(),
296
+ }
297
+ )
298
+ return jobs
299
+
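+ # --- Editor's usage sketch (not in the original release) ---
+ # Given the raw jobs JSON '[{"~": "create plan", "s": "1w", "e": "1h", "id": 1}]',
+ # _parse_jobs_json yields
+ # [{"job_id": 1, "offset_str": "1w", "extent_str": "1h",
+ #   "status": "", "display_subject": ""}].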
300
+
301
+ # 6-hour windows within a day (local-naive)
302
+ WINDOWS = [
303
+ (0, 6), # bit 1: 00:00 - 06:00
304
+ (6, 12), # bit 2: 06:00 - 12:00
305
+ (12, 18), # bit 3: 12:00 - 18:00
306
+ (18, 24), # bit 4: 18:00 - 24:00
307
+ ]
308
+
309
+
310
+ def bits_to_int(bitstring: str) -> int:
311
+ """'0000101...' → integer."""
312
+ return int(bitstring, 2)
313
+
314
+
315
+ def int_to_bits(value: int) -> str:
316
+ """Integer → 35-bit '010...'."""
317
+ return format(value, "035b")
318
+
319
+
320
+ def or_aggregate(values: list[int]) -> int:
321
+ """Bitwise OR aggregate."""
322
+ acc = 0
323
+ for v in values:
324
+ acc |= v
325
+ return acc
326
+
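+ # --- Editor's usage sketch (not in the original release) ---
+ # Week masks combine with a bitwise OR, e.g.
+ # or_aggregate([bits_to_int("00101"), bits_to_int("10001")]) == bits_to_int("10101")
+ # and int_to_bits(...) pads the result back out to 35 characters.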
327
+
328
+ def _parse_local_naive(ts: str) -> datetime:
329
+ # "YYYYmmddTHHMM" → naive local datetime
330
+ return datetime.strptime(ts, "%Y%m%dT%H%M")
331
+
332
+
333
+ def _iso_year_week(d: datetime) -> str:
334
+ y, w, _ = d.isocalendar()
335
+ return f"{y:04d}-{w:02d}"
336
+
337
+
338
+ def fine_busy_bits_for_event(
339
+ start_str: str, end_str: str | None
340
+ ) -> dict[str, np.ndarray]:
341
+ """
342
+ Return dict of {year_week: 679-slot uint8 array}
343
+ (7 days × (1 all-day + 96 fifteen-minute blocks))
344
+ """
345
+ start = parse(start_str)
346
+
347
+ # --- handle end rules ---
348
+ end = parse(end_str) if end_str else None
349
+
350
+ if end is None and (start.hour != 0 or start.minute != 0):
351
+ # zero-extent event: contributes nothing
352
+ return {}
353
+
354
+ slot_minutes = 15
355
+ slots_per_day = 96
356
+ slots_per_week = 7 * (1 + slots_per_day) # 679
357
+ weeks: dict[str, np.ndarray] = {}
358
+
359
+ def yw_key(dt: datetime) -> str:
360
+ y, w, _ = dt.isocalendar()
361
+ return f"{y:04d}-{w:02d}"
362
+
363
+ cur = start
364
+ busy_count = 0
365
+ while True:
366
+ yw = yw_key(cur)
367
+ if yw not in weeks:
368
+ weeks[yw] = np.zeros(slots_per_week, dtype=np.uint8)
369
+
370
+ day_index = cur.weekday() # Mon=0
371
+ base = day_index * (1 + slots_per_day)
372
+
373
+ if end is None:
374
+ # all-day flag only
375
+ weeks[yw][base] = 1
376
+ else:
377
+ day_start = datetime.combine(cur.date(), datetime.min.time())
378
+ day_end = datetime.combine(cur.date(), datetime.max.time())
379
+ s = max(start, day_start)
380
+ e = min(end, day_end)
381
+
382
+ s_idx = (s.hour * 60 + s.minute) // slot_minutes
383
+ e_idx = (e.hour * 60 + e.minute) // slot_minutes
384
+ log_msg(f"{s_idx = }, {e_idx = }, {e_idx - s_idx = } ")
385
+ weeks[yw][base + 1 + s_idx : base + 1 + e_idx + 1] = 1
386
+ busy_count += np.count_nonzero(weeks[yw])
387
+
388
+ if end is None or cur.date() >= end.date():
389
+ break
390
+ cur += timedelta(days=1)
391
+ log_msg(f"{start_str = }, {end_str = }, {busy_count = }")
392
+ return weeks
393
+
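+ # --- Editor's usage sketch (not in the original release) ---
+ # For a 09:00-10:00 event on Monday 2025-06-02 (ISO week '2025-23'),
+ # s_idx = 540 // 15 = 36 and e_idx = 600 // 15 = 40, so the slice marks
+ # fine slots 36..40 of day 0 (the end index is inclusive):
+ # weeks = fine_busy_bits_for_event("20250602T0900", "20250602T1000")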
394
+
395
+ def _reduce_to_35_slots(arr: np.ndarray) -> np.ndarray:
396
+ """
397
+ Convert 679 fine bits (7 × (1 + 96)) into 35 coarse slots
398
+ (7 × [1 all-day + 4 × 6-hour blocks]).
399
+ """
400
+ days = 7
401
+ allday_bits = arr.reshape(days, 97)[:, 0]
402
+ quarters = arr.reshape(days, 97)[:, 1:] # 7×96
403
+
404
+ coarse = np.zeros((days, 5), dtype=np.uint8)
405
+
406
+ for d in range(days):
407
+ # all-day stays as-is
408
+ coarse[d, 0] = allday_bits[d]
409
+
410
+ # 4 six-hour ranges
411
+ for i in range(4):
412
+ start = i * 24 # 6h = 24 × 15min
413
+ end = start + 24
414
+ chunk = quarters[d, start:end]
415
+ if np.any(chunk == 2):
416
+ coarse[d, i + 1] = 2
417
+ elif np.any(chunk == 1):
418
+ coarse[d, i + 1] = 1
419
+ else:
420
+ coarse[d, i + 1] = 0
421
+
422
+ return coarse.flatten()
423
+
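+ # --- Editor's note (not in the original release) ---
+ # Each day's 96 fine slots collapse to four 6-hour blocks: a value of 2
+ # anywhere in a chunk (a conflict, as produced by aggregation elsewhere)
+ # wins, otherwise any 1 marks the block busy, otherwise the block is 0.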
424
+
425
+ class SafeDict(dict):
426
+ def __missing__(self, key):
427
+ # Return a placeholder or empty string
428
+ return f"{{{key}}}"
429
+
430
+
431
+ @dataclass
432
+ class BinPathConfig:
433
+ allow_reparent: bool = True
434
+ standard_roots: Set[str] = field(
435
+ default_factory=lambda: BIN_ROOTS
436
+ ) # anchored at root
437
+ standard_paths: List[List[str]] = field(default_factory=lambda: BIN_PATHS)
438
+
439
+
440
+ class BinPathProcessor:
441
+ def __init__(self, model, cfg: Optional[BinPathConfig] = None):
442
+ """
443
+ model: your Model instance (ensure_system_bins, ensure_root_children, move_bin, etc.)
444
+ """
445
+ self.m = model
446
+ self.cfg = cfg or BinPathConfig()
447
+ # Ensure system bins + standard roots exist at startup
448
+ self.m.ensure_system_bins()
449
+ if self.cfg.standard_roots:
450
+ self.m.ensure_root_children(sorted(self.cfg.standard_roots)) # idempotent
451
+ # NEW: ensure standard child paths exist + are correctly anchored
452
+ for parts in self.cfg.standard_paths or []:
453
+ try:
454
+ # parts: ["leaf", "parent", "grandparent", ...]
455
+ # apply_parts ensures/repairs hierarchy without touching records
456
+ _norm, _log, _leaf_id = self.apply_parts(parts)
457
+ # You could log _log somewhere if desired
458
+ except Exception as e:
459
+ # Fail soft: don’t break startup if one path is weird
460
+ print(f"[binpaths] error applying standard path {parts!r}: {e}")
461
+
462
+ @staticmethod
463
+ def canon(name: str) -> str:
464
+ return (name or "").strip()
465
+
466
+ def _is_unlinked(self, bin_id: int) -> bool:
467
+ """
468
+ Unlinked if no parent row in BinLinks OR parent is the explicit 'unlinked' bin.
469
+ """
470
+ parent = self.m.get_parent_bin(bin_id) # {'id','name'} or None
471
+ if parent is None:
472
+ return True
473
+ return self.canon(parent["name"]) == "unlinked"
474
+
475
+ def _ensure_standard_root_anchor(self, name: str) -> None:
476
+ """
477
+ Ensure standard roots exist directly under root.
478
+ """
479
+ self.m.ensure_root_children([name]) # puts child under root if missing
480
+
481
+ # --- New: operate on already-split parts instead of parsing a string ---
482
+
483
+ def apply_parts(self, parts: List[str]) -> Tuple[str, List[str], int]:
484
+ """
485
+ Process a bin path given as parts, e.g. ["lille","france","places"].
486
+ Interpretation: parts[0] is the leaf, following are ancestors (nearest first).
487
+ Returns: (normalized_token '@b <leaf>', log, leaf_bin_id)
488
+ """
489
+ log: List[str] = []
490
+
491
+ parts = [p for p in (parts or []) if (p or "").strip()]
492
+ if not parts:
493
+ raise ValueError("Empty @b parts")
494
+
495
+ leaf_name = self.canon(parts[0])
496
+ ancestors = [self.canon(p) for p in parts[1:]] # nearest first
497
+ log.append(f"Parsed leaf='{leaf_name}', ancestors={ancestors!r}")
498
+
499
+ # Ensure system bins present
500
+ root_id, unlinked_id = self.m.ensure_system_bins()
501
+
502
+ # Ensure leaf exists
503
+ leaf_id = self.m.ensure_bin_exists(leaf_name)
504
+ normalized = f"@b {leaf_name}"
505
+
506
+ # No ancestors case
507
+ if not ancestors:
508
+ if not self._is_unlinked(leaf_id):
509
+ log.append("Leaf already linked (not under 'unlinked'); no changes.")
510
+ return normalized, log, leaf_id
511
+ self._attach_if_missing(leaf_name, "unlinked", log)
512
+ log.append("Leaf had no parent; placed under 'unlinked'.")
513
+ return normalized, log, leaf_id
514
+
515
+ # Walk up the chain: leaf -> parent -> grandparent...
516
+ child_name = leaf_name
517
+ for anc in ancestors:
518
+ if anc in self.cfg.standard_roots:
519
+ self._ensure_standard_root_anchor(anc)
520
+ self._attach_if_missing(child_name, anc, log)
521
+ child_name = anc
522
+
523
+ top = ancestors[-1]
524
+ if top in self.cfg.standard_roots:
525
+ log.append(f"Ensured standard root '{top}' is anchored under root.")
526
+ return normalized, log, leaf_id
527
+
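+ # --- Editor's usage sketch (not in the original release) ---
+ # apply_parts(["lille", "france", "places"]) ensures the bins
+ # lille -> france -> places exist, anchors the standard root 'places'
+ # under root, and returns ('@b lille', log, leaf_id).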
528
+ def _attach_if_missing(
529
+ self, child_name: str, parent_name: str, log: List[str]
530
+ ) -> None:
531
+ """
532
+ Attach child under parent if not already so; reparenting via move_bin (cycle-safe).
533
+ """
534
+ try:
535
+ child_id = self.m.ensure_bin_exists(child_name)
536
+ parent_id = self.m.ensure_bin_exists(parent_name)
537
+
538
+ parent = self.m.get_parent_bin(child_id)
539
+ if parent and self.canon(parent["name"]) == self.canon(parent_name):
540
+ log.append(f"'{child_name}' already under '{parent_name}'.")
541
+ return
542
+
543
+ if (
544
+ (not self.cfg.allow_reparent)
545
+ and parent
546
+ and self.canon(parent["name"]) != self.canon(parent_name)
547
+ ):
548
+ log.append(
549
+ f"Skipped reparenting '{child_name}' (existing parent='{parent['name']}') "
550
+ f"-> requested '{parent_name}' (allow_reparent=False)"
551
+ )
552
+ return
553
+
554
+ ok = self.m.move_bin(child_name, parent_name)
555
+ log.append(
556
+ f"{'Attached' if ok else 'Failed to attach'} '{child_name}' under '{parent_name}'."
557
+ )
558
+ except Exception as e:
559
+ log.append(f"Error attaching '{child_name}' -> '{parent_name}': {e}")
560
+
561
+ # Convenience wrappers for your controller:
562
+
563
+ def assign_record_via_parts(
564
+ self, record_id: int, parts: List[str]
565
+ ) -> Tuple[str, List[str], int]:
566
+ """
567
+ Ensure/repair hierarchy for {parts} and link the record to the leaf.
568
+ """
569
+ normalized, log, leaf_id = self.apply_parts(parts)
570
+ self.m.link_record_to_bin(record_id, leaf_id) # idempotent
571
+ log.append(
572
+ f"Linked record {record_id} → bin {leaf_id} ('{self.m.get_bin_name(leaf_id)}')."
573
+ )
574
+ return normalized, log, leaf_id
575
+
576
+ def assign_record_many(
577
+ self, record_id: int, list_of_parts: List[List[str]]
578
+ ) -> Tuple[List[str], List[str], List[int]]:
579
+ """
580
+ Process multiple bin paths for a single record.
581
+ Returns: (normalized_tokens, combined_log, leaf_ids)
582
+ """
583
+ norm_tokens: List[str] = []
584
+ combined_log: List[str] = []
585
+ leaf_ids: List[int] = []
586
+
587
+ # De-duplicate exact paths to avoid redundant work
588
+ seen = set()
589
+ for parts in list_of_parts or []:
590
+ key = tuple(self.canon(p) for p in parts if (p or "").strip())
591
+ if not key or key in seen:
592
+ continue
593
+ seen.add(key)
594
+
595
+ norm, log, leaf_id = self.assign_record_via_parts(record_id, list(key))
596
+ norm_tokens.append(norm)
597
+ combined_log.extend(log)
598
+ leaf_ids.append(leaf_id)
599
+
600
+ return norm_tokens, combined_log, leaf_ids
601
+
602
+
603
+ # bin_cache.py
604
+ def _rev_path_for(
605
+ bid: int, name: Dict[int, str], parent: Dict[int, Optional[int]]
606
+ ) -> str:
607
+ parts: List[str] = []
608
+ cur = bid
609
+ while cur is not None:
610
+ parts.append(name[cur])
611
+ cur = parent.get(cur)
612
+ return "/".join(parts) # leaf → ... → root
613
+
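+ # --- Editor's usage sketch (not in the original release) ---
+ # With name = {1: "root", 2: "places", 3: "lille"} and
+ # parent = {1: None, 2: 1, 3: 2}:
+ # _rev_path_for(3, name, parent) -> "lille/places/root"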
614
+
615
+ class BinCache:
616
+ """
617
+ Incremental cache for bins/links with a simple public API:
618
+
619
+ - name_to_binpath(): Dict[str, str] # { leaf_lower: "Leaf/Parent/.../Root" }
620
+
621
+ Update methods you call from your existing model helpers:
622
+
623
+ - on_create(bid, name, parent_id)
624
+ - on_rename(bid, new_name)
625
+ - on_link(bid, parent_id) # (re)parent; also used by move
626
+ - on_unlink(bid) # set parent to None
627
+ - on_delete(bid) # delete a bin and its subtree
628
+ """
629
+
630
+ def __init__(self, conn: sqlite3.Connection):
631
+ self.conn = conn
632
+ self.name: Dict[int, str] = {}
633
+ self.parent: Dict[int, Optional[int]] = {}
634
+ self.children: Dict[Optional[int], Set[int]] = defaultdict(set)
635
+ self.rev_path: Dict[int, str] = {}
636
+ self._name_to_binpath: Dict[str, str] = {}
637
+ self._load_all()
638
+
639
+ # ---------- initial build ----------
640
+
641
+ def _load_all(self) -> None:
642
+ rows = self.conn.execute("""
643
+ SELECT b.id, b.name, bl.container_id
644
+ FROM Bins b
645
+ LEFT JOIN BinLinks bl ON bl.bin_id = b.id
646
+ """).fetchall()
647
+
648
+ self.name.clear()
649
+ self.parent.clear()
650
+ self.children.clear()
651
+ for bid, nm, par in rows:
652
+ self.name[bid] = nm
653
+ self.parent[bid] = par
654
+ self.children[par].add(bid)
655
+
656
+ # compute reversed (leaf→root) paths
657
+ self.rev_path = {
658
+ bid: _rev_path_for(bid, self.name, self.parent) for bid in self.name
659
+ }
660
+ self._rebuild_name_dict()
661
+ log_msg(f"{self.name_to_binpath() = }")
662
+
663
+ def _rebuild_name_dict(self) -> None:
664
+ self._name_to_binpath = {
665
+ nm.lower(): self.rev_path[bid] for bid, nm in self.name.items()
666
+ }
667
+
668
+ # ---------- subtree utilities ----------
669
+
670
+ def _iter_subtree(self, root_id: int) -> Iterable[int]:
671
+ q = deque([root_id])
672
+ while q:
673
+ x = q.popleft()
674
+ yield x
675
+ for c in self.children.get(x, ()):
676
+ q.append(c)
677
+
678
+ def _refresh_paths_for_subtree(self, root_id: int) -> None:
679
+ # recompute rev_path for root and descendants; update name_to_binpath values
680
+ for bid in self._iter_subtree(root_id):
681
+ self.rev_path[bid] = _rev_path_for(bid, self.name, self.parent)
682
+ for bid in self._iter_subtree(root_id):
683
+ self._name_to_binpath[self.name[bid].lower()] = self.rev_path[bid]
684
+
685
+ # ---------- mutations you call ----------
686
+
687
+ def on_create(self, bid: int, nm: str, parent_id: Optional[int]) -> None:
688
+ self.name[bid] = nm
689
+ self.parent[bid] = parent_id
690
+ self.children[parent_id].add(bid)
691
+ self.rev_path[bid] = _rev_path_for(bid, self.name, self.parent)
692
+ self._name_to_binpath[nm.lower()] = self.rev_path[bid]
693
+
694
+ def on_rename(self, bid: int, new_name: str) -> None:
695
+ old = self.name[bid]
696
+ if old.lower() != new_name.lower():
697
+ self._name_to_binpath.pop(old.lower(), None)
698
+ self.name[bid] = new_name
699
+ self._refresh_paths_for_subtree(bid)
700
+
701
+ def on_link(self, bid: int, new_parent_id: Optional[int]) -> None:
702
+ old_parent = self.parent.get(bid)
703
+ if old_parent == new_parent_id:
704
+ # nothing changed
705
+ return
706
+ if old_parent in self.children:
707
+ self.children[old_parent].discard(bid)
708
+ self.children[new_parent_id].add(bid)
709
+ self.parent[bid] = new_parent_id
710
+ self._refresh_paths_for_subtree(bid)
711
+
712
+ def on_unlink(self, bid: int) -> None:
713
+ old_parent = self.parent.get(bid)
714
+ if old_parent in self.children:
715
+ self.children[old_parent].discard(bid)
716
+ self.parent[bid] = None
717
+ self._refresh_paths_for_subtree(bid)
718
+
719
+ def on_delete(self, bid: int) -> None:
720
+ # remove whole subtree
721
+ to_rm = list(self._iter_subtree(bid))
722
+ par = self.parent.get(bid)
723
+ if par in self.children:
724
+ self.children[par].discard(bid)
725
+ for x in to_rm:
726
+ self._name_to_binpath.pop(self.name[x].lower(), None)
727
+ # detach from parent/children maps
728
+ p = self.parent.get(x)
729
+ if p in self.children:
730
+ self.children[p].discard(x)
731
+ self.children.pop(x, None)
732
+ self.parent.pop(x, None)
733
+ self.rev_path.pop(x, None)
734
+ self.name.pop(x, None)
735
+
736
+ # ---------- query ----------
737
+
738
+ def name_to_binpath(self) -> Dict[str, str]:
739
+ return self._name_to_binpath
740
+
741
+
742
+ class UrgencyComputer:
743
+ def __init__(self, env: TklrEnvironment):
744
+ self.env = env
745
+ self.urgency = env.config.urgency
746
+
747
+ self.MIN_URGENCY = self.urgency.colors.min_urgency
748
+ self.MIN_HEX_COLOR = self.urgency.colors.min_hex_color
749
+ self.MAX_HEX_COLOR = self.urgency.colors.max_hex_color
750
+ self.STEPS = self.urgency.colors.steps
751
+ self.BUCKETS = self.get_urgency_color_buckets()
752
+
753
+ def hex_to_rgb(self, hex_color: str) -> Tuple[int, int, int]:
754
+ hex_color = hex_color.lstrip("#")
755
+ return tuple(int(hex_color[i : i + 2], 16) for i in (0, 2, 4))
756
+
757
+ def rgb_to_hex(self, rgb: Tuple[int, int, int]) -> str:
758
+ return "#{:02x}{:02x}{:02x}".format(*rgb)
759
+
760
+ def get_urgency_color_buckets(self) -> List[str]:
761
+ neg_rgb = self.hex_to_rgb(self.MIN_HEX_COLOR)
762
+ max_rgb = self.hex_to_rgb(self.MAX_HEX_COLOR)
763
+
764
+ buckets = []
765
+ for i in range(self.STEPS):
766
+ t = i / (self.STEPS - 1)
767
+ rgb = tuple(
768
+ round(neg + t * (maxc - neg)) for neg, maxc in zip(neg_rgb, max_rgb)
769
+ )
770
+ buckets.append(self.rgb_to_hex(rgb))
771
+ return buckets
772
+
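+ # --- Editor's usage sketch (not in the original release) ---
+ # The buckets interpolate linearly between the endpoint colors: with
+ # MIN_HEX_COLOR "#000000", MAX_HEX_COLOR "#ffffff" and STEPS 3 the
+ # result is ["#000000", "#808080", "#ffffff"].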
773
+ def urgency_to_bucket_color(self, urgency: float) -> str:
774
+ if urgency <= self.MIN_URGENCY:
775
+ return self.MIN_HEX_COLOR
776
+ if urgency >= 1.0:
777
+ return self.MAX_HEX_COLOR
778
+
779
+ i = min(
780
+ int((urgency - self.MIN_URGENCY) * len(self.BUCKETS)), len(self.BUCKETS) - 1
781
+ )
782
+ return self.BUCKETS[i]
783
+
784
+ def compute_partitioned_urgency(self, weights: dict[str, float]) -> float:
785
+ """
786
+ Compute urgency from signed weights:
787
+ - Positive weights push urgency up
788
+ - Negative weights pull urgency down
789
+ - Equal weights → urgency = 0
790
+
791
+ Returns:
792
+ urgency ∈ [-1.0, 1.0]
793
+ """
794
+ Wp = 0.0 + sum(w for w in weights.values() if w > 0)
795
+
796
+ Wn = 0.0 + sum(abs(w) for w in weights.values() if w < 0)
797
+
798
+ urgency = (Wp - Wn) / (2 + Wn + Wp)
799
+ # log_msg(f"{Wp = }, {Wn = }, {Wp - Wn = }, {Wp + Wn = }, {urgency = }")
800
+ return urgency
801
+
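+ # --- Editor's worked example (not in the original release) ---
+ # With weights {"due": 0.6, "age": 0.4, "recent": -0.5}: Wp = 1.0,
+ # Wn = 0.5, so urgency = (1.0 - 0.5) / (2 + 0.5 + 1.0) = 0.5 / 3.5 ≈ 0.143.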
802
+ def urgency_due(self, due_seconds: int, now_seconds: int) -> float:
803
+ """
804
+ This function calculates the urgency contribution for a task based
805
+ on its due datetime relative to the current datetime and returns
806
+ a float value between 0.0 when (now <= due - interval) and max when
807
+ (now >= due).
808
+ """
809
+ due_max = self.urgency.due.max
810
+ interval = self.urgency.due.interval
811
+ if due_seconds and due_max and interval:
812
+ interval_seconds = td_str_to_seconds(interval)
813
+ # log_msg(f"{due_max = }, {interval = }, {interval_seconds = }")
814
+ return max(
815
+ 0.0,
816
+ min(
817
+ due_max,
818
+ due_max * (1.0 - (due_seconds - now_seconds) / interval_seconds),
819
+ ),
820
+ )
821
+ return 0.0
822
+
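+ # --- Editor's note (not in the original release) ---
+ # With due.max = 1.0 and due.interval = "1w", urgency_due (as corrected
+ # above to match its docstring) is 0.0 one week before the due time,
+ # rises linearly to 1.0 at the due time, and is clamped outside that range.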
823
+ def urgency_pastdue(self, due_seconds: int, now_seconds: int) -> float:
824
+ """
825
+ This function calculates the urgency contribution for a task based
826
+ on its due datetime relative to the current datetime and returns
827
+ a float value between 0.0 when (now <= due) and max when
828
+ (now >= due + interval).
829
+ """
830
+
831
+ pastdue_max = self.urgency.pastdue.max
832
+ interval = self.urgency.pastdue.interval
833
+ if due_seconds and pastdue_max and interval:
834
+ interval_seconds = td_str_to_seconds(interval)
835
+ return max(
836
+ 0.0,
837
+ min(
838
+ pastdue_max,
839
+ pastdue_max * (now_seconds - due_seconds) / interval_seconds,
840
+ ),
841
+ )
842
+ return 0.0
843
+
844
+ def urgency_recent(self, modified_seconds: int, now_seconds: int) -> float:
845
+ """
846
+ Calculate the 'recent' urgency contribution: recent_max when the task
847
+ was just modified, declining linearly to 0.0 as the time since the
848
+ last modification approaches recent.interval.
852
+ """
853
+ recent_contribution = 0.0
854
+ recent_interval = self.urgency.recent.interval
855
+ recent_max = self.urgency.recent.max
856
+ # log_msg(f"{recent_interval = }")
857
+ if recent_max and recent_interval:
858
+ recent_interval_seconds = td_str_to_seconds(recent_interval)
859
+ recent_contribution = max(
860
+ 0.0,
861
+ min(
862
+ recent_max,
863
+ recent_max
864
+ * (1 - (now_seconds - modified_seconds) / recent_interval_seconds),
865
+ ),
866
+ )
867
+ # log_msg(f"computed {recent_contribution = }")
868
+ return recent_contribution
869
+
870
+ def urgency_age(self, modified_seconds: int, now_seconds: int) -> float:
871
+ """
872
+ Calculate the 'age' urgency contribution: 0.0 when the task was just
873
+ modified, growing linearly to age_max as the time since the last
874
+ modification approaches age.interval.
878
+ """
879
+ age_contribution = 0
880
+ age_interval = self.urgency.age.interval
881
+ age_max = self.urgency.age.max
882
+ # log_msg(f"{age_interval = }")
883
+ if age_max and age_interval:
884
+ age_interval_seconds = td_str_to_seconds(age_interval)
885
+ age_contribution = max(
886
+ 0.0,
887
+ min(
888
+ age_max,
889
+ age_max * (now_seconds - modified_seconds) / age_interval_seconds,
890
+ ),
891
+ )
892
+ # log_msg(f"computed {age_contribution = }")
893
+ return age_contribution
894
+
895
+ def urgency_priority(self, priority_level: int) -> float:
896
+ priority = self.urgency.priority.root.get(str(priority_level), 0.0)
897
+ # log_msg(f"computed {priority = }")
898
+ return priority
899
+
900
+ def urgency_extent(self, extent_seconds: int) -> float:
901
+ extent_max = 1.0
902
+ extent_interval = td_str_to_seconds(self.urgency.extent.interval)
903
+ extent = max(
904
+ 0.0, min(extent_max, extent_max * extent_seconds / extent_interval)
905
+ )
906
+ # log_msg(f"{extent_seconds = }, {extent = }")
907
+ return extent
908
+
909
+ def urgency_blocking(self, num_blocking: int) -> float:
910
+ blocking = 0.0
911
+ if num_blocking:
912
+ blocking_max = self.urgency.blocking.max
913
+ blocking_count = self.urgency.blocking.count
914
+ if blocking_max and blocking_count:
915
+ blocking = max(
916
+ 0.0, min(blocking_max, blocking_max * num_blocking / blocking_count)
917
+ )
918
+ # log_msg(f"computed {blocking = }")
919
+ return blocking
920
+
921
+ def urgency_tags(self, num_tags: int) -> float:
922
+ tags = 0.0
923
+ tags_max = self.urgency.tags.max
924
+ tags_count = self.urgency.tags.count
925
+ if tags_max and tags_count:
926
+ tags = max(0.0, min(tags_max, tags_max * num_tags / tags_count))
927
+ # log_msg(f"computed {tags = }")
928
+ return tags
929
+
930
+ def urgency_description(self, has_description: bool) -> float:
931
+ description_max = self.urgency.description.max
932
+ description = 0.0
933
+ if has_description and description_max:
934
+ description = description_max
935
+ # log_msg(f"computed {description = }")
936
+ return description
937
+
938
+ def urgency_project(self, has_project: bool) -> float:
939
+ project_max = self.urgency.project.max
940
+ project = 0.0
941
+ if has_project and project_max:
942
+ project = project_max
943
+ # log_msg(f"computed {project = }")
944
+ return project
945
+
946
+ def from_args_and_weights(self, **kwargs):
947
+ if bool(kwargs.get("pinned", False)):
948
+ return 1.0, self.urgency_to_bucket_color(1.0), {}
949
+ weights = {
950
+ "due": self.urgency_due(kwargs.get("due"), kwargs["now"]),
951
+ "pastdue": self.urgency_pastdue(kwargs.get("due"), kwargs["now"]),
952
+ "age": self.urgency_age(kwargs["modified"], kwargs["now"]),
953
+ "recent": self.urgency_recent(kwargs["modified"], kwargs["now"]),
954
+ "priority": self.urgency_priority(kwargs.get("priority_level")),
955
+ "extent": self.urgency_extent(kwargs["extent"]),
956
+ "blocking": self.urgency_blocking(kwargs.get("blocking", 0.0)),
957
+ "tags": self.urgency_tags(kwargs.get("tags", 0)),
958
+ "description": self.urgency_description(kwargs.get("description", False)),
959
+ "project": 1.0 if bool(kwargs.get("jobs", False)) else 0.0,
960
+ }
961
+ if bool(kwargs.get("pinned", False)):
962
+ urgency = 1.0
963
+ # log_msg("pinned, ignoring weights, returning urgency 1.0")
964
+ else:
965
+ urgency = self.compute_partitioned_urgency(weights)
966
+ # log_msg(f"{weights = }\n returning {urgency = }")
967
+ return urgency, self.urgency_to_bucket_color(urgency), weights
968
+
969
+
970
+ class DatabaseManager:
971
+ def __init__(self, db_path: str, env: TklrEnvironment, reset: bool = False):
972
+ self.db_path = db_path
973
+ self.env = env
974
+ self.AMPM = env.config.ui.ampm
975
+ self.ALERTS = env.config.alerts
976
+ self.urgency = self.env.config.urgency
977
+
978
+ if reset and os.path.exists(self.db_path):
979
+ os.remove(self.db_path)
980
+
981
+ self.conn = sqlite3.connect(self.db_path)
982
+ self.cursor = self.conn.cursor()
983
+ self.conn.create_function("REGEXP", 2, regexp)
984
+ self.conn.create_function("REGEXP", 2, regexp)
985
+ self.setup_database()
986
+ self.compute_urgency = UrgencyComputer(env)
987
+ self.binproc = BinPathProcessor(
988
+ self,
989
+ BinPathConfig(
990
+ allow_reparent=True, # or False if you want conservative behavior
991
+ standard_roots=BIN_ROOTS, # <— same set, all lowercase
992
+ ),
993
+ )
994
+ self.bin_cache = BinCache(self.conn)
995
+ log_msg(f"{self.bin_cache.name_to_binpath() = }")
996
+ self.populate_dependent_tables()
997
+
998
+ def format_datetime(self, fmt_dt: str) -> str:
999
+ return format_datetime(fmt_dt, self.AMPM)
1000
+
1001
+ def datetime_in_words(self, fmt_dt: str) -> str:
1002
+ return datetime_in_words(fmt_dt, self.AMPM)
1003
+
1004
+ def setup_database(self):
1005
+ """
1006
+ Create (if missing) all tables and indexes for tklr.
1007
+
1008
+ Simplified tags model:
1009
+ - Tags live ONLY in Records.tags (JSON text).
1010
+ - No separate Tags / RecordTags tables.
1011
+
1012
+ Other notes:
1013
+ - Timestamps are stored as TEXT in UTC (e.g., 'YYYYMMDDTHHMMZ') unless otherwise noted.
1014
+ - DateTimes.start/end are local-naive TEXT ('YYYYMMDD' or 'YYYYMMDDTHHMM').
1015
+ """
1016
+ # FK safety
1017
+ self.cursor.execute("PRAGMA foreign_keys = ON")
1018
+
1019
+ # --- Optional cleanup of old tag tables (safe if they don't exist) ---
1020
+ self.cursor.execute("DROP TABLE IF EXISTS RecordTags;")
1021
+ self.cursor.execute("DROP TABLE IF EXISTS Tags;")
1022
+
1023
+ # ---------------- Records ----------------
1024
+ self.cursor.execute("""
1025
+ CREATE TABLE IF NOT EXISTS Records (
1026
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1027
+ itemtype TEXT, -- '*','~','^','%','?','+','x'
1028
+ subject TEXT,
1029
+ description TEXT,
1030
+ rruleset TEXT, -- serialized ruleset
1031
+ timezone TEXT, -- TZ name or 'float'
1032
+ extent TEXT, -- optional JSON or text
1033
+ alerts TEXT, -- JSON
1034
+ notice TEXT,
1035
+ context TEXT,
1036
+ jobs TEXT, -- JSON
1037
+ tags TEXT, -- JSON list[str], normalized in code
1038
+ priority INTEGER CHECK (priority IN (1,2,3,4,5)),
1039
+ tokens TEXT, -- JSON text (parsed tokens)
1040
+ processed INTEGER, -- 0/1
1041
+ created TEXT, -- 'YYYYMMDDTHHMMZ' UTC
1042
+ modified TEXT -- 'YYYYMMDDTHHMMZ' UTC
1043
+ );
1044
+ """)
1045
+
1046
+ # ---------------- Pinned ----------------
1047
+ self.cursor.execute("""
1048
+ CREATE TABLE IF NOT EXISTS Pinned (
1049
+ record_id INTEGER PRIMARY KEY,
1050
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1051
+ );
1052
+ """)
1053
+ self.cursor.execute("""
1054
+ CREATE INDEX IF NOT EXISTS idx_pinned_record
1055
+ ON Pinned(record_id);
1056
+ """)
1057
+
1058
+ # ---------------- Urgency (NO pinned column) ----------------
1059
+ self.cursor.execute("""
1060
+ CREATE TABLE IF NOT EXISTS Urgency (
1061
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1062
+ record_id INTEGER NOT NULL, -- References Records.id
1063
+ job_id INTEGER, -- NULL if not part of a project
1064
+ subject TEXT NOT NULL,
1065
+ urgency REAL NOT NULL,
1066
+ color TEXT, -- optional precomputed color
1067
+ status TEXT NOT NULL, -- "next","waiting","scheduled",…
1068
+ weights TEXT, -- JSON of component weights (optional)
1069
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1070
+ );
1071
+ """)
1072
+ self.cursor.execute("""
1073
+ CREATE INDEX IF NOT EXISTS idx_urgency_record
1074
+ ON Urgency(record_id);
1075
+ """)
1076
+ self.cursor.execute("""
1077
+ CREATE INDEX IF NOT EXISTS idx_urgency_urgency
1078
+ ON Urgency(urgency DESC);
1079
+ """)
1080
+
1081
+ # ---------------- Completions ----------------
1082
+ self.cursor.execute("""
1083
+ CREATE TABLE IF NOT EXISTS Completions (
1084
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1085
+ record_id INTEGER NOT NULL,
1086
+ completed TEXT NOT NULL, -- UTC-aware: "YYYYMMDDTHHMMZ"
1087
+ due TEXT, -- optional UTC-aware: "YYYYMMDDTHHMMZ"
1088
+ FOREIGN KEY(record_id) REFERENCES Records(id) ON DELETE CASCADE
1089
+ );
1090
+ """)
1091
+ self.cursor.execute("""
1092
+ CREATE INDEX IF NOT EXISTS idx_completions_record_id
1093
+ ON Completions(record_id);
1094
+ """)
1095
+ self.cursor.execute("""
1096
+ CREATE INDEX IF NOT EXISTS idx_completions_completed
1097
+ ON Completions(completed);
1098
+ """)
1099
+ self.cursor.execute("""
1100
+ CREATE INDEX IF NOT EXISTS idx_completions_record_due
1101
+ ON Completions(record_id, due);
1102
+ """)
1103
+
1104
+ # ---------------- DateTimes ----------------
1105
+ self.cursor.execute("""
1106
+ CREATE TABLE IF NOT EXISTS DateTimes (
1107
+ record_id INTEGER NOT NULL,
1108
+ job_id INTEGER, -- nullable; link to specific job if any
1109
+ start_datetime TEXT NOT NULL, -- 'YYYYMMDD' or 'YYYYMMDDTHHMM' (local-naive)
1110
+ end_datetime TEXT, -- NULL if instantaneous; same formats as start
1111
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1112
+ );
1113
+ """)
1114
+ # enforce uniqueness across (record_id, job_id, start, end)
1115
+ self.cursor.execute("""
1116
+ CREATE UNIQUE INDEX IF NOT EXISTS idx_datetimes_unique
1117
+ ON DateTimes(
1118
+ record_id,
1119
+ COALESCE(job_id, -1),
1120
+ start_datetime,
1121
+ COALESCE(end_datetime, '')
1122
+ );
1123
+ """)
1124
+ # range query helper
1125
+ self.cursor.execute("""
1126
+ CREATE INDEX IF NOT EXISTS idx_datetimes_start
1127
+ ON DateTimes(start_datetime);
1128
+ """)
1129
+
1130
+ # ---------------- GeneratedWeeks (cache of week ranges) ----------------
1131
+ self.cursor.execute("""
1132
+ CREATE TABLE IF NOT EXISTS GeneratedWeeks (
1133
+ start_year INTEGER,
1134
+ start_week INTEGER,
1135
+ end_year INTEGER,
1136
+ end_week INTEGER
1137
+ );
1138
+ """)
1139
+
1140
+ # ---------------- Alerts ----------------
1141
+ self.cursor.execute("""
1142
+ CREATE TABLE IF NOT EXISTS Alerts (
1143
+ alert_id INTEGER PRIMARY KEY AUTOINCREMENT,
1144
+ record_id INTEGER NOT NULL,
1145
+ record_name TEXT NOT NULL,
1146
+ trigger_datetime TEXT NOT NULL, -- 'YYYYMMDDTHHMM' (local-naive)
1148
+ start_datetime TEXT NOT NULL, -- 'YYYYMMDD' or 'YYYYMMDDTHHMM' (local-naive)
1148
+ alert_name TEXT NOT NULL,
1149
+ alert_command TEXT NOT NULL,
1150
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1151
+ );
1152
+ """)
1153
+ # Prevent duplicates: one alert per (record, start, name, trigger)
1154
+ self.cursor.execute("""
1155
+ CREATE UNIQUE INDEX IF NOT EXISTS idx_alerts_unique
1156
+ ON Alerts(record_id, start_datetime, alert_name, COALESCE(trigger_datetime,''));
1157
+ """)
1158
+ # Helpful for “what’s due now”
1159
+ self.cursor.execute("""
1160
+ CREATE INDEX IF NOT EXISTS idx_alerts_trigger
1161
+ ON Alerts(trigger_datetime);
1162
+ """)
1163
+
1164
+ # ---------------- Notice (days remaining notices) ----------------
1165
+ self.cursor.execute("""
1166
+ CREATE TABLE IF NOT EXISTS Notice (
1167
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1168
+ record_id INTEGER NOT NULL,
1169
+ days_remaining INTEGER NOT NULL,
1170
+ FOREIGN KEY (record_id) REFERENCES Records(id) ON DELETE CASCADE
1171
+ );
1172
+ """)
1173
+
1174
+ # ---------------- Bins & Links ----------------
1175
+ self.cursor.execute("PRAGMA foreign_keys = ON;")
1176
+
1177
+ self.cursor.execute("""
1178
+ CREATE TABLE IF NOT EXISTS Bins (
1179
+ id INTEGER PRIMARY KEY,
1180
+ name TEXT NOT NULL CHECK (length(trim(name)) > 0)
1181
+ );
1182
+ """)
1183
+
1184
+ self.cursor.execute("""
1185
+ CREATE UNIQUE INDEX IF NOT EXISTS uq_bins_name_nocase
1186
+ ON Bins(name COLLATE NOCASE);
1187
+ """)
1188
+
1189
+ self.cursor.execute("""
1190
+ CREATE TABLE IF NOT EXISTS BinLinks (
1191
+ bin_id INTEGER NOT NULL,
1192
+ container_id INTEGER,
1193
+ FOREIGN KEY (bin_id) REFERENCES Bins(id) ON DELETE CASCADE,
1194
+ FOREIGN KEY (container_id) REFERENCES Bins(id) ON DELETE SET NULL,
1195
+ UNIQUE(bin_id)
1196
+ );
1197
+ """)
1198
+
1199
+ self.cursor.execute("""
1200
+ CREATE INDEX IF NOT EXISTS idx_binlinks_container
1201
+ ON BinLinks(container_id);
1202
+ """)
1203
+
1204
+ self.cursor.execute("""
1205
+ CREATE TABLE IF NOT EXISTS ReminderLinks (
1206
+ reminder_id INTEGER NOT NULL,
1207
+ bin_id INTEGER NOT NULL,
1208
+ FOREIGN KEY (reminder_id) REFERENCES Records(id) ON DELETE CASCADE,
1209
+ FOREIGN KEY (bin_id) REFERENCES Bins(id) ON DELETE CASCADE,
1210
+ UNIQUE(reminder_id, bin_id)
1211
+ );
1212
+ """)
1213
+
1214
+ self.cursor.execute("""
1215
+ CREATE INDEX IF NOT EXISTS idx_reminderlinks_bin
1216
+ ON ReminderLinks(bin_id);
1217
+ """)
1218
+
1219
+ self.cursor.execute("""
1220
+ CREATE INDEX IF NOT EXISTS idx_reminderlinks_reminder
1221
+ ON ReminderLinks(reminder_id);
1222
+ """)
1223
+
1224
+ # ---------------- Busy tables (unchanged) ----------------
1225
+ self.setup_busy_tables()
1226
+ # Seed default top-level bins (idempotent)
1227
+
1228
+ self.ensure_root_children(sorted(BIN_ROOTS))
1229
+
1230
+ self.conn.commit()
1231
+
1232
+ def setup_busy_tables(self):
1233
+ """
1234
+ Create / reset busy cache tables and triggers.
1235
+
1236
+ Design:
1237
+ - BusyWeeksFromDateTimes: per (record_id, year_week) cache of fine-grained busybits (BLOB, 679 slots: 7 × (1 all-day + 96 fifteen-minute)).
1238
+ FK references Records(id) — not DateTimes — since we aggregate per record/week.
1239
+ - BusyWeeks: per year_week aggregated ternary bits (TEXT, 35 chars).
1240
+ - BusyUpdateQueue: queue of record_ids to recompute.
1241
+
1242
+ Triggers enqueue record_id on any insert/update/delete in DateTimes.
1243
+ """
1244
+
1245
+ # Make schema idempotent and remove any old incompatible objects.
1246
+ self.cursor.execute("PRAGMA foreign_keys=ON")
1247
+
1248
+ # Drop old triggers (names must match what you used previously)
1249
+ self.cursor.execute("DROP TRIGGER IF EXISTS trig_busy_insert")
1250
+ self.cursor.execute("DROP TRIGGER IF EXISTS trig_busy_update")
1251
+ self.cursor.execute("DROP TRIGGER IF EXISTS trig_busy_delete")
1252
+ self.cursor.execute("DROP TRIGGER IF EXISTS trig_busy_records_delete")
1253
+
1254
+ # Drop old tables if they exist (to get rid of the bad FK)
1255
+ self.cursor.execute("DROP TABLE IF EXISTS BusyWeeksFromDateTimes")
1256
+ self.cursor.execute("DROP TABLE IF EXISTS BusyWeeks")
1257
+ self.cursor.execute("DROP TABLE IF EXISTS BusyUpdateQueue")
1258
+
1259
+ # Recreate BusyWeeks (aggregate per week)
1260
+ self.cursor.execute("""
1261
+ CREATE TABLE IF NOT EXISTS BusyWeeks (
1262
+ year_week TEXT PRIMARY KEY,
1263
+ busybits TEXT NOT NULL -- 35-char string of '0','1','2'
1264
+ );
1265
+ """)
1266
+
1267
+ # Recreate BusyWeeksFromDateTimes (per record/week)
1268
+ # PRIMARY KEY enforces one row per (record, week)
1269
+ # FK to Records(id) so deletes of records cascade cleanly
1270
+ self.cursor.execute("""
1271
+ CREATE TABLE IF NOT EXISTS BusyWeeksFromDateTimes (
1272
+ record_id INTEGER NOT NULL,
1273
+ year_week TEXT NOT NULL,
1274
+ busybits BLOB NOT NULL, -- 679 slots (7 × (1 all-day + 96 fifteen-minute blocks))
1275
+ PRIMARY KEY (record_id, year_week),
1276
+ FOREIGN KEY(record_id) REFERENCES Records(id) ON DELETE CASCADE
1277
+ );
1278
+ """)
1279
+
1280
+ # Update queue for incremental recomputation
1281
+ self.cursor.execute("""
1282
+ CREATE TABLE IF NOT EXISTS BusyUpdateQueue (
1283
+ record_id INTEGER PRIMARY KEY
1284
+ );
1285
+ """)
1286
+
1287
+ # Triggers on DateTimes to enqueue affected record
1288
+ self.cursor.execute("""
1289
+ CREATE TRIGGER IF NOT EXISTS trig_busy_insert
1290
+ AFTER INSERT ON DateTimes
1291
+ BEGIN
1292
+ INSERT OR IGNORE INTO BusyUpdateQueue(record_id)
1293
+ VALUES (NEW.record_id);
1294
+ END;
1295
+ """)
1296
+
1297
+ self.cursor.execute("""
1298
+ CREATE TRIGGER IF NOT EXISTS trig_busy_update
1299
+ AFTER UPDATE ON DateTimes
1300
+ BEGIN
1301
+ INSERT OR IGNORE INTO BusyUpdateQueue(record_id)
1302
+ VALUES (NEW.record_id);
1303
+ END;
1304
+ """)
1305
+
1306
+ self.cursor.execute("""
1307
+ CREATE TRIGGER IF NOT EXISTS trig_busy_delete
1308
+ AFTER DELETE ON DateTimes
1309
+ BEGIN
1310
+ INSERT OR IGNORE INTO BusyUpdateQueue(record_id)
1311
+ VALUES (OLD.record_id);
1312
+ END;
1313
+ """)
1314
+
1315
+ # If a record is deleted, clean any cache rows (cascades remove BusyWeeksFromDateTimes).
1316
+ # Also clear from the queue if present.
1317
+ self.cursor.execute("""
1318
+ CREATE TRIGGER IF NOT EXISTS trig_busy_records_delete
1319
+ AFTER DELETE ON Records
1320
+ BEGIN
1321
+ DELETE FROM BusyUpdateQueue WHERE record_id = OLD.id;
1322
+ -- BusyWeeksFromDateTimes rows are removed by FK ON DELETE CASCADE.
1323
+ END;
1324
+ """)
1325
+
1326
+ self.conn.commit()
1327
+
1328
+ def backup_to(self, dest_db: Path) -> Path:
1329
+ """
1330
+ Create a consistent SQLite snapshot of the current database at dest_db.
1331
+ Uses the live connection (self.conn) to copy committed state.
1332
+ Returns the final backup path.
1333
+ """
1334
+ dest_db = Path(dest_db)
1335
+ tmp = dest_db.with_suffix(dest_db.suffix + ".tmp")
1336
+ dest_db.parent.mkdir(parents=True, exist_ok=True)
1337
+
1338
+ # Ensure we copy a committed state
1339
+ self.conn.commit()
1340
+
1341
+ # Copy using SQLite's backup API
1342
+ with sqlite3.connect(str(tmp)) as dst:
1343
+ self.conn.backup(dst) # full backup
1344
+ # Tidy destination file only
1345
+ dst.execute("PRAGMA wal_checkpoint(TRUNCATE);")
1346
+ dst.execute("VACUUM;")
1347
+ dst.commit()
1348
+
1349
+ # Preserve timestamps/permissions from the source file if available
1350
+ try:
1351
+ # Adjust attribute name if your manager stores the DB path differently
1352
+ src_path = Path(
1353
+ getattr(
1354
+ self,
1355
+ "db_path",
1356
+ self.conn.execute("PRAGMA database_list").fetchone()[2],
1357
+ )
1358
+ )
1359
+ shutil.copystat(src_path, tmp)
1360
+ except Exception:
1361
+ pass
1362
+
1363
+ tmp.replace(dest_db)
1364
+ return dest_db
1365
+
1366
+ def populate_dependent_tables(self):
1367
+ """Populate all tables derived from current Records (Tags, DateTimes, Alerts, notice)."""
1368
+ log_msg("populate dependent tables")
1369
+ yr, wk = datetime.now().isocalendar()[:2]
1370
+ log_msg(f"Generating weeks for 12 weeks starting from {yr} week number {wk}")
1371
+ self.extend_datetimes_for_weeks(yr, wk, 12)
1372
+ # self.populate_tags()
1373
+ log_msg("calling populate_alerts")
1374
+ self.populate_alerts()
1375
+ log_msg("calling populate_notice")
1376
+ self.populate_notice()
1377
+ log_msg("calling populate_busy_from_datetimes")
1378
+ self.populate_busy_from_datetimes() # 👈 new step: source layer
1379
+ self.rebuild_busyweeks_from_source() # 👈 add this line
1380
+ self.populate_all_urgency()
1381
+ self.ensure_system_bins()
1382
+
1383
+ def _normalize_tags(self, tags) -> list[str]:
1384
+ """Return a sorted, de-duplicated, lowercased list of tag strings."""
1385
+ if tags is None:
1386
+ return []
1387
+ if isinstance(tags, str):
1388
+ parts = [p for p in re.split(r"[,\s]+", tags) if p]
1389
+ else:
1390
+ parts = list(tags)
1391
+ return sorted({p.strip().lower() for p in parts if p and p.strip()})
1392
+
1393
+ def add_item(self, item: Item) -> int:
1394
+ try:
1395
+ timestamp = utc_now_string()
1396
+ self.cursor.execute(
1397
+ """
1398
+ INSERT INTO Records (
1399
+ itemtype, subject, description, rruleset, timezone,
1400
+ extent, alerts, notice, context, jobs, priority,
1401
+ tokens, processed, created, modified
1402
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
1403
+ """,
1404
+ (
1405
+ item.itemtype,
1406
+ item.subject,
1407
+ item.description,
1408
+ item.rruleset,
1409
+ item.tz_str,
1410
+ item.extent,
1411
+ json.dumps(item.alerts),
1412
+ item.notice,
1413
+ item.context,
1414
+ json.dumps(item.jobs),
1415
+ item.priority,
1416
+ json.dumps(item.tokens),
1417
+ 0,
1418
+ timestamp,
1419
+ timestamp,
1420
+ ),
1421
+ )
1422
+ self.conn.commit()
1423
+
1424
+ record_id = self.cursor.lastrowid
1425
+ self.relink_bins_and_tags_for_record(record_id, item) # ← add this
1426
+ return record_id
1427
+
1428
+ except Exception as e:
1429
+ print(f"Error adding {item}: {e}")
1430
+ raise
1431
+
1432
+ def update_item(self, record_id: int, item: Item):
1433
+ try:
1434
+ fields, values = [], []
1435
+
1436
+ def set_field(name, value):
1437
+ if value is not None:
1438
+ fields.append(f"{name} = ?")
1439
+ values.append(value)
1440
+
1441
+ set_field("itemtype", item.itemtype)
1442
+ set_field("subject", item.subject)
1443
+ set_field("description", item.description)
1444
+ set_field("rruleset", item.rruleset)
1445
+ set_field("timezone", item.tz_str)
1446
+ set_field("extent", item.extent)
1447
+ set_field(
1448
+ "alerts", json.dumps(item.alerts) if item.alerts is not None else None
1449
+ )
1450
+ set_field("notice", item.notice)
1451
+ set_field("context", item.context)
1452
+ set_field("jobs", json.dumps(item.jobs) if item.jobs is not None else None)
1453
+ set_field("priority", item.priority)
1454
+ set_field(
1455
+ "tokens", json.dumps(item.tokens) if item.tokens is not None else None
1456
+ )
1457
+ set_field("processed", 0)
1458
+
1459
+ fields.append("modified = ?")
1460
+ values.append(utc_now_string())
1461
+ values.append(record_id)
1462
+
1463
+ sql = f"UPDATE Records SET {', '.join(fields)} WHERE id = ?"
1464
+
1465
+ self.cursor.execute(sql, values)
1466
+ self.conn.commit()
1467
+ self.relink_bins_and_tags_for_record(record_id, item) # ← add this
1468
+
1469
+ except Exception as e:
1470
+ print(f"Error updating record {record_id}: {e}")
1471
+ raise
1472
+
1473
+ def save_record(self, item: Item, record_id: int | None = None):
1474
+ """Insert or update a record and refresh associated tables."""
1475
+ timestamp = utc_now_string()
1476
+
1477
+ if record_id is None:
1478
+ # Insert new record
1479
+ self.cursor.execute(
1480
+ """
1481
+ INSERT INTO Records (
1482
+ itemtype, subject, description, rruleset, timezone,
1483
+ extent, alerts, notice, context, jobs,
1484
+ tokens, processed, created, modified
1485
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
1486
+ """,
1487
+ (
1488
+ item.itemtype,
1489
+ item.subject,
1490
+ item.description,
1491
+ item.rruleset,
1492
+ item.tz_str,
1493
+ item.extent,
1494
+ json.dumps(item.alerts),
1495
+ item.notice,
1496
+ item.context,
1497
+ json.dumps(item.jobs),
1498
+ # json.dumps(item.tags),
1499
+ json.dumps(item.tokens),
1500
+ 0,
1501
+ timestamp,
1502
+ timestamp,
1503
+ ),
1504
+ )
1505
+ record_id = self.cursor.lastrowid
1506
+ else:
1507
+ # Update existing record
1508
+ self.cursor.execute(
1509
+ """
1510
+ UPDATE Records
1511
+ SET itemtype = ?, subject = ?, description = ?, rruleset = ?, timezone = ?,
1512
+ extent = ?, alerts = ?, notice = ?, context = ?, jobs = ?,
1513
+ tokens = ?, modified = ?
1514
+ WHERE id = ?
1515
+ """,
1516
+ (
1517
+ item.itemtype,
1518
+ item.subject,
1519
+ item.description,
1520
+ item.rruleset,
1521
+ item.tz_str,
1522
+ item.extent,
1523
+ json.dumps(item.alerts),
1524
+ item.notice,
1525
+ item.context,
1526
+ json.dumps(item.jobs),
1527
+ # json.dumps(item.tags),
1528
+ json.dumps(item.tokens),
1529
+ timestamp,
1530
+ record_id,
1531
+ ),
1532
+ )
1533
+
1534
+ self.conn.commit()
1535
+
1536
+ # Refresh auxiliary tables
1537
+ self.generate_datetimes_for_record(record_id)
1538
+ self.populate_alerts_for_record(record_id)
1539
+ if item.notice:
1540
+ self.populate_notice_for_record(record_id)
1541
+ if item.itemtype in ["~", "^"]:
1542
+ self.populate_urgency_from_record(record_id)
1543
+
1544
+ def get_record_tags(self, record_id: int) -> list[str]:
1545
+ self.cursor.execute(
1546
+ "SELECT COALESCE(tags,'[]') FROM Records WHERE id=?", (record_id,)
1547
+ )
1548
+ row = self.cursor.fetchone()
1549
+ try:
1550
+ return self._normalize_tags(json.loads(row[0])) if row and row[0] else []
1551
+ except Exception:
1552
+ return []
1553
+
1554
+ def find_records_with_any_tags(self, tags: list[str]) -> list[tuple]:
1555
+ want = set(self._normalize_tags(tags))
1556
+ self.cursor.execute("SELECT id, subject, COALESCE(tags,'[]') FROM Records")
1557
+ out = []
1558
+ for rid, subj, tags_json in self.cursor.fetchall():
1559
+ try:
1560
+ have = (
1561
+ set(self._normalize_tags(json.loads(tags_json)))
1562
+ if tags_json
1563
+ else set()
1564
+ )
1565
+ except Exception:
1566
+ have = set()
1567
+ if want & have:
1568
+ out.append((rid, subj))
1569
+ return out
1570
+
1571
+ def add_completion(
1572
+ self,
1573
+ record_id: int,
1574
+ completion: tuple[datetime, datetime | None],
1575
+ ) -> None:
1576
+ """Store a completion record as UTC-aware compact strings."""
1577
+ if completion is None:
1578
+ return
1579
+
1580
+ completed_dt, due_dt = completion
1581
+ self.cursor.execute(
1582
+ """
1583
+ INSERT INTO Completions (record_id, completed, due)
1584
+ VALUES (?, ?, ?)
1585
+ """,
1586
+ (
1587
+ record_id,
1588
+ _fmt_utc(completed_dt),
1589
+ _fmt_utc(due_dt) if due_dt else None,
1590
+ ),
1591
+ )
1592
+ self.conn.commit()
1593
+
1594
+ def get_completions(self, record_id: int):
1595
+ """
1596
+ Return all completions for a given record, sorted newest first.
1597
+
1598
+ Returns:
1599
+ [(record_id, subject, description, itemtype, due_dt, completed_dt)]
1600
+ """
1601
+ self.cursor.execute(
1602
+ """
1603
+ SELECT
1604
+ r.id,
1605
+ r.subject,
1606
+ r.description,
1607
+ r.itemtype,
1608
+ c.due,
1609
+ c.completed
1610
+ FROM Completions c
1611
+ JOIN Records r ON c.record_id = r.id
1612
+ WHERE r.id = ?
1613
+ ORDER BY c.completed DESC
1614
+ """,
1615
+ (record_id,),
1616
+ )
1617
+ rows = self.cursor.fetchall()
1618
+ return [
1619
+ (
1620
+ rid,
1621
+ subj,
1622
+ desc,
1623
+ itype,
1624
+ parse_utc_z(due) if due else None,
1625
+ parse_utc_z(comp),
1626
+ )
1627
+ for (rid, subj, desc, itype, due, comp) in rows
1628
+ ]
1629
+
1630
+ def touch_record(self, record_id: int):
1631
+ """
1632
+ Update the 'modified' timestamp for the given record to the current UTC time.
1633
+ """
1634
+ now = utc_now_string()
1635
+ self.cursor.execute(
1636
+ """
1637
+ UPDATE Records SET modified = ? WHERE id = ?
1638
+ """,
1639
+ (now, record_id),
1640
+ )
1641
+ self.conn.commit()
1642
+
1643
+ def toggle_pinned(self, record_id: int) -> None:
1644
+ self.cursor.execute("SELECT 1 FROM Pinned WHERE record_id=?", (record_id,))
1645
+ if self.cursor.fetchone():
1646
+ self.cursor.execute("DELETE FROM Pinned WHERE record_id=?", (record_id,))
1647
+ else:
1648
+ self.cursor.execute(
1649
+ "INSERT INTO Pinned(record_id) VALUES (?)", (record_id,)
1650
+ )
1651
+ self.conn.commit()
1652
+
1653
+ def is_pinned(self, record_id: int) -> bool:
1654
+ self.cursor.execute(
1655
+ "SELECT 1 FROM Pinned WHERE record_id=? LIMIT 1", (record_id,)
1656
+ )
1657
+ return self.cursor.fetchone() is not None
1658
+
1659
+ def get_due_alerts(self):
1660
+ """Retrieve alerts that need execution within the next 6 seconds."""
1661
+ # now = round(datetime.now().timestamp())
1662
+ now = datetime.now()
1663
+ now_minus = _fmt_naive(now - timedelta(seconds=2))
1664
+ now_plus = _fmt_naive(now + timedelta(seconds=5))
1665
+ # log_msg(f"{now_minus = }, {now_plus = }")
1666
+
1667
+ self.cursor.execute(
1668
+ """
1669
+ SELECT alert_id, record_id, trigger_datetime, start_datetime, alert_name, alert_command
1670
+ FROM Alerts
1671
+ WHERE (trigger_datetime) BETWEEN ? AND ?
1672
+ """,
1673
+ (now_minus, now_plus),
1674
+ )
1675
+
1676
+ return self.cursor.fetchall()
1677
+
1678
+ def get_active_alerts(self):
1679
+ """Retrieve alerts that will trigger on or after the current moment and before midnight."""
1680
+
1681
+ self.cursor.execute(
1682
+ """
1683
+ SELECT alert_id, record_id, record_name, trigger_datetime, start_datetime, alert_name, alert_command
1684
+ FROM Alerts
1685
+ ORDER BY trigger_datetime ASC
1686
+ """,
1687
+ )
1688
+
1689
+ alerts = self.cursor.fetchall()
+ log_msg(f"{alerts = }")
+
+ # Rows are already in display order; convert tuples to lists so
+ # callers can mutate entries in place.
+ return [list(alert) for alert in alerts]
1719
+
1720
+ def get_all_tasks(self) -> list[dict]:
1721
+ """
1722
+ Retrieve all task and project records from the database.
1723
+
1724
+ Returns:
1725
+ A list of dictionaries representing task and project records.
1726
+ """
1727
+ self.cursor.execute(
1728
+ """
1729
+ SELECT * FROM Records
1730
+ WHERE itemtype IN ('~', '^')
1731
+ ORDER BY id
1732
+ """
1733
+ )
1734
+ columns = [column[0] for column in self.cursor.description]
1735
+ return [dict(zip(columns, row)) for row in self.cursor.fetchall()]
1736
+
1737
+ def get_job_display_subject(self, record_id: int, job_id: int | None) -> str | None:
1738
+ """
1739
+ Return the display_subject for a given record_id + job_id pair.
1740
+ Falls back to None if not found or no display_subject is present.
1741
+ """
1742
+ if job_id is None:
1743
+ return None
1744
+
1745
+ self.cursor.execute("SELECT jobs FROM Records WHERE id=?", (record_id,))
1746
+ row = self.cursor.fetchone()
1747
+ if not row or not row[0]:
1748
+ return None
1749
+
1750
+ jobs = _parse_jobs_json(row[0])
1751
+ for job in jobs:
1752
+ log_msg(f"{job = }")
1753
+ if job.get("job_id") == job_id:
1754
+ return job.get("display_subject") or None
1755
+
1756
+ return None
1757
+
1758
+ def get_job_dict(self, record_id: int, job_id: int | None) -> dict | None:
1759
+ """
1760
+ Return the full job dictionary for the given record_id + job_id pair.
1761
+ Returns None if not found.
1762
+
1763
+ """
1764
+ log_msg(f"getting job_dict for {record_id = }, {job_id = }")
1765
+ if job_id is None:
1766
+ return None
1767
+
1768
+ self.cursor.execute("SELECT jobs FROM Records WHERE id=?", (record_id,))
1769
+ row = self.cursor.fetchone()
1770
+ if not row or not row[0]:
1771
+ return None
1772
+
1773
+ jobs = _parse_jobs_json(row[0])
1774
+ log_msg(f"{jobs = }")
1775
+ for job in jobs:
1776
+ if job.get("job_id") == job_id:
1777
+ return job # Return the full dictionary
1778
+
1779
+ log_msg(f"returning None for {record_id = }, {job_id = }")
1780
+ return None
1781
+
1782
+ def get_all_alerts(self):
1783
+ """Retrieve all stored alerts for debugging."""
1784
+ self.cursor.execute("""
1785
+ SELECT alert_id, record_id, record_name, start_datetime, timedelta, command
1786
+ FROM Alerts
1787
+ ORDER BY start_datetime ASC
1788
+ """)
1789
+ alerts = self.cursor.fetchall()
1790
+
1791
+ if not alerts:
1792
+ return [
1793
+ "🔔 No alerts found.",
1794
+ ]
1795
+
1796
+ results = [
1797
+ "🔔 Current Alerts:",
1798
+ ]
1799
+ for alert in alerts:
1800
+ alert_id, record_id, record_name, start_dt, td, command = alert
1801
+ execution_time = start_dt - td # When the alert is scheduled to run
1802
+ formatted_time = datetime_from_timestamp(execution_time).strftime(
1803
+ "%Y-%m-%d %H:%M"
1804
+ )
1805
+
1806
+ results.append([alert_id, record_id, record_name, formatted_time, command])
1807
+
1808
+ return results
1809
+
1810
+ def mark_alert_executed(self, alert_id):
1811
+ """Optional: Mark alert as executed to prevent duplicate execution."""
1812
+ self.cursor.execute(
1813
+ """
1814
+ DELETE FROM Alerts WHERE alert_id = ?
1815
+ """,
1816
+ (alert_id,),
1817
+ )
1818
+ self.conn.commit()
1819
+
1820
+ def create_alert(
1864
+ self,
1865
+ command_name,
1866
+ timedelta,
1867
+ start_datetime,
1868
+ record_id,
1869
+ record_name,
1870
+ record_description,
1871
+ record_location,
1872
+ ):
1873
+ if command_name == "n":
1874
+ alert_command_template = "{name} {when} at {start}"
1875
+ else:
1876
+ alert_command_template = self.ALERTS.get(command_name, "")
1877
+ if not alert_command_template:
1878
+ log_msg(f"❌ Alert command not found for '{command_name}'")
1879
+ return None
1880
+
1881
+ name = record_name
1882
+ description = record_description
1883
+ location = record_location
1884
+
1885
+ if timedelta > 0:
1886
+ when = f"in {duration_in_words(timedelta)}"
1887
+ elif timedelta == 0:
1888
+ when = "now"
1889
+ else:
1890
+ when = f"{duration_in_words(-timedelta)} ago"
1891
+
1892
+ start = format_datetime(start_datetime, HRS_MINS)
1893
+ start_words = datetime_in_words(start_datetime)
1894
+
1895
+ # Prepare dict of available fields
1896
+ field_values = {
1897
+ "name": name,
1898
+ "when": when,
1899
+ "start": start,
1900
+ "time": start_words,
1901
+ "description": description,
1902
+ "location": location,
1903
+ }
1904
+
1905
+ # Use SafeDict to avoid KeyError for missing placeholders
1906
+ formatted = None
1907
+ try:
1908
+ formatted = alert_command_template.format_map(SafeDict(field_values))
1909
+ except Exception as e:
+ log_msg(f"❌ Alert formatting error for command '{command_name}': {e}")
+ # Fallback: return the raw template rather than retrying the
+ # same format_map call that just failed.
+ formatted = alert_command_template
1913
+
1914
+ log_msg(f"formatted alert: {formatted!r}")
1915
+ return formatted
1916
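+
+ # `SafeDict` is defined elsewhere in this module; a minimal sketch of
+ # the usual format_map() helper it is assumed to be (unknown
+ # placeholders are left intact instead of raising KeyError):
+ #     class SafeDict(dict):
+ #         def __missing__(self, key):
+ #             return "{" + key + "}"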
+
1917
+ def get_notice_for_today(self):
1918
+ self.cursor.execute("""
1919
+ SELECT notice.record_id, Records.itemtype, Records.subject, notice.days_remaining
1920
+ FROM notice
1921
+ JOIN Records ON notice.record_id = Records.id
1922
+ ORDER BY notice.days_remaining ASC
1923
+ """)
1924
+ return [
1925
+ (
1926
+ record_id,
1927
+ itemtype,
1928
+ subject,
1929
+ int(round(days_remaining)),
1930
+ )
1931
+ for (
1932
+ record_id,
1933
+ itemtype,
1934
+ subject,
1935
+ days_remaining,
1936
+ ) in self.cursor.fetchall()
1937
+ ]
1938
+
1939
+ def get_tokens(self, record_id: int):
1940
+ """
1941
+ Retrieve the tokens field from a record and return it as a list of dictionaries.
1942
+ Returns an empty list if the field is null, empty, or if the record is not found.
1943
+ """
1944
+ self.cursor.execute(
1945
+ "SELECT tokens, rruleset, created, modified FROM Records WHERE id = ?",
1946
+ (record_id,),
1947
+ )
1948
+ return [
1949
+ (
1950
+ # " ".join([t["token"] for t in json.loads(tokens)]),
1951
+ json.loads(tokens),
1952
+ rruleset,
1953
+ created,
1954
+ modified,
1955
+ )
1956
+ for (
1957
+ tokens,
1958
+ rruleset,
1959
+ created,
1960
+ modified,
1961
+ ) in self.cursor.fetchall()
1962
+ ]
1963
+
1964
+ def populate_alerts(self):
1965
+ """
1966
+ Populate the Alerts table for all records that have alerts defined.
1967
+ Inserts alerts that will trigger between now and local end-of-day.
1968
+ Uses TEXT datetimes ('YYYYMMDD' or 'YYYYMMDDTHHMM', local-naive).
1969
+ """
1970
+
1971
+ # --- small helpers for TEXT <-> datetime (local-naive) ---
1972
+ from datetime import datetime, timedelta
1973
+
1974
+ def _parse_local_text_dt(s: str) -> datetime:
1975
+ """Parse 'YYYYMMDD' or 'YYYYMMDDTHHMMSS' (local-naive) into datetime."""
1976
+ s = (s or "").strip()
1977
+ if not s:
1978
+ raise ValueError("empty datetime text")
1979
+ if "T" in s:
1980
+ # datetime
1981
+ return datetime.strptime(s, "%Y%m%dT%H%M")
1982
+ else:
1983
+ # date-only -> treat as midnight local
1984
+ return datetime.strptime(s, "%Y%m%d")
1985
+
1986
+ def _to_text_dt(dt: datetime, is_date_only: bool = False) -> str:
1987
+ """
1988
+ Render datetime back to TEXT storage.
1989
+ If is_date_only=True, keep 'YYYYMMDD'; else use 'YYYYMMDDTHHMM'.
1990
+ """
1991
+ if is_date_only:
1992
+ return dt.strftime("%Y%m%d")
1993
+ return dt.strftime("%Y%m%dT%H%M")
1994
+
1995
+ def _is_date_only_text(s: str) -> bool:
1996
+ return "T" not in (s or "")
1997
+
1998
+ # --- time window (local-naive) ---
1999
+ now = datetime.now()
2000
+ end_of_day = now.replace(hour=23, minute=59, second=59, microsecond=0)
2001
+
2002
+ # Targeted delete: remove alerts in [now, end_of_day] so we can repopulate without duplicates.
2003
+ self.cursor.execute(
2004
+ """
2005
+ DELETE FROM Alerts
2006
+ WHERE trigger_datetime >= ?
2007
+ AND trigger_datetime <= ?
2008
+ """,
2009
+ (now.strftime("%Y%m%dT%H%M"), end_of_day.strftime("%Y%m%dT%H%M")),
2010
+ )
2011
+ self.conn.commit()
2012
+
2013
+ # Find records that have alerts and at least one DateTimes row
2014
+ self.cursor.execute(
2015
+ """
2016
+ SELECT R.id, R.subject, R.description, R.context, R.alerts, D.start_datetime
2017
+ FROM Records R
2018
+ JOIN DateTimes D ON R.id = D.record_id
2019
+ WHERE R.alerts IS NOT NULL AND R.alerts != ''
2020
+ """
2021
+ )
2022
+ records = self.cursor.fetchall()
2023
+ if not records:
2024
+ print("🔔 No records with alerts found.")
2025
+ return
2026
+
2027
+ for (
2028
+ record_id,
2029
+ record_name,
2030
+ record_description,
2031
+ record_location,
2032
+ alerts_json,
2033
+ start_text,
2034
+ ) in records:
2035
+ # start_text is local-naive TEXT ('YYYYMMDD' or 'YYYYMMDDTHHMMSS')
2036
+ try:
2037
+ start_dt = _parse_local_text_dt(start_text)
2038
+ except Exception as e:
2039
+ # bad/malformed DateTimes row; skip gracefully
2040
+ print(
2041
+ f"⚠️ Skipping record {record_id}: invalid start_datetime {start_text!r}: {e}"
2042
+ )
2043
+ continue
2044
+
2045
+ is_date_only = _is_date_only_text(start_text)
2046
+
2047
+ try:
2048
+ alert_list = json.loads(alerts_json)
2049
+ if not isinstance(alert_list, list):
2050
+ continue
2051
+ except Exception:
2052
+ continue
2053
+
2054
+ for alert in alert_list:
2055
+ if ":" not in alert:
2056
+ continue # ignore malformed alerts like "10m" with no command
2057
+ time_part, command_part = alert.split(":", 1)
2058
+
2059
+ # support multiple lead times and multiple commands per line
2060
+ try:
2061
+ lead_secs_list = [
2062
+ td_str_to_seconds(t.strip()) for t in time_part.split(",")
2063
+ ]
2064
+ except Exception:
2065
+ continue
2066
+ commands = [
2067
+ cmd.strip() for cmd in command_part.split(",") if cmd.strip()
2068
+ ]
2069
+ if not commands:
2070
+ continue
2071
+
2072
+ # For date-only starts, we alert relative to midnight (00:00:00) of that day
2073
+ if is_date_only:
2074
+ effective_start_dt = start_dt.replace(
2075
+ hour=0, minute=0, second=0, microsecond=0
2076
+ )
2077
+ else:
2078
+ effective_start_dt = start_dt
2079
+
2080
+ for lead_secs in lead_secs_list:
2081
+ trigger_dt = effective_start_dt - timedelta(seconds=lead_secs)
2082
+
2083
+ # only alerts that trigger today between now and end_of_day
2084
+ if not (now <= trigger_dt <= end_of_day):
2085
+ continue
2086
+
2087
+ trigger_text = _to_text_dt(trigger_dt)  # always 'YYYYMMDDTHHMM'
2088
+ start_store_text = _to_text_dt(
2089
+ effective_start_dt, is_date_only=is_date_only
2090
+ )
2091
+
2092
+ for alert_name in commands:
2093
+ # If you have a helper that *builds* the command string, call it;
2094
+ # otherwise keep your existing create_alert signature but pass TEXTs.
2095
+ alert_command = self.create_alert(
2096
+ alert_name,
2097
+ lead_secs,
2098
+ start_store_text, # now TEXT, not epoch
2099
+ record_id,
2100
+ record_name,
2101
+ record_description,
2102
+ record_location,
2103
+ )
2104
+
2105
+ if not alert_command:
2106
+ continue
2107
+
2108
+ # Unique index will prevent duplicates; OR IGNORE keeps this idempotent.
2109
+ self.cursor.execute(
2110
+ """
2111
+ INSERT OR IGNORE INTO Alerts
2112
+ (record_id, record_name, trigger_datetime, start_datetime, alert_name, alert_command)
2113
+ VALUES (?, ?, ?, ?, ?, ?)
2114
+ """,
2115
+ (
2116
+ record_id,
2117
+ record_name,
2118
+ trigger_text,
2119
+ start_store_text,
2120
+ alert_name,
2121
+ alert_command,
2122
+ ),
2123
+ )
2124
+
2125
+ self.conn.commit()
2126
+ print("✅ Alerts table updated with today's relevant alerts.")
2127
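+
+ # Shape of a record's `alerts` JSON as parsed above (values
+ # hypothetical): lead times before the colon, commands after.
+ #     ["10m, 1h: n", "0m: say"]
+ # fires command 'n' 10 minutes and 1 hour before the start, and
+ # 'say' at the start itself.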
+
2128
+ def populate_alerts_for_record(self, record_id: int):
2129
+ """Regenerate alerts for a specific record, but only if any are scheduled for today."""
2130
+
2131
+ # Clear old alerts for this record
2132
+ self.cursor.execute("DELETE FROM Alerts WHERE record_id = ?", (record_id,))
2133
+
2134
+ # Look up the record’s alert data and start datetimes
2135
+ self.cursor.execute(
2136
+ """
2137
+ SELECT R.subject, R.description, R.context, R.alerts, D.start_datetime
2138
+ FROM Records R
2139
+ JOIN DateTimes D ON R.id = D.record_id
2140
+ WHERE R.id = ? AND R.alerts IS NOT NULL AND R.alerts != ''
2141
+ """,
2142
+ (record_id,),
2143
+ )
2144
+ records = self.cursor.fetchall()
2145
+ if not records:
2146
+ log_msg(f"🔕 No alerts to populate for record {record_id}")
2147
+ return
2148
+
2149
+ now = datetime.now()
+ end_of_day = now.replace(hour=23, minute=59, second=59, microsecond=0)
+
+ for subject, description, context, alerts_json, start_text in records:
+ # start_datetime is local-naive TEXT: 'YYYYMMDD' or 'YYYYMMDDTHHMM'
+ try:
+ start_dt = (
+ datetime.strptime(start_text, "%Y%m%dT%H%M")
+ if "T" in start_text
+ else datetime.strptime(start_text, "%Y%m%d")
+ )
+ except Exception:
+ continue
+ alerts = json.loads(alerts_json)
+ for alert in alerts:
+ if ":" not in alert:
+ continue
+ time_part, command_part = alert.split(":", 1)
+ timedelta_values = [
+ td_str_to_seconds(t.strip()) for t in time_part.split(",")
+ ]
+ commands = [cmd.strip() for cmd in command_part.split(",") if cmd.strip()]
+
+ for td in timedelta_values:
+ trigger_dt = start_dt - timedelta(seconds=td)
+ if now <= trigger_dt <= end_of_day:
+ for name in commands:
+ alert_command = self.create_alert(
+ name,
+ td,
+ _fmt_naive(start_dt),
+ record_id,
+ subject,
+ description,
+ context,
+ )
+ if alert_command:
+ self.cursor.execute(
+ "INSERT OR IGNORE INTO Alerts (record_id, record_name, trigger_datetime, start_datetime, alert_name, alert_command) VALUES (?, ?, ?, ?, ?, ?)",
+ (
+ record_id,
+ subject,
+ _fmt_naive(trigger_dt),
+ _fmt_naive(start_dt),
+ name,
+ alert_command,
+ ),
+ )
+
+ self.conn.commit()
+ log_msg(f"✅ Alerts updated for record {record_id}")
2194
+
2195
+ def get_generated_weeks_range(self) -> tuple[int, int, int, int] | None:
2196
+ row = self.cursor.execute(
2197
+ "SELECT start_year, start_week, end_year, end_week FROM GeneratedWeeks"
2198
+ ).fetchone()
2199
+ return tuple(row) if row else None
2200
+
2201
+ @staticmethod
2202
+ def _week_key(year: int, week: int) -> tuple[int, int]:
2203
+ return (year, week)
2204
+
2205
+ def is_week_in_generated(self, year: int, week: int) -> bool:
2206
+ rng = self.get_generated_weeks_range()
2207
+ if not rng:
2208
+ return False
2209
+ sy, sw, ey, ew = rng
2210
+ return (
2211
+ self._week_key(sy, sw)
2212
+ <= self._week_key(year, week)
2213
+ <= self._week_key(ey, ew)
2214
+ )
2215
+
2216
+ @staticmethod
2217
+ def _iso_date(year: int, week: int, weekday: int = 1) -> datetime:
2218
+ # ISO: %G (ISO year), %V (ISO week), %u (1..7, Monday=1)
2219
+ return datetime.strptime(f"{year} {week} {weekday}", "%G %V %u")
2220
+
2221
+ def _weeks_between(self, a: tuple[int, int], b: tuple[int, int]) -> int:
2222
+ da = self._iso_date(*a)
2223
+ db = self._iso_date(*b)
2224
+ return (db - da).days // 7
2225
+
2226
+ def ensure_week_generated_with_topup(
2227
+ self,
2228
+ year: int,
2229
+ week: int,
2230
+ cushion: int = 6,
2231
+ topup_threshold: int = 2,
2232
+ ) -> bool:
2233
+ """
2234
+ Ensure (year, week) exists in DateTimes.
2235
+ - If it's outside the cached range (earlier or later): extend to include it (+ cushion).
2236
+ - If it's inside but within `topup_threshold` weeks of either edge, extend a bit past that edge.
2237
+ Returns True if any extension was performed.
2238
+ """
2239
+ rng = self.get_generated_weeks_range()
2240
+
2241
+ # No range yet: seed it from requested week
2242
+ if not rng:
2243
+ self.extend_datetimes_for_weeks(year, week, cushion + 1)
2244
+ return True
2245
+
2246
+ sy, sw, ey, ew = rng
2247
+ wk_key = self._week_key(year, week)
2248
+
2249
+ # Outside range -> extend starting at requested week
2250
+ if wk_key < self._week_key(sy, sw) or wk_key > self._week_key(ey, ew):
2251
+ self.extend_datetimes_for_weeks(year, week, cushion + 1)
2252
+ return True
2253
+
2254
+ # Inside range: check “near left” edge
2255
+ if self._weeks_between((sy, sw), (year, week)) <= topup_threshold:
2256
+ earlier_start = self._iso_date(sy, sw) - timedelta(weeks=cushion)
2257
+ e_y, e_w = earlier_start.isocalendar()[:2]
2258
+ self.extend_datetimes_for_weeks(e_y, e_w, cushion + 1)
2259
+ return True
2260
+
2261
+ # Inside range: check “near right” edge
2262
+ if self._weeks_between((year, week), (ey, ew)) <= topup_threshold:
2263
+ start_after = self._iso_date(ey, ew) + timedelta(weeks=1)
2264
+ n_y, n_w = start_after.isocalendar()[:2]
2265
+ self.extend_datetimes_for_weeks(n_y, n_w, cushion)
2266
+ return True
2267
+
2268
+ return False
2269
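+
+ # Worked example (hypothetical cached range 2025-W10 .. 2025-W20,
+ # cushion=6, topup_threshold=2): requesting 2025-W19 is within 2
+ # weeks of the right edge, so W21..W26 get generated and the call
+ # returns True; requesting 2025-W15 touches neither edge and
+ # returns False.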
+
2270
+ def extend_datetimes_for_weeks(self, start_year, start_week, weeks):
2271
+ """
2272
+ Extend the DateTimes table by generating data for the specified number of weeks
2273
+ starting from a given year and week.
2274
+
2275
+ Args:
2276
+ start_year (int): The starting year.
2277
+ start_week (int): The starting ISO week.
2278
+ weeks (int): Number of weeks to generate.
2279
+ """
2280
+ start = datetime.strptime(f"{start_year} {start_week} 1", "%G %V %u")
2281
+ end = start + timedelta(weeks=weeks)
2282
+
2283
+ start_year, start_week = start.isocalendar()[:2]
2284
+ end_year, end_week = end.isocalendar()[:2]
2285
+
2286
+ self.cursor.execute(
2287
+ "SELECT start_year, start_week, end_year, end_week FROM GeneratedWeeks"
2288
+ )
2289
+ cached_ranges = self.cursor.fetchall()
2290
+
2291
+ # Determine the full range that needs to be generated
2292
+ min_year = (
2293
+ min(cached_ranges, key=lambda x: x[0])[0] if cached_ranges else start_year
2294
+ )
2295
+ min_week = (
2296
+ min(cached_ranges, key=lambda x: x[1])[1] if cached_ranges else start_week
2297
+ )
2298
+ max_year = (
2299
+ max(cached_ranges, key=lambda x: x[2])[2] if cached_ranges else end_year
2300
+ )
2301
+ max_week = (
2302
+ max(cached_ranges, key=lambda x: x[3])[3] if cached_ranges else end_week
2303
+ )
2304
+
2305
+ # Expand the range to include gaps and requested period
2306
+ if start_year < min_year or (start_year == min_year and start_week < min_week):
2307
+ min_year, min_week = start_year, start_week
2308
+ if end_year > max_year or (end_year == max_year and end_week > max_week):
2309
+ max_year, max_week = end_year, end_week
2310
+
2311
+ first_day = datetime.strptime(f"{min_year} {min_week} 1", "%G %V %u")
2312
+ last_day = datetime.strptime(
2313
+ f"{max_year} {max_week} 1", "%G %V %u"
2314
+ ) + timedelta(days=6)
2315
+
2316
+ # Generate new datetimes for the extended range
2317
+ log_msg(f"generating datetimes for {first_day = } {last_day = }")
2318
+ self.generate_datetimes_for_period(first_day, last_day)
2319
+
2320
+ # Update the GeneratedWeeks table
2321
+ self.cursor.execute("DELETE FROM GeneratedWeeks") # Clear old entries
2322
+ self.cursor.execute(
2323
+ """
2324
+ INSERT INTO GeneratedWeeks (start_year, start_week, end_year, end_week)
2325
+ VALUES (?, ?, ?, ?)
2326
+ """,
2327
+ (min_year, min_week, max_year, max_week),
2328
+ )
2329
+
2330
+ self.conn.commit()
2331
+
2332
+ def generate_datetimes(self, rule_str, extent, start_date, end_date):
2333
+ """
2334
+ Generate occurrences for a given rruleset within the specified date range.
2335
+
2336
+ Args:
2337
+ rule_str (str): The rrule string defining the recurrence rule.
2338
+ extent (int): The duration of each occurrence in minutes.
2339
+ start_date (datetime): The start of the range.
2340
+ end_date (datetime): The end of the range.
2341
+
2342
+ Returns:
2343
+ List[Tuple[datetime, datetime]]: A list of (start_dt, end_dt) tuples.
2344
+ """
2345
+
2346
+ log_msg(
2347
+ f"getting datetimes for {rule_str} between {start_date = } and {end_date = }"
2348
+ )
2349
+ rule = rrulestr(rule_str, dtstart=start_date)
2350
+ occurrences = list(rule.between(start_date, end_date, inc=True))
2351
+ print(f"{rule_str = }\n{occurrences = }")
2352
+ extent = td_str_to_td(extent) if isinstance(extent, str) else extent
2353
+ log_msg(
2354
+ f"Generating for {len(occurrences) = } between {start_date = } and {end_date = } with {extent = } for {rule_str = }."
2355
+ )
2356
+
2357
+ # Create (start, end) pairs
2358
+ results = []
2359
+ for start_dt in occurrences:
2360
+ end_dt = start_dt + extent if extent else start_dt
2361
+ results.append((start_dt, end_dt))
2362
+
2363
+ return results
2364
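+
+ # Example (hypothetical rule): a daily-at-09:00 rule with a 30-minute
+ # extent expands to pairs such as
+ #     (datetime(2025, 1, 6, 9, 0), datetime(2025, 1, 6, 9, 30))
+ # one per occurrence inside [start_date, end_date].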
+
2365
+ def generate_datetimes_for_record(
2366
+ self,
2367
+ record_id: int,
2368
+ *,
2369
+ window: tuple[datetime, datetime] | None = None,
2370
+ clear_existing: bool = True,
2371
+ ) -> None:
2372
+ """
2373
+ Regenerate DateTimes rows for a single record.
2374
+
2375
+ Behavior:
2376
+ • If the record has jobs (project): generate rows for jobs ONLY (job_id set).
2377
+ • If the record has no jobs (event or single task): generate rows for the parent
2378
+ itself (job_id NULL).
2379
+ • Notes / unscheduled: nothing.
2380
+
2381
+ Infinite rules: constrained to `window` when provided.
2382
+ Finite rules: generated fully (window ignored).
2383
+ """
2384
+ # Fetch core fields including itemtype and jobs JSON
2385
+ self.cursor.execute(
2386
+ "SELECT itemtype, rruleset, extent, jobs, processed FROM Records WHERE id=?",
2387
+ (record_id,),
2388
+ )
2389
+ row = self.cursor.fetchone()
2390
+ if not row:
2391
+ log_msg(f"⚠️ No record found id={record_id}")
2392
+ return
2393
+
2394
+ itemtype, rruleset, record_extent, jobs_json, processed = row
2395
+ rule_str = (rruleset or "").replace("\\N", "\n").replace("\\n", "\n")
2396
+
2397
+ # Nothing to do without any schedule
2398
+ if not rule_str:
2399
+ return
2400
+
2401
+ # Optional: clear existing rows for this record
2402
+ if clear_existing:
2403
+ self.cursor.execute(
2404
+ "DELETE FROM DateTimes WHERE record_id = ?", (record_id,)
2405
+ )
2406
+
2407
+ # Parse jobs (if any)
2408
+ jobs = _parse_jobs_json(jobs_json)
2409
+ has_jobs = bool(jobs)
2410
+ # log_msg(f"{has_jobs = }, {jobs = }")
2411
+
2412
+ has_rrule = "RRULE" in rule_str
2413
+ is_finite = (not has_rrule) or ("COUNT=" in rule_str) or ("UNTIL=" in rule_str)
2414
+ is_aware = "Z" in rule_str
2415
+
2416
+ # Build parent recurrence iterator
2417
+ try:
2418
+ rule = rrulestr(rule_str)
2419
+ except Exception as e:
2420
+ log_msg(
2421
+ f"rrulestr failed for record {record_id}: {e}\n---\n{rule_str}\n---"
2422
+ )
2423
+ return
2424
+
2425
+ def _iter_parent_occurrences():
2426
+ if is_finite:
2427
+ anchor = get_anchor(is_aware)
2429
+
2430
+ try:
2431
+ cur = rule.after(anchor, inc=True)
2432
+ except TypeError:
2433
+ log_msg(
2434
+ f"exception processing {anchor = } with {is_aware = } in {record_id = }"
2435
+ )
2436
+ cur = None
2437
+
2438
+ while cur is not None:
2439
+ yield cur
2440
+ cur = rule.after(cur, inc=False)
2441
+ else:
2442
+ if window:
2443
+ lo, hi = window
2444
+ try:
2445
+ occs = rule.between(lo, hi, inc=True)
2446
+ except TypeError:
2447
+ if lo.tzinfo is None:
2448
+ lo = lo.replace(tzinfo=tz.UTC)
2449
+ if hi.tzinfo is None:
2450
+ hi = hi.replace(tzinfo=tz.UTC)
2451
+ occs = rule.between(lo, hi, inc=True)
2452
+ for cur in occs:
2453
+ yield cur
2454
+ else:
2455
+ # default horizon for infinite rules
2456
+ start = datetime.now()
2457
+ end = start + timedelta(weeks=12)
2458
+ try:
2459
+ occs = rule.between(start, end, inc=True)
2460
+ except TypeError:
2461
+ occs = rule.between(
2462
+ start.replace(tzinfo=tz.UTC),
2463
+ end.replace(tzinfo=tz.UTC),
2464
+ inc=True,
2465
+ )
2466
+ for cur in occs:
2467
+ yield cur
2468
+
2469
+ extent_sec_record = td_str_to_seconds(record_extent or "")
2470
+
2471
+ # ---- PATH A: Projects with jobs -> generate job rows only ----
2472
+ if has_jobs:
2473
+ log_msg(f"{record_id = } has jobs")
2474
+ for parent_dt in _iter_parent_occurrences():
2475
+ parent_local = _to_local_naive(
2476
+ parent_dt
2477
+ if isinstance(parent_dt, datetime)
2478
+ else datetime.combine(parent_dt, datetime.min.time())
2479
+ )
2480
+ for j in jobs:
2481
+ log_msg(f"job: {j = }")
2482
+ if j.get("status") == "finished":
2483
+ continue
2484
+ job_id = j.get("job_id")
2485
+ off_sec = td_str_to_seconds(j.get("offset_str") or "")
2486
+ job_start = _shift_from_parent(parent_local, off_sec)
2487
+ job_extent_sec = (
2488
+ td_str_to_seconds(j.get("extent_str") or "")
2489
+ or extent_sec_record
2490
+ )
2491
+
2492
+ if job_extent_sec:
2493
+ job_end = job_start + timedelta(seconds=job_extent_sec)
2494
+ try:
2495
+ # preferred: split across days if you have this helper
2496
+ for seg_start, seg_end in _split_span_local_days(
2497
+ job_start, job_end
2498
+ ):
2499
+ s_txt = _fmt_naive(seg_start)
2500
+ e_txt = (
2501
+ None
2502
+ if seg_end == seg_start
2503
+ else _fmt_naive(seg_end)
2504
+ )
2505
+ log_msg(
2506
+ f"inserting job datetimes {s_txt = }, {e_txt = } for {record_id = }, {job_id = }"
2507
+ )
2508
+ self.cursor.execute(
2509
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, ?, ?, ?)",
2510
+ (record_id, job_id, s_txt, e_txt),
2511
+ )
2512
+ log_msg("success")
2513
+ except NameError:
2514
+ # fallback: single row
2515
+ self.cursor.execute(
2516
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, ?, ?, ?)",
2517
+ (
2518
+ record_id,
2519
+ job_id,
2520
+ _fmt_naive(job_start),
2521
+ _fmt_naive(job_end),
2522
+ ),
2523
+ )
2524
+ except Exception as e:
2525
+ log_msg(f"error: {e}")
2526
+ else:
2527
+ self.cursor.execute(
2528
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, ?, ?, NULL)",
2529
+ (record_id, job_id, _fmt_naive(job_start)),
2530
+ )
2531
+
2532
+ # ---- PATH B: Events / single tasks (no jobs) -> generate parent rows ----
2533
+ else:
2534
+ for cur in _iter_parent_occurrences():
2535
+ # cur can be aware/naive datetime (or, rarely, date)
2536
+ if isinstance(cur, datetime):
2537
+ start_local = _to_local_naive(cur)
2538
+ else:
2539
+ start_local = (
2540
+ cur # date; treated as local-naive midnight by _fmt_naive
2541
+ )
2542
+
2543
+ if extent_sec_record:
2544
+ end_local = (
2545
+ start_local + timedelta(seconds=extent_sec_record)
2546
+ if isinstance(start_local, datetime)
2547
+ else datetime.combine(start_local, datetime.min.time())
2548
+ + timedelta(seconds=extent_sec_record)
2549
+ )
2550
+ try:
2551
+ for seg_start, seg_end in _split_span_local_days(
2552
+ start_local, end_local
2553
+ ):
2554
+ s_txt = _fmt_naive(seg_start)
2555
+ e_txt = (
2556
+ None if seg_end == seg_start else _fmt_naive(seg_end)
2557
+ )
2558
+ self.cursor.execute(
2559
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, NULL, ?, ?)",
2560
+ (record_id, s_txt, e_txt),
2561
+ )
2562
+ except NameError:
2563
+ self.cursor.execute(
2564
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, NULL, ?, ?)",
2565
+ (record_id, _fmt_naive(start_local), _fmt_naive(end_local)),
2566
+ )
2567
+ else:
2568
+ self.cursor.execute(
2569
+ "INSERT OR IGNORE INTO DateTimes (record_id, job_id, start_datetime, end_datetime) VALUES (?, NULL, ?, NULL)",
2570
+ (record_id, _fmt_naive(start_local)),
2571
+ )
2572
+
2573
+ # Mark finite as processed only when we generated full set (no window)
2574
+ if is_finite and not window:
2575
+ self.cursor.execute(
2576
+ "UPDATE Records SET processed = 1 WHERE id = ?", (record_id,)
2577
+ )
2578
+ self.conn.commit()
2579
+
2580
+ def get_events_for_period(self, start_date: datetime, end_date: datetime):
2581
+ """
2582
+ Retrieve all events that occur or overlap within [start_date, end_date),
2583
+ ordered by start time.
2584
+
2585
+ Returns rows as:
2586
+ (start_datetime, end_datetime, itemtype, subject, record_id, job_id)
2587
+
2588
+ DateTimes table stores TEXT:
2589
+ - date-only: 'YYYYMMDD'
2590
+ - datetime: 'YYYYMMDDTHHMM'
2591
+ - end_datetime may be NULL (instantaneous)
2592
+
2593
+ Overlap rule:
2594
+ normalized_end >= period_start_key
2595
+ normalized_start < period_end_key
2596
+ """
2597
+ start_key = _to_key(start_date)
2598
+ end_key = _to_key(end_date)
2599
+
2600
+ sql = """
2601
+ SELECT
2602
+ dt.start_datetime,
2603
+ dt.end_datetime,
2604
+ r.itemtype,
2605
+ r.subject,
2606
+ r.id,
2607
+ dt.job_id
2608
+ FROM DateTimes dt
2609
+ JOIN Records r ON dt.record_id = r.id
2610
+ WHERE
2611
+ -- normalized end >= period start
2612
+ (
2613
+ CASE
2614
+ WHEN dt.end_datetime IS NULL THEN
2615
+ CASE
2616
+ WHEN LENGTH(dt.start_datetime) = 8 THEN dt.start_datetime || 'T000000'
2617
+ ELSE dt.start_datetime
2618
+ END
2619
+ WHEN LENGTH(dt.end_datetime) = 8 THEN dt.end_datetime || 'T235959'
2620
+ ELSE dt.end_datetime
2621
+ END
2622
+ ) >= ?
2623
+ AND
2624
+ -- normalized start < period end
2625
+ (
2626
+ CASE
2627
+ WHEN LENGTH(dt.start_datetime) = 8 THEN dt.start_datetime || 'T000000'
2628
+ ELSE dt.start_datetime
2629
+ END
2630
+ ) < ?
2631
+ ORDER BY
2632
+ CASE
2633
+ WHEN LENGTH(dt.start_datetime) = 8 THEN dt.start_datetime || 'T000000'
2634
+ ELSE dt.start_datetime
2635
+ END
2636
+ """
2637
+ self.cursor.execute(sql, (start_key, end_key))
2638
+ return self.cursor.fetchall()
2639
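+
+ # Why the CASE padding above is safe: the compact keys compare
+ # lexicographically, so a date-only '20250105' padded to
+ # '20250105T000000' (start) or '20250105T235959' (end) still orders
+ # correctly against datetime keys like '20250105T0930'.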
+
2640
+ def generate_datetimes_for_period(self, start_date: datetime, end_date: datetime):
2641
+ self.cursor.execute("SELECT id FROM Records")
2642
+ for (record_id,) in self.cursor.fetchall():
2643
+ self.generate_datetimes_for_record(
2644
+ record_id,
2645
+ window=(start_date, end_date),
2646
+ clear_existing=True,
2647
+ )
2648
+
2649
+ def get_notice_for_events(self):
2650
+ """
2651
+ Retrieve (record_id, days_remaining, subject) from notice joined with Records
2652
+ for events only (itemtype '*').
2653
+
2654
+ Returns:
2655
+ List[Tuple[int, int, str]]: A list of (record_id, days_remaining, subject)
2656
+ """
2657
+ self.cursor.execute(
2658
+ """
2659
+ SELECT n.record_id, n.days_remaining, r.subject
2660
+ FROM notice n
2661
+ JOIN Records r ON n.record_id = r.id
2662
+ WHERE r.itemtype = '*'
2663
+ ORDER BY n.days_remaining
2664
+ """
2665
+ )
2666
+ return self.cursor.fetchall()
2667
+
2668
+ def get_drafts(self):
2669
+ """
2670
+ Retrieve all draft records (itemtype '?') with their ID and subject.
2671
+
2672
+ Returns:
2673
+ List[Tuple[int, str]]: A list of (id, subject)
2674
+ """
2675
+ self.cursor.execute(
2676
+ """
2677
+ SELECT id, subject
2678
+ FROM Records
2679
+ WHERE itemtype = '?'
2680
+ ORDER BY id
2681
+ """
2682
+ )
2683
+ return self.cursor.fetchall()
2684
+
2685
+ def get_urgency(self):
2686
+ """
2687
+ Return tasks for the Agenda view, with pinned-first ordering.
2688
+
2689
+ Rows:
2690
+ (record_id, job_id, subject, urgency, color, status, weights, pinned_int)
2691
+ """
2692
+ self.cursor.execute(
2693
+ """
2694
+ SELECT
2695
+ u.record_id,
2696
+ u.job_id,
2697
+ u.subject,
2698
+ u.urgency,
2699
+ u.color,
2700
+ u.status,
2701
+ u.weights,
2702
+ CASE WHEN p.record_id IS NULL THEN 0 ELSE 1 END AS pinned
2703
+ FROM Urgency AS u
2704
+ LEFT JOIN Pinned AS p ON p.record_id = u.record_id
2705
+ ORDER BY pinned DESC, u.urgency DESC, u.id ASC
2706
+ """
2707
+ )
2708
+ return self.cursor.fetchall()
2709
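+
+ # Row sketch (values hypothetical):
+ #     (12, None, 'pay rent', 8.4, '#e06c75', 'next', '{...}', 1)
+ # pinned rows (trailing 1) sort first, then by descending urgency.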
+
2710
+ def process_events(self, start_date, end_date):
2711
+ """
2712
+ Process events and split across days for display.
2713
+
2714
+ Args:
2715
+ start_date (datetime): The start of the period.
2716
+ end_date (datetime): The end of the period.
2717
+
2718
+ Returns:
2719
+ Dict[int, Dict[int, Dict[int, List[Tuple]]]]: Nested dictionary grouped by year, week, and weekday.
2720
+ """
2721
+
2725
+ # Retrieve all events for the specified period
2726
+ events = self.get_events_for_period(start_date, end_date)
2727
+ # Group events by ISO year, week, and weekday
2728
+ grouped_events = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
2729
+
2730
+ for start_text, end_text, itemtype, subject, id, job_id in events:
+ # DateTimes stores local-naive TEXT ('YYYYMMDD' or 'YYYYMMDDTHHMM')
+ start_dt = (
+ datetime.strptime(start_text, "%Y%m%dT%H%M")
+ if "T" in start_text
+ else datetime.strptime(start_text, "%Y%m%d")
+ )
+ if end_text:
+ end_dt = (
+ datetime.strptime(end_text, "%Y%m%dT%H%M")
+ if "T" in end_text
+ else datetime.strptime(end_text, "%Y%m%d")
+ )
+ else:
+ end_dt = start_dt  # NULL end means instantaneous
2743
+
2744
+ iso_year, iso_week, iso_weekday = start_dt.isocalendar()
2745
+ grouped_events[iso_year][iso_week][iso_weekday].append((start_dt, end_dt))
2746
+
2747
+ return grouped_events
2748
+
2749
+ def populate_notice(self):
2750
+ """
2751
+ Populate the notice table for all records with valid notice entries.
2752
+ This clears existing entries and recomputes them from current record data.
2753
+ """
2754
+ self.cursor.execute("DELETE FROM Notice;")
2755
+ self.conn.commit()
2756
+
2757
+ # Fetch the ids of records that carry a notice value
+ self.cursor.execute(
+ "SELECT id FROM Records WHERE notice IS NOT NULL AND notice != ''"
+ )
+ for (record_id,) in self.cursor.fetchall():
2762
+ self.populate_notice_for_record(record_id)
2763
+
2764
+ self.conn.commit()
2765
+
2766
+ def populate_notice_for_record(self, record_id: int):
2767
+ self.cursor.execute("SELECT notice FROM Records WHERE id = ?", (record_id,))
2768
+ row = self.cursor.fetchone()
2769
+ if not row or not row[0]:
2770
+ return # no notice for this record
2771
+ notice_str = row[0]
2772
+
2773
+ self.cursor.execute(
2774
+ "SELECT start_datetime FROM DateTimes WHERE record_id = ? ORDER BY start_datetime ASC",
2775
+ (record_id,),
2776
+ )
2777
+ occurrences = self.cursor.fetchall()
2778
+
2779
+ today = date.today()
2780
+ offset = td_str_to_td(notice_str)
2781
+
2782
+ for (start_text,) in occurrences:
+ # start_datetime is local-naive TEXT ('YYYYMMDD' or 'YYYYMMDDTHHMM')
+ scheduled_dt = (
+ datetime.strptime(start_text, "%Y%m%dT%H%M")
+ if "T" in start_text
+ else datetime.strptime(start_text, "%Y%m%d")
+ )
2784
+ notice_dt = scheduled_dt - offset
2785
+ if notice_dt.date() <= today < scheduled_dt.date():
2786
+ days_remaining = (scheduled_dt.date() - today).days
2787
+ self.cursor.execute(
2788
+ "INSERT INTO notice (record_id, days_remaining) VALUES (?, ?)",
2789
+ (record_id, days_remaining),
2790
+ )
2791
+ break # Only insert for the earliest qualifying instance
2792
+
2793
+ self.conn.commit()
2794
+
2795
+ def populate_busy_from_datetimes(self):
2796
+ """
2797
+ Build BusyWeeksFromDateTimes from DateTimes.
2798
+ For each (record_id, year_week) pair, accumulate busybits
2799
+ across all event segments — merging with np.maximum().
2800
+ """
2801
+ import numpy as np
2802
+
2803
+ log_msg("🧩 Rebuilding BusyWeeksFromDateTimes…")
2804
+ self.cursor.execute("DELETE FROM BusyWeeksFromDateTimes")
2805
+
2806
+ # Only include Records that are events (itemtype='*')
2807
+ self.cursor.execute("""
2808
+ SELECT dt.record_id, dt.start_datetime, dt.end_datetime
2809
+ FROM DateTimes AS dt
2810
+ JOIN Records AS r ON r.id = dt.record_id
2811
+ WHERE r.itemtype = '*'
2812
+ """)
2813
+ rows = self.cursor.fetchall()
2814
+ if not rows:
2815
+ print("⚠️ No event DateTimes entries found.")
2816
+ return
2817
+
2818
+ total_inserted = 0
2819
+ for record_id, start_str, end_str in rows:
2820
+ weeks = fine_busy_bits_for_event(start_str, end_str)
2821
+ for yw, arr in weeks.items():
2822
+ # ensure numpy array
2823
+ arr = np.asarray(arr, dtype=np.uint8)
2824
+
2825
+ # check if a row already exists for (record_id, week)
2826
+ self.cursor.execute(
2827
+ "SELECT busybits FROM BusyWeeksFromDateTimes WHERE record_id=? AND year_week=?",
2828
+ (record_id, yw),
2829
+ )
2830
+ row = self.cursor.fetchone()
2831
+ if row:
2832
+ existing = np.frombuffer(row[0], dtype=np.uint8)
2833
+ merged = np.maximum(existing, arr)
2834
+ else:
2835
+ merged = arr
2836
+
2837
+ # upsert
2838
+ self.cursor.execute(
2839
+ """
2840
+ INSERT INTO BusyWeeksFromDateTimes (record_id, year_week, busybits)
2841
+ VALUES (?, ?, ?)
2842
+ ON CONFLICT(record_id, year_week)
2843
+ DO UPDATE SET busybits = excluded.busybits
2844
+ """,
2845
+ (record_id, yw, merged.tobytes()),
2846
+ )
2847
+ total_inserted += 1
2848
+
2849
+ self.conn.commit()
2850
+ print(f"✅ BusyWeeksFromDateTimes populated ({total_inserted} week-records).")
2851
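+
+ # Merge rule illustrated: np.maximum() keeps the strongest state per
+ # slot, so existing [0, 1, 2] merged with new [1, 1, 0] -> [1, 1, 2].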
+
2852
+ def get_last_instances(
2853
+ self,
2854
+ ) -> List[Tuple[int, int | None, str, str, str, str]]:
2855
+ """
2856
+ Retrieve the last instances of each record/job falling before today.
2857
+
2858
+ Returns:
2859
+ List of tuples:
2860
+ (record_id, job_id, subject, description, itemtype, last_datetime)
2861
+ """
2862
+ today = datetime.now().strftime("%Y%m%dT%H%M")
2863
+ self.cursor.execute(
2864
+ """
2865
+ SELECT
2866
+ r.id,
2867
+ d.job_id,
2868
+ r.subject,
2869
+ r.description,
2870
+ r.itemtype,
2871
+ MAX(d.start_datetime) AS last_datetime
2872
+ FROM Records r
2873
+ JOIN DateTimes d ON r.id = d.record_id
2874
+ WHERE d.start_datetime < ?
2875
+ GROUP BY r.id, d.job_id
2876
+ ORDER BY last_datetime DESC
2877
+ """,
2878
+ (today,),
2879
+ )
2880
+ return self.cursor.fetchall()
2881
+
2882
+ def get_next_instances(
2883
+ self,
2884
+ ) -> List[Tuple[int, int | None, str, str, str, str]]:
2885
+ """
2886
+ Retrieve the next instances of each record/job falling on or after today.
2887
+
2888
+ Returns:
2889
+ List of tuples:
2890
+ (record_id, job_id, subject, description, itemtype, next_datetime)
2891
+ """
2892
+ today = datetime.now().strftime("%Y%m%dT%H%M")
2893
+ self.cursor.execute(
2894
+ """
2895
+ SELECT
2896
+ r.id,
2897
+ d.job_id,
2898
+ r.subject,
2899
+ r.description,
2900
+ r.itemtype,
2901
+ MIN(d.start_datetime) AS next_datetime
2902
+ FROM Records r
2903
+ JOIN DateTimes d ON r.id = d.record_id
2904
+ WHERE d.start_datetime >= ?
2905
+ GROUP BY r.id, d.job_id
2906
+ ORDER BY next_datetime ASC
2907
+ """,
2908
+ (today,),
2909
+ )
2910
+ return self.cursor.fetchall()
2911
+
2912
+ def get_next_instance_for_record(
2913
+ self, record_id: int
2914
+ ) -> tuple[str, str | None] | None:
2915
+ """
2916
+ Return (start_datetime, end_datetime|NULL) as compact local-naive strings
2917
+ for the next instance of a single record, or None if none.
2918
+ """
2919
+ # start_datetime sorted ascending; end_datetime can be NULL
2920
+ self.cursor.execute(
2921
+ """
2922
+ SELECT start_datetime, end_datetime
2923
+ FROM DateTimes
2924
+ WHERE record_id = ?
2925
+ AND start_datetime >= ?
2926
+ ORDER BY start_datetime ASC
2927
+ LIMIT 1
2928
+ """,
2929
+ # record_id plus "now" in compact local-naive format
+ (record_id, _fmt_naive(datetime.now())),
2931
+ )
2932
+ row = self.cursor.fetchone()
2933
+ if row:
2934
+ return row[0], row[1]
2935
+ return None
2936
+
2937
+ def get_next_start_datetimes_for_record(
2938
+ self, record_id: int, job_id: int | None = None
2939
+ ) -> list[str]:
2940
+ """
2941
+ Return up to 2 start datetimes (as compact local-naive strings)
2942
+ for the given record (and optional job), sorted ascending.
2943
+ """
2944
+ sql = """
2945
+ SELECT start_datetime
2946
+ FROM DateTimes
2947
+ WHERE record_id = ?
2948
+ """
2949
+ # params = [record_id, _fmt_naive(datetime.now())]
2950
+ params = [
2951
+ record_id,
2952
+ ]
2953
+
2954
+ if job_id is not None:
2955
+ sql += " AND job_id = ?"
2956
+ params.append(job_id)
2957
+
2958
+ sql += " ORDER BY start_datetime ASC LIMIT 2"
2959
+
2960
+ self.cursor.execute(sql, params)
2961
+ return [row[0] for row in self.cursor.fetchall()]
2962
+
2963
+ def find_records(self, regex: str):
2964
+ regex_ci = f"(?i){regex}" # force case-insensitive
2965
+ today = datetime.now().strftime("%Y%m%dT%H%M")  # TEXT key, matching DateTimes storage
2966
+ self.cursor.execute(
2967
+ """
2968
+ WITH
2969
+ LastInstances AS (
2970
+ SELECT record_id, MAX(start_datetime) AS last_datetime
2971
+ FROM DateTimes
2972
+ WHERE start_datetime < ?
2973
+ GROUP BY record_id
2974
+ ),
2975
+ NextInstances AS (
2976
+ SELECT record_id, MIN(start_datetime) AS next_datetime
2977
+ FROM DateTimes
2978
+ WHERE start_datetime >= ?
2979
+ GROUP BY record_id
2980
+ )
2981
+ SELECT r.id, r.subject, r.description, r.itemtype, li.last_datetime, ni.next_datetime
2982
+ FROM Records r
2983
+ LEFT JOIN LastInstances li ON r.id = li.record_id
2984
+ LEFT JOIN NextInstances ni ON r.id = ni.record_id
2985
+ WHERE r.subject REGEXP ? OR r.description REGEXP ?
2986
+ """,
2987
+ (today, today, regex_ci, regex_ci),
2988
+ )
2989
+ return self.cursor.fetchall()
2990
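+
+ # SQLite has no built-in REGEXP operator; this query relies on the
+ # module-level `regexp()` helper being registered on the connection
+ # at setup time (assumed to happen elsewhere), e.g.:
+ #     self.conn.create_function("REGEXP", 2, regexp)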
+
2991
+ # FIXME: should access record_id
2992
+ def update_tags_for_record(self, record_data):
2993
+ cur = self.conn.cursor()
2994
+ tags = record_data.pop("tags", [])
2995
+ record_data["tokens"] = json.dumps(record_data.get("tokens", []))
2996
+ record_data["jobs"] = json.dumps(record_data.get("jobs", []))
2997
+ if "id" in record_data:
2998
+ record_id = record_data["id"]
2999
+ columns = [k for k in record_data if k != "id"]
3000
+ assignments = ", ".join([f"{col} = ?" for col in columns])
3001
+ values = [record_data[col] for col in columns]
3002
+ values.append(record_id)
3003
+ cur.execute(f"UPDATE Records SET {assignments} WHERE id = ?", values)
3004
+ cur.execute("DELETE FROM RecordTags WHERE record_id = ?", (record_id,))
3005
+ else:
3006
+ columns = list(record_data.keys())
3007
+ values = [record_data[col] for col in columns]
3008
+ placeholders = ", ".join(["?"] * len(columns))
3009
+ cur.execute(
3010
+ f"INSERT INTO Records ({', '.join(columns)}) VALUES ({placeholders})",
3011
+ values,
3012
+ )
3013
+ record_id = cur.lastrowid
3014
+ for tag in tags:
3015
+ cur.execute("INSERT OR IGNORE INTO Tags (name) VALUES (?)", (tag,))
3016
+ cur.execute("SELECT id FROM Tags WHERE name = ?", (tag,))
3017
+ tag_id = cur.fetchone()[0]
3018
+ cur.execute(
3019
+ "INSERT INTO RecordTags (record_id, tag_id) VALUES (?, ?)",
3020
+ (record_id, tag_id),
3021
+ )
3022
+ self.conn.commit()
3023
+ return record_id
3024
+
3025
+ def get_tags_for_record(self, record_id):
3026
+ cur = self.conn.cursor()
3027
+ cur.execute(
3028
+ """
3029
+ SELECT Tags.name FROM Tags
3030
+ JOIN RecordTags ON Tags.id = RecordTags.tag_id
3031
+ WHERE RecordTags.record_id = ?
3032
+ """,
3033
+ (record_id,),
3034
+ )
3035
+ return [row[0] for row in cur.fetchall()]
3036
+
3037
+ def populate_urgency_from_record(self, record: dict):
3038
+ if record["itemtype"] not in ["^", "~"]:
3039
+ log_msg(f"skipping urgency for {record = }")
3040
+ return
3041
+ record_id = record["id"]
3042
+ pinned = self.is_pinned(record_id)
3043
+ # log_msg(f"{record_id = }, {pinned = }, {record = }")
3044
+ now_seconds = utc_now_to_seconds()
3045
+ modified_seconds = dt_str_to_seconds(record["modified"])
3046
+ extent_seconds = td_str_to_seconds(record.get("extent", "0m"))
3047
+ # notice_seconds will be 0 in the absence of notice
3048
+ notice_seconds = td_str_to_seconds(record.get("notice", "0m"))
3049
+ rruleset = record.get("rruleset", "")
3050
+ jobs = json.loads(record.get("jobs", "[]"))
3051
+ subject = record["subject"]
3052
+ # priority_map = self.env.config.urgency.priority.model_dump()
3053
+ priority_level = record.get("priority", None)
3054
+ # priority = priority_map.get(priority_level, 0)
3055
+ description = bool(record.get("description", ""))
3056
+
3057
+ # Try to parse due from first RDATE in rruleset
3058
+ due_seconds = None
3059
+ if rruleset.startswith("RDATE:"):
3060
+ due_str = rruleset.split(":", 1)[1].split(",")[0]
3061
+ try:
+ if "T" in due_str:
+ # 'Z' marks UTC; attach tzinfo so .timestamp() does not
+ # interpret the value as local time
+ dt = datetime.strptime(due_str.strip(), "%Y%m%dT%H%MZ").replace(
+ tzinfo=tz.UTC
+ )
+ else:
+ dt = datetime.strptime(due_str.strip(), "%Y%m%d")
+ due_seconds = round(dt.timestamp())
3067
+ except Exception as e:
3068
+ log_msg(f"Invalid RDATE value: {due_str}\n{e}")
3069
+ if due_seconds and not notice_seconds:
3070
+ # No explicit @b notice: default notice_seconds to due_seconds so
+ # the hide test below (due - notice > now) never hides the task.
3072
+ notice_seconds = due_seconds
3073
+
3074
+ self.cursor.execute("DELETE FROM Urgency WHERE record_id = ?", (record_id,))
3075
+
3076
+ # Handle jobs if present
3077
+ if jobs:
3078
+ for job in jobs:
3079
+ status = job.get("status", "")
3080
+ if status != "available":
3081
+ continue
3082
+ job_id = job.get("id")
3083
+ subject = job.get("display_subject", subject)
3084
+
3085
+ job_due = due_seconds
3086
+ if job_due:
3087
+ b = td_str_to_seconds(job.get("b", "0m"))
3088
+ s = td_str_to_seconds(job.get("s", "0m"))
3089
+ if b:
3090
+ hide = job_due - b > now_seconds
3091
+ if hide:
3092
+ continue
3093
+ job_due += s
3094
+
3095
+ job_extent = td_str_to_seconds(job.get("e", "0m"))
3096
+ blocking = job.get("blocking") # assume already computed elsewhere
3097
+
3098
+ urgency, color, weights = self.compute_urgency.from_args_and_weights(
3099
+ now=now_seconds,
3100
+ modified=modified_seconds,
3101
+ due=job_due,
3102
+ extent=job_extent,
3103
+ priority_level=priority_level,
3104
+ blocking=blocking,
3105
+ description=description,
3106
+ jobs=True,
3107
+ pinned=pinned,
3108
+ )
3109
+
3110
+ self.cursor.execute(
3111
+ """
3112
+ INSERT INTO Urgency (record_id, job_id, subject, urgency, color, status, weights)
3113
+ VALUES (?, ?, ?, ?, ?, ?, ?)
3114
+ """,
3115
+ (
3116
+ record_id,
3117
+ job_id,
3118
+ subject,
3119
+ urgency,
3120
+ color,
3121
+ status,
3122
+ json.dumps(weights),
3123
+ ),
3124
+ )
3125
+
3126
+ else:
3127
+ hide = (
3128
+ due_seconds
3129
+ and notice_seconds
3130
+ and due_seconds - notice_seconds > now_seconds
3131
+ )
3132
+ if not hide:
3133
+ urgency, color, weights = self.compute_urgency.from_args_and_weights(
3134
+ now=now_seconds,
3135
+ modified=modified_seconds,
3136
+ due=due_seconds,
3137
+ extent=extent_seconds,
3138
+ priority_level=priority_level,
3139
+ description=description,
3140
+ jobs=False,
3141
+ pinned=pinned,
3142
+ )
3143
+
3144
+ self.cursor.execute(
3145
+ """
3146
+ INSERT INTO Urgency (record_id, job_id, subject, urgency, color, status, weights)
3147
+ VALUES (?, ?, ?, ?, ?, ?, ?)
3148
+ """,
3149
+ (
3150
+ record_id,
3151
+ None,
3152
+ subject,
3153
+ urgency,
3154
+ color,
3155
+ record.get("status", "next"),
3156
+ json.dumps(weights),
3157
+ ),
3158
+ )
3159
+
3160
+ self.conn.commit()
3161
+
3162
+ def populate_all_urgency(self):
3163
+ self.cursor.execute("DELETE FROM Urgency")
3164
+ tasks = self.get_all_tasks()
3165
+ for task in tasks:
3166
+ # log_msg(f"adding to urgency: {task['itemtype'] = }, {task = }")
3167
+ self.populate_urgency_from_record(task)
3168
+ self.conn.commit()
3169
+
3170
+ def update_urgency(self, urgency_id: int):
3171
+ """
3172
+ Recalculate urgency score for a given entry using only fields in the Urgency table.
3173
+ """
3174
+ self.cursor.execute("SELECT urgency_id FROM ActiveUrgency WHERE id = 1")
3175
+ row = self.cursor.fetchone()
3176
+ active_id = row[0] if row else None
3177
+
3178
+ self.cursor.execute(
3179
+ """
3180
+ SELECT id, touched, status FROM Urgency WHERE id = ?
3181
+ """,
3182
+ (urgency_id,),
3183
+ )
3184
+ row = self.cursor.fetchone()
3185
+ if not row:
3186
+ return # skip nonexistent
3187
+
3188
+ urgency_id, touched_ts, status = row
3189
+ # "time" imported from datetime is the time-of-day class, not the
+ # time module, so time.time() would raise AttributeError
+ now_ts = round(datetime.now().timestamp())
3190
+
3191
+ # Example scoring
3192
+ age_days = (now_ts - touched_ts) / 86400 if touched_ts else 0
3193
+ active_bonus = 10.0 if urgency_id == active_id else 0.0
3194
+ status_weight = {
3195
+ "next": 5.0,
3196
+ "scheduled": 2.0,
3197
+ "waiting": -1.0,
3198
+ "someday": -5.0,
3199
+ }.get(status, 0.0)
3200
+
3201
+ score = age_days + active_bonus + status_weight
3202
+
3203
+ self.cursor.execute(
3204
+ """
3205
+ UPDATE Urgency SET urgency = ? WHERE id = ?
3206
+ """,
3207
+ (score, urgency_id),
3208
+ )
3209
+ self.conn.commit()
3210
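+
+ # Scoring example (hypothetical): a 'next' task touched 3 days ago
+ # that is also the active entry scores 3.0 + 10.0 + 5.0 = 18.0.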
+
3211
+ def update_all_urgencies(self):
3212
+ self.cursor.execute("SELECT id FROM Urgency")
3213
+ for (urgency_id,) in self.cursor.fetchall():
3214
+ self.update_urgency(urgency_id)
3215
+
3216
+ def get_all(self):
3217
+ cur = self.conn.cursor()
3218
+ cur.execute("SELECT * FROM Records")
3219
+ return cur.fetchall()
3220
+
3221
+ def get_record(self, record_id):
3222
+ cur = self.conn.cursor()
3223
+ cur.execute("SELECT * FROM Records WHERE id = ?", (record_id,))
3224
+ return cur.fetchone()
3225
+
3226
+ def get_jobs_for_record(self, record_id):
+ # Jobs are stored as a JSON column on Records (there is no separate
+ # Jobs table), so fetch and decode that column.
+ cur = self.conn.cursor()
+ cur.execute("SELECT jobs FROM Records WHERE id = ?", (record_id,))
+ row = cur.fetchone()
+ return _parse_jobs_json(row[0]) if row and row[0] else []
3230
+
3231
+ def get_tagged(self, tag):
3232
+ cur = self.conn.cursor()
3233
+ cur.execute(
3234
+ """
3235
+ SELECT Records.* FROM Records
3236
+ JOIN RecordTags ON Records.id = RecordTags.record_id
3237
+ JOIN Tags ON Tags.id = RecordTags.tag_id
3238
+ WHERE Tags.name = ?
3239
+ """,
3240
+ (tag,),
3241
+ )
3242
+ return cur.fetchall()
3243
+
3244
+ def delete_record(self, record_id):
3245
+ cur = self.conn.cursor()
3246
+ cur.execute("DELETE FROM Records WHERE id = ?", (record_id,))
3247
+ self.conn.commit()
3248
+
3249
+ def count_records(self):
3250
+ cur = self.conn.cursor()
3251
+ cur.execute("SELECT COUNT(*) FROM Records")
3252
+ return cur.fetchone()[0]
3253
+
3254
+ def rebuild_busyweeks_from_source(self):
3255
+ """
3256
+ Aggregate all BusyWeeksFromDateTimes → BusyWeeks,
3257
+ collapsing to 35-slot weekly maps:
3258
+ (7 days × [1 all-day + 4 × 6-hour blocks]).
3259
+
3260
+ Ternary encoding:
3261
+ 0 = free
3262
+ 1 = busy
3263
+ 2 = conflict
3264
+ """
3265
+
3266
+ self.cursor.execute("SELECT DISTINCT year_week FROM BusyWeeksFromDateTimes")
3267
+ weeks = [row[0] for row in self.cursor.fetchall()]
3268
+ if not weeks:
3269
+ print("⚠️ No data to aggregate.")
3270
+ return
3271
+
3272
+ print(f"Aggregating {len(weeks)} week(s)...")
3273
+
3274
+ for yw in weeks:
3275
+ # --- Gather all event arrays for this week
3276
+ self.cursor.execute(
3277
+ "SELECT busybits FROM BusyWeeksFromDateTimes WHERE year_week = ?",
3278
+ (yw,),
3279
+ )
3280
+ blobs = [
3281
+ np.frombuffer(row[0], dtype=np.uint8) for row in self.cursor.fetchall()
3282
+ ]
3283
+ if not blobs:
3284
+ continue
3285
+
3286
+ n = len(blobs[0])
3287
+ if any(arr.size != n for arr in blobs):
3288
+ print(f"⚠️ Skipping {yw}: inconsistent array sizes")
3289
+ continue
3290
+
3291
+ # Stack vertically -> shape (num_events, 672)
3292
+ stack = np.vstack(blobs)
3293
+
3294
+ # Count per slot
3295
+ counts = stack.sum(axis=0)
3296
+
3297
+ # Collapse fine bits into ternary (0 free / 1 busy / 2 conflict)
3298
+ merged = np.where(counts >= 2, 2, np.where(counts >= 1, 1, 0)).astype(
3299
+ np.uint8
3300
+ )
3301
+
3302
+ # Reduce 672 fine bits → 35 coarse blocks (7 × [1+4])
3303
+ merged = _reduce_to_35_slots(merged)
3304
+
3305
+
3308
+ bits_str = "".join(str(int(x)) for x in merged)
3309
+ self.cursor.execute(
3310
+ """
3311
+ INSERT INTO BusyWeeks (year_week, busybits)
3312
+ VALUES (?, ?)
3313
+ ON CONFLICT(year_week)
3314
+ DO UPDATE SET busybits = excluded.busybits
3315
+ """,
3316
+ (yw, bits_str),
3317
+ )
3318
+
3319
+ self.conn.commit()
3320
+ print("✅ BusyWeeks aggregation complete.")
3321
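+
+ # Collapse example: per-slot counts [0, 1, 3] map to ternary
+ # [0, 1, 2] (free, busy, conflict) before the 35-slot reduction.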
+
3322
+ def show_busy_week(self, year_week: str):
3323
+ """
3324
+ Display the 7×96 busy/conflict map for a given ISO year-week.
3325
+
3326
+ Reads from BusyWeeks, decodes the blob, and prints 7 lines:
3327
+ - one per weekday (Mon → Sun)
3328
+ - each line shows 96 characters (15-min slots)
3329
+ 0 = free, 1 = busy, 2 = conflict
3330
+
3331
+ Example:
3332
+ Mon 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
3333
+ Tue 000000000000111100000000...
3334
+ ...
3335
+ """
3336
+ self.cursor.execute(
3337
+ "SELECT busybits FROM BusyWeeks WHERE year_week = ?",
3338
+ (year_week,),
3339
+ )
3340
+ row = self.cursor.fetchone()
3341
+ if not row:
3342
+ print(f"No BusyWeeks entry for {year_week}")
3343
+ return
3344
+
3345
+ # Decode the 672-slot array
3346
+ arr = np.frombuffer(row[0], dtype=np.uint8)
3347
+ if arr.size != 672:
3348
+ print(f"Unexpected busybits length: {arr.size}")
3349
+ return
3350
+
3351
+ # Split into 7 days × 96 slots
3352
+ days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
3353
+ slots_per_day = 96
3354
+
3355
+ print(f"🗓 Busy/conflict map for {year_week}\n")
3356
+ for i, day in enumerate(days):
3357
+ start = i * slots_per_day
3358
+ end = start + slots_per_day
3359
+ line = "".join(str(x) for x in arr[start:end])
3360
+ print(f"{day:<4}{line}")
3361
+
3362
+ def show_busy_week_pretty(self, year_week: str):
3363
+ """
3364
+ Display a 7×96 busy/conflict map for a given ISO year-week with color and hour markers.
3365
+ 0 = free, 1 = busy, 2 = conflict (colored red).
3366
+
3367
+ Uses 15-min resolution; 96 slots per day.
3368
+ """
3369
+ console = Console()
3370
+
3371
+ self.cursor.execute(
3372
+ "SELECT busybits FROM BusyWeeks WHERE year_week = ?",
3373
+ (year_week,),
3374
+ )
3375
+ row = self.cursor.fetchone()
3376
+ if not row:
3377
+ console.print(f"[red]No BusyWeeks entry for {year_week}[/red]")
3378
+ return
3379
+
3380
+ arr = np.frombuffer(row[0], dtype=np.uint8)
3381
+ if arr.size != 672:
3382
+ console.print(f"[red]Unexpected busybits length: {arr.size}[/red]")
3383
+ return
3384
+
3385
+ days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
3386
+ slots_per_day = 96 # 96 x 15min = 24h
3387
+ hours = [f"{h:02d}" for h in range(24)]
3388
+
3389
+ # Header row: hour markers
3390
+ header = " " # spacing before first hour
3391
+ for h in hours:
3392
+ header += h + " " * 3 # one char per 15 min slot
3393
+ console.print(f"[bold cyan]🗓 Busy/conflict map for {year_week}[/bold cyan]\n")
3394
+ console.print(header)
3395
+
3396
+ for i, day in enumerate(days):
3397
+ start = i * slots_per_day
3398
+ end = start + slots_per_day
3399
+ line_bits = arr[start:end]
3400
+
3401
+ text_line = Text()
3402
+ for bit in line_bits:
3403
+ if bit == 0:
3404
+ text_line.append("·", style="dim") # free
3405
+ elif bit == 1:
3406
+ text_line.append("█", style="yellow") # busy
3407
+ elif bit == 2:
3408
+ text_line.append("█", style="bold red") # conflict
3409
+
3410
+ console.print(f"{day:<4}{text_line}")
3411
+
3412
+ def get_busy_bits_for_week(self, year_week: str) -> list[int]:
3413
+ """
3414
+ Return a list of 35 ternary busy bits (0=free, 1=busy, 2=conflict)
3415
+ for the given ISO year-week string (e.g. '2025-41').
3416
+ """
3417
+ self.cursor.execute(
3418
+ "SELECT busybits FROM BusyWeeks WHERE year_week = ?", (year_week,)
3419
+ )
3420
+ row = self.cursor.fetchone()
3421
+ if not row:
3422
+ return [0] * 35
3423
+
3424
+ bits_str = row[0]
3425
+ if isinstance(bits_str, bytes):
3426
+ bits_str = bits_str.decode("utf-8")
3427
+
3428
+ bits = [int(ch) for ch in bits_str if ch in "012"]
3429
+ if len(bits) != 35:
3430
+ bits = (bits + [0] * 35)[:35]
3431
+ return bits
3432
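+
+ # Layout sketch: 35 = 7 days x (1 all-day slot + 4 six-hour blocks).
+ # A Monday busy only 09:00-10:00 would read [0, 0, 1, 0, 0] for its
+ # five entries: all-day, 00-06, 06-12, 12-18, 18-24.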
+
3433
+ def move_bin(self, bin_name: str, new_parent_name: str) -> bool:
3434
+ """
3435
+ Move a bin under a new parent bin.
3436
+
3437
+ Example:
3438
+ move_bin("whatever", "journal")
3439
+
3440
+ Ensures both bins exist, removes any previous parent link,
3441
+ and inserts a new (bin_id → new_parent_id) link.
3442
+ Prevents cycles and self-parenting.
3443
+ """
3444
+ try:
3445
+ # Ensure the root/unlinked bins exist first
3446
+ root_id, unlinked_id = self.ensure_system_bins()
3447
+
3448
+ # Resolve both bin IDs (creating them if needed)
3449
+ bin_id = self.ensure_bin_exists(bin_name)
3450
+ new_parent_id = self.ensure_bin_exists(new_parent_name)
3451
+
3452
+ # ⚡ Efficiency check: prevent self-parenting before DB recursion
3453
+ if bin_id == new_parent_id:
3454
+ raise ValueError(f"Cannot move {bin_name!r} under itself.")
3455
+
3456
+ # 🌀 Recursive acyclicity check
3457
+ if self.is_descendant(bin_id, new_parent_id):
3458
+ raise ValueError(
3459
+ f"Cannot move {bin_name!r} under {new_parent_name!r}: "
3460
+ "would create a cycle."
3461
+ )
3462
+
3463
+ # Remove any existing parent link(s)
3464
+ self.cursor.execute("DELETE FROM BinLinks WHERE bin_id = ?", (bin_id,))
3465
+
3466
+ # Insert the new parent link
3467
+ self.cursor.execute(
3468
+ """
3469
+ INSERT OR REPLACE INTO BinLinks (bin_id, container_id)
3470
+ VALUES (?, ?)
3471
+ """,
3472
+ (bin_id, new_parent_id),
3473
+ )
3474
+
3475
+ self.conn.commit()
3476
+ print(f"[move_bin] Moved {bin_name!r} → {new_parent_name!r}")
3477
+ return True
3478
+
3479
+ except Exception as e:
3480
+ print(f"[move_bin] Error moving {bin_name!r} → {new_parent_name!r}: {e}")
3481
+ return False
3482
+
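+ # Usage sketch (editor's note, not part of the release; bin names are
+ # hypothetical and `dbm` is a DatabaseManager instance):
+ # dbm.move_bin("quotations", "library") # True: re-parents the bin
+ # dbm.move_bin("library", "quotations") # False: would create a cycle
+ # dbm.move_bin("library", "library") # False: self-parenting is rejected
+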
3483
+ def is_descendant(self, ancestor_id: int, candidate_id: int) -> bool:
3484
+ """
3485
+ Return True if candidate_id is a descendant of ancestor_id.
3486
+ """
3487
+ self.cursor.execute(
3488
+ """
3489
+ WITH RECURSIVE descendants(id) AS (
3490
+ SELECT bin_id FROM BinLinks WHERE container_id = ?
3491
+ UNION
3492
+ SELECT BinLinks.bin_id
3493
+ FROM BinLinks JOIN descendants ON BinLinks.container_id = descendants.id
3494
+ )
3495
+ SELECT 1 FROM descendants WHERE id = ? LIMIT 1
3496
+ """,
3497
+ (ancestor_id, candidate_id),
3498
+ )
3499
+ return self.cursor.fetchone() is not None
3500
+
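+ # Illustrative sketch (editor's addition): the recursive CTE above expressed
+ # as a breadth-first walk in Python; equivalent logic, hypothetical name.
+ def _example_is_descendant_py(self, ancestor_id: int, candidate_id: int) -> bool:
+ children: dict[int, list[int]] = defaultdict(list)
+ for bid, cid in self.cursor.execute("SELECT bin_id, container_id FROM BinLinks"):
+ if cid is not None:
+ children[cid].append(bid)
+ queue = deque(children[ancestor_id]) # direct children seed the search
+ seen: set[int] = set()
+ while queue:
+ node = queue.popleft()
+ if node == candidate_id:
+ return True
+ if node not in seen:
+ seen.add(node)
+ queue.extend(children[node])
+ return False
+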
3501
+ def ensure_bin_exists(self, name: str) -> int:
3502
+ disp = (name or "").strip()
3503
+ if not disp:
3504
+ raise ValueError("Bin name must be non-empty")
3505
+
3506
+ self.cursor.execute(
3507
+ "SELECT id FROM Bins WHERE name = ? COLLATE NOCASE", (disp,)
3508
+ )
3509
+ row = self.cursor.fetchone()
3510
+ if row:
3511
+ return row[0]
3512
+
3513
+ self.cursor.execute("INSERT INTO Bins (name) VALUES (?)", (disp,))
3514
+ self.conn.commit()
3515
+ bid = self.cursor.lastrowid
3516
+
3517
+ # 👇 cache: record the creation with unknown parent (None) for now
3518
+ if hasattr(self, "bin_cache"):
3519
+ self.bin_cache.on_create(bid, disp, None)
3520
+
3521
+ return bid
3522
+
3523
+ def ensure_bin_path(self, path: str) -> int:
+ """
+ Ensure the given bin path exists and return the final (leaf) bin_id.
+ Example:
+ "personal/quotations" creates:
+ - personal → root
+ - quotations → personal
+ A single-level path is anchored under 'unlinked'.
+ """
3557
+ root_id, unlinked_id = self.ensure_system_bins()
3558
+ parts = [p.strip() for p in path.split("/") if p.strip()]
3559
+ if not parts:
3560
+ return root_id
3561
+
3562
+ parent_id = root_id
3563
+ if len(parts) == 1:
3564
+ parent_id = unlinked_id # single bin goes under 'unlinked'
3565
+
3566
+ for name in parts:
3567
+ bin_id = self.ensure_bin_exists(name)
3568
+ self.cursor.execute(
3569
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, ?)",
3570
+ (bin_id, parent_id),
3571
+ )
3572
+
3573
+ # 👇 cache: reflect the *actual* parent from DB after the insert/ignore
3574
+ if hasattr(self, "bin_cache"):
3575
+ self.cursor.execute(
3576
+ "SELECT container_id FROM BinLinks WHERE bin_id=?", (bin_id,)
3577
+ )
3578
+ row = self.cursor.fetchone()
3579
+ eff_parent = row[0] if row else None
3580
+ self.bin_cache.on_link(bin_id, eff_parent)
3581
+
3582
+ parent_id = bin_id
3583
+
3584
+ self.conn.commit()
3585
+ return parent_id
3586
+
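+ # Usage sketch (editor's note; the paths are hypothetical):
+ # leaf = dbm.ensure_bin_path("library/poetry") # ensures both levels, returns leaf id
+ # leaf = dbm.ensure_bin_path("inbox") # single level, anchored under 'unlinked'
+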
3587
+ def ensure_system_bins(self) -> tuple[int, int]:
3588
+ root_id = self.ensure_bin_exists("root")
3589
+ unlinked_id = self.ensure_bin_exists("unlinked")
3590
+
3591
+ # link unlinked → root (if not already)
3592
+ self.cursor.execute(
3593
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, ?)",
3594
+ (unlinked_id, root_id),
3595
+ )
3596
+ # 👇 cache: reflect current effective parent
3597
+ if hasattr(self, "bin_cache"):
3598
+ self.bin_cache.on_link(unlinked_id, root_id)
3599
+
3600
+ # Ensure root has no parent (NULL)
3601
+ self.cursor.execute(
3602
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, NULL)",
3603
+ (root_id,),
3604
+ )
3605
+ # 👇 cache: reflect root’s parent = None
3606
+ if hasattr(self, "bin_cache"):
3607
+ self.bin_cache.on_link(root_id, None)
3608
+
3609
+ self.conn.commit()
3610
+ return root_id, unlinked_id
3611
+
3612
+ def link_record_to_bin_path(self, record_id: int, path: str) -> None:
3613
+ """
3614
+ Ensure the bin path exists and link the record to its leaf bin.
3615
+ Example:
3616
+ record_id = 42, path = "personal/quotations"
3617
+ → ensures bins, links 42 → quotations
3618
+ """
3619
+ leaf_bin_id = self.ensure_bin_path(path)
3620
+
3621
+ self.cursor.execute(
3622
+ """
3623
+ INSERT OR IGNORE INTO ReminderLinks (reminder_id, bin_id)
3624
+ VALUES (?, ?)
3625
+ """,
3626
+ (record_id, leaf_bin_id),
3627
+ )
3628
+ self.conn.commit()
3629
+
3630
+ # === Bin access helpers ===
3631
+ def get_bin_name(self, bin_id: int) -> str:
3632
+ """Return bin name by id."""
3633
+ self.cursor.execute("SELECT name FROM Bins WHERE id=?", (bin_id,))
3634
+ row = self.cursor.fetchone()
3635
+ return row[0] if row else f"[unknown #{bin_id}]"
3636
+
3637
+ def get_parent_bin(self, bin_id: int) -> dict | None:
3638
+ """Return parent bin as {'id': ..., 'name': ...} or None if root."""
3639
+ self.cursor.execute(
3640
+ """
3641
+ SELECT b2.id, b2.name
3642
+ FROM BinLinks bl
3643
+ JOIN Bins b2 ON bl.container_id = b2.id
3644
+ WHERE bl.bin_id = ?
3645
+ """,
3646
+ (bin_id,),
3647
+ )
3648
+ row = self.cursor.fetchone()
3649
+ return {"id": row[0], "name": row[1]} if row else None
3650
+
3651
+ def get_subbins(self, bin_id: int) -> list[dict]:
3652
+ """Return bins contained in this bin, with counts of subbins/reminders."""
3653
+ self.cursor.execute(
3654
+ """
3655
+ SELECT b.id, b.name,
3656
+ (SELECT COUNT(*) FROM BinLinks sub WHERE sub.container_id = b.id) AS subbins,
3657
+ (SELECT COUNT(*) FROM ReminderLinks rl WHERE rl.bin_id = b.id) AS reminders
3658
+ FROM BinLinks bl
3659
+ JOIN Bins b ON bl.bin_id = b.id
3660
+ WHERE bl.container_id = ?
3661
+ ORDER BY b.name COLLATE NOCASE
3662
+ """,
3663
+ (bin_id,),
3664
+ )
3665
+ return [
3666
+ {"id": row[0], "name": row[1], "subbins": row[2], "reminders": row[3]}
3667
+ for row in self.cursor.fetchall()
3668
+ ]
3669
+
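+ # Usage sketch (editor's note): rendering the rows returned above; the
+ # formatting is purely illustrative.
+ # for b in dbm.get_subbins(root_id):
+ # print(f"{b['name']:<20} {b['subbins']:>3} bins {b['reminders']:>4} reminders")
+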
3670
+ def get_reminders_in_bin(self, bin_id: int) -> list[dict]:
3671
+ """Return reminders linked to this bin."""
3672
+ self.cursor.execute(
3673
+ """
3674
+ SELECT r.id, r.subject, r.itemtype
3675
+ FROM ReminderLinks rl
3676
+ JOIN Records r ON rl.reminder_id = r.id
3677
+ WHERE rl.bin_id = ?
3678
+ ORDER BY r.subject COLLATE NOCASE
3679
+ """,
3680
+ (bin_id,),
3681
+ )
3682
+ return [
3683
+ {"id": row[0], "subject": row[1], "itemtype": row[2]}
3684
+ for row in self.cursor.fetchall()
3685
+ ]
3686
+
3687
+ # ---------- New, non-colliding helpers ----------
3688
+
3689
+ def ensure_root_exists(self) -> int:
3690
+ """Return id for 'root' (creating/anchoring it if needed)."""
3691
+ root_id, _ = self.ensure_system_bins()
3692
+ return root_id
3693
+
3694
+ def ensure_root_children(self, names: list[str]) -> dict[str, int]:
3695
+ """
3696
+ Ensure lowercased children live directly under root; returns {name: id}.
3697
+ Idempotent; re-anchors any child that is mis-parented.
3698
+ """
3699
+ root_id = self.ensure_root_exists()
3700
+ out: dict[str, int] = {}
3701
+ for name in names:
3702
+ nm = (name or "").strip().lower() # ← roots are canonical lowercase
3703
+ cid = self.ensure_bin_exists(nm)
3704
+
3705
+ parent = self.get_parent_bin(cid) # {'id','name'} or None
3706
+ if not parent or parent["name"].lower() != "root":
3707
+ self.move_bin(nm, "root") # cycle-safe re-anchor
3708
+
3709
+ self.cursor.execute(
3710
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, ?)",
3711
+ (cid, root_id),
3712
+ )
3713
+ out[nm] = cid
3714
+
3715
+ self.conn.commit()
3716
+ return out
3717
+
3718
+ def ensure_bin(
3719
+ self, name: str, parent_id: int | None = None, *, allow_reparent: bool = False
3720
+ ) -> int:
3721
+ nm = (name or "").strip()
3722
+ if not nm:
3723
+ raise ValueError("Bin name must be non-empty")
3724
+ bin_id = self.ensure_bin_exists(nm)
3725
+ if parent_id is None:
3726
+ parent_id = self.ensure_root_exists()
3727
+
3728
+ parent = self.get_parent_bin(bin_id)
3729
+ if parent is None:
3730
+ # no parent yet — just insert
3731
+ self.cursor.execute(
3732
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, ?)",
3733
+ (bin_id, parent_id),
3734
+ )
3735
+ self.conn.commit()
3736
+ else:
3737
+ # already has a parent
3738
+ if allow_reparent and parent["id"] != parent_id:
3739
+ # figure out parent's name for move_bin(); cheapest is to query it
3740
+ desired_parent_name = self.get_bin_name(parent_id)
3741
+ self.move_bin(nm, desired_parent_name)
3742
+
3743
+ return bin_id
3744
+
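+ # Usage sketch (editor's note; names and ids are hypothetical):
+ # bid = dbm.ensure_bin("drafts") # new bin is anchored under root
+ # bid = dbm.ensure_bin("drafts", parent_id=proj_id) # existing parent is kept
+ # bid = dbm.ensure_bin("drafts", parent_id=proj_id, allow_reparent=True) # re-parents
+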
3745
+ def get_or_create_tag_bin(self, tag_name: str) -> int:
3746
+ """
3747
+ Ensure 'tags' under root, and a child bin 'tags:<name>' that lives under 'tags'.
3748
+ """
3749
+ canon = (tag_name or "").strip().lower()
3750
+ if not canon:
3751
+ raise ValueError("Tag name must be non-empty")
3752
+
3753
+ parents = self.ensure_root_children(["tags"])
3754
+ tags_parent_id = parents["tags"]
3755
+
3756
+ tag_bin_name = f"tags:{canon}"
3757
+ bid = self.ensure_bin_exists(tag_bin_name)
3758
+
3759
+ # Re-anchor under 'tags' if needed
3760
+ parent = self.get_parent_bin(bid)
3761
+ if not parent or parent["name"].lower() != "tags":
3762
+ self.move_bin(tag_bin_name, "tags")
3763
+
3764
+ # Ensure a link row exists (no-op if it already does)
3765
+ self.cursor.execute(
3766
+ "INSERT OR IGNORE INTO BinLinks (bin_id, container_id) VALUES (?, ?)",
3767
+ (bid, tags_parent_id),
3768
+ )
3769
+ self.conn.commit()
3770
+ return bid
3771
+
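+ # Usage sketch (editor's note; the tag is invented):
+ # bid = dbm.get_or_create_tag_bin("Blue") # canonicalizes to 'tags:blue'
+ # dbm.link_record_to_bin(record_id, bid)
+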
3772
+ def link_record_to_bin(self, record_id: int, bin_id: int) -> None:
3773
+ self.cursor.execute(
3774
+ "INSERT OR IGNORE INTO ReminderLinks(reminder_id, bin_id) VALUES (?, ?)",
3775
+ (record_id, bin_id),
3776
+ )
3777
+ self.conn.commit()
3778
+
3779
+ def unlink_record_from_bins(
3780
+ self, record_id: int, *, only_tag_bins: bool | None = None
3781
+ ) -> None:
3782
+ """
3783
+ only_tag_bins=None -> unlink ALL links for record_id
3784
+ only_tag_bins=True -> unlink only tags:*
3785
+ only_tag_bins=False -> unlink only non-tags
3786
+ """
3787
+ if only_tag_bins is None:
3788
+ self.cursor.execute(
3789
+ "DELETE FROM ReminderLinks WHERE reminder_id=?", (record_id,)
3790
+ )
3791
+ elif only_tag_bins is True:
3792
+ self.cursor.execute(
3793
+ """
3794
+ DELETE FROM ReminderLinks
3795
+ WHERE reminder_id=?
3796
+ AND bin_id IN (SELECT id FROM Bins WHERE name LIKE 'tags:%')
3797
+ """,
3798
+ (record_id,),
3799
+ )
3800
+ else:
3801
+ self.cursor.execute(
3802
+ """
3803
+ DELETE FROM ReminderLinks
3804
+ WHERE reminder_id=?
3805
+ AND bin_id NOT IN (SELECT id FROM Bins WHERE name LIKE 'tags:%')
3806
+ """,
3807
+ (record_id,),
3808
+ )
3809
+ self.conn.commit()
3810
+
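+ # Usage sketch (editor's note): the three unlink modes, with a made-up id.
+ # dbm.unlink_record_from_bins(42) # remove every link
+ # dbm.unlink_record_from_bins(42, only_tag_bins=True) # remove only 'tags:*' links
+ # dbm.unlink_record_from_bins(42, only_tag_bins=False) # remove only non-tag links
+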
3811
+ # ---- tokens → links glue (single source of truth) ----
3812
+
3813
+ def _tokens_list(self, tokens_obj) -> list[dict]:
3814
+ """Accept list or JSON string; normalize to list[dict]."""
3815
+ if tokens_obj is None:
3816
+ return []
3817
+ if isinstance(tokens_obj, str):
3818
+ try:
+ return json.loads(tokens_obj) or [] # json is imported at module top
3822
+ except Exception:
3823
+ return []
3824
+ return list(tokens_obj)
3825
+
3826
+ def _extract_tag_and_bin_names(self, item) -> tuple[list[str], list[str]]:
3827
+ """
3828
+ Read '@t <name>' and '@b <name>' from item.tokens.
3829
+ tokens are dicts; we rely on keys: t='@', k in {'t','b'}, token='@t blue'
3830
+ """
3831
+ tokens = self._tokens_list(getattr(item, "tokens", []))
3832
+ tags: list[str] = []
3833
+ bins: list[str] = []
3834
+ for t in tokens:
3835
+ if t.get("t") != "@":
3836
+ continue
3837
+ k = t.get("k")
3838
+ raw = t.get("token", "")
3839
+ value = ""
3840
+ if isinstance(raw, str) and " " in raw:
3841
+ value = raw.split(" ", 1)[1].strip()
3842
+ if not value:
3843
+ continue
3844
+ if k == "t":
3845
+ tags.append(value)
3846
+ elif k == "b":
3847
+ bins.append(value)
3848
+ return tags, bins
3849
+
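+ # Illustrative sketch (editor's note): the token shapes this helper expects;
+ # the values are invented.
+ # tokens = [
+ # {"t": "@", "k": "t", "token": "@t blue"},
+ # {"t": "@", "k": "b", "token": "@b quotations"},
+ # ]
+ # yields tags == ["blue"] and bins == ["quotations"]
+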
3850
+ def relink_bins_and_tags_for_record(
3851
+ self, record_id: int, item, *, default_parent_name: str = "unlinked"
3852
+ ) -> None:
3853
+ """
3854
+ Rebuild ReminderLinks from item:
3855
+ - Tags: use existing @t handling
3856
+ - Bins: prefer item.bin_paths (list[list[str]]); fallback to simple '@b <leaf>' tokens
3857
+ """
3858
+ # Ensure parents we depend on exist (tags + default parent)
3859
+ defaults = self.ensure_root_children(["tags", default_parent_name])
3860
+ default_parent_id = defaults[default_parent_name]
3861
+
3862
+ # ---- 1) Unlink everything for a deterministic rebuild ----
3863
+ self.unlink_record_from_bins(record_id, only_tag_bins=None)
3864
+
3865
+ # ---- 2) Tags (unchanged) ----
3866
+ tags, simple_bins = self._extract_tag_and_bin_names(item)
3869
+ for name in tags:
3870
+ bid = self.get_or_create_tag_bin(name)
3871
+ self.link_record_to_bin(record_id, bid)
3872
+
3873
+ # ---- 3) Bins via paths (preferred) ----
3874
+ bin_paths: list[list[str]] = getattr(item, "bin_paths", []) or []
3875
+ if bin_paths:
3876
+ # Uses BinPathProcessor to ensure/repair hierarchy and link the record to each leaf
3877
+ _norm_tokens, _log, _leaf_ids = self.binproc.assign_record_many(
3878
+ record_id, bin_paths
3879
+ )
3880
+ # Optional: surface _log lines somewhere (stdout/UI)
3881
+ # for line in _log: print(f"[bins] {line}")
3882
+ return # we're done; paths fully handled
3883
+
3884
+ # ---- 4) Fallback (back-compat): simple '@b <leaf>' tokens ----
3885
+ # Keep existing behavior: ensure leaf under 'unlinked' and link record.
3886
+ for name in simple_bins:
3887
+ nm = (name or "").strip()
3888
+ if not nm:
3889
+ continue
3890
+ bid = self.ensure_bin(
3891
+ nm, parent_id=default_parent_id
3892
+ ) # puts leaf under 'unlinked' if new
3893
+ self.link_record_to_bin(record_id, bid)
3894
+
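+ # Usage sketch (editor's note; `item` attributes follow the helpers above,
+ # the values are invented):
+ # item.tokens = [{"t": "@", "k": "t", "token": "@t blue"}]
+ # item.bin_paths = [["library", "poetry"]]
+ # dbm.relink_bins_and_tags_for_record(42, item) # tags linked, then paths take over
+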
3895
+ # === Bin-browser helpers (tag bins treated like ordinary bins) ===
3896
+ def get_root_bin_id(self) -> int:
3897
+ # Reuse the existing root anchor
3898
+ return self.ensure_root_exists()
3899
+
3900
+ def _make_crumb(self, bin_id: int | None):
3901
+ """Return [(id, name), ...] from root to current."""
3902
+ if bin_id is None:
3903
+ rid = self.ensure_root_exists()
3904
+ return [(rid, "root")]
3905
+ # climb using your get_parent_bin
3906
+ chain = []
3907
+ cur = bin_id
3908
+ while cur is not None:
3909
+ name = self.get_bin_name(cur)
3910
+ chain.append((cur, name))
3911
+ parent = self.get_parent_bin(cur) # {'id','name'} or None
3912
+ cur = parent["id"] if parent else None
3913
+ return list(reversed(chain)) or [(self.ensure_root_exists(), "root")]
3914
+
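+ # Usage sketch (editor's note; ids and names are invented):
+ # dbm._make_crumb(17) # [(1, 'root'), (5, 'library'), (17, 'poetry')]
+ # dbm._make_crumb(None) # [(root_id, 'root')]
+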
3915
+ def get_bin_summary(self, bin_id: int | None, *, filter_text: str | None = None):
3916
+ """
3917
+ Returns:
3918
+ children -> [ChildBinRow]
3919
+ reminders -> [ReminderRow]
3920
+ crumb -> [(id, name), ...]
3921
+ Built only from the DatabaseManager helpers defined above.
3922
+ """
3923
+ # 1) children (uses your counts + sort)
3924
+ raw_children = self.get_subbins(
3925
+ bin_id if bin_id is not None else self.get_root_bin_id()
3926
+ )
3927
+ # shape: {"id","name","subbins","reminders"}
3928
+ children = [
3929
+ ChildBinRow(
3930
+ bin_id=c["id"],
3931
+ name=c["name"],
3932
+ child_ct=c["subbins"],
3933
+ rem_ct=c["reminders"],
3934
+ )
3935
+ for c in raw_children
3936
+ ]
3937
+
3938
+ # 2) reminders (linked via ReminderLinks)
3939
+ raw_reminders = self.get_reminders_in_bin(
3940
+ bin_id if bin_id is not None else self.get_root_bin_id()
3941
+ )
3942
+ # shape: {"id","subject","itemtype"}
3943
+ reminders = [
3944
+ ReminderRow(
3945
+ record_id=r["id"],
3946
+ subject=r["subject"],
3947
+ # keep optional fields absent; view handles it
3948
+ )
3949
+ for r in raw_reminders
3950
+ ]
3951
+
3952
+ # 3) apply filter (controller-level; no new SQL)
3953
+ if filter_text:
3954
+ f = filter_text.casefold()
3955
+ children = [c for c in children if f in c.name.casefold()]
3956
+ reminders = [r for r in reminders if f in r.subject.casefold()]
3957
+
3958
+ # 4) crumb
3959
+ crumb = self._make_crumb(
3960
+ bin_id if bin_id is not None else self.get_root_bin_id()
3961
+ )
3962
+ return children, reminders, crumb
3963
+
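+ # Usage sketch (editor's note): None resolves to the root bin; the filter is
+ # a case-insensitive substring match on bin names and reminder subjects.
+ # children, reminders, crumb = dbm.get_bin_summary(None, filter_text="poe")
+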
3964
+ def get_reminder_details(self, record_id: int) -> str:
3965
+ # Minimal, safe detail using your existing schema
3966
+ row = self.cursor.execute(
3967
+ "SELECT subject, itemtype FROM Records WHERE id=?",
3968
+ (record_id,),
3969
+ ).fetchone()
3970
+ if not row:
3971
+ return "[b]Unknown reminder[/b]"
3972
+ subject, itemtype = row
3973
+ return f"[b]{subject}[/b]\n[dim]type:[/dim] {itemtype or '—'}"