tklr-dgraham 0.0.0rc22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tklr/__init__.py +0 -0
- tklr/cli/main.py +528 -0
- tklr/cli/migrate_etm_to_tklr.py +764 -0
- tklr/common.py +1296 -0
- tklr/controller.py +3635 -0
- tklr/item.py +4014 -0
- tklr/list_colors.py +234 -0
- tklr/model.py +4548 -0
- tklr/shared.py +739 -0
- tklr/sounds/alert.mp3 +0 -0
- tklr/tklr_env.py +493 -0
- tklr/use_system.py +64 -0
- tklr/versioning.py +21 -0
- tklr/view.py +3503 -0
- tklr/view_textual.css +296 -0
- tklr_dgraham-0.0.0rc22.dist-info/METADATA +814 -0
- tklr_dgraham-0.0.0rc22.dist-info/RECORD +20 -0
- tklr_dgraham-0.0.0rc22.dist-info/WHEEL +5 -0
- tklr_dgraham-0.0.0rc22.dist-info/entry_points.txt +2 -0
- tklr_dgraham-0.0.0rc22.dist-info/top_level.txt +1 -0
tklr/item.py
ADDED
@@ -0,0 +1,4014 @@
import re
from copy import deepcopy
import shutil
import json

# from dateutil.parser import parse as duparse
from dateutil.rrule import rruleset, rrulestr
from datetime import date, datetime, timedelta
from datetime import tzinfo

# from dateutil.tz import gettz
# import pytz
import textwrap
from dateutil import tz
from dateutil.tz import gettz

# from collections import defaultdict
from math import ceil

from typing import Iterable, List

from typing import Union, Optional, Tuple
from zoneinfo import ZoneInfo

# item.py
from dataclasses import dataclass
from dateutil.parser import parse as parse_dt

# from tklr.model import dt_to_dtstr
from pathlib import Path
from urllib.parse import urlparse


from .shared import (
    log_msg,
    bug_msg,
    _to_local_naive,
    print_msg,
    fmt_local_compact,
    parse_local_compact,
    fmt_utc_z,
    parse_utc_z,
    timedelta_str_to_seconds,
)
from tzlocal import get_localzone_name

local_timezone = get_localzone_name()  # e.g., "America/New_York"

JOB_PATTERN = re.compile(r"^@~ ( *)([^&]*)(?:(&.*))?")
LETTER_SET = set("abcdefghijklmnopqrstuvwxyz")  # Define once


def is_date(obj):
    if isinstance(obj, date) and not isinstance(obj, datetime):
        return True
    return False


def is_same_datetime(a: datetime, b: datetime) -> bool:
    """Return True if datetimes a and b refer to the same moment:
    - If both naive: compare directly. Same datetime or date.
    - If both aware: compare after converting to UTC.
    - Else: return False."""
    if a.tzinfo is None and b.tzinfo is None:
        return a == b
    if a.tzinfo is not None and b.tzinfo is not None:
        a_utc = a.astimezone(tz.UTC)
        b_utc = b.astimezone(tz.UTC)
        return a_utc == b_utc
    # One has tzinfo, the other doesn't — treat as non-equal (or raise)
    return False


def is_datetime(obj):
    if isinstance(obj, date) and isinstance(obj, datetime):
        return True
    return False


def _is_date_only(obj) -> bool:
    return isinstance(obj, date) and not isinstance(obj, datetime)


def _is_datetime(obj) -> bool:
    return isinstance(obj, datetime)


# --- serialization you already use elsewhere (kept explicit here) ---
def _fmt_date(d: date) -> str:
    return d.strftime("%Y%m%d")


def _fmt_naive(dt: datetime) -> str:
    # no timezone, naive
    return dt.strftime("%Y%m%dT%H%M")


def _fmt_utc_Z(dt: datetime) -> str:
    # dt must be UTC-aware
    return dt.strftime("%Y%m%dT%H%MZ")


def _local_tzname() -> str:
    # string name is sometimes handy for UI/logging
    try:
        return get_localzone_name()
    except Exception:
        return "local"


def _ensure_utc(dt: datetime) -> datetime:
    # make UTC aware
    return dt.astimezone(tz.UTC)


def _attach_zone(dt: datetime, zone) -> datetime:
    # if dt is naive, attach zone; else convert to zone
    if dt.tzinfo is None:
        return dt.replace(tzinfo=zone)
    return dt.astimezone(zone)


def _parts(s: str) -> List[str]:
    return [p for p in s.split("/") if p]


def _norm(s: str) -> str:
    return "/".join(_parts(s)).lower()


def _ordered_prefix_matches(paths: List[str], frag: str, limit: int = 24) -> List[str]:
    segs = [s.lower() for s in _parts(frag)]
    out: List[str] = []
    for p in paths:
        toks = [t.lower() for t in p.split("/")]
        if len(toks) >= len(segs) and all(
            toks[i].startswith(segs[i]) for i in range(len(segs))
        ):
            out.append(p)
            if len(out) >= limit:
                break
    out.sort(key=lambda s: (s.count("/"), s))
    return out


def _lcp(strings: List[str]) -> str:
    if not strings:
        return ""
    a, b = min(strings), max(strings)
    i = 0
    while i < len(a) and i < len(b) and a[i] == b[i]:
        i += 1
    return a[:i]


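# Illustrative usage (an annotation, not part of the released file):
# _ordered_prefix_matches and _lcp combine naturally for tab-completion over
# "/"-delimited bin paths. The sample paths below are hypothetical.
#
#     paths = ["home/bills", "home/projects", "work/projects"]
#     _ordered_prefix_matches(paths, "ho/p")   # -> ["home/projects"]
#     _lcp(["home/bills", "home/projects"])    # -> "home/" (longest common prefix)

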
def dtstr_to_compact(dt: str) -> Tuple[bool, str]:
    obj = parse_dt(dt)
    if not obj:
        return False, f"Could not parse {obj = }"

    # If the parser returns a datetime at 00:00:00, treat it as a date (your chosen convention)
    # if isinstance(obj, datetime) and obj.hour == obj.minute == obj.second == 0:
    #     return True, obj.strftime("%Y%m%d")

    if isinstance(obj, date) and not isinstance(obj, datetime):
        return True, obj.strftime("%Y%m%d")

    return True, obj.strftime("%Y%m%dT%H%M")


def local_dtstr_to_utc(dt: str) -> Tuple[bool, str]:
    obj = parse_dt(dt)
    if not obj:
        return False, f"Could not parse {obj = }"

    # If the parser returns a datetime at 00:00:00, treat it as a date (your chosen convention)
    # if isinstance(obj, datetime) and obj.hour == obj.minute == obj.second == 0:
    #     return True, obj.strftime("%Y%m%d")

    if isinstance(obj, date) and not isinstance(obj, datetime):
        return True, obj.strftime("%Y%m%d")

    return True, obj.astimezone(tz.UTC).strftime("%Y%m%dT%H%MZ")


# --- parse a possible trailing " z <tzspec>" directive ---
def _split_z_directive(text: str) -> tuple[str, str | None]:
    """
    Accepts things like:
      "2025-08-24 12:00"                  -> ("2025-08-24 12:00", None)
      "2025-08-24 12:00 z none"           -> ("2025-08-24 12:00", "none")
      "2025-08-24 12:00 z Europe/Berlin"  -> ("2025-08-24 12:00", "Europe/Berlin")
    Only splits on the *last* " z " sequence to avoid false positives in subject text.
    """
    s = text.strip()
    marker = " z "
    idx = s.rfind(marker)
    if idx == -1:
        return s, None
    main = s[:idx].strip()
    tail = s[idx + len(marker) :].strip()
    return (main or s), (tail or None)


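# Illustrative usage (an annotation, not part of the released file): because
# rfind splits on the *last* " z ", a literal "z" inside the subject text is
# left alone.
#
#     _split_z_directive("drop off z books 2025-08-24 12:00 z none")
#     # -> ("drop off z books 2025-08-24 12:00", "none")

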
# --- helpers used by do_offset / finish ---------------------------------


def td_str_to_td(s: str) -> timedelta:
    """Parse a compact td string like '1w2d3h45m10s' -> timedelta."""
    # If you already have td_str_to_td, use that instead and remove this.

    units = {"w": 7 * 24 * 3600, "d": 24 * 3600, "h": 3600, "m": 60, "s": 1}
    total = 0
    for num, unit in re.findall(r"(\d+)\s*([wdhms])", s.lower()):
        total += int(num) * units[unit]
    return timedelta(seconds=total)


def td_to_td_str(td: timedelta) -> str:
    """Turn a timedelta back into a compact string like '1w2d3h'."""
    secs = int(td.total_seconds())
    parts = []
    for label, size in (("w", 604800), ("d", 86400), ("h", 3600), ("m", 60), ("s", 1)):
        if secs >= size:
            q, secs = divmod(secs, size)
            parts.append(f"{q}{label}")
    return "".join(parts) or "0s"


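# Illustrative round trip (an annotation, not part of the released file):
# td_to_td_str normalizes to the largest units, so the round trip is
# canonical rather than verbatim.
#
#     td_str_to_td("1w2d3h")                    # -> timedelta(days=9, seconds=10800)
#     td_to_td_str(timedelta(days=9, hours=3))  # -> "1w2d3h"
#     td_to_td_str(td_str_to_td("90m"))         # -> "1h30m"

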
def _parse_o_body(body: str) -> tuple[timedelta, bool]:
    """
    Parse the body of @o. Supports:
      '@o 3d'       -> fixed interval 3 days
      '@o ~3d'      -> learning interval starting at 3 days
      '@o learn 3d' -> same as '~3d'
    Returns (td, learn).
    """
    b = body.strip().lower()
    learn = b.startswith("~") or b.startswith("learn")
    if b.startswith("~"):
        b = b[1:]
    elif learn:
        b = b[5:].strip()
    td = td_str_to_td(b)
    return td, learn


def parse_f_token(f_token):
    """
    Return (completion_dt, due_dt) from a single @f token.
    The second value may be None if not provided.
    """
    try:
        token_str = f_token["token"].split(maxsplit=1)[1]
        parts = [p.strip() for p in token_str.split(",", 1)]
        completion = parse_dt(parts[0])
        due = parse_dt(parts[1]) if len(parts) > 1 else None
        return completion, due
    except Exception:
        return None, None


def parse(dt_str: str, zone: Optional[tzinfo] = None):
    """
    User-facing parser with a trailing 'z' directive:

      <datetime>            -> aware in local tz, normalized to UTC (returns datetime)
      <datetime> z none     -> naive (no tz), as typed (returns datetime)
      <datetime> z <TZNAME> -> aware in TZNAME, normalized to UTC (returns datetime)
      <date>                -> returns date (if parsed time is 00:00:00)

    Returns: datetime (UTC or naive) or date; None on failure.
    """
    if not dt_str or not isinstance(dt_str, str):
        return None

    s = dt_str.strip()

    # Look for a trailing "z <arg>" (case-insensitive), e.g. " ... z none" or " ... z Europe/Berlin"
    m = re.search(r"\bz\s+(\S+)\s*$", s, flags=re.IGNORECASE)
    z_arg = None
    if m:
        z_arg = m.group(1)  # e.g. "none" or "Europe/Berlin"
        s = s[: m.start()].rstrip()  # remove the trailing z directive

    try:
        # Parse the main date/time text. (If you have dayfirst/yearfirst config, add it here.)
        obj = parse_dt(s)
    except Exception as e:
        log_msg(f"error: {e}, {s = }")
        return None

    # If the parser returns a datetime at 00:00:00, treat it as a date (your chosen convention)
    if isinstance(obj, datetime) and obj.hour == obj.minute == obj.second == 0:
        return obj.date()

    # If we got a pure date already, return it as-is
    if isinstance(obj, date) and not isinstance(obj, datetime):
        return obj

    # From here on, obj is a datetime
    # Case: explicit naive requested
    if z_arg and z_arg.lower() == "none":
        # Return *naive* datetime exactly as parsed (strip any tzinfo, if present)
        if obj.tzinfo is not None:
            obj = obj.astimezone(tz.UTC).replace(
                tzinfo=None
            )  # normalize then drop tzinfo
        else:
            obj = obj.replace(tzinfo=None)
        return obj

    # Otherwise: aware (the directive's zone, the zone argument, or local by default)
    if z_arg:
        zone = tz.gettz(z_arg)
        if zone is None:
            return None  # unknown timezone name
    elif zone is None:
        # default to the local machine timezone
        zone = tz.gettz(get_localzone_name())

    # Attach/convert to the chosen zone, then normalize to UTC
    if obj.tzinfo is None:
        aware = obj.replace(tzinfo=zone)
    else:
        aware = obj.astimezone(zone)

    return aware.astimezone(tz.UTC)


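# Illustrative behavior (an annotation, not part of the released file); the
# exact UTC value in the first case depends on the machine's local timezone:
#
#     parse("2025-08-24")                        # -> date(2025, 8, 24)
#     parse("2025-08-24 2:30pm z none")          # -> datetime(2025, 8, 24, 14, 30)  (naive)
#     parse("2025-08-24 14:30 z Europe/Berlin")
#     # -> datetime(2025, 8, 24, 12, 30, tzinfo=UTC)  (CEST is UTC+2 in August)

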
# def parse_pair(dt_pair_str: str) -> str:
#     """ """
#     dt_strs = [x.strip() for x in dt_pair_str.split(",")]
#     return [parse(x) for x in dt_strs]


def parse_completion_value(v: str) -> tuple[datetime | None, datetime | None]:
    """
    Parse '@f' or '&f' value text entered in *user format* (e.g. '2024-3-1 12a, 2024-3-1 10a')
    into (finished_dt, due_dt).
    """
    parts = [p.strip() for p in v.split(",")]
    completed = parse_dt(parts[0]) if parts and parts[0] else None
    due = parse_dt(parts[1]) if len(parts) > 1 and parts[1] else None
    return completed, due


def _parse_compact_dt(s: str) -> datetime:
    """
    Accepts 'YYYYMMDD' or 'YYYYMMDDTHHMM' (optionally with trailing 'Z')
    and returns a naive datetime (local) for the 'THHMM' case, or
    midnight local for date-only.
    """
    s = (s or "").strip()
    if not s:
        raise ValueError("empty datetime string")

    z = s.endswith("Z")
    if z:
        s = s[:-1]

    if "T" in s:
        # YYYYMMDDTHHMM
        return datetime.strptime(s, "%Y%m%dT%H%M")
    else:
        # YYYYMMDD -> midnight (local-naive)
        d = datetime.strptime(s, "%Y%m%d").date()
        return datetime(d.year, d.month, d.day, 0, 0, 0)


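# Illustrative usage (an annotation, not part of the released file):
#
#     _parse_compact_dt("20250824T1430")   # -> datetime(2025, 8, 24, 14, 30)
#     _parse_compact_dt("20250824T1430Z")  # same; the 'Z' is stripped, tzinfo stays None
#     _parse_compact_dt("20250824")        # -> datetime(2025, 8, 24, 0, 0)

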
class CustomJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        if isinstance(obj, timedelta):
            return str(obj)
        if isinstance(obj, set):
            return list(obj)
        if isinstance(obj, ZoneInfo):
            return obj.key
        return super().default(obj)


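# Illustrative usage (an annotation, not part of the released file): passing
# cls=CustomJSONEncoder to json.dumps serializes the extra types; note that
# set ordering in the output is unspecified.
#
#     json.dumps(
#         {"when": datetime(2025, 1, 2, 3, 4), "extent": timedelta(hours=2)},
#         cls=CustomJSONEncoder,
#     )
#     # -> '{"when": "2025-01-02T03:04:00", "extent": "2:00:00"}'

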
def dt_to_dtstr(dt_obj: Union[datetime, date]) -> str:
    """Convert a date to 'YYYYMMDD' or a datetime to 'YYYYMMDDHHMM' format."""
    if isinstance(dt_obj, date) and not isinstance(dt_obj, datetime):
        return dt_obj.strftime("%Y%m%d")
    return dt_obj.strftime("%Y%m%d%H%M")


def as_timezone(dt: datetime, timezone: ZoneInfo) -> datetime:
    if is_date(dt):
        return dt
    return dt.astimezone(timezone)


def enforce_date(dt: datetime) -> datetime:
    """
    Force dt to behave like a date (no meaningful time component).
    """
    if is_datetime(dt):
        return dt.date()
    if is_date(dt):
        return dt
    raise ValueError(f"{dt = } cannot be converted to a date ")


def localize_rule_instances(
    rule: Iterable[Union[datetime, date]],
    timezone: Union[ZoneInfo, None],
    to_localtime: bool = False,
):
    """
    Iterate over instances from a rule parsed by rrulestr.

    - Dates are yielded unchanged.
    - Naive datetimes are assigned the given timezone.
    - Aware datetimes are optionally converted to system localtime.
    """
    if timezone == "local":
        timezone = gettz(get_localzone_name())

    for dt in rule:
        if is_date(dt) or not to_localtime:
            yield dt
        else:
            # dt is a datetime
            if dt.tzinfo is None:
                if timezone is not None:
                    dt = dt.replace(tzinfo=timezone)
                else:
                    dt = dt.replace(
                        # tzinfo=tz.UTC
                        tzinfo=tz.tzlocal()
                    )  # fall back to the local timezone if timezone missing
            if to_localtime:
                dt = dt.astimezone()

            yield dt


def localize_datetime_list(
    dts: List[datetime], timezone: ZoneInfo, to_localtime: bool = False
) -> List[datetime]:
    """
    Localize a list of datetime objects.

    - Attach timezone to naive datetimes
    - Optionally convert to system local time
    - Returns a new list of timezone-aware datetimes
    """
    localized = []
    for dt in dts:
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=timezone)
        if to_localtime:
            dt = dt.astimezone()
        localized.append(dt)
    return localized


def preview_rule_instances(
    rule: rruleset,
    timezone: Union[ZoneInfo, None] = None,
    count: int = 10,
    after: Optional[Union[datetime, date]] = None,
    to_localtime: bool = False,
) -> List[Union[datetime, date]]:
    instances = []
    generator = localize_rule_instances(rule, timezone, to_localtime)

    if after is None:
        after_datetime = datetime.now().astimezone()
        after_date = date.today()
    elif is_date(after):
        after_date = after
        after_datetime = datetime(after.year, after.month, after.day).astimezone()
    else:
        after_datetime = after if after.tzinfo else after.astimezone()
        after_date = after_datetime.date()

    for dt in list(generator):
        if is_date(dt):
            if dt < after_date:
                continue
        else:
            if dt.astimezone() < after_datetime:
                continue

        instances.append(dt)
        if len(instances) >= count:
            break

    return instances


def preview_upcoming_instances(
    rule: rruleset, timezone: ZoneInfo, count: int = 10, to_localtime: bool = False
) -> List[datetime]:
    """
    Shortcut to preview the next N upcoming localized instances, starting from now.
    """
    now = datetime.now().astimezone()
    return preview_rule_instances(
        rule, timezone, count=count, after=now, to_localtime=to_localtime
    )


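# Illustrative sketch (an annotation, not part of the released file):
# previewing instances of a recurrence rule parsed with rrulestr. The rule
# string and timezone below are hypothetical.
#
#     rule = rrulestr("DTSTART:20250101T090000\nRRULE:FREQ=WEEKLY;BYDAY=MO")
#     preview_upcoming_instances(rule, ZoneInfo("US/Eastern"), count=3)
#     # -> the next three Monday-09:00 instances on or after now

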
def pp_set(s):
    return "{}" if not s else str(s)


def is_lowercase_letter(char):
    return char in LETTER_SET  # O(1) lookup


type_keys = {
    "*": "event",
    "~": "task",
    "^": "project",
    "%": "note",
    "+": "goal",
    "?": "draft",
    "x": "finished",
    # '✓': 'finished', # more a property of a task than an item type
}
common_methods = list("cdgblmnstuxz") + ["k", "#"]

repeating_methods = list("o") + [
    "r",
    "rr",
    "rc",
    "rd",  # monthdays
    "rm",  # months
    "rH",  # hours
    "rM",  # minutes
    "rE",
    "ri",
    "rs",
    "ru",
    "rW",  # week numbers
    "rw",  # week days
]

datetime_methods = list("anew+-")

task_methods = list("ofp")

job_methods = list("efhp") + [
    "~",
    "~r",
    "~j",
    "~a",
    "~b",
    "~c",
    "~d",
    "~e",
    "~f",
    "~i",
    "~l",
    "~m",
    "~p",
    "~s",
    "~u",
]

multiple_allowed = [
    "a",
    "b",
    "u",
    "r",
    "t",
    "~",
    "~r",
    "~t",
    "~a",
]

wrap_methods = ["w"]

required = {"*": ["s"], "~": [], "^": ["~"], "%": [], "?": [], "x": []}

all_keys = common_methods + datetime_methods + job_methods + repeating_methods

allowed = {
    "*": common_methods + datetime_methods + repeating_methods + wrap_methods,
    "x": common_methods + datetime_methods + task_methods + repeating_methods + ["~"],
    "~": common_methods + datetime_methods + task_methods + repeating_methods,
    "+": common_methods + datetime_methods + task_methods,
    "^": common_methods + datetime_methods + job_methods + repeating_methods,
    "%": common_methods + datetime_methods,
    "?": all_keys,
}


requires = {
    "a": ["s"],
    "n": ["s"],
    "o": ["s"],
    "+": ["s"],
    "q": ["s"],
    "-": ["rr"],
    "r": ["s"],
    "rr": ["s"],
    "~s": ["s"],
    "~a": ["s"],
    "~b": ["s"],
}


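# How these tables drive validation (an illustrative annotation, not part of
# the released file): for an event ('*'), required["*"] says @s must appear,
# allowed["*"] lists every usable @-key, and requires adds conditional needs,
# so an @a alert also demands an @s. The entries below are hypothetical.
#
#     "* lunch @a 15m: d"                        # invalid: requires["a"] == ["s"]
#     "* lunch @s 2025-08-24 12:30 @a 15m: d"    # satisfies both tables

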
class Paragraph:
    # Placeholder to preserve line breaks
    NON_PRINTING_CHAR = "\u200b"
    # Placeholder for spaces within special tokens
    PLACEHOLDER = "\u00a0"
    # Placeholder for hyphens to prevent word breaks
    NON_BREAKING_HYPHEN = "\u2011"

    def __init__(self, para: str):
        self.para = para

    def preprocess_text(self, text):
        # Regex to find "@\S" patterns and replace spaces within the pattern with PLACEHOLDER
        text = re.sub(
            r"(@\S+\s\S+)",
            lambda m: m.group(0).replace(" ", Paragraph.PLACEHOLDER),
            text,
        )
        # Replace hyphens within words with NON_BREAKING_HYPHEN
        text = re.sub(
            r"(\S)-(\S)",
            lambda m: m.group(1) + Paragraph.NON_BREAKING_HYPHEN + m.group(2),
            text,
        )
        return text

    def postprocess_text(self, text):
        text = text.replace(Paragraph.PLACEHOLDER, " ")
        text = text.replace(Paragraph.NON_BREAKING_HYPHEN, "-")
        return text

    def wrap(
        self, text: str, indent: int = 3, width: int = shutil.get_terminal_size()[0] - 3
    ):
        # Preprocess to replace spaces within specific "@\S" patterns with PLACEHOLDER
        text = self.preprocess_text(text)

        # Split text into paragraphs
        paragraphs = text.split("\n")

        # Wrap each paragraph
        wrapped_paragraphs = []
        for para in paragraphs:
            leading_whitespace = re.match(r"^\s*", para).group()
            initial_indent = leading_whitespace

            # Determine subsequent_indent based on the first non-whitespace character
            stripped_para = para.lstrip()
            if stripped_para.startswith(("^", "~", "*", "%", "?", "+")):
                subsequent_indent = initial_indent + " " * 2
            elif stripped_para.startswith(("@", "&")):
                subsequent_indent = initial_indent + " " * 3
            else:
                subsequent_indent = initial_indent + " " * indent

            wrapped = textwrap.fill(
                para,
                initial_indent="",
                subsequent_indent=subsequent_indent,
                width=width,
            )
            wrapped_paragraphs.append(wrapped)

        # Join paragraphs with newline followed by non-printing character
        wrapped_text = ("\n" + Paragraph.NON_PRINTING_CHAR).join(wrapped_paragraphs)

        # Postprocess to replace PLACEHOLDER and NON_BREAKING_HYPHEN back with spaces and hyphens
        wrapped_text = self.postprocess_text(wrapped_text)
        return wrapped_text

    @staticmethod
    def unwrap(wrapped_text):
        # Split wrapped text into paragraphs
        paragraphs = wrapped_text.split("\n" + Paragraph.NON_PRINTING_CHAR)

        # Replace newlines followed by spaces in each paragraph with a single space
        unwrapped_paragraphs = []
        for para in paragraphs:
            unwrapped = re.sub(r"\n\s*", " ", para)
            unwrapped_paragraphs.append(unwrapped)

        # Join paragraphs with original newlines
        unwrapped_text = "\n".join(unwrapped_paragraphs)

        return unwrapped_text


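# Illustrative round trip (an annotation, not part of the released file):
# wrap() records the original paragraph breaks with NON_PRINTING_CHAR so
# unwrap() can restore them while collapsing the soft line breaks it added.
#
#     p = Paragraph("")
#     wrapped = p.wrap("~ pick up groceries @d milk, eggs and day-old bread", width=30)
#     Paragraph.unwrap(wrapped)  # -> the original text (modulo collapsed runs of spaces)

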
# @dataclass
# class FinishResult:
#     new_relative_tokens: list  # tokens to persist
#     new_rruleset: str | None  # possibly None/"" if no more repeats
#     due_ts_used: int | None  # the occurrence this finish applies to
#     finished_final: bool  # True -> no more occurrences


class Item:
    token_keys = {
        "itemtype": [
            "item type",
            "character from * (event), ~ (task), ^ (project), % (note), x (finished) or ? (draft)",
            "do_itemtype",
        ],
        "subject": [
            "subject",
            "item subject. Append an '@' to add an option.",
            "do_summary",
        ],
        "s": ["scheduled", "starting date or datetime", "do_s"],
        "r": ["recurrence", "recurrence rule", "do_rrule"],
        "o": ["offset", "offset rule", "do_offset"],
        "~": ["job", "job entry", "do_job"],
        "+": ["rdate", "recurrence dates", "do_rdate"],
        "-": ["exdate", "exception dates", "do_exdate"],
        "a": ["alerts", "list of alerts", "do_alert"],
        "n": ["notice", "timeperiod", "do_notice"],
        "c": ["context", "context", "do_string"],
        "d": ["description", "item description", "do_description"],
        "e": ["extent", "timeperiod", "do_extent"],
        "f": ["finish", "completion done -> due", "do_f"],
        "g": ["goto", "url or filepath", "do_string"],
        "h": [
            "completions",
            "list of completion datetimes",
            "do_fs",
        ],
        "b": ["bin", "forward slash delimited string", "do_b"],
        "l": [
            "label",
            "label for job clone",
            "do_string",
        ],
        "m": ["mask", "string to be masked", "do_mask"],
        "p": [
            "priority",
            "priority from 1 (someday), 2 (low), 3 (medium), 4 (high) to 5 (next)",
            "do_priority",
        ],
        "w": ["wrap", "wrap before, after", "do_wrap"],
        "z": [
            "timezone",
            "a timezone entry such as 'US/Eastern' or 'Europe/Paris' or 'none' to specify a naive datetime, i.e., one without timezone information",
            "do_timezone",
        ],
        "@": ["@-key", "", "do_at"],
        "rr": [
            "repetition frequency",
            "character from (y)ear, (m)onth, (w)eek, (d)ay, (h)our, mi(n)ute. Append an '&' to add a repetition option.",
            "do_frequency",
        ],
        "ri": ["interval", "positive integer", "do_interval"],
        "rm": ["months", "list of integers in 1 ... 12", "do_months"],
        "rd": [
            "monthdays",
            "list of integers 1 ... 31, possibly prepended with a minus sign to count backwards from the end of the month",
            "do_monthdays",
        ],
        "rE": [
            "easterdays",
            "number of days before (-), on (0) or after (+) Easter",
            "do_easterdays",
        ],
        "rH": ["hours", "list of integers in 0 ... 23", "do_hours"],
        "rM": ["minutes", "list of integers in 0 ... 59", "do_minutes"],
        "rw": [
            "weekdays",
            "list from SU, MO, ..., SA, possibly prepended with a positive or negative integer",
            "do_weekdays",
        ],
        "rW": [
            "week numbers",
            "list of integers in 1, ... 53",
            "do_weeknumbers",
        ],
        "rc": ["count", "integer number of repetitions", "do_count"],
        "ru": ["until", "datetime", "do_until"],
        "rs": ["set positions", "integer", "do_setpositions"],
        "r?": ["repetition &-key", "enter &-key", "do_ampr"],
        "~~": [
            "subject",
            "do_string",
        ],
        "~a": [
            "alert",
            "list of timeperiods before job is scheduled followed by a colon and a list of commands",
            "do_alert",
        ],
        "~n": ["notice", " notice period", "do_notice"],
        "~c": ["context", " string", "do_string"],
        "~d": ["description", " string", "do_description"],
        "~e": ["extent", " timeperiod", "do_extent"],
        "~f": ["finish", " completion done -> due", "do_f"],
        "~i": ["unique id", " integer or string", "do_string"],
        "~l": ["label", " string", "do_string"],
        "~m": ["mask", "string to be masked", "do_mask"],
        "~r": [
            "id and list of requirement ids",
            "list of ids of immediate prereqs",
            "do_requires",
        ],
        "~s": [
            "scheduled",
            "timeperiod after task scheduled when job is scheduled",
            "do_duration",
        ],
        "~u": ["used time", "timeperiod: datetime", "do_usedtime"],
        "~?": ["job &-key", "enter &-key", "do_ampj"],
        "k": ["konnection", "not implemented", "do_nothing"],
        "#": ["etm record number", "not implemented", "do_nothing"],
    }

    wkd_list = ["SU", "MO", "TU", "WE", "TH", "FR", "SA"]
    wkd_str = ", ".join(wkd_list)

    freq_map = dict(
        y="YEARLY", m="MONTHLY", w="WEEKLY", d="DAILY", h="HOURLY", n="MINUTELY"
    )

    key_to_param = dict(
        i="INTERVAL",
        c="COUNT",
        s="BYSETPOS",
        u="UNTIL",
        m="BYMONTH",
        d="BYMONTHDAY",
        W="BYWEEKNO",
        w="BYWEEKDAY",
        H="BYHOUR",
        M="BYMINUTE",
        E="BYEASTER",
    )
    param_to_key = {v: k for k, v in key_to_param.items()}

    def __init__(self, *args, **kwargs):
        """
        Compatible constructor that accepts:
          - Item(entry_str)
          - Item(raw=entry_str)
          - Item(env, entry_str)
          - Item(env=env, raw=entry_str)
          - Item(entry_str, env=env)
        """
        # --- resolve arguments flexibly ---
        self.controller = kwargs.get("controller")

        env = kwargs.get("env")
        raw = kwargs.get("raw")
        self.final: bool = bool(kwargs.get("final", False))  # ← NEW

        # try positional decoding without importing the type
        a = args[0] if len(args) > 0 else None
        b = args[1] if len(args) > 1 else None

        # heuristics: strings are raw; non-strings are likely env
        if raw is None and isinstance(a, str):
            raw = a
            a = None
        if env is None and a is not None and not isinstance(a, str):
            env = a
            a = None

        if raw is None and isinstance(b, str):
            raw = b
            b = None
        if env is None and b is not None and not isinstance(b, str):
            env = b
            b = None

        # iso standard defaults
        self.datefmt = "%Y-%m-%d"
        self.timefmt = "%H:%M"

        # --- environment / config ---
        self.env = env

        # --- core parse state ---
        self.entry = ""
        self.previous_entry = ""
        self.itemtype = ""
        self.subject = ""
        self.context = ""
        self.description = ""
        self.token_map = {}
        self.parse_ok = False
        self.parse_message = ""
        self.previous_tokens = []
        self.relative_tokens = []
        self.last_result = ()
        self.bin_paths = []
        self.tokens = []
        self.messages = []
        self.validate_messages = []

        # --- schedule / tokens / jobs ---
        self.extent = ""
        self.rruleset = ""
        self.rrule_tokens = []
        self.rrule_components = []
        self.rrule_parts = []
        self.job_tokens = []
        self.token_store = None
        self.rrules = []
        self.jobs = []
        self.bins = []
        self.jobset = []
        self.priority = None
        self.alerts = []
        self.notice = ""

        # --- date/time collections (strings) ---
        self.s_kind = ""
        self.s_tz = None
        self.rdates = []
        self.exdates = []
        self.rdate_str = ""
        self.exdate_str = ""
        self.rruleset_dict = {}

        # --- DTSTART / RDATE (preserve your sentinels) ---
        self.dtstart = None
        self.dtstart_str = None  # important: keep None (not "")
        self.rdstart_str = ""

        # --- timezone defaults (match your previous working code) ---

        self.timezone = get_localzone_name()
        self.tz_str = local_timezone

        # TODO: remove these
        self.skip_token_positions = set()
        self.token_group_anchors = {}

        # --- other flags / features ---
        self.completion = None  # the completed datetime from @f entry - not a tuple
        self.completions = []  # (completed, due) tuples to update the Completions table
        self.over = ""
        self.has_f = False  # True if there is an @f to process after parsing tokens
        self.has_s = False  # True if there is an @s to process after parsing tokens

        # --- optional initial parse ---
        self.ampm = False
        self.yearfirst = True
        self.dayfirst = False
        self.history_weight = 3
        if self.env:
            self.ampm = self.env.config.ui.ampm
            self.timefmt = "%-I:%M%p" if self.ampm else "%H:%M"
            self.dayfirst = self.env.config.ui.dayfirst
            self.yearfirst = self.env.config.ui.yearfirst
            self.history_weight = self.env.config.ui.history_weight
        _yr = "%Y"
        _dm = "%d-%m" if self.dayfirst else "%m-%d"
        self.datefmt = f"{_yr}-{_dm}" if self.yearfirst else f"{_dm}-{_yr}"
        self.datetimefmt = f"{self.datefmt} {self.timefmt}"

        # print(f"{self.ampm = }, {self.yearfirst = }, {self.dayfirst = }")
        #
        # dayfirst yearfirst date      interpretation  standard
        # ======== ========= ========  ==============  ========
        # True     True      12-10-11  2012-11-10      Y-D-M ??
        # True     False     12-10-11  2011-10-12      D-M-Y EU
        # False    True      12-10-11  2012-10-11      Y-M-D ISO 8601
        # False    False     12-10-11  2011-12-10      M-D-Y US
        # dayfirst D-M else M-D
        # yearfirst first else last
        # DM = %d-%m if dayfirst else "%m-%d"
        # DMY = f"%Y-{DM}" if yearfirst else f"{DM}-%Y"

        if raw:
            self.entry = raw
            self.parse_input(raw)
            # if self.final:
            #     self.finalize_record()
            #

    def get_name_to_binpath(self) -> dict:
        if self.final or not self.controller:
            return {}
        return self.controller.get_name_to_binpath()

    def to_entry(self) -> str:
        """
        Rebuild a tklr entry string from this Item’s fields.
        """
        # --- map itemtype ---
        itemtype = self.itemtype

        # --- start with type and subject ---
        parts = [f"{itemtype} {self.subject}"]

        # --- description (optional, inline or multi-line) ---
        if self.description:
            parts.append(self.description)

        # --- scheduling tokens ---
        if getattr(self, "dtstart_str", None):
            dt = self._get_start_dt()
            if dt:
                parts.append(f"@s {self.fmt_user(dt)}")

        if getattr(self, "extent", None):
            parts.append(f"@e {self.extent}")

        if getattr(self, "rruleset", None):
            parts.append(f"@r {self.rruleset}")

        if getattr(self, "notice", None):
            parts.append(f"@n {self.notice}")

        # --- context ---
        if getattr(self, "context", None):
            parts.append(f"@c {self.context}")

        # --- jobs ---
        if getattr(self, "jobs", None) and self.jobs not in ("[]", None):
            try:
                jobs = json.loads(self.jobs)
            except Exception:
                jobs = []
            for j in jobs:
                subj = j.get("summary") or j.get("subject")
                if subj:
                    parts.append(f"@~ {subj}")

        return " ".join(parts)

    def parse_input(self, entry: str):
        """
        Parses the input string to extract tokens, then processes and validates the tokens.
        """
        # digits = "1234567890" * ceil(len(entry) / 10)

        self._tokenize(entry)
        # NOTE: _tokenize sets self.itemtype and self.subject

        message = self.validate()
        if message:
            self.parse_ok = False
            self.parse_message = message
            print(f"parse failed: {message = }")
            return

        self.mark_grouped_tokens()
        self._parse_tokens(entry)

        self.parse_ok = True
        self.previous_entry = entry
        self.previous_tokens = self.relative_tokens.copy()

        if self.final:
            self.finalize_record()

    def _remove_instance_from_plus_tokens(
        self, tokens: list[dict], tok_str: str
    ) -> bool:
        """
        Remove an entry matching tok_str (local format, e.g. '20251122T1000')
        from any @+ tokens in the list. If removal empties a @+ list,
        remove that token entirely. Return True if removal happened.
        """
        changed = False
        new_tokens = []
        for tok in tokens:
            if tok.get("k") == "+":
                body = tok["token"][2:].strip()
                parts = [p.strip() for p in body.split(",") if p.strip()]
                # filter out the matching part
                filtered = [p for p in parts if p != tok_str]
                if len(filtered) < len(parts):
                    changed = True
                    if filtered:
                        tok["token"] = "@+ " + ", ".join(filtered)
                        new_tokens.append(tok)
                    # else: drop this token entirely
                else:
                    new_tokens.append(tok)
            else:
                new_tokens.append(tok)
        if changed:
            tokens[:] = new_tokens
        return changed

    def finalize_record(self):
        """
        When the entry and token list is complete:
        1) finalize jobs, processing any &f entries and adding @f when all jobs are finished
        2) finalize_rruleset so that next instances will be available
        3) process @f entries (&f entries will have been done by finalize_jobs)

        """
        if self.has_s:
            self._set_start_dt()

        if self.collect_grouped_tokens({"r"}):
            rruleset = self.finalize_rruleset()
            bug_msg(f"got rruleset {rruleset = }")
            if rruleset:
                self.rruleset = rruleset
        elif self.rdstart_str is not None:
            # @s but not @r
            self.rruleset = self.rdstart_str

        if self.itemtype == "^":
            jobset = self.build_jobs()
            success, finalized = self.finalize_jobs(jobset)
        # rruleset is needed to get the next two occurrences
        bug_msg(f"{self.has_f = }, {self.completion = }, {self.has_s = }")

        if self.has_f:
            """
            if has_o, get learn, td from do_offset,
                if learn, compute new td as weighted average of td and @f - @s
                change @s to @f + td
                remove @f
                do not mark as finished, x, offsets are never finished
            """
            due, next = self._get_first_two_occurrences()
            bug_msg(f"{self.subject = }, {self.completion = }, {due = }, {next = }")
            # if self._has_o():
            #     bug_msg(f"offset {self._get_o() = }")
            bug_msg(f"old {self.rruleset = }, {self.rdstart_str = }")
            self.finish()
            self.has_f = False
            bug_msg(f"new {self.rruleset = }, {self.rdstart_str = }")

        self.tokens = self._strip_positions(self.relative_tokens)
        bug_msg(f"{self.tokens = }")

    def validate(self):
        self.validate_messages = []

        def fmt_error(message: str):
            # return [x.strip() for x in message.split(",")]
            self.validate_messages.append(message)
            return message

        errors = []

        if len(self.entry.strip()) < 1 or len(self.relative_tokens) < 1:
            # nothing to validate without itemtype and subject
            return fmt_error("""\
A reminder must begin with an itemtype character
from: * (event), ~ (task), ^ (project), % (note),
x (finished) or ? (draft)
""")

        if len(self.relative_tokens) < 2:
            # nothing to validate without itemtype and subject
            return fmt_error(
                "A subject must be provided for the reminder after the itemtype."
            )

        self.itemtype = self.relative_tokens[0]["token"]
        if not self.itemtype:
            return "no itemtype"

        subject = self.relative_tokens[1]["token"]
        allowed_fortype = allowed[self.itemtype]
        # required_fortype = required[self.itemtype]
        needed = deepcopy(required[self.itemtype])

        current_atkey = None
        used_atkeys = []
        used_ampkeys = []
        count = 0
        # print(f"{len(self.relative_tokens) = }")
        for token in self.relative_tokens:
            count += 1
            if token.get("incomplete", False):
                type = token["t"]
                need = (
                    f"required: {', '.join(needed)}\n" if needed and type == "@" else ""
                )
                options = []
                options = (
                    [x for x in allowed_fortype if len(x) == 1]
                    if type == "@"
                    else [x[-1] for x in allowed_fortype if len(x) == 2]
                )
                optional = f"optional: {', '.join(options)}" if options else ""
                return fmt_error(f"{token['t']} incomplete\n{need}{optional}")
            if token["t"] == "@":
                # print(f"{token['token']}; {used_atkeys = }")
                used_ampkeys = []
                this_atkey = token["k"]
                # bug_msg(f"{this_atkey = }")
                if this_atkey not in all_keys:
                    return fmt_error(f"@{this_atkey}, Unrecognized @-key")
                if this_atkey not in allowed_fortype:
                    return fmt_error(
                        f"@{this_atkey}, The use of this @-key is not supported in type '{self.itemtype}' reminders"
                    )
                if this_atkey in used_atkeys and this_atkey not in multiple_allowed:
                    return fmt_error(
                        f"@{current_atkey}, Multiple instances of this @-key are not allowed"
                    )
                current_atkey = this_atkey
                used_atkeys.append(current_atkey)
                if this_atkey in ["r", "~"]:
                    used_atkeys.append(f"{current_atkey}{current_atkey}")
                    used_ampkeys = []
                if current_atkey in needed:
                    needed.remove(current_atkey)
                if current_atkey in requires:
                    for _key in requires[current_atkey]:
                        if _key not in used_atkeys and _key not in needed:
                            needed.append(_key)
            elif token["t"] == "&":
                this_ampkey = f"{current_atkey}{token['k']}"
                # bug_msg(f"{current_atkey = }, {this_ampkey = }")
                if current_atkey not in ["r", "~"]:
                    return fmt_error(
                        f"&{token['k']}, The use of &-keys is not supported for @{current_atkey}"
                    )

                if this_ampkey not in all_keys:
                    return fmt_error(
                        f"&{token['k']}, This &-key is not supported for @{current_atkey}"
                    )
                if this_ampkey in used_ampkeys and this_ampkey not in multiple_allowed:
                    return fmt_error(
                        f"&{this_ampkey}, Multiple instances of this &-key are not supported"
                    )
                used_ampkeys.append(this_ampkey)

        if needed:
            needed_keys = ", ".join("@" + k for k in needed)
            needed_msg = (
                # f"Required keys not yet provided: {needed_keys} in {self.entry = }"
                f"Required keys not yet provided: {needed_keys = }\n {used_atkeys = }, {used_ampkeys = }"
            )
        else:
            needed_msg = ""
        return needed_msg

    def fmt_user(self, dt: date | datetime) -> str:
        """
        User friendly formatting for dates and datetimes using env settings
        for ampm, yearfirst, dayfirst and two_digit year.
        """
        # Simple user-facing formatter; tweak to match your prefs
        if isinstance(dt, datetime):
            d = dt
            if d.tzinfo == tz.UTC or not self.timezone:  # and not getattr(self, "final", False):
                d = d.astimezone()
            return d.strftime(self.datetimefmt)
        if isinstance(dt, date):
            return dt.strftime(self.datefmt)
        raise ValueError(f"Error: {dt} must either be a date or datetime")

    def fmt_verbose(self, dt: date | datetime) -> str:
        """
        User friendly formatting for dates and datetimes using env settings
        for ampm, yearfirst, dayfirst and two_digit year.
        """
        # Simple user-facing formatter; tweak to match your prefs
        if isinstance(dt, datetime):
            d = dt
            if d.tzinfo == tz.UTC or not self.timezone:  # and not getattr(self, "final", False):
                d = d.astimezone()
            return d.strftime(f"%a, %b %-d %Y {self.timefmt}")
        if isinstance(dt, date):
            return dt.strftime("%a, %b %-d %Y")
        raise ValueError(f"Error: {dt} must either be a date or datetime")

    def fmt_compact(self, dt: datetime) -> str:
        """
        Compact formatting for dates and datetimes using env settings
        for ampm, yearfirst, dayfirst and two_digit year.
        """
        log_msg(f"formatting {dt = }")
        # Simple user-facing formatter; tweak to match your prefs
        if isinstance(dt, datetime):
            return _fmt_naive(dt)
        if isinstance(dt, date):
            return _fmt_date(dt)
        raise ValueError(f"Error: {dt} must either be a date or datetime")

    def parse_user_dt_for_s(
        self, user_text: str
    ) -> tuple[date | datetime | None, str, str | None]:
        """
        Returns (obj, kind, tz_name_used)
          kind ∈ {'date','naive','aware','error'}
          tz_name_used: tz string ('' means local), or None for date/naive/error
        On error: (None, 'error', <message>)
        """
        core, zdir = _split_z_directive(user_text)

        try:
            obj = parse_dt(core, dayfirst=self.dayfirst, yearfirst=self.yearfirst)
        except Exception as e:
            # return None, "error", f"Could not parse '{core}': {e.__class__.__name__}"
            return None, "error", f"Error parsing '{core}'"

        # DATE if midnight or a pure date object
        if _is_date_only(obj) or (
            _is_datetime(obj)
            and obj.hour == obj.minute == obj.second == 0
            and obj.tzinfo is None
        ):
            if _is_datetime(obj):
                obj = obj.date()
            return obj, "date", None

        # DATETIME
        if zdir and zdir.lower() == "none":
            # NAIVE: keep naive (strip tz if present)
            if _is_datetime(obj) and obj.tzinfo is not None:
                obj = obj.replace(tzinfo=None)
            return obj, "naive", None

        # AWARE
        if zdir:
            zone = tz.gettz(zdir)
            if zone is None:
                # >>> HARD FAIL on invalid tz <<<
                return None, "error", f"Unknown timezone: {zdir!r}"
            tz_used = zdir
        else:
            zone = tz.tzlocal()
            tz_used = ""  # '' means "local tz"

        obj_aware = _attach_zone(obj, zone)
        obj_utc = _ensure_utc(obj_aware)
        return obj_utc, "aware", tz_used

    def collect_grouped_tokens(self, anchor_keys: set[str]) -> list[list[dict]]:
        """
        Collect multiple groups of @-tokens and their immediately trailing &-tokens.

        anchor_keys: e.g. {'r', '~', 's'} — only these @-keys start a group.

        Returns:
            List of token groups: each group is a list of relative tokens:
            [ [anchor_tok, &tok, &tok, ...], ... ]
        """
        groups: list[list[dict]] = []
        current_group: list[dict] = []
        collecting = False

        for token in self.relative_tokens:
            if token.get("t") == "@" and token.get("k") in anchor_keys:
                if current_group:
                    groups.append(current_group)
                current_group = [token]
                collecting = True
            elif collecting and token.get("t") == "&":
                current_group.append(token)
            elif collecting:
                # hit a non-& token, close the current group
                groups.append(current_group)
                current_group = []
                collecting = False

        if current_group:
            groups.append(current_group)

        bug_msg(f"{groups = }")
        return groups

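    # Illustrative grouping (an annotation, not part of the released file):
    # for a hypothetical entry like
    #     "* dinner @s 2025-08-24 18:00 @r w &i 2 &c 10"
    # collect_grouped_tokens({"r"}) returns one group: the @r anchor token
    # followed by its trailing &-tokens, roughly
    #     [[{"token": "@r w ", "t": "@", "k": "r", ...},
    #       {"token": "&i 2 ", "t": "&", "k": "i", ...},
    #       {"token": "&c 10", "t": "&", "k": "c", ...}]]
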
def mark_grouped_tokens(self):
|
|
1377
|
+
"""
|
|
1378
|
+
Build:
|
|
1379
|
+
- skip_token_positions: set of (s,e) spans for &-tokens that belong to an @-group,
|
|
1380
|
+
so your dispatcher can skip re-processing them.
|
|
1381
|
+
- token_group_anchors: map (s,e) of each grouped &-token -> (s,e) of its @-anchor.
|
|
1382
|
+
Also prepares self.token_group_map via build_token_group_map().
|
|
1383
|
+
"""
|
|
1384
|
+
self.skip_token_positions = set()
|
|
1385
|
+
self.token_group_anchors = {}
|
|
1386
|
+
|
|
1387
|
+
anchor_keys = {"r", "~"}
|
|
1388
|
+
|
|
1389
|
+
groups = self.collect_grouped_tokens(anchor_keys)
|
|
1390
|
+
|
|
1391
|
+
for group in groups:
|
|
1392
|
+
anchor = group[0]
|
|
1393
|
+
anchor_pos = (anchor["s"], anchor["e"])
|
|
1394
|
+
for token in group[1:]:
|
|
1395
|
+
pos = (token["s"], token["e"])
|
|
1396
|
+
self.skip_token_positions.add(pos)
|
|
1397
|
+
self.token_group_anchors[pos] = anchor_pos
|
|
1398
|
+
|
|
1399
|
+
# Build the easy-to-consume map (e.g., token_group_map['s'] -> [("z","CET")])
|
|
1400
|
+
self.build_token_group_map(groups)
|
|
1401
|
+
|
|
1402
|
+
def build_token_group_map(self, groups: list[list[dict]]):
|
|
1403
|
+
"""
|
|
1404
|
+
Convert grouped tokens into a simple dict:
|
|
1405
|
+
self.token_group_map = {
|
|
1406
|
+
'r': [('i','2'), ('c','10'), ...],
|
|
1407
|
+
's': [('z','CET'), ...],
|
|
1408
|
+
'~': [('f','20250824T120000'), ...],
|
|
1409
|
+
}
|
|
1410
|
+
Keys are only present if that @-anchor appears in self.relative_tokens.
|
|
1411
|
+
"""
|
|
1412
|
+
tgm: dict[str, list[tuple[str, str]]] = {}
|
|
1413
|
+
for group in groups:
|
|
1414
|
+
anchor = group[0]
|
|
1415
|
+
if anchor.get("t") != "@":
|
|
1416
|
+
continue
|
|
1417
|
+
akey = anchor.get("k") # 'r', '~', or 's'
|
|
1418
|
+
if not akey:
|
|
1419
|
+
continue
|
|
1420
|
+
pairs: list[tuple[str, str]] = []
|
|
1421
|
+
for tok in group[1:]:
|
|
1422
|
+
if tok.get("t") != "&":
|
|
1423
|
+
continue
|
|
1424
|
+
k = (tok.get("k") or "").strip()
|
|
1425
|
+
# raw value after '&x ':
|
|
1426
|
+
try:
|
|
1427
|
+
_, v = tok["token"].split(" ", 1)
|
|
1428
|
+
v = v.strip()
|
|
1429
|
+
except Exception as e:
|
|
1430
|
+
log_msg(f"error: {e = }")
|
|
1431
|
+
v = ""
|
|
1432
|
+
pairs.append((k, v))
|
|
1433
|
+
if pairs:
|
|
1434
|
+
tgm.setdefault(akey, []).extend(pairs)
|
|
1435
|
+
|
|
1436
|
+
# bug_msg(f"token_group_map {tgm = }")
|
|
1437
|
+
|
|
1438
|
+
self.token_group_map = tgm
|
|
1439
|
+
|
|
1440
|
+
def add_token(self, token: dict):
|
|
1441
|
+
"""
|
|
1442
|
+
keys: token (entry str), t (type: itemtype, subject, @, &),
|
|
1443
|
+
k (key: a, b, c, d, ... for type @ and &.
|
|
1444
|
+
type itemtype and subject have no key)
|
|
1445
|
+
add_token takes a token dict and
|
|
1446
|
+
1) appends the token as is to self.relative_tokens
|
|
1447
|
+
2) extract the token, t and k fields, expands the datetime value(s) for k in list("sf+-")
|
|
1448
|
+
and appends the resulting dict to self.stored_tokens
|
|
1449
|
+
"""
|
|
1450
|
+
|
|
1451
|
+
self.tokens.append(token)
|
|
1452
|
+
    def _tokenize(self, entry: str):
        # bug_msg(f"_tokenize {entry = }")

        self.entry = entry
        self.errors = []
        self.tokens = []
        self.messages = []

        if not entry:
            self.messages.append(
                (False, ": ".join(Item.token_keys["itemtype"][:2]), [])
            )
            return

        self.relative_tokens = []
        self.stored_tokens = []

        # First: itemtype
        itemtype = entry[0]
        if itemtype not in {"*", "~", "^", "%", "x", "?"}:
            self.messages.append(
                (
                    False,
                    f"Invalid itemtype '{itemtype}' (expected *, ~, ^, %, x or ?)",
                    [],
                )
            )
            return

        self.relative_tokens.append(
            {"token": itemtype, "s": 0, "e": 1, "t": "itemtype"}
        )
        self.itemtype = itemtype

        rest = entry[1:].lstrip()
        offset = 1 + len(entry[1:]) - len(rest)

        # Find start of first @-key to get subject
        at_pos = rest.find("@")
        subject = rest[:at_pos].strip() if at_pos != -1 else rest
        if subject:
            start = offset
            end = offset + len(subject) + 1  # trailing space
            subject_token = subject + " "
            self.relative_tokens.append(
                {"token": subject_token, "s": start, "e": end, "t": "subject"}
            )
            self.subject = subject
        else:
            self.errors.append("Missing subject")

        remainder = rest[len(subject) :]

        pattern = (
            r"(?:(?<=^)|(?<=\s))(@[\w~+\-]+ [^@&]+)|(?:(?<=^)|(?<=\s))(&\w+ [^@&]+)"
        )
        for match in re.finditer(pattern, remainder):
            token = match.group(0)
            start_pos = match.start() + offset + len(subject)
            end_pos = match.end() + offset + len(subject)

            token_type = "@" if token.startswith("@") else "&"
            key = token[1:3].strip()
            self.relative_tokens.append(
                {
                    "token": token,
                    "s": start_pos,
                    "e": end_pos,
                    "t": token_type,
                    "k": key,
                }
            )

        # Detect and append a potential partial token at the end
        partial_token = None
        if entry.endswith("@") or re.search(r"@([a-zA-Z])$", entry):
            match = re.search(r"@([a-zA-Z]?)$", entry)
            if match:
                partial_token = {
                    "token": "@" + match.group(1),
                    "s": len(entry) - len(match.group(0)),
                    "e": len(entry),
                    "t": "@",
                    "k": match.group(1),
                    "incomplete": True,
                }

        elif entry.endswith("&") or re.search(r"&([a-zA-Z]+)$", entry):
            match = re.search(r"&([a-zA-Z]*)$", entry)
            if match:
                # Optionally find parent group (r or j)
                parent = None
                for tok in reversed(self.relative_tokens):
                    if tok["t"] == "@" and tok["k"] in ["r", "~"]:
                        parent = tok["k"]
                        break
                partial_token = {
                    "token": "&" + match.group(1),
                    "s": len(entry) - len(match.group(0)),
                    "e": len(entry),
                    "t": "&",
                    "k": match.group(1),
                    "parent": parent,
                    "incomplete": True,
                }

        if partial_token:
            self.relative_tokens.append(partial_token)

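    # Illustrative sketch (not part of the diff): the shape of relative_tokens
    # after _tokenize runs on a small entry. Positions are character offsets
    # into the entry string.
    #
    #   item._tokenize("* dentist @s 9a")
    #   # item.relative_tokens ≈ [
    #   #   {"token": "*", "s": 0, "e": 1, "t": "itemtype"},
    #   #   {"token": "dentist ", "s": 2, "e": 10, "t": "subject"},
    #   #   {"token": "@s 9a", "s": 10, "e": 15, "t": "@", "k": "s"},
    #   # ]
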
    def _parse_tokens(self, entry: str):
        if not self.previous_entry:
            self._parse_all_tokens()
            return

        self.mark_grouped_tokens()

        changes = self._find_changes(self.previous_entry, entry)
        affected_tokens = self._identify_affected_tokens(changes)

        dispatched_anchors = set()

        for token in affected_tokens:
            start_pos, end_pos = token["s"], token["e"]
            # bug_msg(f"{start_pos = }, {end_pos = }, {len(entry) = }, {token = }")
            if not self._token_has_changed(token):
                continue

            if (start_pos, end_pos) in self.skip_token_positions:
                continue  # don't dispatch grouped & tokens alone

            if (start_pos, end_pos) in self.token_group_anchors:
                anchor_pos = self.token_group_anchors[(start_pos, end_pos)]
                if anchor_pos in dispatched_anchors:
                    continue
                anchor_token_info = next(
                    t for t in self.tokens if (t[1], t[2]) == anchor_pos
                )
                token_str, anchor_start, anchor_end = anchor_token_info
                token_type = token["k"]

                # bug_msg(
                #     f"{anchor_start = }, {anchor_end = }, {len(entry) = }, {token_str = }"
                # )
                self._dispatch_token(token_str, anchor_start, anchor_end, token_type)
                dispatched_anchors.add(anchor_pos)
                continue

            if start_pos == 0:
                self._dispatch_token(token, start_pos, end_pos, "itemtype")
            elif start_pos == 2:
                self._dispatch_token(token, start_pos, end_pos, "subject")
            else:
                # bug_msg(f"{end_pos = }, {len(entry) = }")
                token_type = token["k"]
                self._dispatch_token(token, start_pos, end_pos, token_type)

    def _parse_all_tokens(self):
        self.mark_grouped_tokens()

        dispatched_anchors = set()
        self.stored_tokens = []

        for token in self.relative_tokens:
            # print(f"parsing {token = }")
            start_pos, end_pos = token["s"], token["e"]
            # if token.get("k", "") in ["+", "-", "s", "f"]:
            #     bug_msg(f"identified @+ {token = }")
            if (start_pos, end_pos) in self.skip_token_positions:
                continue  # skip component of a group

            if (start_pos, end_pos) in self.token_group_anchors:
                anchor_pos = self.token_group_anchors[(start_pos, end_pos)]
                if anchor_pos in dispatched_anchors:
                    continue
                anchor_token_info = next(
                    t for t in self.tokens if (t[1], t[2]) == anchor_pos
                )
                token_str, anchor_start, anchor_end = anchor_token_info
                token_type = token["k"]
                self._dispatch_token(token_str, anchor_start, anchor_end, token_type)
                dispatched_anchors.add(anchor_pos)
                continue

            if start_pos == 0:
                self._dispatch_token(token, start_pos, end_pos, "itemtype")
            elif start_pos == 2:
                self._dispatch_token(token, start_pos, end_pos, "subject")
            elif "k" in token:
                token_type = token["k"]
                self._dispatch_token(token, start_pos, end_pos, token_type)

    def _find_changes(self, previous: str, current: str):
        # Find the range of changes between the previous and current strings
        start = 0
        while (
            start < len(previous)
            and start < len(current)
            and previous[start] == current[start]
        ):
            start += 1

        end_prev = len(previous)
        end_curr = len(current)

        while (
            end_prev > start
            and end_curr > start
            and previous[end_prev - 1] == current[end_curr - 1]
        ):
            end_prev -= 1
            end_curr -= 1

        return start, end_curr

    def _identify_affected_tokens(self, changes):
        start, end = changes
        affected_tokens = []
        for token in self.relative_tokens:
            start_pos, end_pos = token["s"], token["e"]
            if start <= end_pos and end >= start_pos:
                affected_tokens.append(token)
        return affected_tokens

    def _token_has_changed(self, token):
        return token not in self.previous_tokens

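    # Illustrative sketch (not part of the diff): _find_changes trims the common
    # prefix and suffix and returns the changed span in the *current* string.
    #
    #   item._find_changes("* dentist @s 9a", "* dentist @s 10a")
    #   # common prefix "* dentist @s " (13 chars), common suffix "a"
    #   # -> (13, 15), the span covering "10" in the new entry
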
    def _dispatch_token(self, token, start_pos, end_pos, token_type):
        # bug_msg(f"dispatch_token {token = }")
        if token_type in self.token_keys:
            method_name = self.token_keys[token_type][2]
            method = getattr(self, method_name)
            # bug_msg(f"{method_name = } returned {method = }")
            is_valid, result, sub_tokens = method(token)
            self.last_result = (is_valid, result, token)
            # bug_msg(f"{is_valid = }, {result = }, {sub_tokens = }")
            if is_valid:
                self.parse_ok = is_valid
            else:
                self.parse_ok = False
                log_msg(f"Error processing '{token_type}' {token = } : {result}")
        else:
            self.parse_ok = False
            log_msg(f"No handler for token: {token}")

    def _extract_job_node_and_summary(self, text):
        # bug_msg(f"{text = }")
        match = JOB_PATTERN.match(text)
        if match:
            number = len(match.group(1)) // 2
            summary = match.group(2).rstrip()
            content = match.group(3)
            if content:
                # the leading space is needed for parsing
                content = f" {content}"
            return number, summary, content
        # If no match, return None for the node and content and the entire string as summary
        return None, text, None

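    # Illustrative sketch (not part of the diff): JOB_PATTERN splits a job line
    # into an indent-derived node depth, a summary, and trailing &-tokens.
    # The sample line is hypothetical.
    #
    #   item._extract_job_node_and_summary("@~   paint bedroom &e 2h")
    #   # group(1) = "  " (two spaces) -> node 2 // 2 == 1
    #   # -> (1, "paint bedroom", " &e 2h")
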
    @classmethod
    def from_dict(cls, data: dict):
        # Reconstruct the entry string from tokens
        entry_str = " ".join(t["token"] for t in json.loads(data["tokens"]))
        return cls(entry_str)

    @classmethod
    def from_item(cls, data: dict):
        # Reconstruct the entry string from tokens
        entry_str = " ".join(t["token"] for t in json.loads(data["tokens"]))
        return cls(entry_str)

    @classmethod
    def do_itemtype(cls, token):
        # Process itemtype token
        if "t" in token and token["t"] == "itemtype":
            return True, token["token"].strip(), []
        else:
            return False, "itemtype cannot be empty", []

    @classmethod
    def do_summary(cls, token):
        # Process subject token
        if "t" in token and token["t"] == "subject":
            return True, token["token"].strip(), []
        else:
            return False, "subject cannot be empty", []

    @classmethod
    def do_duration(cls, arg: str):
        """Convert a timedelta string, e.g. '2h30m', to seconds."""
        if not arg:
            return False, f"time period {arg}"
        ok, res = timedelta_str_to_seconds(arg)
        return ok, res

    def do_priority(self, token):
        # Process priority token
        x = re.sub("^@. ", "", token["token"].strip()).lower()
        try:
            y = int(x)
            if 1 <= y <= 5:
                self.priority = y
                # print(f"set {self.priority = }")
                return True, y, []
            else:
                return False, x, []
        except ValueError:
            print(f"failed priority {token = }, {x = }")
            return False, x, []

    def do_notice(self, token):
        # Process notice (timedelta) token
        notice = re.sub("^[@&]. ", "", token["token"].strip()).lower()
        # notice = re.sub("^@. ", "", token["token"].strip()).lower()

        ok, notice_obj = timedelta_str_to_seconds(notice)
        # bug_msg(f"{token = }, {ok = }, {notice_obj = }")
        if ok:
            self.notice = notice
            return True, notice_obj, []
        else:
            log_msg(f"failed to set self.notice: {notice = }, {notice_obj = }")
            return False, notice_obj, []

    def do_extent(self, token):
        # Process extent (timedelta) token
        extent = re.sub("^[@&]. ", "", token["token"].strip()).lower()
        ok, extent_obj = timedelta_str_to_seconds(extent)
        # bug_msg(f"{token = }, {ok = }, {extent_obj = }")
        if ok:
            self.extent = extent
            return True, extent_obj, []
        else:
            return False, extent_obj, []

    def do_wrap(self, token):
        _w = re.sub("^@. ", "", token["token"].strip()).lower()
        _w_parts = [x.strip() for x in _w.split(",")]
        if len(_w_parts) != 2:
            return False, f"Invalid: {_w_parts}", []
        wrap = []
        msgs = []

        ok, _b_obj = timedelta_str_to_seconds(_w_parts[0])
        if ok:
            wrap.append(_b_obj)
        else:
            msgs.append(f"Error parsing before {_b_obj}")

        ok, _a_obj = timedelta_str_to_seconds(_w_parts[1])
        if ok:
            wrap.append(_a_obj)
        else:
            msgs.append(f"Error parsing after {_a_obj}")
        if msgs:
            return False, ", ".join(msgs), []
        self.wrap = wrap
        return True, wrap, []

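    # Illustrative sketch (not part of the diff): @w takes a "before, after"
    # pair of timedelta strings; both are converted to seconds.
    #
    #   item.do_wrap({"token": "@w 15m, 10m"})
    #   # -> (True, [900, 600], []) and item.wrap == [900, 600]
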
    def do_alert(self, token):
        """
        Process an alert string, validate it and return a corresponding string
        """

        alert = token["token"][2:].strip()

        parts = [x.strip() for x in alert.split(":")]
        if len(parts) != 2:
            return False, f"Invalid alert format: {alert}", []
        timedeltas, commands = parts
        secs = []
        tds = []
        cmds = []
        probs = []
        issues = []
        res = ""
        ok = True
        for cmd in [x.strip() for x in commands.split(",")]:
            if is_lowercase_letter(cmd):
                cmds.append(cmd)
            else:
                ok = False
                probs.append(f" Invalid command: {cmd}")
        for td in [x.strip() for x in timedeltas.split(",")]:
            ok, td_seconds = timedelta_str_to_seconds(td)
            if ok:
                secs.append(str(td_seconds))
                tds.append(td)
            else:
                ok = False
                probs.append(f" Invalid timedelta: {td}")
        if ok:
            res = f"{', '.join(tds)}: {', '.join(cmds)}"
            self.alerts.append(res)
        else:
            issues.append("; ".join(probs))
        if issues:
            return False, "\n".join(issues), []
        return True, res, []

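    # Illustrative sketch (not part of the diff): an @a token pairs one or more
    # lead times with one-letter alert commands, "timedeltas: commands".
    #
    #   item.do_alert({"token": "@a 30m, 5m: d"})
    #   # -> (True, "30m, 5m: d", []) and "30m, 5m: d" appended to item.alerts
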
    def do_requires(self, token):
        """
        Process a requires string for a job.
        Format:
            N
        or
            N:M[,K...]
        where N is the primary id, and M,K,... are dependency ids.

        Returns:
            (True, primary, dependencies) on success
            (False, "error message", []) on failure
        """
        requires = token["token"][2:].strip()

        try:
            if ":" in requires:
                primary_str, deps_str = requires.split(":", 1)
                primary = int(primary_str.strip())
                dependencies = []
                for part in deps_str.split(","):
                    part = part.strip()
                    if part == "":
                        continue
                    try:
                        dependencies.append(int(part))
                    except ValueError:
                        return (
                            False,
                            f"Invalid dependency value: '{part}' in token '{requires}'",
                            [],
                        )
            else:
                primary = int(requires.strip())
                dependencies = []
        except ValueError as e:
            return (
                False,
                f"Invalid requires token: '{requires}' ({e})",
                [],
            )

        return True, primary, dependencies

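    # Illustrative sketch (not part of the diff): parsing a requires token.
    #
    #   item.do_requires({"token": "&r 3: 1, 2"})
    #   # -> (True, 3, [1, 2])
    #   item.do_requires({"token": "&r 4"})
    #   # -> (True, 4, [])
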
    def do_description(self, token):
        description = re.sub("^@. ", "", token["token"])
        # bug_msg(f"{token = }, {description = }")
        if not description:
            return False, "missing description", []
        self.description = description
        # print(f"{self.description = }")
        return True, description, []

    def do_nothing(self, token):
        return True, "passed", []

    @classmethod
    def do_paragraph(cls, arg):
        """
        Remove trailing whitespace.
        """
        obj = None
        rep = arg
        para = [x.rstrip() for x in arg.split("\n")]
        if para:
            all_ok = True
            obj_lst = []
            rep_lst = []
            for p in para:
                try:
                    res = str(p)
                    obj_lst.append(res)
                    rep_lst.append(res)
                except Exception as e:
                    log_msg(f"error: {e}")
                    all_ok = False
                    rep_lst.append(f"~{arg}~")

            obj = "\n".join(obj_lst) if all_ok else False
            rep = "\n".join(rep_lst)
        if obj:
            return True, obj
        else:
            return False, rep

    @classmethod
    def do_stringlist(cls, args: List[str]):
        """
        >>> do_stringlist('')
        (None, '')
        >>> do_stringlist('red')
        (['red'], 'red')
        >>> do_stringlist('red, green, blue')
        (['red', 'green', 'blue'], 'red, green, blue')
        >>> do_stringlist('Joe Smith <js2@whatever.com>')
        (['Joe Smith <js2@whatever.com>'], 'Joe Smith <js2@whatever.com>')
        """
        obj = None
        rep = args
        if args:
            args = [x.strip() for x in args.split(",")]
            all_ok = True
            obj_lst = []
            rep_lst = []
            for arg in args:
                try:
                    res = str(arg)
                    obj_lst.append(res)
                    rep_lst.append(res)
                except Exception as e:
                    log_msg(f"error: {e}")
                    all_ok = False
                    rep_lst.append(f"~{arg}~")
            obj = obj_lst if all_ok else None
            rep = ", ".join(rep_lst)
        return obj, rep

    def do_string(self, token):
        obj = rep = token["token"][2:].strip()
        return obj, rep, []

    def do_timezone(self, token: dict):
        """Handle @z timezone declaration in user input."""
        tz_str = token["token"][2:].strip()
        # print(f"do_timezone: {tz_str = }")
        if tz_str.lower() in {"none", "naive"}:
            self.timezone = None
            self.tz_str = "none"
            return True, None, []
        try:
            self.timezone = ZoneInfo(tz_str)
            self.tz_str = self.timezone.key
            return True, self.timezone, []
        except Exception as e:
            log_msg(f"error: {e}")
            self.timezone = None
            self.tz_str = ""
            return False, f"Invalid timezone: '{tz_str}'", []

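    # Illustrative sketch (not part of the diff): @z accepts an IANA zone name
    # or "none"/"naive" to request naive datetimes.
    #
    #   item.do_timezone({"token": "@z America/New_York"})
    #   # -> (True, ZoneInfo("America/New_York"), []); item.tz_str == "America/New_York"
    #   item.do_timezone({"token": "@z none"})
    #   # -> (True, None, []); item.tz_str == "none"
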
    def do_rrule(self, token):
        """
        Handle an @r ... group. `token` may be a token dict or the raw token string.
        This only validates / records RRULE components; RDATE/EXDATE are added later
        by finalize_rruleset().
        Returns (ok: bool, message: str, extras: list).
        """
        # bug_msg(f"in do_rrule: {token = }")

        # Normalize input to raw text
        tok_text = token.get("token") if isinstance(token, dict) else str(token)
        bug_msg(f"{tok_text = }")

        # Find the matching @r group (scan all groups first)
        group = None
        r_groups = list(self.collect_grouped_tokens({"r"}))
        for g in r_groups:
            if g and g[0].get("token") == tok_text:
                group = g
                break

        # Only after scanning all groups decide if it's missing
        if group is None:
            msg = (False, f"No matching @r group found for token: {tok_text}", [])
            self.messages.append(msg)
            return msg

        # Parse frequency from the anchor token "@r d|w|m|y"
        anchor = group[0]
        parts = anchor["token"].split(maxsplit=1)
        if len(parts) < 2:
            msg = (False, f"Missing rrule frequency: {tok_text}", [])
            self.messages.append(msg)
            return msg

        freq_code = parts[1].strip().lower()
        if freq_code not in self.freq_map:
            keys = ", ".join(f"{k} ({v})" for k, v in self.freq_map.items())
            msg = (
                False,
                f"'{freq_code}' is not a supported frequency. Choose from:\n {keys}",
                [],
            )
            self.messages.append(msg)
            return msg

        # Record a normalized RRULE "component" for your builder
        # (Keep this lightweight. Don't emit RDATE/EXDATE here.)
        self.rrule_tokens.append(
            {"token": f"{self.freq_map[freq_code]}", "t": "&", "k": "FREQ"}
        )

        # bug_msg(f"{self.rrule_tokens = } processing remaining tokens")
        # Parse following &-tokens in this @r group (e.g., &i 3, &c 10, &u 20250101, &m..., &w..., &d...)
        for t in group[1:]:
            tstr = t.get("token", "")
            try:
                key, value = tstr[1:].split(maxsplit=1)  # strip leading '&'
                key = key.upper().strip()
                value = value.strip()
            except Exception as e:
                log_msg(f"error: {e}")
                continue

            self.rrule_tokens.append({"token": tstr, "t": "&", "k": key, "v": value})

        # bug_msg(f"got {self.rrule_tokens = }")
        return (True, "", [])

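    # Illustrative sketch (not part of the diff): an "@r w &i 2 &c 10" group
    # leaves rrule_tokens holding the frequency plus its &-parameters,
    # assuming freq_map maps "w" -> "WEEKLY".
    #
    #   # after do_rrule runs on the anchor token of "@r w &i 2 &c 10":
    #   # self.rrule_tokens ≈ [
    #   #   {"token": "WEEKLY", "t": "&", "k": "FREQ"},
    #   #   {"token": "&i 2", "t": "&", "k": "I", "v": "2"},
    #   #   {"token": "&c 10", "t": "&", "k": "C", "v": "10"},
    #   # ]
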
    def do_s(self, token: dict):
        """
        Parse @s, honoring optional trailing 'z <tz>' directive inside the value.
        Updates self.dtstart_str and self.rdstart_str to seed recurrence.
        """
        try:
            raw = token["token"][2:].strip()
            if not raw:
                return False, "Missing @s value", []

            obj, kind, tz_used = self.parse_user_dt_for_s(raw)
            if kind == "error":
                return False, tz_used or f"Invalid @s value: {raw}", []

            userfmt = self.fmt_user(obj)
            verbosefmt = self.fmt_verbose(obj)

            if kind == "date":
                compact = self._serialize_date(obj)
                self.s_kind = "date"
                self.s_tz = None
            elif kind == "naive":
                compact = self._serialize_naive_dt(obj)
                self.s_kind = "naive"
                self.s_tz = None
            else:  # aware
                compact = self._serialize_aware_dt(obj, tz_used)
                self.s_kind = "aware"
                self.s_tz = tz_used  # '' == local

            # compact = self._serialize_date_or_datetime(obj)

            self.dtstart = compact
            self.dtstart_str = (
                f"DTSTART:{compact}"
                if kind != "date"
                else f"DTSTART;VALUE=DATE:{compact}"
            )
            self.rdstart_str = f"RDATE:{compact}"
            token["token"] = f"@s {userfmt}"
            retval = userfmt if self.final else verbosefmt
            bug_msg(f"@s {token = }, {retval = }")
            self.has_s = True

            return True, retval, []

        except Exception as e:
            return False, f"Invalid @s value: {e}", []

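    # Illustrative sketch (not part of the diff): the DTSTART/RDATE seeds that
    # do_s produces for each kind of @s value. The compact forms shown are
    # assumptions based on the serializers' names.
    #
    #   # "@s 2025-08-24"     (date)  -> dtstart_str ≈ "DTSTART;VALUE=DATE:20250824"
    #   # "@s 2025-08-24 9a"  (naive) -> dtstart_str ≈ "DTSTART:20250824T090000"
    #   # either way, rdstart_str seeds recurrence as f"RDATE:{compact}"
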
    def do_b(self, token: dict) -> Tuple[bool, str, List[str]]:
        """
        Live resolver for '@b Leaf/Parent/.../Root' (leaf→root, '/' only).
        - If matches exist: preview; auto-lock when unique/exact.
        - If no matches: show per-segment status, e.g. 'Churchill (new)/quotations/library'.
        """
        path = token["token"][2:].strip()  # strip '@b'
        rev_dict = self.get_name_to_binpath()  # {leaf_lower: "Leaf/.../Root"}
        parts = [p.strip() for p in path.split("/") if p.strip()]

        # Batch/final or no controller dict → one-shot resolve
        if self.final or not self.get_name_to_binpath():
            if not parts:
                return False, "Missing bin path after @b", []
            norm = "/".join(parts)  # Leaf/Parent/.../Root
            token["token"] = f"@b {parts[0]}"  # keep prefix; no decoration
            if not token.get("_b_resolved"):  # append ONCE (batch runs once anyway)
                self.bin_paths.append(parts)  # store Leaf→…→Root parts
                token["_b_resolved"] = True
            return True, token["token"], []

        # Fallback for batch/final or if controller not wired
        if not rev_dict:
            if not path:
                return False, "Missing bin path after @b", []
            parts = [p.strip() for p in (path or "").split("/") if p.strip()]
            if not parts:
                return False, "Missing bin path after @b", []
            # keep full token; don't truncate to parts[0]
            token["token"] = f"@b {parts[0]}"
            # don't append to bin_paths here; do it on save
            return True, token["token"], []

        raw = token.get("token", "")
        frag = raw[2:].strip() if raw.startswith("@b") else raw

        if not frag:
            msg = "@b Type bin as Leaf/Parent/…"
            token["token"] = msg
            token.pop("_b_resolved", None)
            token.pop("_b_new", None)
            return True, msg, []

        paths = list(rev_dict.values())  # existing reversed paths
        matches = _ordered_prefix_matches(paths, frag, limit=24)

        if matches:
            nf = _norm(frag)
            exact = next((m for m in matches if _norm(m) == nf), None)
            if exact or len(matches) == 1:
                resolved = exact or matches[0]
                token["token"] = f"@b {resolved}"
                token["_b_new"] = False
                token["_b_resolved"] = True
                return True, token["token"], []
            # ambiguous → preview + suggestions
            lcp = _lcp(matches)
            preview = lcp if lcp and len(lcp) >= len(frag) else matches[0]
            token["token"] = f"@b {preview}"
            token.pop("_b_resolved", None)
            token["_b_new"] = False
            return True, token["token"], matches

        # ---------- No matches → per-segment feedback ----------
        parts = [p.strip() for p in frag.split("/") if p.strip()]
        leaf_to_path = {k.lower(): v for k, v in rev_dict.items()}
        leafnames = set(leaf_to_path.keys())

        # Build a set of existing leaf-first prefixes for quick “does any path start with X?”
        # Example: for 'quotations/library/root' we add 'quotations', 'quotations/library', ...
        prefix_set = set()
        for p in paths:
            toks = p.split("/")
            for i in range(1, len(toks) + 1):
                prefix_set.add("/".join(toks[:i]).lower())

        decorated: list[str] = []
        for i, seg in enumerate(parts):
            seg_l = seg.lower()
            if i == 0:
                # Leaf segment: does *any* existing path start with this leaf?
                starts = f"{seg_l}"
                if starts not in prefix_set and not any(
                    s.startswith(starts + "/") for s in prefix_set
                ):
                    decorated.append(f"{seg} (new)")
                else:
                    decorated.append(seg)
            else:
                # Parent segments: if this segment is an existing leaf name, show its known ancestry
                if seg_l in leafnames:
                    known = leaf_to_path[seg_l].split(
                        "/"
                    )  # e.g., ['quotations','library','root']
                    # drop the leaf itself (known[0]) since we already have 'seg', and (optionally) drop 'root'
                    tail = [x for x in known[1:] if x.lower() != "root"]
                    if tail:
                        decorated.append("/".join([seg] + tail))
                    else:
                        decorated.append(seg)
                else:
                    # Not an exact leaf; if no prefixes suggest it, mark (new)
                    any_prefix = any(k.startswith(seg_l) for k in leafnames)
                    decorated.append(seg if any_prefix else f"{seg} (new)")

        pretty = "@b " + "/".join(decorated)
        # Keep the actual token clean (no "(new)"); only the feedback string is decorated
        token["token"] = f"@b {parts[0]}"
        token.pop("_b_resolved", None)
        token["_b_new"] = True
        return True, pretty, []

    def _looks_like_url(self, s: str) -> bool:
        """Heuristic: valid scheme and netloc."""
        try:
            parsed = urlparse(s)
            return bool(parsed.scheme and parsed.netloc)
        except Exception:
            return False

    def _looks_like_path(self, s: str) -> bool:
        """
        Heuristic for 'valid' path.
        Here we allow non-existent paths (so you can create them later),
        but you could tighten this to Path(...).exists() if you prefer.
        """
        s = (s or "").strip()
        if not s:
            return False
        # Absolute or relative path-ish
        p = Path(s).expanduser()
        # Very loose: must have at least one path component
        return bool(p.parts)

    def do_g(self, token: dict) -> tuple[bool, str, list]:
        """
        @g: goto – must be a URL or file path.

        Returns:
            (True, "OpenWithDefault", [])
            (False, "<error message>", [])
        """
        raw = (token.get("token") or "").strip()

        # Strip the '@g' prefix if it's there
        # e.g. "@g /path/to/file" or "@ghttps://example.com"
        if raw.startswith("@g"):
            value = raw[2:].strip()
        else:
            # Fallback: take everything after the first space
            parts = raw.split(None, 1)
            value = parts[1].strip() if len(parts) > 1 else ""

        if not value:
            return False, "@g expects a URL or file path, but none was given.", []

        if not (self._looks_like_url(value) or self._looks_like_path(value)):
            return False, f"@g expects a URL or file path, got {value!r}.", []

        # Optional: stash the parsed value for later convenience
        token["goto"] = value

        # On success, your design: return special command name
        return True, "OpenWithDefault", []

    def do_job(self, token):
        # Process job token
        node, summary, tokens_remaining = self._extract_job_node_and_summary(
            token["token"]
        )
        # bug_msg(f"{token = }, {node = }, {summary = }, {tokens_remaining = }")
        job_params = {"~": summary}
        job_params["node"] = node
        # bug_msg(f"{self.job_tokens = }")

        return True, job_params, []

    def do_at(self):
        print("TODO: do_at() -> show available @ tokens")

    def do_amp(self):
        print("TODO: do_amp() -> show available & tokens")

    @classmethod
    def do_weekdays(cls, wkd_str: str):
        """
        Converts a string representation of weekdays into a list of rrule objects.
        """
        print(" ### do_weekdays ### ")
        wkd_str = wkd_str.upper()
        wkd_regex = r"(?<![\w-])([+-][1-4])?(MO|TU|WE|TH|FR|SA|SU)(?!\w)"
        matches = re.findall(wkd_regex, wkd_str)
        _ = [f"{x[0]}{x[1]}" for x in matches]
        all = [x.strip() for x in wkd_str.split(",")]
        bad = [x for x in all if x not in _]
        problem_str = ""
        problems = []
        for x in bad:
            probs = []
            i, w = cls.split_int_str(x)
            if i is not None:
                abs_i = abs(int(i))
                if abs_i > 4 or abs_i == 0:
                    probs.append(f"{i} must be between -4 and -1 or between +1 and +4")
                elif not (i.startswith("+") or i.startswith("-")):
                    probs.append(f"{i} must begin with '+' or '-'")
            w = w.strip()
            if not w:
                probs.append(f"Missing weekday abbreviation from {cls.wkd_str}")
            elif w not in cls.wkd_list:
                probs.append(f"{w} must be a weekday abbreviation from {cls.wkd_str}")
            if probs:
                problems.append(f"In '{x}': {', '.join(probs)}")
            else:
                # undiagnosed problem
                problems.append(f"{x} is invalid")
        if problems:
            probs = []
            probs.append(", ".join(bad))
            probs.append("\n".join(problems))
            probs_str = "\n".join(probs)
            problem_str = f"Problem entries: {probs_str}"
        good = []
        for x in matches:
            s = f"{x[0]}{x[1]}" if x[0] else f"{x[1]}"
            good.append(s)
        good_str = ",".join(good)
        if problem_str:
            return False, f"{problem_str}\n{good_str}"
        else:
            return True, f"BYDAY={good_str}"

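    # Illustrative sketch (not part of the diff): converting weekday entries to
    # an rrule BYDAY clause.
    #
    #   Item.do_weekdays("-1fr, mo")
    #   # -> (True, "BYDAY=-1FR,MO")
    #   Item.do_weekdays("-5fr")
    #   # -> (False, "Problem entries: ..."), since the ordinal must be in
    #   #    -4..-1 or +1..+4
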
    @classmethod
    def do_interval(cls, arg: int):
        """
        Process an integer as the rrule INTERVAL.
        """
        try:
            arg = int(arg)
        except Exception:
            return False, "interval must be a positive integer"
        else:
            if arg < 1:
                return False, "interval must be a positive integer"
            return True, f"INTERVAL={arg}"

    @classmethod
    def do_months(cls, arg):
        """
        Process a comma separated list of integer month numbers from 1, 2, ..., 12
        """
        print(" ### do_months ### ")
        monthsstr = (
            "months: a comma separated list of integer month numbers from 1, 2, ..., 12"
        )
        if arg:
            args = arg.split(",")
            ok, res = cls.integer_list(args, 0, 12, False, "")
            if ok:
                obj = res
                rep = f"{arg}"
            else:
                obj = None
                rep = f"invalid months: {res}. Required for {monthsstr}"
        else:
            obj = None
            rep = monthsstr
        if obj is None:
            return False, rep

        return True, f"BYMONTH={rep}"

    @classmethod
    def do_count(cls, arg):
        """
        Process an integer count for rrule
        """
        print(" ### do_count ### ")
        countstr = "count: an integer count for rrule, 1, 2, ... "
        if arg:
            args = arg.strip()
            ok, res = cls.integer(args, 1, None, False, "")
            if ok:
                obj = res
                rep = f"{arg}"
            else:
                obj = None
                rep = f"invalid count: {res}. Required for {countstr}"
        else:
            obj = None
            rep = countstr
        if obj is None:
            return False, rep

        return True, f"COUNT={rep}"

    @classmethod
    def do_monthdays(cls, arg):
        """
        Process a comma separated list of integer month day numbers from 1, 2, ..., 31
        """
        print(" ### do_monthdays ### ")
        monthdaysstr = "monthdays: a comma separated list of integer month day numbers from 1, 2, ..., 31"
        if arg:
            args = arg.split(",")
            ok, res = cls.integer_list(args, 1, 31, False, "")
            if ok:
                obj = res
                rep = f"{arg}"
            else:
                obj = None
                rep = f"invalid monthdays: {res}. Required for {monthdaysstr}"
        else:
            obj = None
            rep = monthdaysstr
        if obj is None:
            return False, rep

        # RFC 5545 uses BYMONTHDAY (not BYMONTH) for days of the month
        return True, f"BYMONTHDAY={rep}"

    @classmethod
    def do_hours(cls, arg):
        """
        Process a comma separated list of integer hour numbers from 0, 1, ..., 23
        """
        print(" ### do_hours ### ")
        hoursstr = (
            "hours: a comma separated list of integer hour numbers from 0, 1, ..., 23"
        )
        if arg:
            args = arg.split(",")
            # zero must be allowed here: hour 0 is midnight
            ok, res = cls.integer_list(args, 0, 23, True, "")
            if ok:
                obj = res
                rep = f"{arg}"
            else:
                obj = None
                rep = f"invalid hours: {res}. Required for {hoursstr}"
        else:
            obj = None
            rep = hoursstr
        if obj is None:
            return False, rep

        return True, f"BYHOUR={rep}"

    @classmethod
    def do_minutes(cls, arg):
        """
        Process a comma separated list of integer minute numbers from 0, 1, ..., 59
        """
        print(" ### do_minutes ### ")
        minutesstr = "minutes: a comma separated list of integer minute numbers from 0, 1, ..., 59"
        if arg:
            args = arg.split(",")
            # zero must be allowed here: minute 0 is on the hour
            ok, res = cls.integer_list(args, 0, 59, True, "")
            if ok:
                obj = res
                rep = f"{arg}"
            else:
                obj = None
                rep = f"invalid minutes: {res}. Required for {minutesstr}"
        else:
            obj = None
            rep = minutesstr
        if obj is None:
            # bug_msg(f"returning False, {arg = }, {rep = }")
            return False, rep

        # bug_msg(f"returning True, {arg = }, {rep = },")
        return True, f"BYMINUTE={rep}"

    @classmethod
    def do_two_periods(cls, arg: List[str]) -> Tuple[bool, str, list]:
        return True, "not implemented", []

    @classmethod
    def do_mask(cls, arg: str) -> Tuple[bool, str, list]:
        return True, "not implemented", []

    @classmethod
    def integer(cls, arg, min, max, zero, typ=None):
        """
        :param arg: integer
        :param min: minimum allowed or None
        :param max: maximum allowed or None
        :param zero: zero not allowed if False
        :param typ: label for message
        :return: (True, integer) or (False, message)
        >>> integer(-2, -10, 8, False, 'integer_test')
        (True, -2)
        >>> integer(-2, 0, 8, False, 'integer_test')
        (False, 'integer_test: -2 is less than the allowed minimum')
        """
        msg = ""
        try:
            arg = int(arg)
        except Exception:
            if typ:
                return False, "{}: {}".format(typ, arg)
            else:
                return False, arg
        if min is not None and arg < min:
            msg = "{} is less than the allowed minimum".format(arg)
        elif max is not None and arg > max:
            msg = "{} is greater than the allowed maximum".format(arg)
        elif not zero and arg == 0:
            msg = "0 is not allowed"
        if msg:
            if typ:
                return False, "{}: {}".format(typ, msg)
            else:
                return False, msg
        else:
            return True, arg

    @classmethod
    def integer_list(cls, arg, min, max, zero, typ=None):
        """
        :param arg: comma separated list of integers
        :param min: minimum allowed or None
        :param max: maximum allowed or None
        :param zero: zero not allowed if False
        :param typ: label for message
        :return: (True, list of integers) or (False, messages)
        >>> integer_list([-13, -10, 0, "2", 27], -12, +20, True, 'integer_list test')
        (False, 'integer_list test: -13 is less than the allowed minimum; 27 is greater than the allowed maximum')
        >>> integer_list([0, 1, 2, 3, 4], 1, 3, True, "integer_list test")
        (False, 'integer_list test: 0 is less than the allowed minimum; 4 is greater than the allowed maximum')
        >>> integer_list("-1, 1, two, 3", None, None, True, "integer_list test")
        (False, 'integer_list test: -1, 1, two, 3')
        >>> integer_list([1, "2", 3], None, None, True, "integer_list test")
        (True, [1, 2, 3])
        """
        if type(arg) == str:
            try:
                args = [int(x) for x in arg.split(",")]
            except Exception:
                if typ:
                    return False, "{}: {}".format(typ, arg)
                else:
                    return False, arg
        elif type(arg) == list:
            try:
                args = [int(x) for x in arg]
            except Exception:
                if typ:
                    return False, "{}: {}".format(typ, arg)
                else:
                    return False, arg
        elif type(arg) == int:
            args = [arg]
        msg = []
        ret = []
        for arg in args:
            ok, res = cls.integer(arg, min, max, zero, None)
            if ok:
                ret.append(res)
            else:
                msg.append(res)
        if msg:
            if typ:
                return False, "{}: {}".format(typ, "; ".join(msg))
            else:
                return False, "; ".join(msg)
        else:
            return True, ret

    @classmethod
    def split_int_str(cls, s):
        match = re.match(r"^([+-]?\d*)(.{1,})$", s)
        if match:
            integer_part = match.group(1)
            string_part = match.group(2)
            # Convert integer_part to an integer if it's not empty, otherwise None
            integer_part = integer_part if integer_part else None
            string_part = string_part if string_part else None
            return integer_part, string_part
        return None, None  # Default case if no match is found

    # ---- helpers you implement with your existing token machinery ----

    def _get_first_two_occurrences(self) -> tuple[datetime | None, datetime | None]:
        """
        Return (first, second) occurrences from rruleset, which is the
        ultimate source of truth for this item's schedule.
        Always return the first two in sequence, even if they’re already past.
        """
        bug_msg(f"{self.rruleset = }")
        if not (self.rruleset or "").strip():
            return None, None

        try:
            rs = rrulestr(self.rruleset)
            it = iter(rs)
            first = next(it, None)
            second = next(it, None)
            return first, second
        except Exception as e:
            bug_msg(f"error {e = }")
            return None, None

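    # Illustrative sketch (not part of the diff): the first two occurrences of
    # a small daily rule.
    #
    #   # with self.rruleset == "DTSTART:20250824T090000\nRRULE:FREQ=DAILY;COUNT=3"
    #   # _get_first_two_occurrences() -> (datetime(2025, 8, 24, 9, 0),
    #   #                                  datetime(2025, 8, 25, 9, 0))
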
    # def _get_o_interval(self):
    #     """
    #     Return (timedelta, learn_bool) if @o present, else None.
    #     Expects self.over to hold the *original* @o string (e.g. '4d' or '~4d').
    #     """
    #     s = (self.over or "").strip()
    #     if not s:
    #         return None
    #     # FIXME: what about projects?
    #     learn = s.startswith("~")
    #     base = s[1:].strip() if learn else s
    #     ok, seconds = timedelta_str_to_seconds(base)
    #     if not ok:
    #         return None
    #
    #     return (timedelta(seconds=seconds), learn)
    #
    # def _set_o_interval(self, td, learn: bool):
    #     """Write @o token back (e.g., '@o 4d3h ' or '@o ~4d3h ')."""
    #     # convert timedelta -> your TD string; use your existing helper if you have it
    #     seconds = int(td.total_seconds())
    #     # simple example: only days/hours; replace with your own formatter
    #     days, rem = divmod(seconds, 86400)
    #     hours, rem = divmod(rem, 3600)
    #     minutes = rem // 60
    #     parts = []
    #     if days:
    #         parts.append(f"{days}d")
    #     if hours:
    #         parts.append(f"{hours}h")
    #     if minutes:
    #         parts.append(f"{minutes}m")
    #     td_str = "".join(parts) or "0m"
    #
    #     prefix = "~" if learn else ""
    #     new_token_text = f"@o {prefix}{td_str} "
    #
    #     tok = next(
    #         (
    #             t
    #             for t in self.relative_tokens
    #             if t.get("t") == "@" and t.get("k") == "o"
    #         ),
    #         None,
    #     )
    #     if tok:
    #         tok["token"] = new_token_text
    #     else:
    #         self.relative_tokens.append({"token": new_token_text, "t": "@", "k": "o"})
    #     # keep original string field too, if you use it elsewhere
    #     self.over = f"{prefix}{td_str}"

    def _smooth_interval(self, old: timedelta, new: timedelta) -> timedelta:
        """
        (w*old + new)/(w+1), then:
        - if averaged interval > 1 week → round to whole days
        - if averaged interval > 1 day  → round to whole hours
        - else → round to whole minutes
        """
        hour_seconds = 60 * 60
        day_seconds = 24 * hour_seconds
        week_seconds = 7 * day_seconds

        # weighted average as seconds
        total = old * self.history_weight + new
        secs = total.total_seconds() / (self.history_weight + 1)

        if secs > week_seconds:
            # round to integer days (in seconds)
            rounded = round(secs / day_seconds) * day_seconds
        elif secs > day_seconds:
            # round to integer hours (in seconds)
            rounded = round(secs / hour_seconds) * hour_seconds
        else:
            # round to integer minutes (in seconds)
            rounded = round(secs / 60) * 60

        return timedelta(seconds=rounded)

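    # Illustrative sketch (not part of the diff): smoothing with
    # history_weight == 2.
    #
    #   # old = timedelta(days=2), new = timedelta(days=1)
    #   # (2*2d + 1d) / 3 = 40h; 40h > 1 day but < 1 week, so round to whole hours
    #   # -> timedelta(days=1, hours=16)
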
    def _is_rdate_only(self) -> bool:
        """True if rruleset is only RDATE(+optional EXDATE), i.e. no RRULE."""
        if not self.rruleset:
            return False
        lines = [ln.strip() for ln in self.rruleset.splitlines() if ln.strip()]
        if not lines:
            return False
        # No RRULE anywhere
        if any(ln.upper().startswith("RRULE") for ln in lines):
            return False
        # At least one RDATE (either plain RDATE:... or RDATE;VALUE=DATE:...)
        has_rdate = any(ln.upper().startswith("RDATE") for ln in lines)
        return has_rdate

    def _drop_first_rdate(self, first_dt: datetime) -> bool:
        """
        Mark the first RDATE occurrence as completed by appending an @- EXDATE token,
        then re-parse so rruleset reflects it. Return True if more RDATEs remain.
        """
        # 1) append @- token in the same textual style your parser already understands
        if first_dt.hour == 0 and first_dt.minute == 0 and first_dt.second == 0:
            ex_str = first_dt.strftime("%Y%m%d")  # date-only
        else:
            ex_str = first_dt.strftime("%Y%m%dT%H%M")  # datetime

        self.relative_tokens.append({"token": f"@- {ex_str} ", "t": "@", "k": "-"})

        # 2) re-parse to regenerate rruleset/derived fields consistently
        self._reparse_from_tokens()

        # 3) decide if anything remains (any RDATE not excluded)
        # Quick check: do we still have any @+ token with a date/datetime != ex_str?
        remaining = False
        for tok in self.relative_tokens:
            if tok.get("t") == "@" and tok.get("k") == "+":
                body = tok["token"][2:].strip()
                for piece in (p.strip() for p in body.split(",") if p.strip()):
                    if piece != ex_str:
                        remaining = True
                        break
                if remaining:
                    break

        return remaining

    def _has_rrule(self) -> bool:
        """True if rruleset contains an RRULE line."""
        if not self.rruleset:
            return False
        return any(
            ln.strip().upper().startswith("RRULE") for ln in self.rruleset.splitlines()
        )

    def _advance_dtstart_and_decrement_count(self, new_dtstart: datetime) -> None:
        # bump @s (or create)
        for tok in self.relative_tokens:
            if tok.get("t") == "@" and tok.get("k") == "s":
                tok["token"] = f"@s {new_dtstart.strftime('%Y%m%dT%H%M')} "
                break
        else:
            self.relative_tokens.append(
                {
                    "token": f"@s {new_dtstart.strftime('%Y%m%dT%H%M')} ",
                    "t": "@",
                    "k": "s",
                }
            )

        # decrement &c if present
        for tok in list(self.relative_tokens):
            if tok.get("t") == "&" and tok.get("k") == "c":
                try:
                    parts = tok["token"].split()
                    if len(parts) >= 2 and parts[0] == "&c":
                        cnt = int(parts[1]) - 1
                        if cnt > 0:
                            tok["token"] = f"&c {cnt}"
                        else:
                            self.relative_tokens.remove(tok)  # drop when it hits 0
                except Exception:
                    pass
                break

        # rebuild rruleset / derived fields from tokens
        self._reparse_from_tokens()

    def _clear_schedule(self) -> None:
        """
        Clear *all* scheduling: @s, @r and its &-params, @+, @- and rruleset.
        Leaves non-scheduling tokens (subject, etc.) intact.
        """
        new_tokens = []
        dropping_group_r = False

        for tok in self.relative_tokens:
            t = tok.get("t")
            k = tok.get("k")

            # drop @s
            if t == "@" and k == "s":
                continue

            # drop @+ / @-
            if t == "@" and k in {"+", "-"}:
                continue

            # drop @r and all following & (r-params) until next non-& token
            if t == "@" and k == "r":
                dropping_group_r = True
                continue

            if dropping_group_r:
                if t == "&":  # r-parameter
                    continue
                else:
                    dropping_group_r = False
                    # fall through to append this non-& token

            new_tokens.append(tok)

        self.relative_tokens = new_tokens
        self.rruleset = ""  # remove compiled schedule string

    def do_rdate(self, token: dict):
        """
        Process an RDATE token, e.g., "@+ 2024-07-03 14:00, 2024-08-05 09:00".

        Also accepts relative-like input (e.g. "11a tue") which is parsed
        relative to the current anchor, but when self.final is True we
        rewrite the token to store the *expanded* canonical forms so that
        future parses are stable.
        """
        bug_msg(f"processing rdate {token = }")
        try:
            # Remove the "@+" prefix and extra whitespace
            token_body = token["token"][2:].strip()

            # Split on commas to get individual date strings
            dt_strs = [s.strip() for s in token_body.split(",") if s.strip()]

            # Process each entry
            rdates: list[str] = []
            udates: list[str] = []

            for dt_str in dt_strs:
                bug_msg(f"processing rdate {dt_str = }")
                if self.s_kind == "aware":
                    dt = parse(dt_str, self.s_tz)
                    dt_fmt = _fmt_utc_Z(dt)
                elif self.s_kind == "naive":
                    dt = parse(dt_str)
                    dt_fmt = _fmt_naive(dt)
                else:
                    dt = parse(dt_str)
                    dt_fmt = _fmt_date(dt)

                if dt_fmt not in rdates:
                    rdates.append(dt_fmt)
                    udates.append(self.fmt_user(dt))

            # Append to any existing RDATE start string
            if self.rdstart_str:
                self.rdstart_str = f"{self.rdstart_str},{','.join(rdates)}"
                self.rruleset_dict["START_RDATES"] = self.rdstart_str
            else:
                self.rdstart_str = ",".join(rdates)
                self.rruleset_dict["RDATE"] = self.rdstart_str

            self.rdates = rdates
            self.token_map["+"] = ", ".join(udates)
            bug_msg(f"{rdates = }, {self.rdstart_str = }")

            # 🔸 CRITICAL: when final, freeze the token to the canonical absolute forms
            if getattr(self, "final", False) and rdates:
                token["token"] = f"@+ {', '.join(rdates)}"
                # bug_msg(f"finalized @+ token to {token['token'] = }")

            # Prepend RDATE in finalize_rruleset after possible insertion of DTSTART
            return True, rdates, []
        except Exception as e:
            return False, f"Invalid @+ value: {e}", []

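    # Illustrative sketch (not part of the diff): how @+ entries are expanded.
    # The formatter chosen (_fmt_utc_Z, _fmt_naive, or _fmt_date) follows
    # self.s_kind, which do_s set from the @s value; the compact naive form
    # shown below is an assumption.
    #
    #   # with s_kind == "naive" and token {"token": "@+ 2024-07-03 14:00"}:
    #   # rdates ≈ ["20240703T140000"] and, when final, the token is frozen to
    #   # "@+ 20240703T140000"
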
    def do_exdate(self, token: dict):
        """
        @- … : explicit exclusion dates
        - Maintain a de-duplicated list of compact dates in self.exdates.
        - finalize_rruleset() will emit EXDATE using this list in either path.

        When self.final is True, the token text is rewritten to use the
        expanded canonical forms so that future parses are stable.
        """
        try:
            token_body = token["token"][2:].strip()
            dt_strs = [s.strip() for s in token_body.split(",") if s.strip()]

            if not hasattr(self, "exdates") or self.exdates is None:
                self.exdates = []

            new_ex: list[str] = []
            udates: list[str] = []

            for dt_str in dt_strs:
                if self.s_kind == "aware":
                    dt = parse(dt_str, self.s_tz)
                    dt_fmt = _fmt_utc_Z(dt)
                elif self.s_kind == "naive":
                    dt = parse(dt_str)
                    dt_fmt = _fmt_naive(dt)
                else:
                    dt = parse(dt_str)
                    dt_fmt = _fmt_date(dt)

                if dt_fmt not in self.exdates and dt_fmt not in new_ex:
                    new_ex.append(dt_fmt)
                    udates.append(self.fmt_user(dt))

            self.exdates.extend(new_ex)
            self.token_map["-"] = ", ".join(udates)
            # convenience string if you ever need it
            self.exdate_str = ",".join(self.exdates) if self.exdates else ""

            # 🔸 CRITICAL: when final, freeze the token to the canonical absolute forms
            if getattr(self, "final", False) and new_ex:
                token["token"] = f"@- {', '.join(new_ex)}"
                # bug_msg(f"finalized @- token to {token['token'] = }")

            return True, new_ex, []
        except Exception as e:
            return False, f"Invalid @- value: {e}", []

|
|
2921
|
+
    def collect_rruleset_tokens(self):
        """Return the list of relative tokens used for building the rruleset."""
        rruleset_tokens = []
        found_rrule = False

        for token in self.relative_tokens:
            if not found_rrule:
                if token["t"] == "@" and token["k"] == "r":
                    found_rrule = True
                    rruleset_tokens.append(token)  # relative token
            else:
                if token["t"] == "&":
                    rruleset_tokens.append(token)  # relative token
                else:
                    break  # stop collecting on first non-& after @r

        return rruleset_tokens

    def finalize_rruleset(self) -> str:
        """
        Build an rruleset string using self.relative_tokens, self.dtstart_str and self.rdstart_str.
        Emits:
        - DTSTART (if an rrule is present)
        - RRULE:...
        - RDATE:... (from rdstart_str or rdate_str)
        - EXDATE:... (if tracked)
        """
        rrule_tokens = self.collect_rruleset_tokens()
        bug_msg(f"{rrule_tokens = }")
        # rrule_tokens = self.rrule_tokens
        # bug_msg(f"in finalize_rruleset {self.rrule_tokens = }")
        if not self.dtstart or not rrule_tokens:
            # nothing to build without a start or an @r token
            return ""
        # map @r y/m/w/d → RRULE:FREQ=...
        freq_map = {"y": "YEARLY", "m": "MONTHLY", "w": "WEEKLY", "d": "DAILY"}
        parts = rrule_tokens[0]["token"].split(maxsplit=1)
        freq_abbr = parts[1].strip() if len(parts) > 1 else ""
        freq = freq_map.get(freq_abbr.lower())
        if not freq:
            return ""

        rrule_components = {"FREQ": freq}

        # &-tokens
        for tok in rrule_tokens[1:]:
            token_str = tok["token"]
            try:
                key, value = token_str[1:].split(maxsplit=1)  # strip leading '&'
            except Exception:
                key = tok.get("k", "")
                value = tok.get("v", "")
            key = key.strip()
            value = value.strip()
            if key == "u":
                ok, res = local_dtstr_to_utc(value)
                value = res if ok else ""
            elif ", " in value:
                value = ",".join(value.split(", "))
            component = self.key_to_param.get(key, None)
            # bug_msg(f"components {key = }, {value = }, {component = }")
            if component:
                rrule_components[component] = value

        rrule_line = "RRULE:" + ";".join(
            f"{k}={v}" for k, v in rrule_components.items()
        )

        # bug_msg(f"{rrule_line = }")
        # Assemble lines safely
        lines: list[str] = []

        dtstart_str = getattr(self, "dtstart_str", "") or ""
        if dtstart_str:
            lines.append(dtstart_str)
            self.rruleset_dict["DTSTART"] = dtstart_str

        if rrule_line:
            lines.append(rrule_line)
            # only add the rdates from @+, not @s, since we have a rrule_line
            self.rruleset_dict["RRULE"] = rrule_line
            if self.rdates:
                lines.append(f"RDATE:{','.join(self.rdates)}")
                self.rruleset_dict["RDATE"] = ",".join(self.rdates)
        else:
            # here we need to include @s since we do not have a rrule_line
            # NOTE Would we be finalizing the rruleset without an rrule_line?
            rdstart_str = getattr(self, "rdstart_str", "") or ""
            if rdstart_str:
                lines.append(rdstart_str)
                self.rruleset_dict["RDATESONLY"] = rdstart_str
                bug_msg(f"appended rdstart_str: {lines = }")

        exdate_str = getattr(self, "exdate_str", "") or ""
        if exdate_str:
            lines.append(f"EXDATE:{exdate_str}")
            self.rruleset_dict["EXDATE"] = f"EXDATE:{exdate_str}"

        return "\n".join(lines)

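    # A sketch of the assembled string (hypothetical tokens; assumes key_to_param
    # maps "w" -> BYDAY and "c" -> COUNT, consistent with the &-option handling above):
    #
    #   @s 2025-08-19 9:00 together with "@r w &w MO,WE &c 5" yields
    #
    #       DTSTART:20250819T0900
    #       RRULE:FREQ=WEEKLY;BYDAY=MO,WE;COUNT=5
    #
    # a form that dateutil's rrulestr() can consume directly.
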
    def build_jobs(self):
        """
        Build self.jobs from @~ + &... token groups.
        Handles &r id: prereq1, prereq2, … and &f completion pairs.
        """
        job_groups = self.collect_grouped_tokens({"~"})
        job_entries = []

        for idx, group in enumerate(job_groups, start=1):
            anchor = group[0]
            token_str = anchor["token"]

            # job name before first &
            job_portion = token_str[3:].strip()
            split_index = job_portion.find("&")
            job_name = (
                job_portion[:split_index].strip() if split_index != -1 else job_portion
            )

            job = {"~": job_name}

            for token in group[1:]:
                try:
                    k, v = token["token"][1:].split(maxsplit=1)
                    k = k.strip()
                    v = v.strip()

                    if k == "r":
                        ok, primary, deps = self.do_requires({"token": f"&r {v}"})
                        if not ok:
                            self.errors.append(primary)
                            continue
                        job["id"] = primary
                        job["reqs"] = deps

                    elif k == "f":  # completion
                        completed, due = parse_completion_value(v)
                        if completed:
                            job["f"] = self.fmt_compact(completed)
                            self.token_map.setdefault("~f", {})
                            self.token_map["~f"][job.get("id", idx)] = self.fmt_user(
                                completed
                            )
                        if due:
                            job["due"] = self.fmt_compact(due)

                    else:
                        job[k] = v

                except Exception as e:
                    self.errors.append(
                        f"Failed to parse job metadata token: {token['token']} ({e})"
                    )

            job_entries.append(job)

        self.jobs = job_entries
        return job_entries

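    # A minimal sketch of one @~ group (hypothetical values; the exact types of
    # the id and the prereq list depend on do_requires, defined elsewhere):
    #
    #   "@~ paint &r 3: 1, 2" parses to
    #   {"~": "paint", "id": 3, "reqs": [1, 2]}
    #
    # and an "&f 2025-08-20 3pm" in the same group would add a compact "f" entry
    # plus a user-facing string in token_map["~f"].
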
    def finalize_jobs(self, jobs):
        """
        Compute job status (finished / available / waiting)
        using the new &r id: prereqs format and propagate @f if all are done.
        Also sets a human-friendly `display_subject` per job.
        """
        if not jobs:
            return False, "No jobs to process"
        if not self.parse_ok:
            return False, "Error parsing job tokens"

        # index by id
        job_map = {j["id"]: j for j in jobs if "id" in j}
        finished = {jid for jid, j in job_map.items() if j.get("f")}

        # --- transitive dependency expansion ---
        all_prereqs = {}
        for jid, job in job_map.items():
            deps = set(job.get("reqs", []))
            trans = set(deps)
            stack = list(deps)
            while stack:
                d = stack.pop()
                if d in job_map:
                    for sd in job_map[d].get("reqs", []):
                        if sd not in trans:
                            trans.add(sd)
                            stack.append(sd)
            all_prereqs[jid] = trans

        # --- classify ---
        available, waiting = set(), set()
        for jid, deps in all_prereqs.items():
            unmet = deps - finished
            if jid in finished:
                continue
            if unmet:
                waiting.add(jid)
            else:
                available.add(jid)

        # annotate job objects with status
        for jid, job in job_map.items():
            if jid in finished:
                job["status"] = "finished"
            elif jid in available:
                job["status"] = "available"
            elif jid in waiting:
                job["status"] = "waiting"
            else:
                job["status"] = "standalone"

        # --- compute counts for display_subject ---
        num_available = sum(
            1 for j in job_map.values() if j.get("status") == "available"
        )
        num_waiting = sum(1 for j in job_map.values() if j.get("status") == "waiting")
        num_finished = sum(1 for j in job_map.values() if j.get("status") == "finished")

        task_subject = getattr(self, "subject", "") or ""
        if len(task_subject) > 12:
            task_subject_display = task_subject[:10] + " …"
        else:
            task_subject_display = task_subject

        # --- set display_subject per job (restoring old behavior) ---
        for jid, job in job_map.items():
            label = job.get("label") or job.get("~") or job.get("name") or f"#{jid}"
            # e.g. "A ∊ ParentTask 3/2/5"
            job["display_subject"] = (
                f"{label} ∊ {task_subject_display} {num_available}/{num_waiting}/{num_finished}"
            )

        # --- propagate @f if all jobs finished ---
        if finished and len(finished) == len(job_map):
            completed_dts = []
            for job in job_map.values():
                if "f" in job:
                    cdt, _ = parse_completion_value(job["f"])
                    if cdt:
                        completed_dts.append(cdt)

            if completed_dts:
                finished_dt = max(completed_dts)
                tok = {
                    "token": f"@f {self.fmt_user(finished_dt)}",
                    "t": "@",
                    "k": "f",
                }
                self.add_token(tok)
                self.itemtype = "x"
                self.has_f = True

                # strip per-job @f tokens after promoting to record-level @f
                for job in job_map.values():
                    job.pop("f", None)

        # --- finalize ---
        self.jobs = list(job_map.values())
        self.jobset = json.dumps(self.jobs, cls=CustomJSONEncoder)
        return True, self.jobs

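    # A worked classification example (hypothetical ids): with jobs
    #   1 (reqs: none, finished), 2 (reqs: 1), 3 (reqs: 2)
    # the transitive prereqs are {1: set(), 2: {1}, 3: {1, 2}}, so job 2 is
    # "available" (its only prereq is finished) while job 3 is "waiting" on 2.
    # Only when all three carry &f does the record itself gain a synthetic @f
    # token and itemtype "x".
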
    def do_f(self, token: dict | str, *, job_id: str | None = None):
        """
        Handle both:
        @f <datetime>[, <datetime>]   (task-level)
        &f <datetime>[, <datetime>]   (job-level)
        and store the completion datetime in self.completion.
        """
        try:
            if isinstance(token, dict):  # task-level @f
                val = token["token"][2:].strip()
            else:  # job-level &f
                val = str(token).strip()
            # next_dt (rather than `next`) to avoid shadowing the builtin
            due, next_dt = self._get_first_two_occurrences()
            bug_msg(f"{due = }, {next_dt = }")

            completed = parse(val)
            if not completed:
                return False, f"Invalid completion value: {val}", []
            self.completion = completed

            # ---- update token_map ----
            if job_id is None:
                # top-level task completion
                text = (
                    f"@f {self.fmt_user(completed)}, {self.fmt_user(due)}"
                    if due
                    else f"@f {self.fmt_user(completed)}"
                )
                self.token_map["f"] = text
                self.has_f = True
                token["token"] = text
                token["t"] = "@"
                token["k"] = "f"
                return True, text, []
            else:
                # job-level completion
                self.token_map.setdefault("~f", {})
                self.token_map["~f"][job_id] = self.fmt_user(completed)
                return True, f"&f {self.fmt_user(completed)}", []

        except Exception as e:
            return False, f"Error parsing completion token: {e}", []

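    # A brief sketch of the task-level path (hypothetical fmt_user output):
    # with a pending occurrence due 2025-08-19 9:00am, the call
    #
    #   >>> item.do_f({"token": "@f 2025-08-20 3:00pm", "t": "@", "k": "f"})
    #
    # rewrites the token to "@f <completed>, <due>" so the record keeps both the
    # completion moment and the occurrence it satisfied.
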
    def _serialize_date_or_datetime(self, d: date | datetime, tz_used) -> str:
        # check datetime first: datetime is a subclass of date, so testing
        # isinstance(d, date) first would swallow datetimes as well
        if isinstance(d, datetime):
            if d.tzinfo is None:
                return self._serialize_naive_dt(d)
            return self._serialize_aware_dt(d, tz.UTC)
        elif isinstance(d, date):
            return self._serialize_date(d)
        return f"Error: {d} must either be a date or datetime"

    def _serialize_date(self, d: date) -> str:
        return d.strftime("%Y%m%d")

    def _serialize_naive_dt(self, dt: datetime) -> str:
        # ensure naive
        if dt.tzinfo is not None:
            dt = dt.replace(tzinfo=None)
        return dt.strftime("%Y%m%dT%H%M")

    def _serialize_aware_dt(self, dt: datetime, zone) -> str:
        # Attach or convert to `zone`, then to UTC and append Z
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=zone)
        else:
            dt = dt.astimezone(zone)
        dt_utc = dt.astimezone(tz.UTC)
        return dt_utc.strftime("%Y%m%dT%H%MZ")

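    # The three compact forms, illustrated:
    #   date(2025, 8, 19)                        -> "20250819"
    #   datetime(2025, 8, 19, 11, 0)             -> "20250819T1100"
    #   11:00 EDT (UTC-4) as an aware datetime   -> "20250819T1500Z"
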
    # --- these need attention - they don't take advantage of what's already in Item ---

    def _has_s(self) -> bool:
        return any(
            tok.get("t") == "@" and tok.get("k") == "s" for tok in self.relative_tokens
        )

    def _get_start_dt(self) -> datetime | None:
        # return self.dtstart
        bug_msg(f"{self.subject = }, {self.dtstart = }")
        # bug_msg(f"{self.relative_tokens = }")
        tok = next(
            (
                t
                for t in self.relative_tokens
                if t.get("t") == "@" and t.get("k") == "s"
            ),
            None,
        )
        if not tok:
            return None
        val = tok["token"][2:].strip()  # strip "@s "
        bug_msg(
            f"start_dt: {tok = }, {val = }, {parse(val).astimezone() = }, {parse(val) = }"
        )
        try:
            dt = parse(val).astimezone()
            return dt.astimezone(tz.UTC)
        except Exception:
            return None

    def _set_start_dt(self, dt: datetime | None = None):
        """Replace or add an @s token; keep the formatting with a trailing space."""
        # dt = dt or self._get_start_dt()
        if not dt:
            return
        ts = dt.strftime("%Y%m%dT%H%M")
        # bug_msg(f"starting {self.relative_tokens = }, {ts = }")
        tok = next(
            (
                t
                for t in self.relative_tokens
                if t.get("t") == "@" and t.get("k") == "s"
            ),
            None,
        )
        if tok:
            tok["token"] = f"@s {ts} "
            # bug_msg(f"{tok['token'] = }")
        else:
            self.relative_tokens.append({"token": f"@s {ts} ", "t": "@", "k": "s"})
        # bug_msg(f"ending {self.relative_tokens = }")

    def _has_r(self) -> bool:
        return any(
            t.get("t") == "@" and t.get("k") == "r" for t in self.relative_tokens
        )

    def _get_count_token(self):
        # &c N under the @r group
        for t in self.relative_tokens:
            if t.get("t") == "&" and t.get("k") == "c":
                return t
        return None

    def _decrement_count_if_present(self) -> None:
        tok = self._get_count_token()
        if not tok:
            return
        parts = tok["token"].split()
        if len(parts) == 2 and parts[0] == "&c":
            try:
                n = int(parts[1])
                n2 = max(0, n - 1)
                if n2 > 0:
                    tok["token"] = f"&c {n2}"
                else:
                    # remove &c 0 entirely
                    self.relative_tokens.remove(tok)
            except ValueError:
                pass

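    # Decrement sketch: "&c 3" becomes "&c 2"; "&c 1" would drop to zero, so the
    # token is removed outright rather than left as a useless "&c 0".
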
    def _get_rdate_token(self):
        # @+ token (comma list)
        return next(
            (
                t
                for t in self.relative_tokens
                if t.get("t") == "@" and t.get("k") == "+"
            ),
            None,
        )

    def _parse_rdate_list(self) -> list[str]:
        """Return list of compact dt strings (e.g. '20250819T110000') from @+."""
        tok = self._get_rdate_token()
        if not tok:
            return []
        body = tok["token"][2:].strip()  # strip '@+ '
        parts = [p.strip() for p in body.split(",") if p.strip()]
        return parts

    def _write_rdate_list(self, items: list[str]) -> None:
        tok = self._get_rdate_token()
        if items:
            joined = ", ".join(items)
            if tok:
                tok["token"] = f"@+ {joined}"
            else:
                self.relative_tokens.append(
                    {"token": f"@+ {joined}", "t": "@", "k": "+"}
                )
        else:
            if tok:
                self.relative_tokens.remove(tok)

    def _remove_rdate_exact(self, dt_compact: str) -> None:
        lst = self._parse_rdate_list()
        lst2 = [x for x in lst if x != dt_compact]
        self._write_rdate_list(lst2)

    # --- for finish trial ---

    def _unfinished_jobs(self) -> list[dict]:
        return [j for j in self.jobs if "f" not in j]

    def _mark_job_finished(self, job_id: int, completed_dt: datetime) -> bool:
        """
        Add &f to the job (in the jobs JSON) and also mutate the @~ token group
        if it is kept as text. Returns True if the job was found and marked.
        """
        if not job_id:
            return False
        found = False
        # Annotate JSON jobs (jobs built by build_jobs() carry their id under "id")
        for j in self.jobs:
            if j.get("id") == job_id and "f" not in j:
                j["f"] = round(completed_dt.timestamp())
                found = True
                break

        # (Optional) If textual @~ … &f … tokens are also kept in relative_tokens,
        # they can be appended/updated here. Otherwise finalize_jobs() rebuilds the jobs JSON.
        if found:
            self.finalize_jobs(self.jobs)  # keeps statuses consistent
        return found

    def _set_itemtype(self, ch: str) -> None:
        """Set itemtype and mirror it into the first token if that token stores it."""
        self.itemtype = ch
        if self.relative_tokens and self.relative_tokens[0].get("t") == "itemtype":
            # tokens typically look like {'t': 'itemtype', 'token': '~'} or similar
            self.relative_tokens[0]["token"] = ch

    def _is_undated_single_shot(self) -> bool:
        """No @s, no RRULE, no @+ -> nothing to schedule (pure one-shot)."""
        return (
            (not self._has_s())
            and (not self._has_rrule())
            and (not self._find_all("@", "+"))
        )

    def _has_any_future_instances(self, now_dt: datetime | None = None) -> bool:
        """Return True if rruleset/@+ yields at least one occurrence >= now (or at all if now is None)."""
        rule_str = self.rruleset
        if not rule_str and not self._find_all("@", "+"):
            return False
        try:
            rs = rrulestr(rule_str) if rule_str else None
            if rs is None:
                # RDATE-only path (from @+ mirrored into rruleset)
                rdates = self._parse_rdate_list()  # returns compact strings
                return bool(rdates)
            if now_dt is None:
                # if we don't care about "future", just "any occurrences"
                return next(iter(rs), None) is not None
            # find first >= now
            try:
                got = rs.after(now_dt, inc=True)
            except TypeError:
                # handle aware/naive mismatch by using a UTC-aware fallback
                anchor = now_dt if now_dt.tzinfo else now_dt.replace(tzinfo=tz.UTC)
                got = rs.after(anchor, inc=True)
            return got is not None
        except Exception:
            return False

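    # The rrulestr()/after() pattern used above, in miniature (plain dateutil,
    # naive datetimes throughout):
    #
    #   >>> from datetime import datetime
    #   >>> from dateutil.rrule import rrulestr
    #   >>> rs = rrulestr("DTSTART:20250101T090000\nRRULE:FREQ=DAILY;COUNT=3")
    #   >>> rs.after(datetime(2025, 1, 2), inc=True)
    #   datetime.datetime(2025, 1, 2, 9, 0)
    #
    # after() returns None once the COUNT/UNTIL window is exhausted, which is
    # the "no future instances" signal relied on here.
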
    def _remove_tokens_matching(
        self, t: str, k: str | None = None, *, max_count: int | None = None
    ) -> int:
        """
        Remove tokens from self.relative_tokens that match:
        token["t"] == t and (k is None or token["k"] == k)

        Not to be confused with _remove_tokens(keys), the set-based variant
        defined later in this class; giving the two different names keeps the
        later definition from shadowing this one.

        Args:
            t: primary token type (e.g., "@", "&", "itemtype")
            k: optional subtype (e.g., "f", "s", "r"). If None, match all with type t.
            max_count: remove at most this many; None = remove all matches.

        Returns:
            int: number of tokens removed.
        """
        if not hasattr(self, "relative_tokens") or not self.relative_tokens:
            return 0

        removed = 0
        new_tokens = []
        for tok in self.relative_tokens:
            match = (tok.get("t") == t) and (k is None or tok.get("k") == k)
            if match and (max_count is None or removed < max_count):
                removed += 1
                continue
            new_tokens.append(tok)

        self.relative_tokens = new_tokens

        # Keep self.completions consistent if we removed @f tokens
        if t == "@" and (k is None or k == "f"):  # TODO: check this
            self._rebuild_completions_from_tokens()

        return removed

    def _rebuild_completions_from_tokens(self) -> None:
        """
        Rebuild self.completions from the remaining @f tokens in relative_tokens.
        Normalizes to a list[datetime].
        """
        comps = []
        for tok in getattr(self, "relative_tokens", []):
            if tok.get("t") == "@" and tok.get("k") == "f":
                # token text looks like "@f 20250828T211259 "
                try:
                    body = (tok.get("token") or "")[2:].strip()  # drop "@f"
                    dt = parse(body)
                    comps.append(dt)
                except Exception as e:
                    log_msg(f"error: {e}")

        self.completions = comps

    def _clear_schedule(self) -> None:
        """Clear any schedule fields/tokens and the rruleset mirror."""
        # remove @s
        self._remove_tokens_matching("@", "s")
        # remove @+/@- (optional if mirrored in rruleset)
        self._remove_tokens_matching("@", "+")
        self._remove_tokens_matching("@", "-")
        # remove the @r group (&-modifiers)
        self._remove_tokens_matching("@", "r")
        self._remove_tokens_matching("&")  # if &-mods only apply to recurrence
        # clear rruleset string
        self.rruleset = ""

    def _has_any_occurrences_left(self) -> bool:
        """
        Return True if the current schedule (rruleset and/or RDATEs) still yields
        at least one occurrence, irrespective of whether it's past or future.
        """
        rule_str = self.rruleset
        # If @+ is mirrored into RDATE, the rrulestr path below will handle it;
        # but if @+ is kept separate, fall back to parsing @+ directly:
        if not rule_str and self._find_all("@", "+"):
            return bool(self._parse_rdate_list())  # remaining RDATEs?

        if not rule_str:
            return False

        try:
            rs = rrulestr(rule_str)
            return next(iter(rs), None) is not None
        except Exception:
            return False

    def _has_o(self) -> bool:
        return any(
            t.get("t") == "@" and t.get("k") == "o" for t in self.relative_tokens
        )

    def _get_o(self) -> tuple[timedelta, bool] | None:
        """
        Read the first @o token and return (interval, learn) or None.
        """
        tok = next(
            (
                t
                for t in self.relative_tokens
                if t.get("t") == "@" and t.get("k") == "o"
            ),
            None,
        )
        if not tok:
            return None
        body = tok["token"][2:].strip()  # strip '@o'
        td, learn = _parse_o_body(body)
        return td, learn

    def _set_o_interval(self, td: timedelta, learn: bool) -> None:
        """
        Update or create the @o token with a normalized form ('@o 3d', '@o ~3d').
        """
        normalized = f"@o {'~' if learn else ''}{td_to_td_str(td)} "
        o_tok = next(
            (
                t
                for t in self.relative_tokens
                if t.get("t") == "@" and t.get("k") == "o"
            ),
            None,
        )
        if o_tok:
            o_tok["token"] = normalized
        else:
            self.relative_tokens.append({"token": normalized, "t": "@", "k": "o"})

    # --- drop-in replacement for do_over -----------------------------------

    def do_offset(self, token):
        """
        Normalize an @o (over/offset) token.
        - Accepts '@o 3d', '@o ~3d', '@o learn 3d'
        - Stores a normalized token ('@o 3d ' or '@o ~3d ')
        Returns (ok, seconds, messages) so callers can use the numeric interval if needed.
        """
        try:
            # token is a relative token dict, like {"token": "@o 3d", "t": "@", "k": "o"}
            body = token["token"][2:].strip()  # remove '@o'
            td, learn = _parse_o_body(body)

            # Normalize token text
            normalized = f"@o {'~' if learn else ''}{td_to_td_str(td)} "
            token["token"] = normalized
            token["t"] = "@"
            token["k"] = "o"

            return True, int(td.total_seconds()), []
        except Exception as e:
            return False, f"invalid @o interval: {e}", []

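    # Normalization sketch (assuming _parse_o_body and td_to_td_str round-trip
    # simple day intervals):
    #
    #   >>> tok = {"token": "@o ~3d", "t": "@", "k": "o"}
    #   >>> item.do_offset(tok)
    #   (True, 259200, [])            # 3 days in seconds
    #   >>> tok["token"]
    #   '@o ~3d '
    #
    # The leading '~' marks "learn" mode, in which finish() smooths the interval
    # toward the actually observed completion gap.
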
    def _instance_to_token_format_utc(self, dt: datetime) -> str:
        """Convert a datetime to UTC 'YYYYMMDDTHHMMZ' format (for DTSTART in the rruleset)."""
        if dt.tzinfo is None:
            local = dt.replace(tzinfo=tz.tzlocal())
        else:
            local = dt
        dt_utc = local.astimezone(tz.UTC)
        return dt_utc.strftime("%Y%m%dT%H%MZ")

    def _instance_to_token_format_local(self, dt: datetime | date) -> str:
        """Format a date or datetime as a local token string (no Z)."""
        if isinstance(dt, datetime):
            local = _to_local_naive(dt)
            return local.strftime("%Y%m%dT%H%M")
        else:
            return dt.strftime("%Y%m%d")

    def _is_first_from_rdate(self, first_dt: datetime) -> bool:
        """
        Return True if the next occurrence (first_dt) matches an explicit @+ list date.
        """
        for tok in self.relative_tokens:
            if tok.get("k") == "+":
                token_body = tok["token"][2:].strip()
                for part in token_body.split(","):
                    part = part.strip()
                    try:
                        dt = parse(part)
                    except Exception:
                        continue
                    if _to_local_naive(dt) == _to_local_naive(first_dt):
                        return True
        return False

    def _get_sorted_plus_dt_str(self) -> list[str]:
        """Return the sorted list of compact datetime strings found in @+ tokens."""
        plus_dates = []
        for tok in self.relative_tokens:
            if tok.get("k") == "+":
                body = tok["token"][2:].strip()
                parts = [x.strip() for x in body.split(",")]
                parts.sort()
                plus_dates = parts

        bug_msg(f"{plus_dates = }")
        return plus_dates

    def next_from_rrule(self):
        rdict = self.rruleset_dict
        if "RRULE" not in rdict:
            return None
        # skip empty components so rrulestr is not fed blank lines
        components = [
            c
            for c in (
                rdict.get("DTSTART", ""),
                rdict.get("RRULE", ""),
                rdict.get("EXDATE", ""),
            )
            if c
        ]
        bug_msg(f"{components = }")
        rule = rrulestr("\n".join(components))
        first_two = list(rule)[:2]
        if len(first_two) == 2:
            bug_msg(f"returning {first_two[1] = }")
            return first_two[1]
        else:
            return None

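    # next_from_rrule in miniature: with
    #   DTSTART:20250101T090000 and RRULE:FREQ=DAILY;COUNT=3
    # the expansion is Jan 1, 2, 3 at 09:00, so the method returns the *second*
    # occurrence (Jan 2), the instance that becomes the new @s once the first
    # is finished. With COUNT=1 there is no second instance and it returns None.
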
    def finish(self) -> None:
        """Process finishing of an item, especially handling repetition."""
        due_dt = None
        if offset_tok := next(
            (t for t in self.relative_tokens if t.get("k") == "o"), None
        ):
            due_dt = self._get_start_dt()
            completed_dt = self.completion
            td = td_str_to_td(offset_tok["token"].split(maxsplit=1)[1])
            offset_val = offset_tok["token"][3:]
            bug_msg(
                f"{offset_val = }, {due_dt = }, {td = }, {offset_val.startswith('~') = }"
            )
            if offset_val.startswith("~") and due_dt:
                # learn mode: smooth the stored interval toward the observed gap
                actual = completed_dt - due_dt
                td = self._smooth_interval(td, actual)
                # keep the '~' prefix so learn mode persists across finishes
                offset_tok["token"] = f"@o ~{td_to_td_str(td)}"
                self._replace_or_add_token("o", f"~{td_to_td_str(td)}")
                self._replace_or_add_token("s", self.fmt_user(completed_dt + td))
                bug_msg(f"{actual = }, {td = }")
            else:
                self._replace_or_add_token("s", self.fmt_user(completed_dt + td))
            utc_next = self._instance_to_token_format_utc(completed_dt + td)
            self.rruleset = f"RDATE:{utc_next}"
            self.rdstart_str = f"RDATE:{utc_next}"
            self.dtstart = utc_next
            bug_msg(
                f"after processing offset: {self.relative_tokens = }, {self.rruleset = }, {self.dtstart = }"
            )

            return

        # if not offset, use the rruleset for due
        bug_msg(f"{self.rruleset = }")
        first, second = self._get_first_two_occurrences()
        bug_msg(f"{first = }, {second = }")
        due = first
        if due is None:
            # no upcoming instance, so mark the item done
            self.itemtype = "x"
            self.completions = (self.completion, None)
            return

        is_rrule = bool(self.rruleset and "RRULE" in self.rruleset)
        has_plus = any(tok.get("k") == "+" for tok in self.relative_tokens)
        is_rdate_only = (not is_rrule) and has_plus

        if is_rrule:
            if self._is_first_from_rdate(due):
                # the first instance came from an explicit @+ date despite the RRULE
                tok_str = self._instance_to_token_format_local(due)
                removed = self._remove_instance_from_plus_tokens(
                    self.relative_tokens, tok_str
                )
                bug_msg(f"from rdate: {due = }, {tok_str = }, {self.relative_tokens = }")

            else:
                # first from the RRULE: advance @s to the next occurrence
                next_due = self.next_from_rrule()
                if next_due:
                    local_next = _to_local_naive(next_due)
                    self._replace_or_add_token("s", self.fmt_user(local_next))
                    utc_next = self._instance_to_token_format_utc(next_due)
                    self.rruleset = re.sub(
                        r"(?m)^DTSTART:\d{8}T\d{4}Z",
                        f"DTSTART:{utc_next}",
                        self.rruleset,
                        count=1,
                    )
                    bug_msg(f"from rrule: {next_due = }, {utc_next = }, {self.rruleset = }")
                else:
                    self._remove_tokens({"r", "+", "-"})
                    self.itemtype = "x"

        elif is_rdate_only:
            # RDATE-only case
            # Usual suspects: @s + @+. Combine them as a list of fmt_utc_z strings,
            # sort, and remove duplicates.
            tok_str = fmt_utc_z(first)
            plus_dates = self._get_sorted_plus_dt_str()
            # the first entry in @+ is either the new start or the instance that
            # was finished; either way it is removed
            first_from_plus = plus_dates.pop(0) if plus_dates else None
            if plus_dates:
                new_plus = ",".join(plus_dates)
                self._replace_or_add_token("+", new_plus)
            else:
                # there are no more datetimes in @+
                self._remove_tokens({"+"})
            if tok_str == self.dtstart:
                # the first instance is @s
                if first_from_plus:
                    # there is a new start available in @+ to replace @s
                    new_start = first_from_plus
                    self._replace_or_add_token("s", new_start)
                    self.dtstart = new_start
                else:
                    # @s was the only instance
                    self.itemtype = "x"
            else:
                # the first instance must be from @+, not @s, which remains unchanged;
                # since first_from_plus has already been removed from @+, nothing left to do
                pass

            new_plus_dates = self._get_sorted_plus_dt_str()
            new_plus_date_str = f",{','.join(new_plus_dates)}" if new_plus_dates else ""
            self.rdstart_str = f"RDATE:{self.dtstart}{new_plus_date_str}"
            self.rruleset = f"RDATE:{self.dtstart}{new_plus_date_str}"
            self.rruleset_dict["START_RDATES"] = self.rdstart_str

            bug_msg(f"{self.dtstart = }, {new_plus_date_str = }, {self.rruleset = }")

        else:
            # one-shot or no repetition
            self._remove_tokens({"r", "+", "-"})
            self.itemtype = "x"

        self._remove_tokens({"f"})
        bug_msg(f"after removing f: {self.relative_tokens = }")
        self.completions = (self.completion, due)
        bug_msg(f"after reparsing finish tokens: {self.relative_tokens = }")

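    # RDATE-only walk-through (hypothetical compact values): with
    #   @s 20250819T0900Z and @+ 20250826T0900Z, 20250902T0900Z
    # finishing the 8/19 instance promotes 20250826T0900Z out of @+ into @s and
    # leaves rruleset as "RDATE:20250826T0900Z,20250902T0900Z"; once @+ is
    # exhausted, a final finish marks the item "x" (done).
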
    def _replace_or_add_token(self, key: str, dt: str) -> None:
        """Replace the token with key `key` or add a new one for dt."""
        if isinstance(dt, datetime):
            dt = self.fmt_user(dt)
        new_tok = {"token": f"@{key} {dt}", "t": "@", "k": key}
        # replace if exists
        for tok in self.relative_tokens:
            if tok.get("k") == key:
                bug_msg(f"replaced {key = }; {tok = }; {new_tok = }")
                tok.update(new_tok)
                return
        # else append
        bug_msg(f"appending {new_tok = }")
        self.relative_tokens.append(new_tok)

    def _remove_tokens(self, keys: set[str]) -> None:
        """Remove tokens whose `k` is in `keys` from self.relative_tokens."""
        self.relative_tokens = [
            t for t in self.relative_tokens if t.get("k") not in keys
        ]

    def reparse_finish_tokens(self) -> None:
        """
        Re-run only the token handlers that can be affected by finish():
        @s, @r, @+.
        Works directly from self.relative_tokens.
        """
        affected_keys = {"s", "r", "+"}

        for tok in self.relative_tokens:
            k = tok.get("k")
            if k in affected_keys:
                handler = getattr(self, f"do_{k}", None)
                if handler:
                    ok, msg, extras = handler(tok)
                    if not ok:
                        self.parse_ok = False
                        self.parse_message = msg
                        return
                    # process any extra tokens the handler produces
                    for extra in extras:
                        ek = extra.get("k")
                        if ek in affected_keys:
                            getattr(self, f"do_{ek}")(extra)

        # only finalize if parse is still clean
        # if self.parse_ok and self.final:
        #     self.finalize_rcord()

    def mark_final(self) -> None:
        """
        Mark this item as final and normalize to absolute datetimes.
        """
        self.final = True
        self.rebuild_from_tokens(resolve_relative=True)  # force absolute now
        # self.finalize_rruleset()  # RRULE/DTSTART/RDATE/EXDATE strings updated

    def rebuild_from_tokens(self, *, resolve_relative: bool | None = None) -> None:
        """Recompute DTSTART/RDATE/RRULE/EXDATE + rruleset + jobs from self.relative_tokens."""
        if resolve_relative is None:
            resolve_relative = self.final
        bug_msg(f"{resolve_relative = }")
        # self._normalize_datetime_tokens(resolve_relative=resolve_relative)
        dtstart_str, rdstart_str, rrule_line = self._derive_rrule_pieces()
        self.dtstart_str = dtstart_str or ""
        self.rdstart_str = rdstart_str or ""
        self.rruleset = self._compose_rruleset(dtstart_str, rrule_line, rdstart_str)
        # If jobs are derived from tokens, rebuild them too; else skip:
        if self.collect_grouped_tokens({"~"}):
            jobs = self.build_jobs()
            self.finalize_jobs(jobs)

    def _normalize_datetime_tokens(self, *, resolve_relative: bool) -> None:
        """Normalize @s/@+/@-/@f to compact absolute strings; optionally resolve human phrases."""

        def to_compact(dt):
            if isinstance(dt, datetime):
                return dt.strftime("%Y%m%dT%H%M")
            # If you ever allow date objects:
            return dt.strftime("%Y%m%d")

        for tok in self.relative_tokens:
            bug_msg(f"{tok = }")
            if tok.get("t") != "@":
                continue
            k = tok.get("k")
            text = (tok.get("token") or "").strip()
            if k == "s":
                body = text[2:].strip()
                bug_msg(f"{body = }")
                dt = (
                    parse(body)
                    if resolve_relative
                    else self._parse_compact_or_iso(body)
                )
                tok["token"] = f"@s {to_compact(dt)} "
            elif k in {"+", "-"}:
                body = text[2:].strip()
                parts = [p.strip() for p in body.split(",") if p.strip()]
                dts = [
                    (parse(p) if resolve_relative else self._parse_compact_or_iso(p))
                    for p in parts
                ]
                joined = ",".join(to_compact(dt) for dt in dts)
                tok["token"] = f"@{k} {joined} "
            elif k == "f":
                body = text[2:].strip()
                dt = (
                    parse(body)
                    if resolve_relative
                    else self._parse_compact_or_iso(body)
                )
                tok["token"] = f"@f {to_compact(dt)} "

    def _derive_rrule_pieces(self) -> tuple[str | None, str | None, str | None]:
        """Return (DTSTART line, RDATE line, RRULE line) from tokens."""
        dtstart = None
        rdates, exdates = [], []
        rrule_components = {}

        for tok in self.relative_tokens:
            if tok.get("t") != "@":
                continue
            k = tok.get("k")
            text = (tok.get("token") or "").strip()
            if k == "s":
                dtstart = text[2:].strip()
            elif k == "+":
                rdates += [p.strip() for p in text[2:].split(",") if p.strip()]
            elif k == "-":
                exdates += [p.strip() for p in text[2:].split(",") if p.strip()]
            elif k == "r":
                group = next(
                    (
                        g
                        for g in self.collect_grouped_tokens({"r"})
                        if g and g[0] is tok
                    ),
                    None,
                )
                if group:
                    rrule_components = self._rrule_components_from_group(group)

        dtstart_str = None
        if dtstart:
            dtstart_str = (
                f"DTSTART;VALUE=DATE:{dtstart}"
                if len(dtstart) == 8
                else f"DTSTART:{dtstart}"
            )

        rdstart_str = f"RDATE:{','.join(rdates)}" if rdates else None
        # If you want EXDATE, add it similarly and pass it to _compose_rruleset.
        rrule_line = (
            f"RRULE:{';'.join(f'{k}={v}' for k, v in rrule_components.items())}"
            if rrule_components
            else None
        )
        return dtstart_str, rdstart_str, rrule_line

    def _compose_rruleset(
        self, dtstart_str, rrule_line, rdate_line, exdate_line=None
    ) -> str:
        parts = []
        if dtstart_str:
            parts.append(dtstart_str)
        if rrule_line:
            parts.append(rrule_line)
        if rdate_line:
            parts.append(rdate_line)
        if exdate_line:
            parts.append(exdate_line)
        return "\n".join(parts)

    def _parse_compact_or_iso(self, s: str) -> datetime:
        """Accept YYYYMMDD or YYYYMMDDTHHMM or any ISO-ish string; return a datetime."""
        s = s.strip()
        if len(s) == 8 and s.isdigit():
            return datetime.strptime(s, "%Y%m%d")
        if len(s) == 15 and s[8] == "T":
            return datetime.strptime(s, "%Y%m%dT%H%M")
        return parse(s)

    def _rrule_components_from_group(self, group: list[dict]) -> dict:
        """Build the RRULE components dict from the @r group and its &-options."""
        bug_msg("IN RRULE COMPONENTS")
        freq_map = {"y": "YEARLY", "m": "MONTHLY", "w": "WEEKLY", "d": "DAILY"}
        comps = {}
        anchor = group[0]["token"]  # "@r d" etc.
        parts = anchor.split(maxsplit=1)
        if len(parts) > 1:
            freq_abbr = parts[1].strip()
            freq = freq_map.get(freq_abbr)
            if freq:
                comps["FREQ"] = freq
        for tok in group[1:]:
            if tok.get("t") == "&":
                key, value = (
                    tok.get("k"),
                    (
                        tok.get("v") or tok.get("token", "")[1:].split(maxsplit=1)[-1]
                    ).strip(),
                )
                if key == "m":
                    comps["BYMONTH"] = value
                elif key == "w":
                    comps["BYDAY"] = value
                elif key == "d":
                    comps["BYMONTHDAY"] = value
                elif key == "i":
                    comps["INTERVAL"] = value
                elif key == "u":
                    bug_msg(f"GOT UNTIL: {value = }")
                    comps["UNTIL"] = value.replace("/", "")
                elif key == "c":
                    comps["COUNT"] = value
        return comps

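    # Component mapping in miniature (per the &-option handling above):
    #
    #   the group for "@r m &d 1,15 &c 6" yields
    #   {"FREQ": "MONTHLY", "BYMONTHDAY": "1,15", "COUNT": "6"}
    #
    # which _derive_rrule_pieces/_compose_rruleset render as
    # "RRULE:FREQ=MONTHLY;BYMONTHDAY=1,15;COUNT=6".
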
    def _strip_positions(self, tokens_with_pos: list[dict]) -> list[dict]:
        """Remove 'start'/'end' from editing tokens and strip whitespace from 'token'."""
        out = []
        for t in tokens_with_pos:
            t2 = dict(t)
            t2.pop("s", None)
            t2.pop("e", None)
            if "token" in t2 and isinstance(t2["token"], str):
                t2["token"] = t2["token"].strip()
            out.append(t2)
        return out