tklr-dgraham 0.0.0rc22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tklr/__init__.py +0 -0
- tklr/cli/main.py +528 -0
- tklr/cli/migrate_etm_to_tklr.py +764 -0
- tklr/common.py +1296 -0
- tklr/controller.py +3635 -0
- tklr/item.py +4014 -0
- tklr/list_colors.py +234 -0
- tklr/model.py +4548 -0
- tklr/shared.py +739 -0
- tklr/sounds/alert.mp3 +0 -0
- tklr/tklr_env.py +493 -0
- tklr/use_system.py +64 -0
- tklr/versioning.py +21 -0
- tklr/view.py +3503 -0
- tklr/view_textual.css +296 -0
- tklr_dgraham-0.0.0rc22.dist-info/METADATA +814 -0
- tklr_dgraham-0.0.0rc22.dist-info/RECORD +20 -0
- tklr_dgraham-0.0.0rc22.dist-info/WHEEL +5 -0
- tklr_dgraham-0.0.0rc22.dist-info/entry_points.txt +2 -0
- tklr_dgraham-0.0.0rc22.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,764 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
import json
|
|
3
|
+
import re
|
|
4
|
+
from datetime import datetime, timezone
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
# ------------------------------------------------------------
|
|
9
|
+
# Regex patterns for ETM tags
|
|
10
|
+
# ------------------------------------------------------------
|
|
11
|
+
# Regexes recognizing ETM's tagged serialization forms:
#   {D}:YYYYMMDD            -> calendar date
#   {T}:YYYYMMDDTHHMM[A|N]  -> datetime (A = aware/UTC, N = naive)
#   {I}:<interval>          -> timedelta string (passed through)
#   {P}:<dt> -> <dt>        -> completion pair (done -> due)
#   {W}:<weekday spec>      -> weekday specification (passed through)
TAG_PATTERNS = {
    "D": re.compile(r"^\{D\}:(\d{8})$"),
    "T": re.compile(r"^\{T\}:(\d{8}T\d{4})([AN])$"),
    "I": re.compile(r"^\{I\}:(.+)$"),
    "P": re.compile(r"^\{P\}:(.+)$"),
    "W": re.compile(r"^\{W\}:(.+)$"),
}

# Untagged datetime: YYYYMMDDTHHMM optionally suffixed with A (aware/UTC),
# N (naive) or Z (UTC).
BARE_DT = re.compile(r"^(\d{8})T(\d{4})([ANZ]?)$")

# Maps etm "&" sub-keys (inside @r rules) to their tklr equivalents.
AND_KEY_MAP = {
    "n": "M",  # minutes -> &M
    "h": "H",  # hours -> &H
    "M": "m",  # months -> &m
    # others unchanged
}

# Maps etm itemtype characters to tklr itemtype characters.
TYPE_MAP = {
    "*": "*",  # event
    "-": "~",  # task
    "%": "%",  # note
    "!": "?",  # inbox
    "~": "+",  # goal
}
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
# ------------------------------------------------------------
|
|
38
|
+
# Helpers
|
|
39
|
+
# ------------------------------------------------------------
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def parse_etm_date_or_dt(val) -> list[str]:
    """
    Decode ETM-encoded values (dates, datetimes, intervals, completions, weekdays).

    Always returns a list[str]. Handles:
    - lists (recursively flattened)
    - {D}:YYYYMMDD
    - {T}:YYYYMMDDTHHMM[A|N|Z]
    - {P}:<dt_str> -> <dt_str>  (one 'left -> right' string, both sides formatted)
    - {I}:<interval>            (timedelta string passthrough)
    - {W}:<weekday spec>        (passthrough)
    - bare datetimes: YYYYMMDDTHHMM[A|N|Z]? (formatted)
    - everything else: passthrough
    """
    # lists: flatten recursively
    if isinstance(val, list):
        out: list[str] = []
        for v in val:
            out.extend(parse_etm_date_or_dt(v))
        return out

    # non-strings: stringify
    if not isinstance(val, str):
        return [str(val)]

    # {D}: calendar date
    if m := TAG_PATTERNS["D"].match(val):
        d = datetime.strptime(m.group(1), "%Y%m%d").date()
        return [format_dt(d)]

    # {T}: datetime; 'A' (and legacy 'Z') mark aware/UTC values
    if m := TAG_PATTERNS["T"].match(val):
        ts, kind = m.groups()
        dt = datetime.strptime(ts, "%Y%m%dT%H%M")
        if kind in ("A", "Z"):
            dt = dt.replace(tzinfo=timezone.utc)
        return [format_dt(dt)]

    # {P}: "<left> -> <right>" pair -> single "left -> right" string,
    # each side formatted.
    if m := TAG_PATTERNS["P"].match(val):
        pair = m.group(1)
        # BUG FIX: the original unconditionally unpacked split("->", 1) into
        # two names and raised ValueError on a malformed pair with no "->".
        if "->" not in pair:
            return [parse_etm_date_or_dt(pair.strip())[0]]
        left_raw, right_raw = [s.strip() for s in pair.split("->", 1)]
        # reuse this function to format each side; take the first result
        left_fmt = parse_etm_date_or_dt(left_raw)[0]
        right_fmt = parse_etm_date_or_dt(right_raw)[0]
        return [f"{left_fmt} -> {right_fmt}"]

    # {I} interval and {W} weekday: pass the payload through unchanged
    if m := TAG_PATTERNS["I"].match(val):
        return [m.group(1)]
    if m := TAG_PATTERNS["W"].match(val):
        return [m.group(1)]

    # bare datetime like 20250807T2300A / 20250807T2300 / 20250807T2300N
    if m := BARE_DT.match(val):
        ymd, hm, suf = m.groups()
        dt = datetime.strptime(f"{ymd}T{hm}", "%Y%m%dT%H%M")
        if suf in ("A", "Z"):
            dt = dt.replace(tzinfo=timezone.utc)
        return [format_dt(dt)]

    # fallback: unrecognized string, pass through
    return [val]
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def format_dt(dt: Any) -> str:
    """Render a datetime or date in a user-friendly form.

    Aware datetimes are converted to local time first; naive datetimes are
    formatted as-is. Date-like objects (anything with ``strftime`` that is
    not a datetime) get a date-only rendering; everything else is
    stringified.
    """
    if isinstance(dt, datetime):
        local = dt.astimezone() if dt.tzinfo is not None else dt
        return local.strftime("%Y-%m-%d %H:%M")
    if hasattr(dt, "strftime"):  # plain date
        return dt.strftime("%Y-%m-%d")
    return str(dt)
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def decode_etm_value(val: Any) -> list[str]:
    """Decode any etm-encoded value(s) into user-facing strings.

    Always returns a list of strings; lists are flattened recursively.
    Recognizes the {D}, {T}, {I}, {P} and {W} tag forms (see TAG_PATTERNS);
    anything unrecognized is passed through unchanged.
    """
    if isinstance(val, list):
        results = []
        for v in val:
            results.extend(decode_etm_value(v))
        return results

    if not isinstance(val, str):
        return [str(val)]

    if m := TAG_PATTERNS["D"].match(val):
        dt = datetime.strptime(m.group(1), "%Y%m%d").date()
        return [format_dt(dt)]

    if m := TAG_PATTERNS["T"].match(val):
        ts, kind = m.groups()
        dt = datetime.strptime(ts, "%Y%m%dT%H%M")
        if kind == "A":
            dt = dt.replace(tzinfo=timezone.utc)
        return [format_dt(dt)]

    if m := TAG_PATTERNS["I"].match(val):
        return [m.group(1)]

    if m := TAG_PATTERNS["P"].match(val):
        # "<done> -> <due>" pair: format each side that parses as a
        # date/datetime and join them into one comma-separated string.
        res = []
        for dt_str in m.group(1).split("->"):
            dt_str = dt_str.strip()
            dt = None
            length = len(dt_str)
            if length == 8:  # YYYYMMDD
                dt = datetime.strptime(dt_str, "%Y%m%d")
            elif length == 13:  # YYYYMMDDTHHMM
                dt = datetime.strptime(dt_str, "%Y%m%dT%H%M")
            elif length == 14:  # YYYYMMDDTHHMM + A/N suffix
                dt = datetime.strptime(dt_str[:-1], "%Y%m%dT%H%M")
                if dt_str[-1] == "A":
                    # BUG FIX: datetime.replace returns a new object; the
                    # original discarded the result.
                    dt = dt.replace(tzinfo=timezone.utc)
            # unrecognized lengths are silently skipped (debug prints removed)
            if dt:
                res.append(format_dt(dt))
        # BUG FIX: return a list as declared, not a bare string.
        return [", ".join(res)]

    if m := TAG_PATTERNS["W"].match(val):
        return [m.group(1)]

    return [val]
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def format_subvalue(val) -> list[str]:
    """Normalize etm json values into lists of strings for tokens.

    Lists are flattened recursively, strings are decoded via
    parse_etm_date_or_dt, None yields an empty list, and anything else is
    stringified.
    """
    if val is None:
        return []
    if isinstance(val, list):
        flattened: list[str] = []
        for element in val:
            flattened.extend(format_subvalue(element))
        return flattened
    if isinstance(val, str):
        return list(parse_etm_date_or_dt(val))
    return [str(val)]
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
# ------------------------------------------------------------
|
|
188
|
+
# Conversion logic
|
|
189
|
+
# ------------------------------------------------------------
|
|
190
|
+
# def etm_to_tokens(item: dict, key: str | None, include_etm: bool = True) -> list[str]:
|
|
191
|
+
# """Convert an etm JSON entry into a list of tklr tokens."""
|
|
192
|
+
#
|
|
193
|
+
# raw_type = item.get("itemtype", "?")
|
|
194
|
+
# has_jobs = bool(item.get("j")) # detect jobs
|
|
195
|
+
# itemtype = TYPE_MAP.get(raw_type, raw_type)
|
|
196
|
+
#
|
|
197
|
+
# # Promote tasks-with-jobs to projects
|
|
198
|
+
# if itemtype == "~" and has_jobs:
|
|
199
|
+
# itemtype = "^"
|
|
200
|
+
#
|
|
201
|
+
# summary = item.get("summary", "")
|
|
202
|
+
# tokens = [f"{itemtype} {summary}"]
|
|
203
|
+
#
|
|
204
|
+
# for k, v in item.items():
|
|
205
|
+
# if k in {"itemtype", "summary", "created", "modified", "h", "k", "q"}:
|
|
206
|
+
# continue
|
|
207
|
+
#
|
|
208
|
+
# if k == "d": # description
|
|
209
|
+
# tokens.append(f"@d {v}")
|
|
210
|
+
# continue
|
|
211
|
+
#
|
|
212
|
+
# if k == "b": # beginby
|
|
213
|
+
# tokens.append(f"@b {v}d")
|
|
214
|
+
# continue
|
|
215
|
+
#
|
|
216
|
+
# if k == "z" and v == "float":
|
|
217
|
+
# tokens.append("@z none")
|
|
218
|
+
# continue
|
|
219
|
+
#
|
|
220
|
+
# if k == "s": # start datetime
|
|
221
|
+
# vals = format_subvalue(v)
|
|
222
|
+
# if vals:
|
|
223
|
+
# tokens.append(f"@s {vals[0]}")
|
|
224
|
+
# continue
|
|
225
|
+
#
|
|
226
|
+
# # finish/completion
|
|
227
|
+
# if k == "f":
|
|
228
|
+
# vals = format_subvalue(v) # uses parse_etm_date_or_dt under the hood
|
|
229
|
+
# if vals:
|
|
230
|
+
# s = vals[0] # for @f we only expect one normalized value back
|
|
231
|
+
# if "->" in s:
|
|
232
|
+
# left, right = [t.strip() for t in s.split("->", 1)]
|
|
233
|
+
# if left == right:
|
|
234
|
+
# tokens.append(f"@f {left}")
|
|
235
|
+
# else:
|
|
236
|
+
# tokens.append(f"@f {left}, {right}")
|
|
237
|
+
# else:
|
|
238
|
+
# tokens.append(f"@f {s}")
|
|
239
|
+
# continue
|
|
240
|
+
#
|
|
241
|
+
# # if k == "r": # recurrence rules
|
|
242
|
+
# # if isinstance(v, list):
|
|
243
|
+
# # for rd in v:
|
|
244
|
+
# # if isinstance(rd, dict):
|
|
245
|
+
# # subparts = []
|
|
246
|
+
# # freq = rd.get("r")
|
|
247
|
+
# # if freq:
|
|
248
|
+
# # subparts.append(freq)
|
|
249
|
+
# # for subk, subv in rd.items():
|
|
250
|
+
# # if subk == "r":
|
|
251
|
+
# # continue
|
|
252
|
+
# # mapped = AND_KEY_MAP.get(subk, subk)
|
|
253
|
+
# # vals = format_subvalue(subv)
|
|
254
|
+
# # if vals:
|
|
255
|
+
# # subparts.append(f"&{mapped} {', '.join(vals)}")
|
|
256
|
+
# # tokens.append(f"@r {' '.join(subparts)}")
|
|
257
|
+
# # continue
|
|
258
|
+
#
|
|
259
|
+
# replaced_o = False # track if @o already handled or suppressed
|
|
260
|
+
#
|
|
261
|
+
# if k == "r": # recurrence rules
|
|
262
|
+
# # Handle legacy "@o r" (offset-repeat) form
|
|
263
|
+
# if item.get("o") == "r" and itemtype in {"~", "^"}:
|
|
264
|
+
# rlist = v if isinstance(v, list) else []
|
|
265
|
+
# if rlist and isinstance(rlist[0], dict):
|
|
266
|
+
# rd = rlist[0]
|
|
267
|
+
# freq = rd.get("r")
|
|
268
|
+
# interval = rd.get("i", 1)
|
|
269
|
+
# if freq in {"y", "m", "w", "d"}:
|
|
270
|
+
# new_o_value = f"{interval}{freq}"
|
|
271
|
+
# tokens.append(f"@o {new_o_value}")
|
|
272
|
+
# replaced_o = True # prevent later duplicate
|
|
273
|
+
# # skip normal @r generation
|
|
274
|
+
# continue
|
|
275
|
+
#
|
|
276
|
+
# # --- Normal recurring events ---
|
|
277
|
+
# if isinstance(v, list):
|
|
278
|
+
# for rd in v:
|
|
279
|
+
# if isinstance(rd, dict):
|
|
280
|
+
# subparts = []
|
|
281
|
+
# freq = rd.get("r")
|
|
282
|
+
# if freq:
|
|
283
|
+
# subparts.append(freq)
|
|
284
|
+
# for subk, subv in rd.items():
|
|
285
|
+
# if subk == "r":
|
|
286
|
+
# continue
|
|
287
|
+
# mapped = AND_KEY_MAP.get(subk, subk)
|
|
288
|
+
# vals = format_subvalue(subv)
|
|
289
|
+
# if vals:
|
|
290
|
+
# subparts.append(f"&{mapped} {', '.join(vals)}")
|
|
291
|
+
# tokens.append(f"@r {' '.join(subparts)}")
|
|
292
|
+
# continue
|
|
293
|
+
#
|
|
294
|
+
# # --- Handle legacy or special "@o" forms ---
|
|
295
|
+
# if k == "o":
|
|
296
|
+
# # Skip entirely if already handled by @o r
|
|
297
|
+
# if replaced_o:
|
|
298
|
+
# continue
|
|
299
|
+
#
|
|
300
|
+
# # Handle legacy "@o s" (shift → convert itemtype)
|
|
301
|
+
# if v == "s":
|
|
302
|
+
# itemtype = "*" # promote to event
|
|
303
|
+
# continue # omit entirely, no @o token
|
|
304
|
+
#
|
|
305
|
+
# # Normal @o
|
|
306
|
+
# vals = format_subvalue(v)
|
|
307
|
+
# if vals:
|
|
308
|
+
# tokens.append(f"@o {', '.join(vals)}")
|
|
309
|
+
# continue
|
|
310
|
+
#
|
|
311
|
+
# # jobs
|
|
312
|
+
# if k == "j":
|
|
313
|
+
# if isinstance(v, list):
|
|
314
|
+
# for jd in v:
|
|
315
|
+
# if isinstance(jd, dict):
|
|
316
|
+
# parts = []
|
|
317
|
+
#
|
|
318
|
+
# # job subject
|
|
319
|
+
# job_summary = jd.get("j", "").strip()
|
|
320
|
+
# if job_summary:
|
|
321
|
+
# parts.append(job_summary)
|
|
322
|
+
#
|
|
323
|
+
# # build &r from id + prereqs
|
|
324
|
+
# jid = jd.get("i")
|
|
325
|
+
# prereqs = jd.get("p", [])
|
|
326
|
+
# if jid:
|
|
327
|
+
# if prereqs:
|
|
328
|
+
# parts.append(f"&r {jid}: {', '.join(prereqs)}")
|
|
329
|
+
# else:
|
|
330
|
+
# parts.append(f"&r {jid}")
|
|
331
|
+
#
|
|
332
|
+
# # completion (&f same as @f)
|
|
333
|
+
# if (
|
|
334
|
+
# "f" in jd
|
|
335
|
+
# and isinstance(jd["f"], str)
|
|
336
|
+
# and jd["f"].startswith("{P}:")
|
|
337
|
+
# ):
|
|
338
|
+
# pair = jd["f"][4:]
|
|
339
|
+
# comp, due = pair.split("->")
|
|
340
|
+
# comp_val = decode_etm_value(comp.strip())[0]
|
|
341
|
+
# due_val = decode_etm_value(due.strip())[0]
|
|
342
|
+
# if comp_val == due_val:
|
|
343
|
+
# parts.append(f"&f {comp_val}")
|
|
344
|
+
# else:
|
|
345
|
+
# parts.append(f"&f {comp_val}, {due_val}")
|
|
346
|
+
#
|
|
347
|
+
# # other keys (skip ones we already handled)
|
|
348
|
+
# for subk, subv in jd.items():
|
|
349
|
+
# if subk in {"j", "i", "p", "summary", "status", "req", "f"}:
|
|
350
|
+
# continue
|
|
351
|
+
# vals = format_subvalue(subv)
|
|
352
|
+
# if vals:
|
|
353
|
+
# parts.append(f"&{subk} {', '.join(vals)}")
|
|
354
|
+
#
|
|
355
|
+
# tokens.append(f"@~ {' '.join(parts)}")
|
|
356
|
+
# continue
|
|
357
|
+
#
|
|
358
|
+
# if k == "a": # alerts
|
|
359
|
+
# if isinstance(v, list):
|
|
360
|
+
# for adef in v:
|
|
361
|
+
# if isinstance(adef, list) and len(adef) == 2:
|
|
362
|
+
# times = [x for part in adef[0] for x in format_subvalue(part)]
|
|
363
|
+
# cmds = [x for part in adef[1] for x in format_subvalue(part)]
|
|
364
|
+
# tokens.append(f"@a {','.join(times)}: {','.join(cmds)}")
|
|
365
|
+
# continue
|
|
366
|
+
#
|
|
367
|
+
# if k == "u": # used time
|
|
368
|
+
# if isinstance(v, list):
|
|
369
|
+
# for used in v:
|
|
370
|
+
# if isinstance(used, list) and len(used) == 2:
|
|
371
|
+
# td = format_subvalue(used[0])[0]
|
|
372
|
+
# d = format_subvalue(used[1])[0]
|
|
373
|
+
# tokens.append(f"@u {td}: {d}")
|
|
374
|
+
# continue
|
|
375
|
+
#
|
|
376
|
+
# if k in {"+", "-", "w"}: # multi-datetimes (RDATE/EXDATE/etc.)
|
|
377
|
+
# if isinstance(v, list):
|
|
378
|
+
# vals = []
|
|
379
|
+
# for sub in v:
|
|
380
|
+
# vals.extend(format_subvalue(sub))
|
|
381
|
+
# if vals:
|
|
382
|
+
# tokens.append(f"@{k} {', '.join(vals)}")
|
|
383
|
+
# continue
|
|
384
|
+
#
|
|
385
|
+
# # everything else
|
|
386
|
+
# vals = format_subvalue(v)
|
|
387
|
+
# if vals:
|
|
388
|
+
# tokens.append(f"@{k} {', '.join(vals)}")
|
|
389
|
+
#
|
|
390
|
+
# if include_etm and key is not None:
|
|
391
|
+
# tokens.append(f"@# {key}")
|
|
392
|
+
#
|
|
393
|
+
# return tokens
|
|
394
|
+
|
|
395
|
+
|
|
396
|
+
def reorder_tokens(tokens: list[str]) -> list[str]:
    """
    Arrange tokens into an order the parser accepts:
    1. itemtype/subject line first
    2. @s before @r and @+
    3. @r before @-
    4. all remaining tokens afterwards, in their original order
    """
    if not tokens:
        return tokens

    # Bucket each non-header token by its recognized prefix; everything
    # else keeps its natural order in `leftovers`.
    prefixes = ("@s ", "@r ", "@+ ", "@- ")
    buckets: dict[str, list[str]] = {p: [] for p in prefixes}
    leftovers: list[str] = []

    for tok in tokens[1:]:
        for prefix in prefixes:
            if tok.startswith(prefix):
                buckets[prefix].append(tok)
                break
        else:
            leftovers.append(tok)

    result = [tokens[0]]  # itemtype + subject stays first
    for prefix in prefixes:
        result.extend(buckets[prefix])
    result.extend(leftovers)
    return result
|
|
437
|
+
|
|
438
|
+
|
|
439
|
+
def etm_to_tokens(item: dict, key: str | None, include_etm: bool = True) -> list[str]:
    """Convert one etm JSON record into a list of tklr tokens.

    The first token is "<itemtype> <summary>"; each remaining token is an
    "@x ..." fragment (or "@~ ... &y ..." for jobs). Tokens are reordered
    via reorder_tokens() and, when include_etm is true and key is given,
    the etm record id is appended as "@# <key>".
    """
    raw_type = item.get("itemtype", "?")
    has_jobs = bool(item.get("j"))
    itemtype = TYPE_MAP.get(raw_type, raw_type)

    # promote tasks-with-jobs to projects
    if itemtype == "~" and has_jobs:
        itemtype = "^"

    summary = item.get("summary", "")
    tokens = [f"{itemtype} {summary}"]

    # ---------- PREPASS: decide @o behavior ----------
    o_val = item.get("o")
    convert_o_from_r = False  # True -> emit @o <interval><freq> and suppress @r
    new_o_value = None        # the computed "<interval><freq>" string
    skip_o_key = False        # True -> do not emit the original @o key at all

    # case: "@o s" -> delete @o and convert the item to an event
    if o_val == "s":
        itemtype = "*"
        tokens[0] = f"{itemtype} {summary}"
        skip_o_key = True

    # case: "@o r" on a task/project with an r-rule -> "@o <interval><freq>"
    elif o_val == "r" and itemtype in {"~", "^"}:
        rlist = item.get("r") if isinstance(item.get("r"), list) else []
        if rlist and isinstance(rlist[0], dict):
            rd = rlist[0]
            freq = rd.get("r")
            interval = rd.get("i", 1)
            if (
                freq in {"y", "m", "w", "d", "h"}
                and isinstance(interval, int)
                and interval > 0
            ):
                new_o_value = f"{interval}{freq}"
                convert_o_from_r = True
                skip_o_key = True  # do not emit a literal "@o r"

    # ---------- MAIN LOOP ----------
    for k, v in item.items():
        if k in {"itemtype", "summary", "created", "modified", "h", "k", "q"}:
            continue

        if k == "d":  # description
            tokens.append(f"@d {v}")
            continue

        if k == "b":  # beginby (days) -> @n
            tokens.append(f"@n {v}d")
            continue

        if k == "i":  # etm index -> @b
            tokens.append(f"@b {v}")
            continue

        if k == "z" and v == "float":  # floating timezone
            tokens.append("@z none")
            continue

        if k == "s":  # start date/datetime (only the first value is used)
            vals = format_subvalue(v)
            if vals:
                tokens.append(f"@s {vals[0]}")
            continue

        if k == "f":  # finish/completion, possibly a "done -> due" pair
            vals = format_subvalue(v)
            if vals:
                s = vals[0]
                if "->" in s:
                    left, right = [t.strip() for t in s.split("->", 1)]
                    tokens.append(
                        f"@f {left}" if left == right else f"@f {left}, {right}"
                    )
                else:
                    tokens.append(f"@f {s}")
            continue

        if k == "r":  # recurrence rules
            # If converting "@o r", emit the computed @o once; suppress @r
            if convert_o_from_r and new_o_value:
                tokens.append(f"@o {new_o_value}")
                continue

            # Legacy rrule sub-keys, applied before AND_KEY_MAP.
            # Hoisted out of the inner loop (was rebuilt per sub-key).
            legacy_rrule_map = {
                "M": "m",  # BYMONTH -> &m
                "m": "d",  # BYMONTHDAY -> &d
                "h": "H",  # BYHOUR -> &H
                "n": "M",  # BYMINUTE -> &M
            }

            # normal @r parsing
            if isinstance(v, list):
                for rd in v:
                    if isinstance(rd, dict):
                        subparts = []
                        freq = rd.get("r")
                        if freq:
                            subparts.append(freq)

                        for subk, subv in rd.items():
                            if subk == "r":
                                continue
                            mapped_subk = legacy_rrule_map.get(subk, subk)
                            mapped = AND_KEY_MAP.get(mapped_subk, mapped_subk)
                            vals = format_subvalue(subv)
                            if vals:
                                subparts.append(f"&{mapped} {', '.join(vals)}")

                        # BUG FIX: removed leftover debug print of the rule
                        tokens.append(f"@r {' '.join(subparts)}")
            continue

        if k == "j":  # jobs -> one "@~ ..." token per job dict
            if isinstance(v, list):
                for jd in v:
                    if not isinstance(jd, dict):
                        continue
                    parts = []

                    job_summary = jd.get("j", "").strip()
                    if job_summary:
                        parts.append(job_summary)

                    # build &r from job id + prerequisites
                    jid = jd.get("i")
                    prereqs = jd.get("p", [])
                    if jid:
                        parts.append(
                            f"&r {jid}: {', '.join(prereqs)}"
                            if prereqs
                            else f"&r {jid}"
                        )

                    # completion pair "&f done[, due]"
                    if (
                        "f" in jd
                        and isinstance(jd["f"], str)
                        and jd["f"].startswith("{P}:")
                    ):
                        # BUG FIX: the original had a stray bare `return`
                        # here (returning None from a list[str] function)
                        # and leftover debug prints.
                        decoded = decode_etm_value(jd["f"])
                        # decode_etm_value may yield a list or a bare string
                        fvalue = (
                            decoded[0] if isinstance(decoded, list) else decoded
                        )
                        if fvalue:
                            parts.append(f"&f {fvalue}")

                    # other job keys (skip the ones already handled)
                    for subk, subv in jd.items():
                        if subk in {"j", "i", "p", "summary", "status", "req", "f"}:
                            continue
                        vals = format_subvalue(subv)
                        if vals:
                            parts.append(f"&{subk} {', '.join(vals)}")

                    tokens.append(f"@~ {' '.join(parts)}")
            continue

        if k == "a":  # alerts: [[times], [commands]] pairs
            if isinstance(v, list):
                for adef in v:
                    if isinstance(adef, list) and len(adef) == 2:
                        times = [x for part in adef[0] for x in format_subvalue(part)]
                        cmds = [x for part in adef[1] for x in format_subvalue(part)]
                        tokens.append(f"@a {','.join(times)}: {','.join(cmds)}")
            continue

        if k == "u":  # used time: [timedelta, date] pairs
            if isinstance(v, list):
                for used in v:
                    if isinstance(used, list) and len(used) == 2:
                        td = format_subvalue(used[0])[0]
                        d = format_subvalue(used[1])[0]
                        tokens.append(f"@u {td}: {d}")
            continue

        if k in {"+", "-", "w"}:  # multi-datetimes (RDATE/EXDATE/etc.)
            # drop @- if @r was converted to @o
            if k == "-" and convert_o_from_r:
                continue
            if isinstance(v, list):
                vals = []
                for sub in v:
                    vals.extend(format_subvalue(sub))
                if vals:
                    tokens.append(f"@{k} {', '.join(vals)}")
            continue

        if k == "o":
            # Skip the original o if it was already handled in the prepass
            # ("@o r" conversion or "@o s" suppression).
            if skip_o_key:
                continue
            vals = format_subvalue(v)
            if vals:
                tokens.append(f"@o {', '.join(vals)}")
            continue

        # everything else
        vals = format_subvalue(v)
        if vals:
            tokens.append(f"@{k} {', '.join(vals)}")

    tokens = reorder_tokens(tokens)

    if include_etm and key is not None:
        tokens.append(f"@# {key}")

    return tokens
|
|
685
|
+
|
|
686
|
+
|
|
687
|
+
# ------------------------------------------------------------
|
|
688
|
+
# Entry formatting
|
|
689
|
+
# ------------------------------------------------------------
|
|
690
|
+
|
|
691
|
+
|
|
692
|
+
def tokens_to_entry(tokens: list[str]) -> str:
    """Join the tokens into one newline-separated entry string."""
    entry_lines = list(tokens)
    return "\n".join(entry_lines)
|
|
695
|
+
|
|
696
|
+
|
|
697
|
+
# ------------------------------------------------------------
|
|
698
|
+
# Migration driver
|
|
699
|
+
# ------------------------------------------------------------
|
|
700
|
+
def migrate(
    infile: str,
    outfile: str | None = None,
    include_etm: bool = True,
    section: str = "both",
) -> None:
    """Read an etm.json export and emit tklr batch-entry text.

    *section* selects "items", "archive", or "both". When *outfile* is
    None the result is printed to stdout; otherwise it is written to the
    file. A processed-record count is printed either way.
    """
    with open(infile, "r", encoding="utf-8") as fh:
        data = json.load(fh)

    # "both" selects both sections; otherwise only the named one.
    wanted = [s for s in ("items", "archive") if section in ("both", s)]

    lines: list[str] = []
    processed = 0
    for sec in wanted:
        if sec not in data:
            continue
        lines.append(f"#### {sec} ####")
        lines.append("")

        for rid, record in data[sec].items():
            processed += 1
            tokens = etm_to_tokens(record, rid, include_etm=include_etm)
            lines.append(tokens_to_entry(tokens))
            lines.append("...")  # entry terminator
            lines.append("")

    text = "\n".join(lines).rstrip() + "\n"
    if outfile:
        Path(outfile).write_text(text, encoding="utf-8")
    else:
        print(text)
    print(f"processed {processed} records")
|
|
738
|
+
|
|
739
|
+
|
|
740
|
+
# ------------------------------------------------------------
|
|
741
|
+
# CLI
|
|
742
|
+
# ------------------------------------------------------------
|
|
743
|
+
if __name__ == "__main__":
    import argparse

    # CLI front end: migrate etm.json records into tklr batch entry format.
    parser = argparse.ArgumentParser(
        description="Migrate etm.json (TinyDB) records into tklr batch entry format"
    )
    parser.add_argument("infile", help="Path to etm.json")
    parser.add_argument("outfile", nargs="?", help="Optional output file")
    parser.add_argument(
        "--no-etm", action="store_true", help="Omit @# (etm unique_id) annotations"
    )
    parser.add_argument(
        "--section",
        choices=["items", "archive", "both"],
        default="both",
        help="Which section(s) to migrate (default: both)",
    )
    args = parser.parse_args()

    # --no-etm on the command line inverts to include_etm for migrate().
    migrate(
        args.infile, args.outfile, include_etm=not args.no_etm, section=args.section
    )