ui-mirror-skill 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,741 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ compare_tokens.py — Produce a structured diff between benchmark and current project design tokens.
4
+
5
+ Usage:
6
+ python3 compare_tokens.py <benchmark_tokens.json> <current_tokens.json> [--format human]
7
+
8
+ Input: Two normalized token JSON files (output of extract_design_tokens.py)
9
+ Output: Structured diff JSON to stdout (default), or human-readable text (--format human).
10
+
11
+ Dependencies: Python 3 standard library only.
12
+ """
13
+
14
+ import json
15
+ import math
16
+ import re
17
+ import sys
18
+
19
+
20
+ # ---------------------------------------------------------------------------
21
+ # 1. OKLCH parsing and deltaE calculation
22
+ # ---------------------------------------------------------------------------
23
+
24
# Matches 'oklch(L C H)' with whitespace-separated bare numbers.
# NOTE(review): does not accept '%', 'deg' suffixes, negative values, or an
# alpha component — assumes input normalized by extract_design_tokens.py.
_OKLCH_RE = re.compile(
    r"oklch\(\s*([\d.]+)\s+([\d.]+)\s+([\d.]+)\s*\)"
)
27
+
28
+
29
def parse_oklch(s: str):
    """Parse an 'oklch(L C H)' color string into an (L, C, H) tuple of floats.

    Returns None for empty/None input or when no oklch() pattern is found.
    """
    if not s:
        return None
    found = _OKLCH_RE.search(s)
    if found is None:
        return None
    lightness, chroma, hue = (float(g) for g in found.groups())
    return lightness, chroma, hue
37
+
38
+
39
def oklch_delta_e(a_str: str, b_str: str) -> float | None:
    """Simplified perceptual color difference: Euclidean distance in Oklab.

    Both inputs are 'oklch(L C H)' strings. The polar (C, H) pair is
    converted back to Cartesian Oklab (a, b) before measuring distance.
    Returns None when either string cannot be parsed.
    """
    first = parse_oklch(a_str)
    second = parse_oklch(b_str)
    if first is None or second is None:
        return None

    light_a, chroma_a, hue_a = first
    light_b, chroma_b, hue_b = second

    # Polar (C, H) -> Cartesian Oklab (a, b); hue is in degrees.
    ax = chroma_a * math.cos(math.radians(hue_a))
    ay = chroma_a * math.sin(math.radians(hue_a))
    bx = chroma_b * math.cos(math.radians(hue_b))
    by = chroma_b * math.sin(math.radians(hue_b))

    d_light = light_a - light_b
    d_a = ax - bx
    d_b = ay - by

    return math.sqrt(d_light * d_light + d_a * d_a + d_b * d_b)
65
+
66
+
67
def classify_delta(delta_e: float) -> str:
    """Bucket an Oklab deltaE: < 0.02 "match", <= 0.1 "close", else "different"."""
    if delta_e >= 0.02:
        return "close" if delta_e <= 0.1 else "different"
    return "match"
74
+
75
+
76
+ # ---------------------------------------------------------------------------
77
+ # 2. FRONTEND_CONSISTENCY.md conflict rules
78
+ # ---------------------------------------------------------------------------
79
+
80
+ # Rules we can detect conflicts for from token values
81
RULES = {
    "R4": {  # border-radius cap: nothing larger than rounded-lg (8px)
        "description": "rounded-xl 이상 사용 금지",
        "check_field": "radius",
        "max_px": 8,
        "max_tailwind": "lg",
    },
    "C2": {  # light-mode only: no dark: prefix allowed
        "description": "라이트모드 전용 — dark: 접두사 금지",
        "check_field": "colors",
        "detect": "dark_mode",
    },
    "A1": {  # tw-animate-css only: framer-motion is forbidden
        "description": "tw-animate-css만 사용 — framer-motion 금지",
        "check_field": "animations",
        "detect": "complex_animation",
    },
    "T2": {  # font sizes restricted to the standard Tailwind scale
        "description": "Tailwind 크기 스케일만 사용 (xs~4xl)",
        "check_field": "typography",
        "valid_sizes": {"xs", "sm", "base", "lg", "xl", "2xl", "3xl", "4xl"},
    },
}
104
+
105
# Tailwind radius suffixes larger than rounded-lg — these violate rule R4.
VIOLATING_RADIUS_TAILWIND = {"xl", "2xl", "3xl", "full"}
106
+
107
+
108
def check_radius_conflict(benchmark_tokens: dict) -> list:
    """Detect benchmark radius tokens that violate rule R4 (no rounded-xl+).

    A token violates R4 when its pixel value exceeds 8px (rounded-lg), or —
    when no pixel value is present — its Tailwind suffix is one of
    VIOLATING_RADIUS_TAILWIND.

    Returns a list of conflict dicts with keys
    ruleId / rule / benchmarkValue / component / resolution.
    """
    conflicts = []
    radius = benchmark_tokens.get("radius", {})
    for name, entry in radius.items():
        if not isinstance(entry, dict):
            continue  # malformed entry — nothing to check
        px_val = entry.get("px")
        tw_val = entry.get("tailwind", "")
        if px_val is not None and px_val > 8:
            conflicts.append({
                "ruleId": "R4",
                "rule": "rounded-xl+ 사용 금지",
                # {:g} keeps fractional px intact; int() used to truncate
                # e.g. 12.5px to "12px" in the report.
                "benchmarkValue": "rounded-{} ({:g}px)".format(tw_val, px_val),
                "component": name,
                "resolution": "Clamp to rounded-lg (8px) or update R4 to allow rounded-{} for {}".format(
                    tw_val, name
                ),
            })
        elif tw_val in VIOLATING_RADIUS_TAILWIND:
            conflicts.append({
                "ruleId": "R4",
                "rule": "rounded-xl+ 사용 금지",
                "benchmarkValue": "rounded-{}".format(tw_val),
                "component": name,
                "resolution": "Clamp to rounded-lg or update R4 to allow rounded-{} for {}".format(
                    tw_val, name
                ),
            })
    return conflicts
137
+
138
+
139
def check_typography_conflict(benchmark_tokens: dict) -> list:
    """Flag benchmark font sizes outside the allowed Tailwind scale (rule T2)."""
    valid_sizes = RULES["T2"]["valid_sizes"]
    findings = []
    for name, entry in benchmark_tokens.get("typography", {}).items():
        if not isinstance(entry, dict):
            continue
        size = entry.get("size", "")
        if not size or size in valid_sizes:
            continue
        findings.append({
            "ruleId": "T2",
            "rule": "Tailwind 크기 스케일만 사용 (xs~4xl)",
            "benchmarkValue": "text-{} (in {})".format(size, name),
            "resolution": "Map to nearest standard size or update T2 to allow text-{}".format(size),
        })
    return findings
155
+
156
+
157
+ # ---------------------------------------------------------------------------
158
+ # 3. Token comparison logic
159
+ # ---------------------------------------------------------------------------
160
+
161
def compare_colors(bench_colors: dict, current_colors: dict) -> dict:
    """Compare color tokens between benchmark and current project.

    Tokens present on both sides are matched by Oklab deltaE (< 0.02 counts
    as a match). When neither side has a parseable OKLCH string, identical
    raw values also count as a match (previously such tokens were
    misreported as "modify"). Benchmark-only tokens become "add" items;
    current-only tokens are skipped (informational, no action needed).
    """
    result = {"match": [], "modify": [], "add": []}

    all_names = set(list(bench_colors.keys()) + list(current_colors.keys()))

    for name in sorted(all_names):
        bench_val = bench_colors.get(name)
        current_val = current_colors.get(name)

        if bench_val and current_val:
            # Both exist — compare via OKLCH when available.
            b_oklch = bench_val.get("oklch", "") if isinstance(bench_val, dict) else ""
            s_oklch = current_val.get("oklch", "") if isinstance(current_val, dict) else ""

            delta = oklch_delta_e(b_oklch, s_oklch)
            entry = {
                "name": name,
                "benchmark": b_oklch or str(bench_val),
                "current": s_oklch or str(current_val),
                "deltaE": round(delta, 4) if delta is not None else None,
            }

            # Match when perceptually identical, or — with no computable
            # deltaE — when the raw token values are exactly equal.
            if (delta is not None and delta < 0.02) or (delta is None and bench_val == current_val):
                result["match"].append(entry)
            else:
                entry["action"] = "Update --{} in globals.css".format(name)
                if delta is not None:
                    entry["classification"] = classify_delta(delta)
                result["modify"].append(entry)

        elif bench_val and not current_val:
            b_oklch = bench_val.get("oklch", "") if isinstance(bench_val, dict) else str(bench_val)
            result["add"].append({
                "name": name,
                "benchmark": b_oklch,
                "action": "Add new --mirror-{} token".format(name),
            })
        # current-only tokens are informational — we skip them in the diff
        # (they exist in current but not in benchmark, no action needed)

    return result
203
+
204
+
205
def compare_simple_category(bench_data: dict, current_data: dict, category_name: str) -> dict:
    """Diff a flat token category (spacing, radius, shadows, layout).

    Buckets each key into match / modify / add. Keys present only in the
    current project are skipped (no action needed).
    """
    result = {"match": [], "modify": [], "add": []}

    for key in sorted(set(bench_data) | set(current_data)):
        bench_val = bench_data.get(key)
        current_val = current_data.get(key)

        if bench_val is None:
            # Current-only token — informational, nothing to do.
            continue

        if current_val is None:
            result["add"].append({
                "name": key,
                "benchmark": _summarize_value(bench_val),
                "action": "Add {} {}".format(category_name, key),
            })
        elif _values_match(bench_val, current_val):
            result["match"].append({
                "name": key,
                "benchmark": _summarize_value(bench_val),
                "current": _summarize_value(current_val),
            })
        else:
            result["modify"].append({
                "name": key,
                "benchmark": _summarize_value(bench_val),
                "current": _summarize_value(current_val),
                "action": "Update {} {}".format(category_name, key),
            })

    return result
239
+
240
+
241
def compare_typography(bench_typo: dict, current_typo: dict) -> dict:
    """Diff typography tokens, reporting field-level changes.

    Dict-valued entries present on both sides are compared field by field
    over the union of their keys; scalar entries are compared directly.
    Benchmark-only entries become "add" items; current-only entries are
    skipped (no action needed).
    """
    result = {"match": [], "modify": [], "add": []}

    for key in sorted(set(bench_typo) | set(current_typo)):
        bench_val = bench_typo.get(key)
        current_val = current_typo.get(key)

        if bench_val is None:
            # Only in the current project — informational, skip.
            continue

        if current_val is None:
            result["add"].append({
                "name": key,
                "benchmark": bench_val,
                "action": "Add typography definition for {}".format(key),
            })
            continue

        if isinstance(bench_val, dict) and isinstance(current_val, dict):
            field_diffs = {
                field: {"benchmark": bench_val.get(field), "current": current_val.get(field)}
                for field in set(bench_val) | set(current_val)
                if bench_val.get(field) != current_val.get(field)
            }
            if field_diffs:
                result["modify"].append({
                    "name": key,
                    "diffs": field_diffs,
                    "action": "Update typography for {}".format(key),
                })
            else:
                result["match"].append({
                    "name": key,
                    "benchmark": bench_val,
                    "current": current_val,
                })
        elif bench_val == current_val:
            result["match"].append({"name": key, "value": bench_val})
        else:
            result["modify"].append({
                "name": key,
                "benchmark": bench_val,
                "current": current_val,
                "action": "Update typography {}".format(key),
            })

    return result
291
+
292
+
293
def compare_components(bench_comp: dict, current_comp: dict) -> dict:
    """Diff component tokens into match / modify / add buckets.

    Modified entries carry a field-level diff when both sides are dicts,
    otherwise a raw benchmark/current pair. Current-only components are
    skipped (no action needed).
    """
    result = {"match": [], "modify": [], "add": []}

    for key in sorted(set(bench_comp) | set(current_comp)):
        bench_val = bench_comp.get(key)
        current_val = current_comp.get(key)

        if bench_val is None:
            continue

        if current_val is None:
            result["add"].append({
                "name": key,
                "benchmark": _summarize_value(bench_val),
                "action": "Add component definition for {}".format(key),
            })
            continue

        if _values_match(bench_val, current_val):
            result["match"].append({"name": key})
            continue

        if isinstance(bench_val, dict) and isinstance(current_val, dict):
            diffs = _diff_dicts(bench_val, current_val)
        else:
            diffs = {"benchmark": bench_val, "current": current_val}
        result["modify"].append({
            "name": key,
            "diffs": diffs,
            "action": "Update component {}".format(key),
        })

    return result
321
+
322
+
323
+ # ---------------------------------------------------------------------------
324
+ # 4. Helpers
325
+ # ---------------------------------------------------------------------------
326
+
327
+ def _values_match(a, b) -> bool:
328
+ """Check if two token values are effectively identical."""
329
+ if a == b:
330
+ return True
331
+ if isinstance(a, dict) and isinstance(b, dict):
332
+ # Compare tailwind values if both have them
333
+ if "tailwind" in a and "tailwind" in b:
334
+ return a["tailwind"] == b["tailwind"]
335
+ # Compare all shared keys
336
+ shared = set(a.keys()) & set(b.keys())
337
+ if shared:
338
+ return all(a[k] == b[k] for k in shared)
339
+ return False
340
+
341
+
342
+ def _summarize_value(v):
343
+ """Produce a compact summary of a token value."""
344
+ if isinstance(v, dict):
345
+ # Prefer tailwind representation
346
+ if "tailwind" in v:
347
+ return v["tailwind"]
348
+ if "oklch" in v:
349
+ return v["oklch"]
350
+ return v
351
+ return v
352
+
353
+
354
+ def _diff_dicts(a: dict, b: dict) -> dict:
355
+ """Return fields that differ between two dicts."""
356
+ result = {}
357
+ all_keys = set(list(a.keys()) + list(b.keys()))
358
+ for k in sorted(all_keys):
359
+ av = a.get(k)
360
+ bv = b.get(k)
361
+ if av != bv:
362
+ result[k] = {"benchmark": av, "current": bv}
363
+ return result
364
+
365
+
366
+ # ---------------------------------------------------------------------------
367
+ # 5. Main comparison logic
368
+ # ---------------------------------------------------------------------------
369
+
370
def compare_tokens(benchmark: dict, current: dict) -> dict:
    """Produce a full structured diff between benchmark and current tokens.

    Runs the per-category comparators, collects FRONTEND_CONSISTENCY.md
    conflicts from the benchmark, and fills in an aggregate summary.
    """
    categories = ["colors", "typography", "spacing", "radius", "shadows", "components", "layout"]

    # "summary" is inserted first so it leads the JSON output; it is
    # replaced with real counts at the end.
    diff = {
        "summary": {"total": 0, "match": 0, "modify": 0, "add": 0, "conflicts": 0},
    }

    # Categories with structure-aware comparators; everything else goes
    # through the generic flat comparison.
    for cat in categories:
        bench_part = benchmark.get(cat, {})
        current_part = current.get(cat, {})
        if cat == "colors":
            diff[cat] = compare_colors(bench_part, current_part)
        elif cat == "typography":
            diff[cat] = compare_typography(bench_part, current_part)
        elif cat == "components":
            diff[cat] = compare_components(bench_part, current_part)
        else:
            diff[cat] = compare_simple_category(bench_part, current_part, cat)

    # Detect conflicts with FRONTEND_CONSISTENCY.md (radius first, then
    # typography, matching the report's expected ordering).
    conflicts = check_radius_conflict(benchmark) + check_typography_conflict(benchmark)
    diff["conflicts"] = conflicts

    # Aggregate counts across every category.
    match_count = sum(len(diff[cat]["match"]) for cat in categories)
    modify_count = sum(len(diff[cat]["modify"]) for cat in categories)
    add_count = sum(len(diff[cat]["add"]) for cat in categories)

    diff["summary"] = {
        "total": match_count + modify_count + add_count,
        "match": match_count,
        "modify": modify_count,
        "add": add_count,
        "conflicts": len(conflicts),
    }

    return diff
438
+
439
+
440
+ # ---------------------------------------------------------------------------
441
+ # 6. Human-readable formatting
442
+ # ---------------------------------------------------------------------------
443
+
444
# Section headings for the human-readable report, keyed by the category
# keys used in the structured diff.
CATEGORY_LABELS = {
    "colors": "Colors",
    "typography": "Typography",
    "spacing": "Spacing",
    "radius": "Radius",
    "shadows": "Shadows",
    "components": "Components",
    "layout": "Layout",
}
453
+
454
+
455
+ def _fmt_value(v) -> str:
456
+ """Format a token value for display."""
457
+ if v is None:
458
+ return "(none)"
459
+ if isinstance(v, dict):
460
+ parts = []
461
+ for pref in ("oklch", "tailwind", "hex", "px", "value"):
462
+ if pref in v:
463
+ parts.append(str(v[pref]))
464
+ if parts:
465
+ return " / ".join(parts)
466
+ # Fallback: compact key=value pairs
467
+ return ", ".join("{}={}".format(k, v2) for k, v2 in sorted(v.items()) if v2 is not None)
468
+ return str(v)
469
+
470
+
471
def _fmt_diff_field(field: str, bench, current) -> str:
    """Format a single differing field as one aligned "benchmark -> current" line.

    Strings pass through untouched; other values go through _fmt_value.
    """
    bv = _fmt_value(bench) if not isinstance(bench, str) else bench
    sv = _fmt_value(current) if not isinstance(current, str) else current
    return " {} {:<18s} {} -> {}".format("|", field + ":", bv, sv)
476
+
477
+
478
def _fmt_color_entry(entry: dict, kind: str) -> list:
    """Format a color diff entry as display lines.

    `kind` is one of "match" / "modify" / "add"; "modify" yields multiple
    lines (header plus benchmark/current/delta details).
    """
    lines = []
    name = entry.get("name", "?")
    if kind == "match":
        # Fall back to the current value if no benchmark string is stored.
        bench = entry.get("benchmark", entry.get("current", ""))
        lines.append(" {} MATCH {:<20s} {}".format(chr(0x2713), name, bench))
    elif kind == "modify":
        bench = entry.get("benchmark", "")
        current = entry.get("current", "")
        delta = entry.get("deltaE")
        cls = entry.get("classification", "")
        # Pre-built with a leading space; stripped below when displayed.
        delta_str = " dE={:.4f} ({})".format(delta, cls) if delta is not None else ""
        lines.append(" {} MODIFY {}".format(chr(0x270E), name))
        lines.append(" | benchmark: {}".format(bench))
        lines.append(" | current: {}".format(current))
        if delta_str:
            lines.append(" | delta: {}".format(delta_str.strip()))
    elif kind == "add":
        bench = entry.get("benchmark", "")
        lines.append(" {} ADD {:<20s} {}".format(chr(0x271A), name, bench))
    return lines
500
+
501
+
502
def _fmt_simple_entry(entry: dict, kind: str) -> list:
    """Format a simple category entry (spacing, radius, shadows, layout).

    Returns one display line for "match"/"add", three for "modify".
    """
    lines = []
    name = entry.get("name", "?")
    if kind == "match":
        # Prefer benchmark, then current, then a bare "value" field.
        val = _fmt_value(entry.get("benchmark", entry.get("current", entry.get("value", ""))))
        lines.append(" {} MATCH {:<20s} {}".format(chr(0x2713), name, val))
    elif kind == "modify":
        bench = _fmt_value(entry.get("benchmark", ""))
        current = _fmt_value(entry.get("current", ""))
        lines.append(" {} MODIFY {}".format(chr(0x270E), name))
        lines.append(" | benchmark: {}".format(bench))
        lines.append(" | current: {}".format(current))
    elif kind == "add":
        bench = _fmt_value(entry.get("benchmark", ""))
        lines.append(" {} ADD {:<20s} {}".format(chr(0x271A), name, bench))
    return lines
519
+
520
+
521
def _fmt_typo_entry(entry: dict, kind: str) -> list:
    """Format a typography entry with field-level diffs.

    "modify" entries carry either a {field: {benchmark, current}} map
    (rendered one line per field) or a flat benchmark/current pair.
    """
    lines = []
    name = entry.get("name", "?")
    if kind == "match":
        lines.append(" {} MATCH {}".format(chr(0x2713), name))
    elif kind == "modify":
        lines.append(" {} MODIFY {}".format(chr(0x270E), name))
        diffs = entry.get("diffs", {})
        # Field-level rendering only when every diff value is itself a dict.
        if isinstance(diffs, dict) and all(isinstance(v, dict) for v in diffs.values()):
            for field, vals in sorted(diffs.items()):
                bv = vals.get("benchmark", "(none)")
                sv = vals.get("current", "(none)")
                lines.append(_fmt_diff_field(field, bv, sv))
        else:
            # Flat fallback for entries without a structured diff map.
            bench = _fmt_value(entry.get("benchmark", ""))
            current = _fmt_value(entry.get("current", ""))
            lines.append(" | benchmark: {}".format(bench))
            lines.append(" | current: {}".format(current))
    elif kind == "add":
        bench = _fmt_value(entry.get("benchmark", ""))
        lines.append(" {} ADD {:<20s} {}".format(chr(0x271A), name, bench))
    return lines
544
+
545
+
546
def _fmt_component_entry(entry: dict, kind: str) -> list:
    """Format a component entry with detailed field-by-field diffs.

    Handles three diff shapes for "modify": {field: {benchmark, current}},
    one extra level of nesting (field.subfield), and a flat
    benchmark/current pair.
    """
    lines = []
    name = entry.get("name", "?")
    if kind == "match":
        lines.append(" {} MATCH {}".format(chr(0x2713), name))
    elif kind == "modify":
        lines.append(" {} MODIFY {}".format(chr(0x270E), name))
        diffs = entry.get("diffs", {})
        if isinstance(diffs, dict):
            for field, vals in sorted(diffs.items()):
                if isinstance(vals, dict) and "benchmark" in vals and "current" in vals:
                    # Leaf diff: render "field: benchmark -> current".
                    bv = _fmt_value(vals["benchmark"])
                    sv = _fmt_value(vals["current"])
                    lines.append(_fmt_diff_field(field, bv, sv))
                elif isinstance(vals, dict):
                    # Nested dict — flatten one more level
                    for subfield, subvals in sorted(vals.items()):
                        if isinstance(subvals, dict) and "benchmark" in subvals:
                            bv = _fmt_value(subvals["benchmark"])
                            sv = _fmt_value(subvals.get("current", "(none)"))
                            lines.append(_fmt_diff_field("{}.{}".format(field, subfield), bv, sv))
                        else:
                            # Unrecognized shape — show the raw value.
                            lines.append(" | {}.{}: {}".format(field, subfield, _fmt_value(subvals)))
                else:
                    lines.append(" | {}: {}".format(field, _fmt_value(vals)))
        else:
            # No structured diff available: flat benchmark/current pair.
            bench = _fmt_value(entry.get("benchmark", ""))
            current = _fmt_value(entry.get("current", ""))
            lines.append(" | benchmark: {}".format(bench))
            lines.append(" | current: {}".format(current))
    elif kind == "add":
        bench = _fmt_value(entry.get("benchmark", ""))
        lines.append(" {} ADD {:<20s} {}".format(chr(0x271A), name, bench))
    return lines
581
+
582
+
583
def format_human_readable(diff: dict) -> str:
    """Format the full diff as human-readable text output.

    Layout: summary banner, one section per token category (entries
    rendered via a per-category formatter), then a conflicts section when
    any FRONTEND_CONSISTENCY.md rules were violated.
    """
    lines = []

    # Summary header
    summary = diff.get("summary", {})
    lines.append("=" * 70)
    lines.append(" DESIGN TOKEN COMPARISON")
    lines.append("=" * 70)
    lines.append("")
    lines.append(" Total: {} | Match: {} | Modify: {} | Add: {} | Conflicts: {}".format(
        summary.get("total", 0),
        summary.get("match", 0),
        summary.get("modify", 0),
        summary.get("add", 0),
        summary.get("conflicts", 0),
    ))
    lines.append("")

    # Category formatters — each turns one entry into display lines.
    formatters = {
        "colors": _fmt_color_entry,
        "typography": _fmt_typo_entry,
        "spacing": _fmt_simple_entry,
        "radius": _fmt_simple_entry,
        "shadows": _fmt_simple_entry,
        "components": _fmt_component_entry,
        "layout": _fmt_simple_entry,
    }

    for cat_key in ["colors", "typography", "spacing", "radius", "shadows", "components", "layout"]:
        cat_data = diff.get(cat_key)
        if not cat_data or not isinstance(cat_data, dict):
            continue

        match_list = cat_data.get("match", [])
        modify_list = cat_data.get("modify", [])
        add_list = cat_data.get("add", [])

        # Skip empty categories entirely.
        if not match_list and not modify_list and not add_list:
            continue

        label = CATEGORY_LABELS.get(cat_key, cat_key)
        # Section header flanked by box-drawing dashes (U+2500).
        lines.append("{} {} (match: {} | modify: {} | add: {}) {}".format(
            chr(0x2500) * 2,
            label,
            len(match_list),
            len(modify_list),
            len(add_list),
            chr(0x2500) * 2,
        ))
        lines.append("")

        fmt_fn = formatters.get(cat_key, _fmt_simple_entry)

        for entry in match_list:
            lines.extend(fmt_fn(entry, "match"))
        # Blank separator between the match block and the rest.
        if match_list and (modify_list or add_list):
            lines.append("")
        for entry in modify_list:
            lines.extend(fmt_fn(entry, "modify"))
            lines.append("")
        for entry in add_list:
            lines.extend(fmt_fn(entry, "add"))

        lines.append("")

    # Conflicts section
    conflicts = diff.get("conflicts", [])
    if conflicts:
        lines.append("{} Conflicts ({}) {}".format(chr(0x2500) * 2, len(conflicts), chr(0x2500) * 2))
        lines.append("")
        for c in conflicts:
            rule_id = c.get("ruleId", "?")
            rule = c.get("rule", "")
            component = c.get("component", "")
            bench_val = c.get("benchmarkValue", "")
            resolution = c.get("resolution", "")
            # U+26A0 is the warning sign.
            lines.append(" {} [{}] {}".format(chr(0x26A0), rule_id, rule))
            if component:
                lines.append(" | component: {}".format(component))
            lines.append(" | benchmark: {}".format(bench_val))
            lines.append(" | resolution: {}".format(resolution))
            lines.append("")

    lines.append("=" * 70)
    return "\n".join(lines)
670
+
671
+
672
+ # ---------------------------------------------------------------------------
673
+ # 7. Entry point
674
+ # ---------------------------------------------------------------------------
675
+
676
def main():
    """CLI entry point.

    Usage: compare_tokens.py <benchmark_tokens.json> <current_tokens.json> [--format human]

    Loads both token JSON files, builds the structured diff, and prints it
    as JSON (default) or human-readable text (--format human). Exits 1 on
    usage errors, missing/invalid files, or non-object JSON payloads.
    """
    # Single-pass argument scan. '--format' consumes its value here so the
    # value never leaks into the positional list (the previous version left
    # it in the positionals, so '--format human bench.json cur.json' read
    # "human" as the benchmark path, and contained a dead flags-only loop).
    positionals = []
    output_format = "json"
    argv = sys.argv[1:]
    i = 0
    while i < len(argv):
        arg = argv[i]
        if arg == "--format":
            if i + 1 < len(argv):
                output_format = argv[i + 1]
                i += 2
                continue
            i += 1  # trailing '--format' with no value: ignored, as before
            continue
        if arg.startswith("--"):
            i += 1  # unknown flags are silently ignored, as before
            continue
        positionals.append(arg)
        i += 1

    if len(positionals) < 2:
        print(
            "Usage: {} <benchmark_tokens.json> <current_tokens.json> [--format human]".format(sys.argv[0]),
            file=sys.stderr,
        )
        print(" Produces a structured diff between benchmark and current project design tokens.", file=sys.stderr)
        print(" --format human Output human-readable text instead of JSON", file=sys.stderr)
        sys.exit(1)

    benchmark_path = positionals[0]
    current_path = positionals[1]

    # Load benchmark
    try:
        with open(benchmark_path, "r", encoding="utf-8") as f:
            benchmark = json.load(f)
    except FileNotFoundError:
        print("Error: Benchmark file not found: {}".format(benchmark_path), file=sys.stderr)
        sys.exit(1)
    except json.JSONDecodeError as e:
        print("Error: Invalid JSON in benchmark {}: {}".format(benchmark_path, e), file=sys.stderr)
        sys.exit(1)

    # Load current
    try:
        with open(current_path, "r", encoding="utf-8") as f:
            current = json.load(f)
    except FileNotFoundError:
        print("Error: Current project file not found: {}".format(current_path), file=sys.stderr)
        sys.exit(1)
    except json.JSONDecodeError as e:
        print("Error: Invalid JSON in current {}: {}".format(current_path, e), file=sys.stderr)
        sys.exit(1)

    # Both payloads must be JSON objects (token maps).
    if not isinstance(benchmark, dict):
        print("Error: Benchmark JSON must be an object, got {}".format(type(benchmark).__name__), file=sys.stderr)
        sys.exit(1)
    if not isinstance(current, dict):
        print("Error: Current JSON must be an object, got {}".format(type(current).__name__), file=sys.stderr)
        sys.exit(1)

    diff = compare_tokens(benchmark, current)

    if output_format == "human":
        print(format_human_readable(diff))
    else:
        print(json.dumps(diff, indent=2, ensure_ascii=False))
738
+
739
+
740
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()