design-clone 2.1.0 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +13 -34
- package/SKILL.md +69 -45
- package/bin/cli.js +22 -4
- package/bin/commands/clone-site.js +31 -171
- package/bin/commands/help.js +19 -6
- package/bin/commands/init.js +9 -86
- package/bin/commands/uninstall.js +105 -0
- package/bin/commands/update.js +70 -0
- package/bin/commands/verify.js +7 -14
- package/bin/utils/paths.js +28 -0
- package/bin/utils/validate.js +2 -22
- package/bin/utils/version.js +23 -0
- package/docs/code-standards.md +789 -0
- package/docs/codebase-summary.md +533 -286
- package/docs/index.md +74 -0
- package/docs/project-overview-pdr.md +797 -0
- package/docs/system-architecture.md +718 -0
- package/package.json +14 -17
- package/src/ai/prompts/design-tokens/basic.md +80 -0
- package/src/ai/prompts/design-tokens/section-with-css.md +41 -0
- package/src/ai/prompts/design-tokens/section.md +48 -0
- package/src/ai/prompts/design-tokens/with-css.md +87 -0
- package/src/ai/prompts/structure-analysis/basic.md +55 -0
- package/src/ai/prompts/structure-analysis/with-context.md +59 -0
- package/src/ai/prompts/structure-analysis/with-dimensions.md +63 -0
- package/src/ai/prompts/structure-analysis/with-hierarchy.md +73 -0
- package/src/ai/prompts/ux-audit/aggregation.md +42 -0
- package/src/ai/prompts/ux-audit/desktop.md +92 -0
- package/src/ai/prompts/ux-audit/mobile.md +93 -0
- package/src/ai/prompts/ux-audit/tablet.md +92 -0
- package/src/core/animation/animation-extractor-ast.js +183 -0
- package/src/core/animation/animation-extractor-output.js +152 -0
- package/src/core/animation/animation-extractor.js +178 -0
- package/src/core/animation/state-capture-detection.js +200 -0
- package/src/core/animation/state-capture.js +193 -0
- package/src/core/capture/browser-context-pool.js +96 -0
- package/src/core/capture/multi-page-screenshot-page.js +110 -0
- package/src/core/capture/multi-page-screenshot.js +208 -0
- package/src/core/capture/screenshot-extraction.js +186 -0
- package/src/core/capture/screenshot-helpers.js +175 -0
- package/src/core/capture/screenshot-orchestrator.js +174 -0
- package/src/core/capture/screenshot-viewport.js +93 -0
- package/src/core/capture/screenshot.js +192 -0
- package/src/core/content/content-counter-dom.js +191 -0
- package/src/core/content/content-counter.js +76 -0
- package/src/core/css/breakpoint-detector.js +66 -0
- package/src/core/css/chromium-defaults.json +23 -0
- package/src/core/css/computed-style-extractor.js +102 -0
- package/src/core/css/css-chunker.js +103 -0
- package/src/core/css/filter-css-dead-code.js +120 -0
- package/src/core/css/filter-css-html-analyzer.js +110 -0
- package/src/core/css/filter-css-selector-matcher.js +172 -0
- package/src/core/css/filter-css.js +206 -0
- package/src/core/css/merge-css-atrule-processor.js +158 -0
- package/src/core/css/merge-css-file-io.js +68 -0
- package/src/core/css/merge-css.js +148 -0
- package/src/core/detection/framework-detector-routing.js +68 -0
- package/src/core/detection/framework-detector-signals.js +65 -0
- package/src/core/detection/framework-detector.js +198 -0
- package/src/core/dimension/dimension-extractor-card-detector.js +82 -0
- package/src/core/dimension/dimension-extractor.js +317 -0
- package/src/core/dimension/dimension-output-ai-summary.js +111 -0
- package/src/core/dimension/dimension-output.js +173 -0
- package/src/core/dimension/dom-tree-analyzer-tree-builders.js +95 -0
- package/src/core/dimension/dom-tree-analyzer.js +191 -0
- package/src/core/discovery/app-state-snapshot-capture.js +195 -0
- package/src/core/discovery/app-state-snapshot-utils.js +178 -0
- package/src/core/discovery/app-state-snapshot.js +131 -0
- package/src/core/discovery/discover-pages-routes.js +84 -0
- package/src/core/discovery/discover-pages-utils.js +177 -0
- package/src/core/discovery/discover-pages.js +191 -0
- package/src/core/html/html-extractor-inline-styler.js +70 -0
- package/src/core/html/html-extractor.js +147 -0
- package/src/core/html/semantic-enhancer-mappings.js +200 -0
- package/src/core/html/semantic-enhancer-page.js +148 -0
- package/src/core/html/semantic-enhancer.js +135 -0
- package/src/core/links/rewrite-links-css-rewriter.js +53 -0
- package/src/core/links/rewrite-links.js +173 -0
- package/src/core/media/asset-validator.js +118 -0
- package/src/core/media/extract-assets-downloader.js +187 -0
- package/src/core/media/extract-assets-page-scraper.js +115 -0
- package/src/core/media/extract-assets.js +159 -0
- package/src/core/media/video-capture-convert.js +200 -0
- package/src/core/media/video-capture.js +201 -0
- package/src/core/{lazy-loader.js → page-prep/lazy-loader.js} +37 -39
- package/src/core/section/section-cropper-helpers.js +43 -0
- package/src/core/{section-cropper.js → section/section-cropper.js} +11 -88
- package/src/core/section/section-detector-strategies.js +139 -0
- package/src/core/section/section-detector-utils.js +100 -0
- package/src/core/section/section-detector.js +88 -0
- package/src/core/tests/test-section-cropper.js +2 -2
- package/src/core/tests/test-section-detector.js +2 -2
- package/src/post-process/enhance-assets.js +29 -4
- package/src/post-process/fetch-images-unsplash-client.js +123 -0
- package/src/post-process/fetch-images.js +60 -263
- package/src/post-process/inject-gosnap.js +88 -0
- package/src/post-process/inject-icons-svg-replacer.js +76 -0
- package/src/post-process/inject-icons.js +47 -200
- package/src/route-discoverers/base-discoverer-utils.js +137 -0
- package/src/route-discoverers/base-discoverer.js +29 -118
- package/src/route-discoverers/index.js +1 -1
- package/src/shared/config.js +38 -0
- package/src/shared/error-codes.js +31 -0
- package/src/shared/viewports.js +46 -0
- package/src/utils/browser.js +0 -7
- package/src/utils/helpers.js +4 -0
- package/src/utils/log.js +12 -0
- package/src/utils/playwright-loader.js +76 -0
- package/src/utils/playwright.js +3 -69
- package/src/utils/progress.js +32 -0
- package/src/verification/generate-audit-report-css-fixes.js +52 -0
- package/src/verification/generate-audit-report-sections.js +158 -0
- package/src/verification/generate-audit-report.js +5 -281
- package/src/verification/quality-scorer.js +92 -0
- package/src/verification/verify-footer-checks.js +103 -0
- package/src/verification/verify-footer-helpers.js +178 -0
- package/src/verification/verify-footer.js +23 -381
- package/src/verification/verify-header-checks.js +104 -0
- package/src/verification/verify-header-helpers.js +156 -0
- package/src/verification/verify-header.js +23 -365
- package/src/verification/verify-layout-report.js +101 -0
- package/src/verification/verify-layout.js +13 -259
- package/src/verification/verify-menu-checks.js +104 -0
- package/src/verification/verify-menu-helpers.js +112 -0
- package/src/verification/verify-menu.js +17 -285
- package/src/verification/verify-slider-checks.js +115 -0
- package/src/verification/verify-slider-constants.js +65 -0
- package/src/verification/verify-slider-helpers.js +164 -0
- package/src/verification/verify-slider.js +23 -414
- package/.env.example +0 -14
- package/docs/basic-clone.md +0 -63
- package/docs/cli-reference.md +0 -316
- package/docs/design-clone-architecture.md +0 -492
- package/docs/pixel-perfect.md +0 -117
- package/docs/project-roadmap.md +0 -382
- package/docs/troubleshooting.md +0 -170
- package/requirements.txt +0 -5
- package/src/ai/__pycache__/analyze-structure.cpython-313.pyc +0 -0
- package/src/ai/__pycache__/extract-design-tokens.cpython-313.pyc +0 -0
- package/src/ai/analyze-structure.py +0 -375
- package/src/ai/extract-design-tokens.py +0 -782
- package/src/ai/prompts/__init__.py +0 -2
- package/src/ai/prompts/__pycache__/__init__.cpython-313.pyc +0 -0
- package/src/ai/prompts/__pycache__/design_tokens.cpython-313.pyc +0 -0
- package/src/ai/prompts/__pycache__/structure_analysis.cpython-313.pyc +0 -0
- package/src/ai/prompts/__pycache__/ux_audit.cpython-313.pyc +0 -0
- package/src/ai/prompts/design_tokens.py +0 -316
- package/src/ai/prompts/structure_analysis.py +0 -592
- package/src/ai/prompts/ux_audit.py +0 -198
- package/src/ai/ux-audit.js +0 -596
- package/src/core/animation-extractor.js +0 -526
- package/src/core/app-state-snapshot.js +0 -511
- package/src/core/content-counter.js +0 -342
- package/src/core/design-tokens.js +0 -103
- package/src/core/dimension-extractor.js +0 -438
- package/src/core/dimension-output.js +0 -305
- package/src/core/discover-pages.js +0 -542
- package/src/core/dom-tree-analyzer.js +0 -298
- package/src/core/extract-assets.js +0 -468
- package/src/core/filter-css.js +0 -499
- package/src/core/framework-detector.js +0 -538
- package/src/core/html-extractor.js +0 -212
- package/src/core/merge-css.js +0 -407
- package/src/core/multi-page-screenshot.js +0 -380
- package/src/core/rewrite-links.js +0 -226
- package/src/core/screenshot.js +0 -701
- package/src/core/section-detector.js +0 -386
- package/src/core/semantic-enhancer.js +0 -492
- package/src/core/state-capture.js +0 -598
- package/src/core/video-capture.js +0 -546
- package/src/utils/__init__.py +0 -16
- package/src/utils/__pycache__/__init__.cpython-313.pyc +0 -0
- package/src/utils/__pycache__/env.cpython-313.pyc +0 -0
- package/src/utils/env.py +0 -134
- /package/src/core/{css-extractor.js → css/css-extractor.js} +0 -0
- /package/src/core/{cookie-handler.js → page-prep/cookie-handler.js} +0 -0
- /package/src/core/{page-readiness.js → page-prep/page-readiness.js} +0 -0
|
@@ -1,782 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
Extract design tokens from website screenshots using Gemini Vision API.
|
|
4
|
-
|
|
5
|
-
Usage:
|
|
6
|
-
python extract-design-tokens.py --screenshots ./analysis --output ./output
|
|
7
|
-
python extract-design-tokens.py -s ./analysis -o ./out --css source.css
|
|
8
|
-
python extract-design-tokens.py -s ./analysis -o ./out --section-mode
|
|
9
|
-
|
|
10
|
-
Options:
|
|
11
|
-
--screenshots Directory containing desktop.png, tablet.png, mobile.png
|
|
12
|
-
--output Output directory for design-tokens.json and tokens.css
|
|
13
|
-
--css Path to filtered CSS file for exact token extraction (optional)
|
|
14
|
-
--model Gemini model (default: gemini-2.5-flash)
|
|
15
|
-
--verbose Enable verbose output
|
|
16
|
-
--section-mode Analyze sections instead of viewports (sections/*.png)
|
|
17
|
-
|
|
18
|
-
Output:
|
|
19
|
-
- design-tokens.json: Machine-readable tokens
|
|
20
|
-
- tokens.css: CSS custom properties
|
|
21
|
-
- section-analysis/*.json: Per-section tokens (section-mode only)
|
|
22
|
-
|
|
23
|
-
When CSS provided, extracts EXACT colors/fonts from source instead of estimating.
|
|
24
|
-
Section mode analyzes each section separately for better detail accuracy.
|
|
25
|
-
"""
|
|
26
|
-
|
|
27
|
-
import argparse
|
|
28
|
-
import json
|
|
29
|
-
import os
|
|
30
|
-
import re
|
|
31
|
-
import sys
|
|
32
|
-
import time
|
|
33
|
-
from pathlib import Path
|
|
34
|
-
from typing import Any, Dict, List, Optional
|
|
35
|
-
|
|
36
|
-
# Add src directory to path for local imports
|
|
37
|
-
SCRIPT_DIR = Path(__file__).parent.resolve()
|
|
38
|
-
SRC_DIR = SCRIPT_DIR.parent
|
|
39
|
-
sys.path.insert(0, str(SRC_DIR))
|
|
40
|
-
|
|
41
|
-
# Import local env resolver (portable)
|
|
42
|
-
try:
|
|
43
|
-
from utils.env import resolve_env, load_env
|
|
44
|
-
load_env() # Load .env files on startup
|
|
45
|
-
except ImportError:
|
|
46
|
-
# Fallback: simple env getter
|
|
47
|
-
def resolve_env(key, default=None):
|
|
48
|
-
return os.environ.get(key, default)
|
|
49
|
-
|
|
50
|
-
# Check for google-genai dependency
|
|
51
|
-
try:
|
|
52
|
-
from google import genai
|
|
53
|
-
from google.genai import types
|
|
54
|
-
except ImportError:
|
|
55
|
-
print(json.dumps({
|
|
56
|
-
"success": False,
|
|
57
|
-
"error": "google-genai not installed",
|
|
58
|
-
"hint": "Run: pip install google-genai"
|
|
59
|
-
}, indent=2))
|
|
60
|
-
sys.exit(1)
|
|
61
|
-
|
|
62
|
-
# Import prompts from extracted module
|
|
63
|
-
from prompts.design_tokens import build_extraction_prompt, build_section_prompt
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
# Default tokens (fallback)
# Returned as-is when extraction cannot run (missing API key, no
# screenshots, API failure — see extract_tokens) and used by
# merge_with_defaults() to fill any keys the model response omitted.
# The trailing "notes" entry flags downstream consumers that these are
# fallback values, not extracted ones.
DEFAULT_TOKENS = {
    "colors": {
        "primary": "#2563eb",
        "secondary": "#64748b",
        "accent": "#f59e0b",
        "background": "#ffffff",
        "surface": "#f8fafc",
        "text": {
            "primary": "#0f172a",
            "secondary": "#475569",
            "muted": "#94a3b8"
        },
        "border": "#e2e8f0"
    },
    "typography": {
        "fontFamily": {
            "heading": "Inter, sans-serif",
            "body": "Inter, sans-serif"
        },
        "fontSize": {
            "xs": "12px",
            "sm": "14px",
            "base": "16px",
            "lg": "18px",
            "xl": "20px",
            "2xl": "24px",
            "3xl": "30px",
            "4xl": "36px"
        },
        "fontWeight": {
            "normal": 400,
            "medium": 500,
            "semibold": 600,
            "bold": 700
        },
        "lineHeight": {
            "tight": 1.25,
            "normal": 1.5,
            "relaxed": 1.75
        }
    },
    "spacing": {
        "1": "4px",
        "2": "8px",
        "3": "12px",
        "4": "16px",
        "6": "24px",
        "8": "32px",
        "12": "48px",
        "16": "64px"
    },
    "borderRadius": {
        "sm": "4px",
        "md": "8px",
        "lg": "16px",
        "full": "9999px"
    },
    "shadows": {
        "sm": "0 1px 2px rgba(0,0,0,0.05)",
        "md": "0 4px 6px rgba(0,0,0,0.1)",
        "lg": "0 10px 15px rgba(0,0,0,0.1)"
    },
    "notes": ["Using default tokens - extraction failed or was not performed"]
}
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
def get_api_key() -> Optional[str]:
    """Look up the Gemini API key from the environment.

    ``GEMINI_API_KEY`` is preferred; ``GOOGLE_API_KEY`` is the fallback.
    """
    key = resolve_env('GEMINI_API_KEY')
    if not key:
        key = resolve_env('GOOGLE_API_KEY')
    return key
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
def validate_hex_color(color: str) -> bool:
    """Return True when *color* is a 6-digit ``#RRGGBB`` hex string."""
    hex_pattern = r'^#[0-9A-Fa-f]{6}$'
    return re.match(hex_pattern, color) is not None
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
def validate_tokens(tokens: Dict[str, Any]) -> tuple[bool, list[str]]:
    """Check the hex-color fields of *tokens*.

    Only the ``colors`` section is validated: the flat palette keys plus
    the nested ``colors.text`` keys. Each malformed value contributes one
    human-readable error string.

    Returns:
        ``(is_valid, errors)`` — valid means no errors were collected.
    """
    errors: list[str] = []

    if 'colors' in tokens:
        palette = tokens['colors']
        flat_keys = ['primary', 'secondary', 'accent', 'background', 'surface', 'border']
        for name in flat_keys:
            if name in palette and not validate_hex_color(palette[name]):
                errors.append(f"Invalid hex color: colors.{name} = {palette[name]}")

        if 'text' in palette:
            text = palette['text']
            for name in ['primary', 'secondary', 'muted']:
                if name in text and not validate_hex_color(text[name]):
                    errors.append(f"Invalid hex color: colors.text.{name} = {text[name]}")

    return not errors, errors
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
def merge_with_defaults(tokens: Dict[str, Any]) -> Dict[str, Any]:
    """Overlay extracted *tokens* onto ``DEFAULT_TOKENS``, filling gaps.

    Dicts are merged recursively; any non-dict value in *tokens* replaces
    the default outright. The defaults are deep-copied first so the
    returned structure never aliases ``DEFAULT_TOKENS`` — previously only
    each visited level was shallow-copied, so nested default dicts absent
    from *tokens* were shared by reference, and mutating the merge result
    could silently corrupt the module-level defaults.

    Args:
        tokens: Possibly partial token dictionary from extraction.

    Returns:
        A new dictionary containing every ``DEFAULT_TOKENS`` key.
    """
    from copy import deepcopy  # local import keeps module-level imports unchanged

    def deep_merge(base: dict, override: dict) -> dict:
        # Recurse only when both sides are dicts; otherwise override wins.
        result = base.copy()
        for key, value in override.items():
            if key in result and isinstance(result[key], dict) and isinstance(value, dict):
                result[key] = deep_merge(result[key], value)
            else:
                result[key] = value
        return result

    return deep_merge(deepcopy(DEFAULT_TOKENS), tokens)
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
def extract_section_tokens(
    section_path: str,
    css_content: Optional[str],
    client,
    model: str,
    verbose: bool = False
) -> Dict[str, Any]:
    """Run Gemini token extraction for a single section screenshot.

    Args:
        section_path: Path to the section PNG; its stem names the section.
        css_content: Optional CSS text supplied to the prompt for context.
        client: Gemini client instance
        model: Model name to use
        verbose: When True, report failures on stderr.

    Returns:
        The parsed token dict tagged with ``_section``, or a dict with an
        ``error`` entry when the call fails or yields no text.
    """
    section_name = Path(section_path).stem  # e.g. "section-0-header"

    # Section-specific prompt plus the raw PNG bytes form the request.
    prompt = build_section_prompt(section_name, css_content)

    with open(section_path, 'rb') as image_file:
        image_bytes = image_file.read()

    request_parts = [
        prompt,
        types.Part.from_bytes(data=image_bytes, mime_type='image/png')
    ]

    try:
        response = client.models.generate_content(
            model=model,
            contents=request_parts,
            config=types.GenerateContentConfig(
                response_mime_type='application/json'
            )
        )

        body = response.text if hasattr(response, 'text') else None
        if not body:
            return {'_section': section_name, 'error': 'Empty response'}

        tokens = json.loads(body)
        tokens['_section'] = section_name
        return tokens

    except Exception as exc:
        if verbose:
            print(f"Error extracting {section_name}: {exc}", file=sys.stderr)
        # Failures are reported as data so the caller can aggregate them.
        return {'_section': section_name, 'error': str(exc)}
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
def merge_section_tokens(section_tokens: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Merge tokens from multiple sections into unified set.

    Strategy:
    - Colors: First non-null occurrence wins (header colors take priority)
    - Typography: Collect all unique values
    - Spacing: Merge unique values
    - Notes: Collect all

    Sections that carry an ``error`` key contribute only a diagnostic
    note. Ordering of *section_tokens* therefore matters: earlier
    sections win every first-occurrence slot.

    Args:
        section_tokens: List of per-section token dicts

    Returns:
        Merged token dictionary
    """
    # Skeleton result: None marks "not yet filled" for first-wins slots;
    # empty dicts/lists accumulate values. The _-prefixed keys are
    # merge metadata, not design tokens.
    merged = {
        'colors': {
            'primary': None,
            'secondary': None,
            'accent': None,
            'background': None,
            'surface': None,
            'text': {
                'primary': None,
                'secondary': None,
                'muted': None
            },
            'border': None
        },
        'typography': {
            'fontFamily': {
                'heading': None,
                'body': None
            },
            'fontSize': {},
            'fontWeight': {
                'normal': None,
                'medium': None,
                'semibold': None,
                'bold': None
            },
            'lineHeight': {}
        },
        'spacing': {},
        'borderRadius': {},
        'shadows': {},
        'notes': [],
        '_sections': [],
        '_sectionCount': len(section_tokens)
    }

    # Track seen font sizes for deduplication
    seen_sizes = set()

    for tokens in section_tokens:
        if 'error' in tokens:
            merged['notes'].append(f"Section {tokens.get('_section', 'unknown')} failed: {tokens['error']}")
            continue

        section_name = tokens.get('_section', 'unknown')
        merged['_sections'].append(section_name)

        # Merge colors (first occurrence wins)
        if 'colors' in tokens:
            colors = tokens['colors']

            # Direct color mappings: (source key in section dict,
            # destination key in merged; "a.b" means a nested slot).
            # The string 'null' is treated like a missing value because
            # the model sometimes emits it literally.
            color_mappings = [
                ('background', 'background'),
                ('text', 'text.primary'),
                ('heading', 'text.secondary'),
                ('accent', 'accent'),
                ('border', 'border')
            ]

            for src_key, dest_key in color_mappings:
                if src_key in colors and colors[src_key] and colors[src_key] != 'null':
                    value = colors[src_key]
                    if validate_hex_color(value):
                        if '.' in dest_key:
                            parent, child = dest_key.split('.')
                            if merged['colors'][parent][child] is None:
                                merged['colors'][parent][child] = value
                        else:
                            if merged['colors'][dest_key] is None:
                                merged['colors'][dest_key] = value

            # Infer primary from accent if not set
            if merged['colors']['primary'] is None and 'accent' in colors:
                if colors['accent'] and validate_hex_color(colors['accent']):
                    merged['colors']['primary'] = colors['accent']

        # Merge typography
        if 'typography' in tokens:
            typo = tokens['typography']

            # Font family: a single string fills both heading and body
            # slots, each only if still unset.
            if 'fontFamily' in typo and typo['fontFamily']:
                font = typo['fontFamily']
                if isinstance(font, str) and font != 'null':
                    if merged['typography']['fontFamily']['heading'] is None:
                        merged['typography']['fontFamily']['heading'] = font
                    if merged['typography']['fontFamily']['body'] is None:
                        merged['typography']['fontFamily']['body'] = font

            # Font sizes - collect unique values
            for key in ['headingSize', 'bodySize']:
                if key in typo and typo[key] and typo[key] != 'null':
                    size = typo[key]
                    if size not in seen_sizes:
                        seen_sizes.add(size)
                        # Map to our size scale: heading sizes land on
                        # '4xl', body sizes on 'base' (first wins).
                        if 'heading' in key.lower():
                            if '4xl' not in merged['typography']['fontSize']:
                                merged['typography']['fontSize']['4xl'] = size
                        else:
                            if 'base' not in merged['typography']['fontSize']:
                                merged['typography']['fontSize']['base'] = size

            # Font weights: only keys already present in the skeleton
            # scale (normal/medium/semibold/bold) are accepted.
            if 'fontWeight' in typo and isinstance(typo['fontWeight'], dict):
                for key, val in typo['fontWeight'].items():
                    if val and val != 'null':
                        target_key = key.lower()
                        if target_key in merged['typography']['fontWeight']:
                            if merged['typography']['fontWeight'][target_key] is None:
                                merged['typography']['fontWeight'][target_key] = val

        # Merge spacing
        if 'spacing' in tokens:
            spacing = tokens['spacing']
            if isinstance(spacing, dict):
                for key, val in spacing.items():
                    if val and val != 'null':
                        # Map section spacing to our scale: section- or
                        # container-level padding -> slot '16', gaps -> '4'.
                        if 'section' in key.lower() or 'container' in key.lower():
                            if '16' not in merged['spacing']:
                                merged['spacing']['16'] = val
                        elif 'gap' in key.lower():
                            if '4' not in merged['spacing']:
                                merged['spacing']['4'] = val

        # Merge border radius
        if 'borderRadius' in tokens and tokens['borderRadius'] and tokens['borderRadius'] != 'null':
            radius = tokens['borderRadius']
            if 'md' not in merged['borderRadius']:
                merged['borderRadius']['md'] = radius

        # Merge shadows
        if 'shadow' in tokens and tokens['shadow'] and tokens['shadow'] != 'null':
            shadow = tokens['shadow']
            if 'md' not in merged['shadows']:
                merged['shadows']['md'] = shadow

        # Collect notes
        if 'notes' in tokens and isinstance(tokens['notes'], list):
            merged['notes'].extend(tokens['notes'])

    # Clean up None values
    def clean_nones(d):
        if isinstance(d, dict):
            return {k: clean_nones(v) for k, v in d.items() if v is not None}
        return d

    # Don't clean top-level structure, just nested Nones
    # (only 'colors' and 'typography' hold None placeholders; the other
    # groups start as empty dicts and never receive Nones).
    for key in ['colors', 'typography']:
        if key in merged:
            merged[key] = clean_nones(merged[key])

    return merged
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
def generate_tokens_css(tokens: Dict[str, Any]) -> str:
    """Render design *tokens* as a ``:root`` block of CSS custom properties.

    Missing keys fall back to the matching ``DEFAULT_TOKENS`` entry (or a
    hard-coded default for the flat color keys), so the emitted stylesheet
    always defines the complete variable set.
    """
    colors = tokens.get('colors', {})
    text_colors = colors.get('text', {})
    typography = tokens.get('typography', {})
    font_family = typography.get('fontFamily', {})

    css = [
        "/* Design Tokens - Auto-generated */",
        "/* Edit values below to customize the design */",
        "",
        ":root {",
        " /* Colors */",
    ]

    # Flat palette, the nested text colors, then the border.
    css.append(f" --color-primary: {colors.get('primary', '#2563eb')};")
    css.append(f" --color-secondary: {colors.get('secondary', '#64748b')};")
    css.append(f" --color-accent: {colors.get('accent', '#f59e0b')};")
    css.append(f" --color-background: {colors.get('background', '#ffffff')};")
    css.append(f" --color-surface: {colors.get('surface', '#f8fafc')};")
    css.append(f" --color-text-primary: {text_colors.get('primary', '#0f172a')};")
    css.append(f" --color-text-secondary: {text_colors.get('secondary', '#475569')};")
    css.append(f" --color-text-muted: {text_colors.get('muted', '#94a3b8')};")
    css.append(f" --color-border: {colors.get('border', '#e2e8f0')};")

    css.append("")
    css.append(" /* Typography */")
    css.append(f" --font-heading: {font_family.get('heading', 'Inter, sans-serif')};")
    css.append(f" --font-body: {font_family.get('body', 'Inter, sans-serif')};")

    # Scale-driven groups share one emission pattern: for each key on the
    # scale, take the token value, else the DEFAULT_TOKENS entry, else the
    # group fallback. A non-None header starts a new commented section.
    scale_groups = [
        (None, 'font-size', typography.get('fontSize', {}),
         DEFAULT_TOKENS['typography']['fontSize'], '16px',
         ['xs', 'sm', 'base', 'lg', 'xl', '2xl', '3xl', '4xl']),
        (None, 'font-weight', typography.get('fontWeight', {}),
         DEFAULT_TOKENS['typography']['fontWeight'], 400,
         ['normal', 'medium', 'semibold', 'bold']),
        (None, 'line-height', typography.get('lineHeight', {}),
         DEFAULT_TOKENS['typography']['lineHeight'], 1.5,
         ['tight', 'normal', 'relaxed']),
        (" /* Spacing */", 'space', tokens.get('spacing', {}),
         DEFAULT_TOKENS['spacing'], '16px',
         ['1', '2', '3', '4', '6', '8', '12', '16']),
        (" /* Border Radius */", 'radius', tokens.get('borderRadius', {}),
         DEFAULT_TOKENS['borderRadius'], '8px',
         ['sm', 'md', 'lg', 'full']),
        (" /* Shadows */", 'shadow', tokens.get('shadows', {}),
         DEFAULT_TOKENS['shadows'], '0 1px 2px rgba(0,0,0,0.05)',
         ['sm', 'md', 'lg']),
    ]

    for header, prefix, group, defaults, fallback, keys in scale_groups:
        if header is not None:
            css.append("")
            css.append(header)
        for key in keys:
            default = defaults.get(key, fallback)
            css.append(f" --{prefix}-{key}: {group.get(key, default)};")

    css.append("}")
    css.append("")

    return "\n".join(css)
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
def extract_tokens(
    screenshots_dir: str,
    css_path: Optional[str] = None,
    model: str = "gemini-2.5-flash",
    verbose: bool = False
) -> Dict[str, Any]:
    """Extract design tokens from screenshots using Gemini Vision.

    Looks for desktop.png / tablet.png / mobile.png in *screenshots_dir*
    and sends whichever exist, in that order, in a single model request.
    On any failure (no API key, no screenshots, API error, empty reply)
    this degrades to DEFAULT_TOKENS rather than raising.

    Args:
        screenshots_dir: Directory containing screenshots
        css_path: Optional path to filtered CSS (improves accuracy)
        model: Gemini model to use
        verbose: Enable verbose output

    Returns:
        Design tokens dictionary
    """

    api_key = get_api_key()
    if not api_key:
        if verbose:
            print("Warning: GEMINI_API_KEY not found, using default tokens")
        # NOTE(review): .copy() is shallow — nested dicts in the returned
        # value still alias DEFAULT_TOKENS; callers mutating them would
        # corrupt the module-level defaults. Worth confirming/fixing.
        return DEFAULT_TOKENS.copy()

    # Load CSS if provided
    css_content = None
    if css_path and Path(css_path).exists():
        with open(css_path, 'r', encoding='utf-8') as f:
            css_content = f.read()
        if verbose:
            print(f"Loaded CSS: {len(css_content)} chars")

    # Build prompt with context (CSS, when present, is embedded so the
    # model can quote exact values instead of estimating from pixels)
    prompt = build_extraction_prompt(css_content)

    if verbose and css_content:
        print("Using enhanced prompt with CSS context")

    # Find screenshots
    screenshots_path = Path(screenshots_dir)
    desktop = screenshots_path / "desktop.png"
    tablet = screenshots_path / "tablet.png"
    mobile = screenshots_path / "mobile.png"

    # Check which files exist
    available_images = []
    for img in [desktop, tablet, mobile]:
        if img.exists():
            available_images.append(img)
            if verbose:
                print(f"Found: {img}")

    if not available_images:
        if verbose:
            print("Warning: No screenshots found, using default tokens")
        return DEFAULT_TOKENS.copy()

    try:
        # Initialize client
        client = genai.Client(api_key=api_key)

        # Build content with images (prompt first, then each PNG as an
        # inline byte part)
        content = [prompt]

        for img_path in available_images:
            with open(img_path, 'rb') as f:
                img_bytes = f.read()
            content.append(
                types.Part.from_bytes(data=img_bytes, mime_type='image/png')
            )

        if verbose:
            print(f"Sending {len(available_images)} images to {model}...")

        # Request structured JSON output
        config = types.GenerateContentConfig(
            response_mime_type='application/json'
        )

        response = client.models.generate_content(
            model=model,
            contents=content,
            config=config
        )

        # Parse response
        if hasattr(response, 'text') and response.text:
            tokens = json.loads(response.text)

            # Validate — failures are recorded as notes, not fatal:
            # the invalid values themselves are left in place
            is_valid, errors = validate_tokens(tokens)
            if not is_valid:
                if verbose:
                    print(f"Validation warnings: {errors}")
                tokens['notes'] = tokens.get('notes', []) + errors

            # Merge with defaults for missing values
            tokens = merge_with_defaults(tokens)

            if verbose:
                print("Tokens extracted successfully")

            return tokens
        else:
            if verbose:
                print("Warning: Empty response, using default tokens")
            return DEFAULT_TOKENS.copy()

    except Exception as e:
        # Broad catch is deliberate: any API/parse error degrades to
        # defaults so the calling pipeline can continue
        if verbose:
            print(f"Error during extraction: {e}")

        # Return defaults with error note
        tokens = DEFAULT_TOKENS.copy()
        tokens['notes'] = [f"Extraction failed: {str(e)}"]
        return tokens
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
def main():
|
|
600
|
-
parser = argparse.ArgumentParser(
|
|
601
|
-
description="Extract design tokens from screenshots using Gemini Vision"
|
|
602
|
-
)
|
|
603
|
-
parser.add_argument(
|
|
604
|
-
'--screenshots', '-s',
|
|
605
|
-
required=True,
|
|
606
|
-
help='Directory containing screenshots (desktop.png, tablet.png, mobile.png)'
|
|
607
|
-
)
|
|
608
|
-
parser.add_argument(
|
|
609
|
-
'--output', '-o',
|
|
610
|
-
required=True,
|
|
611
|
-
help='Output directory for design-tokens.json and tokens.css'
|
|
612
|
-
)
|
|
613
|
-
parser.add_argument(
|
|
614
|
-
'--css',
|
|
615
|
-
default=None,
|
|
616
|
-
help='Path to filtered CSS file for exact token extraction (optional)'
|
|
617
|
-
)
|
|
618
|
-
parser.add_argument(
|
|
619
|
-
'--model', '-m',
|
|
620
|
-
default='gemini-2.5-flash',
|
|
621
|
-
help='Gemini model to use (default: gemini-2.5-flash)'
|
|
622
|
-
)
|
|
623
|
-
parser.add_argument(
|
|
624
|
-
'--verbose', '-v',
|
|
625
|
-
action='store_true',
|
|
626
|
-
help='Enable verbose output'
|
|
627
|
-
)
|
|
628
|
-
parser.add_argument(
|
|
629
|
-
'--section-mode',
|
|
630
|
-
action='store_true',
|
|
631
|
-
help='Analyze sections instead of viewports (looks for sections/*.png)'
|
|
632
|
-
)
|
|
633
|
-
parser.add_argument(
|
|
634
|
-
'--delay',
|
|
635
|
-
type=float,
|
|
636
|
-
default=1.0,
|
|
637
|
-
help='Delay between API calls in seconds (default: 1.0)'
|
|
638
|
-
)
|
|
639
|
-
|
|
640
|
-
args = parser.parse_args()
|
|
641
|
-
|
|
642
|
-
# Create output directory
|
|
643
|
-
output_path = Path(args.output)
|
|
644
|
-
output_path.mkdir(parents=True, exist_ok=True)
|
|
645
|
-
|
|
646
|
-
# Section mode: analyze each section separately
|
|
647
|
-
if args.section_mode:
|
|
648
|
-
sections_dir = Path(args.screenshots) / 'sections'
|
|
649
|
-
if not sections_dir.exists():
|
|
650
|
-
print(json.dumps({
|
|
651
|
-
"success": False,
|
|
652
|
-
"error": f"Sections directory not found: {sections_dir}",
|
|
653
|
-
"hint": "Run screenshot.js with --section-mode true first"
|
|
654
|
-
}, indent=2))
|
|
655
|
-
sys.exit(1)
|
|
656
|
-
|
|
657
|
-
section_files = sorted(sections_dir.glob('section-*.png'))
|
|
658
|
-
if not section_files:
|
|
659
|
-
print(json.dumps({
|
|
660
|
-
"success": False,
|
|
661
|
-
"error": "No section images found in sections/ directory"
|
|
662
|
-
}, indent=2))
|
|
663
|
-
sys.exit(1)
|
|
664
|
-
|
|
665
|
-
# Limit sections to avoid excessive API calls
|
|
666
|
-
MAX_SECTIONS = 15
|
|
667
|
-
if len(section_files) > MAX_SECTIONS:
|
|
668
|
-
if args.verbose:
|
|
669
|
-
print(f"Warning: Limiting to {MAX_SECTIONS} sections (found {len(section_files)})", file=sys.stderr)
|
|
670
|
-
section_files = section_files[:MAX_SECTIONS]
|
|
671
|
-
|
|
672
|
-
if args.verbose:
|
|
673
|
-
print(f"Found {len(section_files)} sections to analyze", file=sys.stderr)
|
|
674
|
-
|
|
675
|
-
# Check API key
|
|
676
|
-
api_key = get_api_key()
|
|
677
|
-
if not api_key:
|
|
678
|
-
print(json.dumps({
|
|
679
|
-
"success": False,
|
|
680
|
-
"error": "GEMINI_API_KEY not set",
|
|
681
|
-
"hint": "Set GEMINI_API_KEY environment variable"
|
|
682
|
-
}, indent=2))
|
|
683
|
-
sys.exit(1)
|
|
684
|
-
|
|
685
|
-
# Load CSS if provided
|
|
686
|
-
css_content = None
|
|
687
|
-
if args.css and Path(args.css).exists():
|
|
688
|
-
with open(args.css, 'r', encoding='utf-8') as f:
|
|
689
|
-
css_content = f.read()
|
|
690
|
-
if args.verbose:
|
|
691
|
-
print(f"Loaded CSS: {len(css_content)} chars", file=sys.stderr)
|
|
692
|
-
|
|
693
|
-
# Initialize client
|
|
694
|
-
client = genai.Client(api_key=api_key)
|
|
695
|
-
|
|
696
|
-
# Create section-analysis directory
|
|
697
|
-
section_output_dir = output_path / 'section-analysis'
|
|
698
|
-
section_output_dir.mkdir(exist_ok=True)
|
|
699
|
-
|
|
700
|
-
# Process each section
|
|
701
|
-
section_results = []
|
|
702
|
-
for i, section_path in enumerate(section_files):
|
|
703
|
-
if args.verbose:
|
|
704
|
-
print(f"[{i+1}/{len(section_files)}] Analyzing {section_path.name}...", file=sys.stderr)
|
|
705
|
-
|
|
706
|
-
tokens = extract_section_tokens(
|
|
707
|
-
str(section_path),
|
|
708
|
-
css_content,
|
|
709
|
-
client,
|
|
710
|
-
args.model,
|
|
711
|
-
args.verbose
|
|
712
|
-
)
|
|
713
|
-
section_results.append(tokens)
|
|
714
|
-
|
|
715
|
-
# Save individual section result
|
|
716
|
-
section_out_path = section_output_dir / f'{section_path.stem}-tokens.json'
|
|
717
|
-
with open(section_out_path, 'w') as f:
|
|
718
|
-
json.dump(tokens, f, indent=2)
|
|
719
|
-
|
|
720
|
-
# Rate limiting delay (except for last section)
|
|
721
|
-
if i < len(section_files) - 1:
|
|
722
|
-
time.sleep(args.delay)
|
|
723
|
-
|
|
724
|
-
if args.verbose:
|
|
725
|
-
print(f"Merging tokens from {len(section_results)} sections...", file=sys.stderr)
|
|
726
|
-
|
|
727
|
-
# Merge all section tokens
|
|
728
|
-
merged_tokens = merge_section_tokens(section_results)
|
|
729
|
-
|
|
730
|
-
# Merge with defaults for complete token set
|
|
731
|
-
tokens = merge_with_defaults(merged_tokens)
|
|
732
|
-
tokens['_mode'] = 'section'
|
|
733
|
-
tokens['_sections'] = merged_tokens.get('_sections', [])
|
|
734
|
-
tokens['_sectionCount'] = merged_tokens.get('_sectionCount', 0)
|
|
735
|
-
|
|
736
|
-
else:
|
|
737
|
-
# Standard mode: analyze viewport screenshots
|
|
738
|
-
tokens = extract_tokens(
|
|
739
|
-
screenshots_dir=args.screenshots,
|
|
740
|
-
css_path=args.css,
|
|
741
|
-
model=args.model,
|
|
742
|
-
verbose=args.verbose
|
|
743
|
-
)
|
|
744
|
-
tokens['_mode'] = 'viewport'
|
|
745
|
-
|
|
746
|
-
# Save design-tokens.json
|
|
747
|
-
json_path = output_path / "design-tokens.json"
|
|
748
|
-
with open(json_path, 'w') as f:
|
|
749
|
-
json.dump(tokens, f, indent=2)
|
|
750
|
-
|
|
751
|
-
if args.verbose:
|
|
752
|
-
print(f"Saved: {json_path}", file=sys.stderr)
|
|
753
|
-
|
|
754
|
-
# Generate and save tokens.css
|
|
755
|
-
css_output = generate_tokens_css(tokens)
|
|
756
|
-
css_path = output_path / "tokens.css"
|
|
757
|
-
with open(css_path, 'w') as f:
|
|
758
|
-
f.write(css_output)
|
|
759
|
-
|
|
760
|
-
if args.verbose:
|
|
761
|
-
print(f"Saved: {css_path}", file=sys.stderr)
|
|
762
|
-
|
|
763
|
-
# Output result as JSON
|
|
764
|
-
result = {
|
|
765
|
-
"success": True,
|
|
766
|
-
"tokens_json": str(json_path),
|
|
767
|
-
"tokens_css": str(css_path),
|
|
768
|
-
"model": args.model,
|
|
769
|
-
"mode": tokens.get('_mode', 'viewport'),
|
|
770
|
-
"notes": tokens.get('notes', [])
|
|
771
|
-
}
|
|
772
|
-
|
|
773
|
-
# Add section info if in section mode
|
|
774
|
-
if args.section_mode:
|
|
775
|
-
result["section_analysis"] = str(section_output_dir)
|
|
776
|
-
result["sections_processed"] = len(section_results)
|
|
777
|
-
|
|
778
|
-
print(json.dumps(result, indent=2))
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
# Run the CLI only when executed as a script, so the module can be
# imported (e.g. for testing) without side effects.
if __name__ == '__main__':
    main()
|