napt-0.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- napt/__init__.py +91 -0
- napt/build/__init__.py +47 -0
- napt/build/manager.py +1087 -0
- napt/build/packager.py +315 -0
- napt/build/template.py +301 -0
- napt/cli.py +602 -0
- napt/config/__init__.py +42 -0
- napt/config/loader.py +465 -0
- napt/core.py +385 -0
- napt/detection.py +630 -0
- napt/discovery/__init__.py +86 -0
- napt/discovery/api_github.py +445 -0
- napt/discovery/api_json.py +452 -0
- napt/discovery/base.py +244 -0
- napt/discovery/url_download.py +304 -0
- napt/discovery/web_scrape.py +467 -0
- napt/exceptions.py +149 -0
- napt/io/__init__.py +42 -0
- napt/io/download.py +357 -0
- napt/io/upload.py +37 -0
- napt/logging.py +230 -0
- napt/policy/__init__.py +50 -0
- napt/policy/updates.py +126 -0
- napt/psadt/__init__.py +43 -0
- napt/psadt/release.py +309 -0
- napt/requirements.py +566 -0
- napt/results.py +143 -0
- napt/state/__init__.py +58 -0
- napt/state/tracker.py +371 -0
- napt/validation.py +467 -0
- napt/versioning/__init__.py +115 -0
- napt/versioning/keys.py +309 -0
- napt/versioning/msi.py +725 -0
- napt-0.3.1.dist-info/METADATA +114 -0
- napt-0.3.1.dist-info/RECORD +38 -0
- napt-0.3.1.dist-info/WHEEL +4 -0
- napt-0.3.1.dist-info/entry_points.txt +3 -0
- napt-0.3.1.dist-info/licenses/LICENSE +202 -0
napt/validation.py
ADDED
@@ -0,0 +1,467 @@
# Copyright 2025 Roger Cibrian
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Recipe validation module.

This module provides validation functions for checking recipe syntax and
configuration without making network calls or downloading files. This is
useful for quick feedback during recipe development and in CI/CD pipelines.

Validation Checks:

- YAML syntax is valid
- Required top-level fields present (apiVersion, app)
- apiVersion is supported
- App has required fields (name, id, source)
- Discovery strategy exists and is registered
- Strategy-specific configuration is valid
- Win32 configuration fields are valid (types, values, unknown field warnings)

Example:
    Validate a recipe and handle results:
    ```python
    from pathlib import Path
    from napt.validation import validate_recipe

    result = validate_recipe(Path("recipes/Google/chrome.yaml"))
    if result.status == "valid":
        print(f"Recipe is valid with {result.app_count} app(s)")
    else:
        for error in result.errors:
            print(f"Error: {error}")
    ```

"""

from __future__ import annotations

from pathlib import Path

import yaml

from napt.discovery import get_strategy
from napt.exceptions import ConfigError
from napt.logging import get_global_logger
from napt.results import ValidationResult

__all__ = ["validate_recipe"]


# Schema definitions for win32 configuration validation
# Each entry: field_name -> (expected_type, allowed_values or None, description)
_WIN32_FIELDS: dict[str, tuple[type, list[str] | None, str]] = {
    "build_types": (str, ["both", "app_only", "update_only"], "build type"),
    "installed_check": (dict, None, "installed check configuration"),
}

_INSTALLED_CHECK_FIELDS: dict[str, tuple[type, list[str] | None, str]] = {
    "display_name": (str, None, "display name for registry lookup"),
    "architecture": (str, ["x86", "x64", "arm64", "any"], "architecture"),
    "override_msi_display_name": (bool, None, "MSI display name override flag"),
    "fail_on_error": (bool, None, "fail on error flag"),
    "log_format": (str, ["cmtrace"], "log format"),
    "log_level": (str, ["INFO", "WARNING", "ERROR", "DEBUG"], "log level"),
    "log_rotation_mb": (int, None, "log rotation size in MB"),
    "detection": (dict, None, "detection configuration"),
}

_DETECTION_FIELDS: dict[str, tuple[type, list[str] | None, str]] = {
    "exact_match": (bool, None, "exact version match flag"),
}


def _find_similar_field(unknown: str, known_fields: set[str]) -> str | None:
    """Find a similar field name for typo suggestions.

    Uses simple heuristics: lowercase comparison, common typo patterns.

    Args:
        unknown: The unknown field name.
        known_fields: Set of known valid field names.

    Returns:
        Similar field name if found, None otherwise.

    """
    unknown_lower = unknown.lower().replace("_", "").replace("-", "")

    for known in known_fields:
        known_lower = known.lower().replace("_", "").replace("-", "")
        # Exact match after normalization (e.g., "displayname" -> "display_name")
        if unknown_lower == known_lower:
            return known
        # Check if one is substring of other (e.g., "display" in "display_name")
        if len(unknown_lower) > 3 and (
            unknown_lower in known_lower or known_lower in unknown_lower
        ):
            return known

    return None


def _validate_field_type(
    value: object,
    expected_type: type,
    field_path: str,
    errors: list[str],
) -> bool:
    """Validate that a field has the expected type.

    Args:
        value: The value to check.
        expected_type: Expected Python type.
        field_path: Full path to field for error messages.
        errors: List to append errors to.

    Returns:
        True if type is valid, False otherwise.

    """
    if not isinstance(value, expected_type):
        type_name = expected_type.__name__
        actual_type = type(value).__name__
        errors.append(f"{field_path}: Must be {type_name}, got {actual_type}")
        return False
    return True


def _validate_field_value(
    value: object,
    allowed_values: list[str],
    field_path: str,
    errors: list[str],
) -> bool:
    """Validate that a field value is in the allowed set.

    Args:
        value: The value to check.
        allowed_values: List of allowed values.
        field_path: Full path to field for error messages.
        errors: List to append errors to.

    Returns:
        True if value is valid, False otherwise.

    """
    if value not in allowed_values:
        allowed_str = ", ".join(f"'{v}'" for v in allowed_values)
        errors.append(f"{field_path}: Invalid value '{value}'. Allowed: {allowed_str}")
        return False
    return True


def _validate_section(
    section: dict,
    schema: dict[str, tuple[type, list[str] | None, str]],
    section_path: str,
    errors: list[str],
    warnings: list[str],
) -> None:
    """Validate a configuration section against its schema.

    Checks types, allowed values, and warns on unknown fields.

    Args:
        section: The configuration section to validate.
        schema: Schema definition for this section.
        section_path: Full path to section for error messages.
        errors: List to append errors to.
        warnings: List to append warnings to.

    """
    known_fields = set(schema.keys())
    actual_fields = set(section.keys())

    # Check for unknown fields
    unknown_fields = actual_fields - known_fields
    for unknown in unknown_fields:
        similar = _find_similar_field(unknown, known_fields)
        if similar:
            warnings.append(
                f"{section_path}: Unknown field '{unknown}'. Did you mean '{similar}'?"
            )
        else:
            warnings.append(f"{section_path}: Unknown field '{unknown}'")

    # Validate known fields
    for field_name, (expected_type, allowed_values, _desc) in schema.items():
        if field_name not in section:
            continue

        value = section[field_name]
        field_path = f"{section_path}.{field_name}"

        # Type check
        if not _validate_field_type(value, expected_type, field_path, errors):
            continue

        # Value check (only for non-dict types with allowed values)
        if allowed_values is not None and expected_type is not dict:
            _validate_field_value(value, allowed_values, field_path, errors)


def _validate_win32_config(
    app: dict,
    app_prefix: str,
    errors: list[str],
    warnings: list[str],
) -> None:
    """Validate the win32 configuration section.

    Validates:
    - win32.build_types
    - win32.installed_check.*
    - win32.installed_check.detection.*

    Args:
        app: The app configuration dictionary.
        app_prefix: Prefix for error messages (e.g., "app").
        errors: List to append errors to.
        warnings: List to append warnings to.

    """
    win32 = app.get("win32")
    if win32 is None:
        return

    win32_path = f"{app_prefix}.win32"

    # Validate win32 is a dict
    if not isinstance(win32, dict):
        errors.append(f"{win32_path}: Must be a dictionary")
        return

    # Validate win32 section
    _validate_section(win32, _WIN32_FIELDS, win32_path, errors, warnings)

    # Validate installed_check subsection
    installed_check = win32.get("installed_check")
    if installed_check is not None:
        ic_path = f"{win32_path}.installed_check"

        if not isinstance(installed_check, dict):
            errors.append(f"{ic_path}: Must be a dictionary")
        else:
            _validate_section(
                installed_check, _INSTALLED_CHECK_FIELDS, ic_path, errors, warnings
            )

            # Validate detection subsection
            detection = installed_check.get("detection")
            if detection is not None:
                det_path = f"{ic_path}.detection"

                if not isinstance(detection, dict):
                    errors.append(f"{det_path}: Must be a dictionary")
                else:
                    _validate_section(
                        detection, _DETECTION_FIELDS, det_path, errors, warnings
                    )


def validate_recipe(recipe_path: Path) -> ValidationResult:
    """Validate a recipe file without downloading anything.

    Validates recipe syntax, required fields, and configuration without
    making network calls.

    Args:
        recipe_path: Path to the recipe YAML file to validate.

    Returns:
        Validation status, errors, warnings, and app count.

    Example:
        Validate a recipe and check results:
        ```python
        from pathlib import Path

        result = validate_recipe(Path("recipes/app.yaml"))
        if result.status == "valid":
            print("Recipe is valid!")
        else:
            for error in result.errors:
                print(f"Error: {error}")
        ```

    """
    logger = get_global_logger()

    errors = []
    warnings = []
    app_count = 0

    logger.verbose("VALIDATION", f"Validating recipe: {recipe_path}")

    # Check file exists
    if not recipe_path.exists():
        errors.append(f"Recipe file not found: {recipe_path}")
        return ValidationResult(
            status="invalid",
            errors=errors,
            warnings=warnings,
            app_count=0,
            recipe_path=str(recipe_path),
        )

    # Parse YAML
    try:
        with open(recipe_path, encoding="utf-8") as f:
            recipe = yaml.safe_load(f)
    except yaml.YAMLError as err:
        errors.append(f"Invalid YAML syntax: {err}")
        return ValidationResult(
            status="invalid",
            errors=errors,
            warnings=warnings,
            app_count=0,
            recipe_path=str(recipe_path),
        )
    except Exception as err:
        errors.append(f"Failed to read recipe file: {err}")
        return ValidationResult(
            status="invalid",
            errors=errors,
            warnings=warnings,
            app_count=0,
            recipe_path=str(recipe_path),
        )

    logger.verbose("VALIDATION", "YAML syntax is valid")

    # Validate recipe is a dict
    if not isinstance(recipe, dict):
        errors.append("Recipe must be a YAML dictionary/mapping")
        return ValidationResult(
            status="invalid",
            errors=errors,
            warnings=warnings,
            app_count=0,
            recipe_path=str(recipe_path),
        )

    # Check apiVersion
    if "apiVersion" not in recipe:
        errors.append("Missing required field: apiVersion")
    else:
        api_version = recipe["apiVersion"]
        if not isinstance(api_version, str):
            errors.append("apiVersion must be a string")
        elif api_version != "napt/v1":
            warnings.append(
                f"apiVersion '{api_version}' may not be supported (expected: napt/v1)"
            )
        if not errors:
            logger.verbose("VALIDATION", f"apiVersion: {api_version}")

    # Check app field
    app = recipe.get("app")
    if not app:
        errors.append("Field 'app' is required")
        return ValidationResult(
            status="invalid",
            errors=errors,
            warnings=warnings,
            app_count=0,
            recipe_path=str(recipe_path),
        )

    if not isinstance(app, dict):
        errors.append("Field 'app' must be a dictionary")
        return ValidationResult(
            status="invalid",
            errors=errors,
            warnings=warnings,
            app_count=0,
            recipe_path=str(recipe_path),
        )

    app_prefix = "app"

    logger.verbose("VALIDATION", f"Found app: {app.get('name', 'unnamed')}")

    # Check required fields
    for field in ["name", "id", "source"]:
        if field not in app:
            errors.append(f"{app_prefix}: Missing required field: {field}")

    # Validate name
    if "name" in app and not isinstance(app["name"], str):
        errors.append(f"{app_prefix}: Field 'name' must be a string")

    # Validate id
    if "id" in app:
        if not isinstance(app["id"], str):
            errors.append(f"{app_prefix}: Field 'id' must be a string")
        elif not app["id"]:
            errors.append(f"{app_prefix}: Field 'id' cannot be empty")

    # Validate source
    if "source" not in app:
        # Already reported missing field, but continue to check other things
        pass
    else:
        source = app["source"]
        if not isinstance(source, dict):
            errors.append(f"{app_prefix}.source: Must be a dictionary")
        else:
            # Check strategy field
            if "strategy" not in source:
                errors.append(f"{app_prefix}.source: Missing required field: strategy")
            else:
                strategy_name = source["strategy"]
                if not isinstance(strategy_name, str):
                    errors.append(f"{app_prefix}.source.strategy: Must be a string")
                else:
                    logger.verbose(
                        "VALIDATION",
                        f"App '{app.get('name', 'unnamed')}' uses strategy: {strategy_name}",
                    )

                    # Check if strategy exists
                    try:
                        strategy = get_strategy(strategy_name)
                    except ConfigError as err:
                        errors.append(f"{app_prefix}.source.strategy: {err}")
                    else:
                        # Validate strategy-specific configuration
                        if hasattr(strategy, "validate_config"):
                            try:
                                config_errors = strategy.validate_config(app)
                                for error in config_errors:
                                    errors.append(f"{app_prefix}: {error}")
                            except Exception as err:
                                errors.append(
                                    f"{app_prefix}: Strategy validation failed: {err}"
                                )

    # Validate win32 configuration
    _validate_win32_config(app, app_prefix, errors, warnings)

    # Determine final status
    status = "valid" if len(errors) == 0 else "invalid"
    app_count = 1 if status == "valid" else 0

    if status == "valid":
        logger.verbose("VALIDATION", "Recipe is valid!")
    else:
        logger.verbose("VALIDATION", f"Recipe has {len(errors)} error(s)")

    return ValidationResult(
        status=status,
        errors=errors,
        warnings=warnings,
        app_count=app_count,
        recipe_path=str(recipe_path),
    )
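The schema tables and `_validate_section` helper above drive all of the win32 checks, so the quickest way to see what they report is to run a deliberately broken block through them. The sketch below is not part of the wheel: it imports the module's private helpers directly, and the sample field values are made up purely for illustration.

```python
# Illustrative only -- not shipped in napt. Feeds a win32 block with a
# typo and an invalid value through the private validation helpers.
from napt.validation import _validate_win32_config

app = {
    "win32": {
        "build_types": "app_only",  # allowed value, passes
        "installed_check": {
            "displayname": "Google Chrome",  # typo: should be display_name
            "architecture": "x128",  # not in the allowed set
        },
    }
}

errors: list[str] = []
warnings: list[str] = []
_validate_win32_config(app, "app", errors, warnings)

for msg in errors:
    print("ERROR:", msg)
for msg in warnings:
    print("WARNING:", msg)
# ERROR: app.win32.installed_check.architecture: Invalid value 'x128'.
#   Allowed: 'x86', 'x64', 'arm64', 'any'
# WARNING: app.win32.installed_check: Unknown field 'displayname'.
#   Did you mean 'display_name'?
```

The "Did you mean" suggestion comes from `_find_similar_field`, which compares names with underscores and hyphens stripped, so common spellings of a field still point back to the canonical key.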
napt/versioning/__init__.py
ADDED
@@ -0,0 +1,115 @@
# Copyright 2025 Roger Cibrian
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Version comparison and extraction utilities for NAPT.

This package provides tools for comparing version strings and extracting
version information from binary files (MSI, EXE). It supports multiple
comparison strategies and handles various versioning schemes including
semantic versioning, numeric versions, and prerelease tags.

Modules:
    keys
        Core version comparison logic with semver-like parsing and robust fallbacks.
    msi
        MSI ProductVersion extraction and metadata extraction using msilib, _msi,
        PowerShell, or msitools.

Version Comparison Strategies:

The versioning system supports multiple comparison modes:

1. **Semantic Versioning (semver)**:
   - Parses X.Y.Z tuples with optional prerelease and build metadata
   - Handles prerelease tags: alpha, beta, rc, dev, etc.
   - Correctly orders: 1.0.0-alpha < 1.0.0-beta < 1.0.0-rc < 1.0.0

2. **Numeric (MSI/EXE)**:
   - Strict numeric-only parsing
   - MSI: 3-part versions (major.minor.patch)
   - EXE: 4-part versions (major.minor.patch.build)

3. **Lexicographic**:
   - Fallback string comparison for non-version-like strings
   - Useful for build IDs, timestamps, etc.

Example:
    Basic version comparison:
    ```python
    from napt.versioning import compare_any, is_newer_any

    # Compare versions (returns 1 for newer, 0 for equal, -1 for older)
    result = compare_any("1.2.0", "1.1.9")  # Returns: 1

    # Check if version is newer
    is_newer = is_newer_any("1.2.0", "1.1.9")  # Returns: True
    ```

    Prerelease handling:
    ```python
    # rc is newer than beta
    compare_any("1.0.0-rc.1", "1.0.0-beta.5")  # Returns: 1

    # Release is newer than prerelease
    compare_any("1.0.0", "1.0.0-rc.1")  # Returns: 1
    ```

    MSI version extraction:
    ```python
    from pathlib import Path
    from napt.versioning.msi import version_from_msi_product_version

    discovered = version_from_msi_product_version(Path("installer.msi"))
    print(discovered.version)  # e.g., "1.2.3"
    ```

    MSI metadata extraction:
    ```python
    from pathlib import Path
    from napt.versioning.msi import extract_msi_metadata

    metadata = extract_msi_metadata(Path("installer.msi"))
    print(f"{metadata.product_name} {metadata.product_version}")
    # e.g., "Google Chrome 131.0.6778.86"
    ```

    MSI architecture extraction:
    ```python
    from pathlib import Path
    from napt.versioning.msi import extract_msi_architecture

    arch = extract_msi_architecture(Path("installer.msi"))
    print(f"Architecture: {arch}")  # e.g., "x64"
    ```

Note:
    - Version comparison is format-agnostic: no network or file I/O
    - MSI extraction works cross-platform with appropriate backends
    - Prerelease ordering follows common conventions but allows custom tags

"""

from .keys import (
    DiscoveredVersion,
    SourceHint,
    compare_any,
    is_newer_any,
    version_key_any,
)
from .msi import (
    MSIMetadata,
    architecture_from_template,
    extract_msi_architecture,
    extract_msi_metadata,
)
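The docstring examples compare versions pairwise, but the documented `compare_any` contract (returns 1, 0, or -1) is also enough to order whole lists via `functools.cmp_to_key`. The sketch below is not taken from the package docs and assumes only that contract and the boolean return of `is_newer_any`.

```python
# Illustrative only -- relies solely on compare_any returning 1/0/-1 and
# is_newer_any returning a bool, as documented above.
from functools import cmp_to_key

from napt.versioning import compare_any, is_newer_any

versions = ["1.2.0", "1.0.0", "1.0.0-rc.1", "1.1.9", "1.0.0-beta.5"]

# Oldest-to-newest ordering; prereleases sort before their final release.
ordered = sorted(versions, key=cmp_to_key(compare_any))
print(ordered)
# ['1.0.0-beta.5', '1.0.0-rc.1', '1.0.0', '1.1.9', '1.2.0']

# The newest candidate is then simply the last element.
assert ordered[-1] == "1.2.0"
assert is_newer_any(ordered[-1], ordered[0])  # True
```

The exported `version_key_any` presumably offers a native sort key for the same purpose; the `cmp_to_key` route above avoids assuming its exact return type.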