autopkg-wrapper 2024.2.4-py3-none-any.whl → 2026.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,674 @@
+ import json
+ import logging
+ import os
+ import plistlib
+ import re
+ import zipfile
+ from typing import Dict, List, Optional, Tuple
+
+
+ def find_report_dirs(base_path: str) -> List[str]:
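+     """Return the sorted ``autopkg_report-*`` directories under base_path.
+
+     Falls back to base_path itself when no report subdirectories exist
+     but the directory contains loose report files.
+     """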
+     dirs: List[str] = []
+     if not os.path.exists(base_path):
+         return dirs
+     for root, subdirs, _files in os.walk(base_path):
+         for d in subdirs:
+             if d.startswith("autopkg_report-"):
+                 dirs.append(os.path.join(root, d))
+     if not dirs:
+         try:
+             has_files = any(
+                 os.path.isfile(os.path.join(base_path, f))
+                 for f in os.listdir(base_path)
+             )
+         except FileNotFoundError:
+             has_files = False
+         if has_files:
+             dirs.append(base_path)
+     return sorted(dirs)
+
+
+ def parse_json_file(path: str) -> Dict:
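+     """Load a JSON file, returning an empty dict on any read or parse error."""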
32
+ try:
33
+ with open(path, "r", encoding="utf-8") as f:
34
+ return json.load(f)
35
+ except Exception:
36
+ return {}
+
+
+ def _infer_recipe_name_from_filename(path: str) -> str:
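+     """Derive a recipe name from a report filename.
+
+     Strips a ``.plist`` extension and a trailing ``-YYYY-MM-DDTHH-MM-SS``
+     timestamp suffix.
+     """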
+     base = os.path.basename(path)
+     if base.endswith(".plist"):
+         base = base[:-6]
+     m = re.search(r"-(\d{4}-\d{2}-\d{2}T\d{2}-\d{2}-\d{2})$", base)
+     if m:
+         return base[: m.start()]
+     return base
+
+
+ def parse_text_file(path: str) -> Dict[str, List]:
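+     """Scan a plain-text autopkg log for errors, uploads, and policy changes.
+
+     Returns a dict with ``uploads``, ``policies``, and ``errors`` lists;
+     unreadable files yield empty lists.
+     """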
+     uploads: List[Dict] = []
+     policies: List[Dict] = []
+     errors: List[str] = []
+
+     re_error = re.compile(r"ERROR[:\s-]+(.+)", re.IGNORECASE)
+     re_upload = re.compile(
+         r"(Uploaded|Upload|Uploading)[^\n]*?(?P<name>[A-Za-z0-9 ._+\-]+?)(?=(?:\s+\bversion\b)|$)(?:[^\n]*?\bversion\b[^\d]*(?P<version>\d+(?:\.\d+)+))?",
+         re.IGNORECASE,
+     )
+     re_policy = re.compile(r"Policy (created|updated):\s*(?P<name>.+)", re.IGNORECASE)
+
+     try:
+         with open(path, "r", encoding="utf-8", errors="ignore") as f:
+             for line in f:
+                 m_err = re_error.search(line)
+                 if m_err:
+                     errors.append(m_err.group(1).strip())
+                     continue
+
+                 m_up = re_upload.search(line)
+                 if m_up:
+                     uploads.append(
+                         {
+                             "name": (m_up.group("name") or "").strip(),
+                             "version": m_up.group("version") or "-",
+                         }
+                     )
+                     continue
+
+                 m_pol = re_policy.search(line)
+                 if m_pol:
+                     action = "updated" if "updated" in line.lower() else "created"
+                     policies.append(
+                         {
+                             "name": m_pol.group("name").strip(),
+                             "action": action,
+                         }
+                     )
+     except Exception:
+         pass
+
+     return {"uploads": uploads, "policies": policies, "errors": errors}
+
+
+ def parse_plist_file(path: str) -> Dict[str, List]:
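+     """Parse one autopkg report plist into upload, policy, and error records.
+
+     Uploads come from the ``jamfpackageuploader_summary_result`` block,
+     policies from any policy-like summary block, and errors from the
+     report's ``failures`` list. The recipe identifier is inferred from
+     the ``/cache/<identifier>/`` segment of the package path.
+     """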
+     uploads: List[Dict] = []
+     policies: List[Dict] = []
+     errors: List[str] = []
+     upload_rows: List[Dict] = []
+     policy_rows: List[Dict] = []
+     error_rows: List[Dict] = []
+
+     try:
+         with open(path, "rb") as f:
+             plist = plistlib.load(f)
+     except Exception:
+         return {
+             "uploads": uploads,
+             "policies": policies,
+             "errors": errors,
+             "upload_rows": upload_rows,
+             "policy_rows": policy_rows,
+             "error_rows": error_rows,
+         }
+
+     failures = plist.get("failures", []) or []
+
+     sr = plist.get("summary_results", {}) or {}
+
+     recipe_name = _infer_recipe_name_from_filename(path)
+     recipe_identifier: Optional[str] = None
+
+     jpu = sr.get("jamfpackageuploader_summary_result")
+     if isinstance(jpu, dict):
+         rows = jpu.get("data_rows") or []
+         for row in rows:
+             name = (row.get("name") or row.get("pkg_display_name") or "-").strip()
+             version = (row.get("version") or "-").strip()
+             uploads.append({"name": name, "version": version})
+             pkg_name = (
+                 row.get("pkg_name") or row.get("pkg_display_name") or "-"
+             ).strip()
+             pkg_path = (row.get("pkg_path") or "").strip()
+             if pkg_path:
+                 parts = pkg_path.split("/cache/")
+                 if len(parts) > 1:
+                     after = parts[1]
+                     rid = after.split("/")[0]
+                     recipe_identifier = rid or recipe_identifier
+             upload_rows.append(
+                 {
+                     "recipe_name": recipe_name,
+                     "recipe_identifier": recipe_identifier or "-",
+                     "package": pkg_name,
+                     "version": version or "-",
+                 }
+             )
+
+     for key, block in sr.items():
+         if not isinstance(block, dict):
+             continue
+         hdr = [h.lower() for h in (block.get("header") or [])]
+         rows = block.get("data_rows") or []
+         summary_text = (block.get("summary_text") or "").lower()
+         looks_like_policy = (
+             "policy" in key.lower()
+             or "policy" in summary_text
+             or any("policy" in h for h in hdr)
+         )
+         if looks_like_policy and rows:
+             for row in rows:
+                 name = row.get("policy_name") or row.get("name") or row.get("title")
+                 action = row.get("action") or row.get("status") or row.get("result")
+                 if name:
+                     policies.append(
+                         {
+                             "name": str(name).strip(),
+                             "action": (str(action).strip() if action else "-"),
+                         }
+                     )
+                     policy_rows.append(
+                         {
+                             "recipe_name": recipe_name,
+                             "recipe_identifier": recipe_identifier or "-",
+                             "policy": str(name).strip(),
+                         }
+                     )
+
+     for fail in failures:
+         if isinstance(fail, dict):
+             msg = fail.get("message") or json.dumps(fail)
+             rec = fail.get("recipe") or recipe_name
+         else:
+             msg = str(fail)
+             rec = recipe_name
+         errors.append(msg)
+         error_rows.append(
+             {
+                 "recipe_name": rec,
+                 "error_type": _classify_error_simple(msg),
+             }
+         )
+
+     return {
+         "uploads": uploads,
+         "policies": policies,
+         "errors": errors,
+         "upload_rows": upload_rows,
+         "policy_rows": policy_rows,
+         "error_rows": error_rows,
+     }
+
+
+ def aggregate_reports(base_path: str) -> Dict:
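+     """Merge every report file under base_path into a single summary dict.
+
+     Plist files are parsed as autopkg reports, JSON files as
+     pre-aggregated summaries, and anything else as plain-text logs.
+     """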
+     summary = {
+         "uploads": [],
+         "policies": [],
+         "errors": [],
+         "recipes": 0,
+         "upload_rows": [],
+         "policy_rows": [],
+         "error_rows": [],
+     }
+     report_dirs = find_report_dirs(base_path)
+
+     for repdir in report_dirs:
+         for root, _subdirs, files in os.walk(repdir):
+             for fn in files:
+                 p = os.path.join(root, fn)
+                 ext = os.path.splitext(fn)[1].lower()
+
+                 if ext == ".plist":
+                     data = parse_plist_file(p)
+                     summary["uploads"] += data.get("uploads", [])
+                     summary["policies"] += data.get("policies", [])
+                     summary["errors"] += data.get("errors", [])
+                     summary["upload_rows"] += data.get("upload_rows", [])
+                     summary["policy_rows"] += data.get("policy_rows", [])
+                     summary["error_rows"] += data.get("error_rows", [])
+                     summary["recipes"] += 1
+                 elif ext == ".json":
+                     data = parse_json_file(p)
+                     if not data:
+                         continue
+                     if isinstance(data, dict):
+                         uploads = data.get("uploads")
+                         policies = data.get("policies")
+                         errors = data.get("errors")
+                         recipes = data.get("recipes")
+                         if isinstance(uploads, list):
+                             summary["uploads"] += uploads
+                         if isinstance(policies, list):
+                             summary["policies"] += policies
+                         if isinstance(errors, list):
+                             summary["errors"] += errors
+                             for e in errors:
+                                 if isinstance(e, dict):
+                                     rn = e.get("recipe") or "-"
+                                     msg = e.get("message") or json.dumps(e)
+                                     summary["error_rows"].append(
+                                         {
+                                             "recipe_name": rn,
+                                             "error_type": _classify_error_simple(
+                                                 str(msg)
+                                             ),
+                                         }
+                                     )
+                         if isinstance(recipes, int):
+                             summary["recipes"] += recipes
+                 else:
+                     data = parse_text_file(p)
+                     summary["uploads"] += data.get("uploads", [])
+                     summary["policies"] += data.get("policies", [])
+                     summary["errors"] += data.get("errors", [])
+
+     return summary
+
+
+ # ---------- Rendering ----------
+
+
+ def _aggregate_for_display(
+     summary: Dict,
+ ) -> Tuple[Dict[str, set], Dict[str, set], Dict[str, int]]:
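+     """Collapse raw summary lists into display form.
+
+     Returns versions seen per app, actions seen per policy, and error
+     counts per category, filtering out implausible app names.
+     """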
+     uploads = summary.get("uploads", [])
+     policies = summary.get("policies", [])
+     errors = summary.get("errors", [])
+
+     def plausible_app_name(n: str) -> bool:
+         if not n or n == "-":
+             return False
+         if n.lower() in {"apps", "packages", "pkg", "file", "37"}:
+             return False
+         if not re.search(r"[A-Za-z]", n):
+             return False
+         return True
+
+     uploads_by_app: Dict[str, set] = {}
+     for u in uploads:
+         if isinstance(u, dict):
+             name = (u.get("name") or "-").strip()
+             ver = (u.get("version") or "-").strip()
+         else:
+             name = str(u).strip()
+             ver = "-"
+         if not plausible_app_name(name):
+             name = "-"
+         uploads_by_app.setdefault(name, set()).add(ver)
+
+     policies_by_name: Dict[str, set] = {}
+     for p in policies:
+         if isinstance(p, dict):
+             name = (p.get("name") or "-").strip()
+             action = (p.get("action") or "-").strip()
+         else:
+             name = str(p).strip()
+             action = "-"
+         policies_by_name.setdefault(name, set()).add(action)
+
+     error_categories: Dict[str, int] = {
+         "trust": 0,
+         "signature": 0,
+         "download": 0,
+         "network": 0,
+         "auth": 0,
+         "jamf": 0,
+         "other": 0,
+     }
+
+     def classify_error(msg: str) -> str:
+         lm = msg.lower()
+         if "trust" in lm:
+             return "trust"
+         if "signature" in lm or "codesign" in lm:
+             return "signature"
+         if "download" in lm or "fetch" in lm:
+             return "download"
+         if (
+             "proxy" in lm
+             or "timeout" in lm
+             or "network" in lm
+             or "url" in lm
+             or "dns" in lm
+         ):
+             return "network"
+         if (
+             "auth" in lm
+             or "token" in lm
+             or "permission" in lm
+             or "401" in lm
+             or "403" in lm
+         ):
+             return "auth"
+         if "jamf" in lm or "policy" in lm:
+             return "jamf"
+         return "other"
+
+     for e in errors:
+         emsg = e if isinstance(e, str) else json.dumps(e)
+         cat = classify_error(emsg)
+         error_categories[cat] = error_categories.get(cat, 0) + 1
+
+     return uploads_by_app, policies_by_name, error_categories
+
+
+ def render_job_summary(summary: Dict, environment: str, run_date: str) -> str:
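+     """Render the run summary as a Markdown job summary.
+
+     Emits a metrics table plus optional upload, policy, and error sections.
+     """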
+     lines: List[str] = []
+     title_bits: List[str] = []
+     if environment:
+         title_bits.append(environment)
+     if run_date:
+         title_bits.append(run_date)
+     if title_bits:
+         lines.append(f"# Autopkg Report Summary ({' '.join(title_bits)})")
+     else:
+         lines.append("# Autopkg Report Summary")
+     lines.append("")
+
+     total_uploads_raw = len(summary.get("uploads", []))
+     uploads_by_app, policies_by_name, error_categories = _aggregate_for_display(summary)
+     total_uploads_apps = len(uploads_by_app)
+     total_policies = len(policies_by_name)
+     total_errors = len(summary.get("errors", []))
+     recipes = summary.get("recipes") or "N/A"
+
+     lines.append("| Metric | Value |")
+     lines.append("| --- | --- |")
+     lines.append(f"| Recipes processed | {recipes} |")
+     lines.append(
+         f"| Apps uploaded | {total_uploads_apps} (items: {total_uploads_raw}) |"
+     )
+     lines.append(f"| Policies changed | {total_policies} |")
+     lines.append(f"| Errors | {total_errors} |")
+     lines.append("")
+
+     if summary.get("upload_rows"):
+         lines.append("## Uploaded Recipes")
+         lines.append("")
+         lines.append("| Recipe Name | Identifier | Package | Version |")
+         lines.append("| --- | --- | --- | --- |")
+         for row in sorted(
+             summary["upload_rows"], key=lambda r: str(r.get("recipe_name", "")).lower()
+         ):
+             pkg = row.get("package", "-")
+             pkg_url = row.get("package_url")
+             pkg_cell = f"[{pkg}]({pkg_url})" if pkg_url else pkg
+             lines.append(
+                 f"| {row.get('recipe_name', '-')} | {row.get('recipe_identifier', '-')} | {pkg_cell} | {row.get('version', '-')} |"
+             )
+         lines.append("")
+     else:
+         lines.append("No uploads in this run.")
+         lines.append("")
+
+     if summary.get("policy_rows"):
+         lines.append("## Policy Recipes")
+         lines.append("")
+         lines.append("| Recipe Name | Identifier | Policy |")
+         lines.append("| --- | --- | --- |")
+         for row in sorted(
+             summary["policy_rows"], key=lambda r: str(r.get("recipe_name", "")).lower()
+         ):
+             lines.append(
+                 f"| {row.get('recipe_name', '-')} | {row.get('recipe_identifier', '-')} | {row.get('policy', '-')} |"
+             )
+         lines.append("")
+
+     if total_errors:
+         lines.append("## Errors Summary")
+         lines.append("")
+         lines.append("| Category | Count |")
+         lines.append("| --- | --- |")
+         for cat in [
+             "trust",
+             "signature",
+             "download",
+             "network",
+             "auth",
+             "jamf",
+             "other",
+         ]:
+             lines.append(f"| {cat} | {error_categories.get(cat, 0)} |")
+         lines.append("")
+
+     return "\n".join(lines)
+
+
+ def render_issue_body(summary: Dict, environment: str, run_date: str) -> str:
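+     """Render a Markdown issue body listing each failing recipe and its
+     classified error type."""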
+     lines: List[str] = []
+     total_errors = len(summary.get("errors", []))
+     _uploads_by_app, _policies_by_name, _error_categories = _aggregate_for_display(
+         summary
+     )
+
+     prefix = "Autopkg run"
+     suffix_bits: List[str] = []
+     if run_date:
+         suffix_bits.append(f"on {run_date}")
+     if environment:
+         suffix_bits.append(f"({environment})")
+     suffix = (" ".join(suffix_bits)).strip()
+     if suffix:
+         lines.append(f"{prefix} {suffix} reported {total_errors} error(s).")
+     else:
+         lines.append(f"{prefix} reported {total_errors} error(s).")
+     lines.append("")
+     lines.append("### Errors")
+     lines.append("| Recipe | Error Type |")
+     lines.append("| --- | --- |")
+     for row in summary.get("error_rows", []):
+         lines.append(
+             f"| {row.get('recipe_name', '-')} | {row.get('error_type', 'other')} |"
+         )
+
+     lines.append("")
+
+     return "\n".join(lines)
+
+
+ # ---------- Utility ----------
+
+
+ def _redact_sensitive(s: str) -> str:
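+     """Mask GitHub tokens and Authorization/Bearer credentials in a string."""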
+     s = re.sub(r"ghs_[A-Za-z0-9]+", "ghs_***", s)
+     s = re.sub(
+         r"(Authorization:\s*token)\s+[A-Za-z0-9_\-]+",
+         r"\1 ***",
+         s,
+         flags=re.IGNORECASE,
+     )
+     s = re.sub(r"(Bearer)\s+[A-Za-z0-9._\-]+", r"\1 ***", s, flags=re.IGNORECASE)
+     return s
+
+
+ def _classify_error_simple(msg: str) -> str:
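+     """Bucket an error message into trust, signature, auth, download,
+     network, jamf, or other based on keyword matches."""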
+     lm = msg.lower()
+     if "trust" in lm:
+         return "trust"
+     if "signature" in lm or "codesign" in lm:
+         return "signature"
+     if (
+         "401" in lm
+         or "403" in lm
+         or "auth" in lm
+         or "token" in lm
+         or "permission" in lm
+     ):
+         return "auth"
+     if "download" in lm or "fetch" in lm or "curl" in lm:
+         return "download"
+     if (
+         "proxy" in lm
+         or "timeout" in lm
+         or "network" in lm
+         or "url" in lm
+         or "dns" in lm
+     ):
+         return "network"
+     if "jamf" in lm or "policy" in lm:
+         return "jamf"
+     return "other"
+
+
+ # ---------- Jamf Helpers ----------
+
+
+ def _normalize_host(url: str) -> str:
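+     """Strip the scheme and any trailing slash from a Jamf Pro URL."""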
+     h = (url or "").strip()
+     if h.startswith("https://"):
+         h = h[len("https://") :]
+     if h.startswith("http://"):
+         h = h[len("http://") :]
+     return h.rstrip("/")
+
+
+ def build_pkg_map(jss_url: str, client_id: str, client_secret: str) -> Dict[str, str]:
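+     """Map Jamf package names to their GUI URLs via the Jamf Pro API.
+
+     Returns an empty dict when jamf_pro_sdk is unavailable or the
+     lookup fails for any reason.
+     """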
+     host = _normalize_host(jss_url)
+     pkg_map: Dict[str, str] = {}
+     try:
+         from jamf_pro_sdk import (  # type: ignore
+             ApiClientCredentialsProvider,
+             JamfProClient,
+         )
+
+         client = JamfProClient(
+             host,
+             ApiClientCredentialsProvider(client_id, client_secret),
+         )
+         packages = client.pro_api.get_packages_v1()
+         for p in packages:
+             try:
+                 name = str(getattr(p, "packageName")).strip()
+                 pid = str(getattr(p, "id")).strip()
+             except Exception:
+                 # ignore objects that do not match the expected shape
+                 continue
+             if not name or not pid:
+                 continue
+             url = f"{jss_url}/view/settings/computer-management/packages/{pid}"
+             if name not in pkg_map:
+                 pkg_map[name] = url
+     except Exception:
+         return {}
+     return pkg_map
+
+
+ def enrich_upload_rows(upload_rows: List[Dict], pkg_map: Dict[str, str]) -> int:
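+     """Attach ``package_url`` to rows whose package name is in pkg_map.
+
+     Returns the number of rows linked.
+     """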
+     linked = 0
+     for row in upload_rows:
+         pkg_name = str(row.get("package") or "").strip()
+         url = pkg_map.get(pkg_name)
+         if url:
+             row["package_url"] = url
+             linked += 1
+     return linked
+
+
+ def enrich_upload_rows_with_jamf(
+     summary: Dict, jss_url: str, client_id: str, client_secret: str
+ ) -> Tuple[int, List[str]]:
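+     """Link upload rows to Jamf package URLs in place.
+
+     Returns the number of rows linked and the sorted package names
+     known to Jamf.
+     """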
+     pkg_map = build_pkg_map(jss_url, client_id, client_secret)
+     linked = enrich_upload_rows(summary.get("upload_rows", []), pkg_map)
+     return linked, sorted(set(pkg_map.keys()))
+
+
+ def process_reports(
+     *,
+     zip_file: Optional[str],
+     extract_dir: str,
+     reports_dir: Optional[str],
+     environment: str = "",
+     run_date: str = "",
+     out_dir: str,
+     debug: bool,
+     strict: bool,
+ ) -> int:
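+     """Entry point: aggregate reports and write Markdown outputs to out_dir.
+
+     Optionally extracts ``zip_file`` into ``extract_dir`` first, enriches
+     upload rows with Jamf links when AUTOPKG_JSS_URL, AUTOPKG_CLIENT_ID,
+     and AUTOPKG_CLIENT_SECRET are set, writes ``job_summary.md`` (and
+     ``errors_issue.md`` on errors), and returns 1 in strict mode when
+     errors occurred, else 0.
+     """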
+     os.makedirs(out_dir, exist_ok=True)
+
+     if zip_file:
+         zpath = zip_file
+         if not os.path.exists(zpath):
+             raise FileNotFoundError(f"zip file not found: {zpath}")
+         os.makedirs(extract_dir, exist_ok=True)
+         with zipfile.ZipFile(zpath, "r") as zf:
+             zf.extractall(extract_dir)
+         process_dir = extract_dir
+     else:
+         process_dir = reports_dir or extract_dir
+
+     summary = aggregate_reports(process_dir)
+
+     jss_url = os.environ.get("AUTOPKG_JSS_URL")
+     jss_client_id = os.environ.get("AUTOPKG_CLIENT_ID")
+     jss_client_secret = os.environ.get("AUTOPKG_CLIENT_SECRET")
+     jamf_attempted = False
+     jamf_linked = 0
+     jamf_keys: List[str] = []
+     jamf_total = len(summary.get("upload_rows", []))
+     if jss_url and jss_client_id and jss_client_secret and jamf_total:
+         jamf_attempted = True
+         try:
+             jamf_linked, jamf_keys = enrich_upload_rows_with_jamf(
+                 summary, jss_url, jss_client_id, jss_client_secret
+             )
+         except Exception:
+             jamf_linked = 0
+
+     job_md = render_job_summary(summary, environment, run_date)
+     issue_md = None
+     if summary.get("errors"):
+         issue_md = render_issue_body(summary, environment, run_date)
+
+     with open(os.path.join(out_dir, "job_summary.md"), "w", encoding="utf-8") as f:
+         f.write(job_md)
+
+     if issue_md:
+         with open(os.path.join(out_dir, "errors_issue.md"), "w", encoding="utf-8") as f:
+             f.write(issue_md)
+
+     jamf_log_path = ""
+     if debug:
+         jamf_log_path = os.path.join(out_dir, "jamf_lookup_debug.json")
+         try:
+             upload_pkg_names = [
+                 str(r.get("package") or "").strip()
+                 for r in summary.get("upload_rows", [])
+             ]
+             matched = [
+                 r for r in summary.get("upload_rows", []) if r.get("package_url")
+             ]
+             unmatched = [
+                 r for r in summary.get("upload_rows", []) if not r.get("package_url")
+             ]
+             diag = {
+                 "jss_url": jss_url or "",
+                 "jamf_keys_count": len(jamf_keys),
+                 "jamf_keys_sample": jamf_keys[:20],
+                 "uploads_count": len(upload_pkg_names),
+                 "matched_count": len(matched),
+                 "unmatched_count": len(unmatched),
+                 "unmatched_names": [r.get("package") for r in unmatched][:20],
+             }
+             with open(jamf_log_path, "w", encoding="utf-8") as jf:
+                 json.dump(diag, jf, indent=2)
+         except Exception:
+             jamf_log_path = ""
+
+     status = [
+         f"Processed reports in '{process_dir}'. Recipes: {summary.get('recipes', 'N/A')}",
+         f"Summary: '{os.path.join(out_dir, 'job_summary.md')}'",
+         f"Errors file: {'errors_issue.md' if issue_md else 'none'}",
+     ]
+     if jamf_attempted:
+         status.append(f"Jamf links added: {jamf_linked}/{jamf_total}")
+         if jamf_log_path:
+             status.append(f"Jamf lookup log: '{jamf_log_path}'")
+     else:
+         status.append("Jamf links: skipped (missing env or no uploads)")
+     logging.info(". ".join(status))
+
+     if strict and summary.get("errors"):
+         return 1
+     return 0