elspais 0.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,522 @@
1
+ """
2
+ elspais.commands.edit - Edit requirements command.
3
+
4
+ Provides functionality to modify requirements in-place:
5
+ - Change Implements references
6
+ - Change Status
7
+ - Move requirements between files
8
+ - Batch operations via JSON
9
+ """
10
+
11
+ import argparse
12
+ import json
13
+ import re
14
+ import sys
15
+ from pathlib import Path
16
+ from typing import Any, Dict, List, Optional
17
+
18
+
19
def run(args: argparse.Namespace) -> int:
    """Run the edit command.

    Resolves configuration and spec directories, then dispatches to
    batch mode (``--from-json``) or single-edit mode (``--req-id``).

    Args:
        args: Parsed CLI arguments.

    Returns:
        Process exit code: 0 on success, 1 on error.
    """
    from elspais.config.defaults import DEFAULT_CONFIG
    from elspais.config.loader import find_config_file, get_spec_directories, load_config

    # Load configuration: explicit --config wins, then an auto-discovered
    # config file, finally the built-in defaults.
    # (getattr is used uniformly; args may come from tests without all attrs.)
    config_path = getattr(args, 'config', None)
    if config_path is None:
        config_path = find_config_file(Path.cwd())
    if config_path and config_path.exists():
        config = load_config(config_path)
    else:
        config = DEFAULT_CONFIG

    # Resolve spec directories (empty/None spec_dir falls back to config).
    spec_dir = getattr(args, 'spec_dir', None) or None
    spec_dirs = get_spec_directories(spec_dir, config)
    if not spec_dirs:
        print("Error: No spec directories found", file=sys.stderr)
        return 1

    # Use first spec dir as base
    base_spec_dir = spec_dirs[0]

    dry_run = getattr(args, 'dry_run', False)
    validate_refs = getattr(args, 'validate_refs', False)

    # Batch mode takes precedence over single-edit mode.
    from_json = getattr(args, 'from_json', None)
    if from_json:
        return run_batch_edit(from_json, base_spec_dir, dry_run, validate_refs)

    # Handle single edit mode
    if getattr(args, 'req_id', None):
        return run_single_edit(args, base_spec_dir, dry_run)

    print("Error: Must specify --req-id or --from-json", file=sys.stderr)
    return 1
+
58
+
59
def run_batch_edit(
    json_source: str, spec_dir: Path, dry_run: bool, validate_refs: bool = False
) -> int:
    """Run batch edit from a JSON file or stdin.

    Args:
        json_source: Path to a JSON file, or ``-`` to read from stdin.
        spec_dir: Base spec directory to search for requirements.
        dry_run: If True, report changes without modifying files.
        validate_refs: If True, validate that implements references exist.

    Returns:
        Process exit code: 0 when all changes applied, 1 on any error.
    """
    # Load the change list; malformed JSON is reported as an error rather
    # than crashing with a traceback.
    try:
        if json_source == "-":
            changes = json.load(sys.stdin)
        else:
            json_path = Path(json_source)
            if not json_path.exists():
                print(f"Error: JSON file not found: {json_source}", file=sys.stderr)
                return 1
            changes = json.loads(json_path.read_text())
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        return 1

    if not isinstance(changes, list):
        print("Error: JSON must be a list of changes", file=sys.stderr)
        return 1

    results = batch_edit(spec_dir, changes, dry_run=dry_run, validate_refs=validate_refs)

    # Summarize outcomes.
    success_count = sum(1 for r in results if r.get("success"))
    error_count = len(results) - success_count

    if dry_run:
        print(f"[DRY RUN] Would apply {len(results)} changes")
    else:
        print(f"Applied {success_count} changes")

    if error_count > 0:
        print(f"Errors: {error_count}")
        for r in results:
            if not r.get("success"):
                print(f" - {r.get('req_id', 'unknown')}: {r.get('error', 'unknown error')}")
        return 1

    return 0
96
+
97
+
98
def run_single_edit(args: argparse.Namespace, spec_dir: Path, dry_run: bool) -> int:
    """Run a single-requirement edit (implements / status / move).

    Args:
        args: Parsed CLI arguments; may carry ``implements``, ``status``,
            and/or ``move_to``.
        spec_dir: Base spec directory to search for the requirement.
        dry_run: If True, report changes without modifying files.

    Returns:
        Process exit code: 0 on success, 1 on error.
    """
    req_id = args.req_id

    # Locate the file that contains the requirement.
    location = find_requirement_in_files(spec_dir, req_id)
    if not location:
        print(f"Error: Requirement {req_id} not found", file=sys.stderr)
        return 1

    file_path = location["file_path"]
    results = []

    # Apply implements change. None means "flag not given"; an empty
    # string still produces a list (clearing is expressed as "-" downstream).
    implements = getattr(args, 'implements', None)
    if implements is not None:
        impl_list = [i.strip() for i in implements.split(",")]
        result = modify_implements(file_path, req_id, impl_list, dry_run=dry_run)
        results.append(("implements", result))

    # Apply status change
    status = getattr(args, 'status', None)
    if status:
        result = modify_status(file_path, req_id, status, dry_run=dry_run)
        results.append(("status", result))

    # Apply move last: it relocates the requirement block to another file.
    move_to = getattr(args, 'move_to', None)
    if move_to:
        dest_path = spec_dir / move_to
        result = move_requirement(file_path, dest_path, req_id, dry_run=dry_run)
        results.append(("move", result))

    # No operation flag was supplied: nothing would be done, which is almost
    # certainly a user error -- report it instead of silently succeeding.
    if not results:
        print(
            "Error: No edit operation specified (use --implements, --status or --move-to)",
            file=sys.stderr,
        )
        return 1

    # Report each operation; fail fast on the first error.
    for op_name, result in results:
        # Surface the source-empty warning for moves before the status line.
        if op_name == "move" and result.get("source_empty") and not dry_run:
            print(f"INFO: Source file is now empty: {result.get('source_file')}")
        if result.get("success"):
            if dry_run:
                print(f"[DRY RUN] Would {op_name}: {req_id}")
            else:
                print(f"Updated {op_name}: {req_id}")
        else:
            print(f"Error in {op_name}: {result.get('error')}", file=sys.stderr)
            return 1

    return 0
143
+
144
+
145
def find_requirement_in_files(
    spec_dir: Path,
    req_id: str,
) -> Optional[Dict[str, Any]]:
    """Locate a requirement header in the spec tree.

    Scans every Markdown file under *spec_dir* (recursively) for a heading
    of the form ``# <req_id>: ...`` and reports where it was found.

    Args:
        spec_dir: Directory to search recursively.
        req_id: Requirement ID to find.

    Returns:
        Dict with ``file_path``, ``req_id`` and ``line_number`` keys, or
        None when no file contains the requirement.
    """
    header_re = re.compile(rf'^#\s*{re.escape(req_id)}:', re.MULTILINE)

    for candidate in spec_dir.rglob("*.md"):
        text = candidate.read_text()
        hit = header_re.search(text)
        if hit is None:
            continue
        # 1-based line number: count the newlines preceding the match.
        return {
            "file_path": candidate,
            "req_id": req_id,
            "line_number": text.count('\n', 0, hit.start()) + 1,
        }

    return None
175
+
176
+
177
def modify_implements(
    file_path: Path,
    req_id: str,
    new_implements: List[str],
    dry_run: bool = False,
) -> Dict[str, Any]:
    """
    Modify the Implements field of a requirement.

    Args:
        file_path: Path to the spec file
        req_id: Requirement ID to modify
        new_implements: New list of implements references (empty = set to "-")
        dry_run: If True, don't actually modify the file

    Returns:
        Dict with success, old_implements, new_implements, error
    """
    # Explicit encoding so behavior doesn't depend on the platform locale
    # (consistent with the utf-8 handling used by the hash command).
    content = file_path.read_text(encoding="utf-8")

    # Find the requirement header line: "# REQ-xxx: title"
    req_pattern = re.compile(rf'^(#\s*{re.escape(req_id)}:[^\n]*\n)', re.MULTILINE)
    req_match = req_pattern.search(content)

    if not req_match:
        return {"success": False, "error": f"Requirement {req_id} not found in {file_path}"}

    # Look for the **Implements**: field shortly after the header; 500
    # characters is ample for the metadata line.
    start_pos = req_match.end()
    search_region = content[start_pos:start_pos + 500]

    # The value runs until a "|" separator or end of line.
    impl_pattern = re.compile(r'(\*\*Implements\*\*:\s*)([^|\n]+)')
    impl_match = impl_pattern.search(search_region)

    if not impl_match:
        return {"success": False, "error": f"Could not find **Implements** for {req_id}"}

    # Extract old value; "-" denotes "no references".
    old_value = impl_match.group(2).strip()
    old_implements = [v.strip() for v in old_value.split(",")] if old_value != "-" else []

    # Build new value
    if new_implements:
        new_value = ", ".join(new_implements)
    else:
        new_value = "-"

    # Translate match positions (relative to search_region) into absolute
    # offsets within the file content.
    abs_start = start_pos + impl_match.start()
    abs_end = start_pos + impl_match.end()

    old_line = impl_match.group(0)
    new_line = impl_match.group(1) + new_value

    if old_line == new_line:
        return {
            "success": True,
            "old_implements": old_implements,
            "new_implements": new_implements,
            "no_change": True,
            "dry_run": dry_run,
        }

    # Apply change
    new_content = content[:abs_start] + new_line + content[abs_end:]

    if not dry_run:
        file_path.write_text(new_content, encoding="utf-8")

    return {
        "success": True,
        "old_implements": old_implements,
        "new_implements": new_implements,
        "dry_run": dry_run,
    }
252
+
253
+
254
def modify_status(
    file_path: Path,
    req_id: str,
    new_status: str,
    dry_run: bool = False,
) -> Dict[str, Any]:
    """
    Modify the Status field of a requirement.

    Args:
        file_path: Path to the spec file
        req_id: Requirement ID to modify
        new_status: New status value
        dry_run: If True, don't actually modify the file

    Returns:
        Dict with success, old_status, new_status, error
    """
    # Explicit encoding so behavior doesn't depend on the platform locale
    # (consistent with the utf-8 handling used by the hash command).
    content = file_path.read_text(encoding="utf-8")

    # Find the requirement header line: "# REQ-xxx: title"
    req_pattern = re.compile(rf'^(#\s*{re.escape(req_id)}:[^\n]*\n)', re.MULTILINE)
    req_match = req_pattern.search(content)

    if not req_match:
        return {"success": False, "error": f"Requirement {req_id} not found in {file_path}"}

    # Look for the **Status**: field shortly after the header; 500
    # characters is ample for the metadata line.
    start_pos = req_match.end()
    search_region = content[start_pos:start_pos + 500]

    # Status values are single words (e.g. Draft, Active).
    status_pattern = re.compile(r'(\*\*Status\*\*:\s*)(\w+)')
    status_match = status_pattern.search(search_region)

    if not status_match:
        return {"success": False, "error": f"Could not find **Status** for {req_id}"}

    old_status = status_match.group(2)

    if old_status == new_status:
        return {
            "success": True,
            "old_status": old_status,
            "new_status": new_status,
            "no_change": True,
            "dry_run": dry_run,
        }

    # Translate match positions (relative to search_region) into absolute
    # offsets within the file content.
    abs_start = start_pos + status_match.start()
    abs_end = start_pos + status_match.end()

    new_line = status_match.group(1) + new_status

    # Apply change
    new_content = content[:abs_start] + new_line + content[abs_end:]

    if not dry_run:
        file_path.write_text(new_content, encoding="utf-8")

    return {
        "success": True,
        "old_status": old_status,
        "new_status": new_status,
        "dry_run": dry_run,
    }
320
+
321
+
322
def move_requirement(
    source_file: Path,
    dest_file: Path,
    req_id: str,
    dry_run: bool = False,
) -> Dict[str, Any]:
    """
    Move a requirement block from one file to another.

    Args:
        source_file: Source spec file
        dest_file: Destination spec file (created if missing, along with
            any missing parent directories)
        req_id: Requirement ID to move
        dry_run: If True, don't actually modify files

    Returns:
        Dict with success, source_file, dest_file, source_empty, error
    """
    source_content = source_file.read_text(encoding="utf-8")

    # Match the whole requirement block:
    #   # REQ-xxx: title ... *End* ... (optionally followed by a "---" line)
    req_pattern = re.compile(
        rf'(^#\s*{re.escape(req_id)}:[^\n]*\n'
        rf'.*?'
        rf'\*End\*[^\n]*\n'
        rf'(?:---\n)?)',
        re.MULTILINE | re.DOTALL
    )

    req_match = req_pattern.search(source_content)

    if not req_match:
        return {"success": False, "error": f"Requirement {req_id} not found in {source_file}"}

    req_block = req_match.group(0)

    # Normalize: the block written to the destination always ends with a
    # "---" separator line.
    if not req_block.endswith("---\n"):
        req_block = req_block.rstrip() + "\n---\n"

    # Remove the block from the source and collapse leftover blank runs.
    new_source_content = source_content[:req_match.start()] + source_content[req_match.end():]
    new_source_content = re.sub(r'\n{3,}', '\n\n', new_source_content)

    # Append the block to the destination, separated by a blank line.
    dest_content = dest_file.read_text(encoding="utf-8") if dest_file.exists() else ""
    if dest_content and not dest_content.endswith("\n"):
        dest_content += "\n"
    if dest_content and not dest_content.endswith("\n\n"):
        dest_content += "\n"
    new_dest_content = dest_content + req_block

    # Flag when the source file becomes empty so the caller can warn.
    source_empty = len(new_source_content.strip()) == 0

    if not dry_run:
        # Ensure the destination directory exists (e.g. when moving into a
        # new subdirectory of the spec tree).
        dest_file.parent.mkdir(parents=True, exist_ok=True)
        source_file.write_text(new_source_content, encoding="utf-8")
        dest_file.write_text(new_dest_content, encoding="utf-8")

    return {
        "success": True,
        "source_file": str(source_file),
        "dest_file": str(dest_file),
        "source_empty": source_empty,
        "dry_run": dry_run,
    }
390
+
391
+
392
def collect_all_req_ids(spec_dir: Path) -> set:
    """
    Collect all requirement IDs from spec directory.

    Args:
        spec_dir: Directory to scan recursively for *.md files

    Returns:
        Set containing each requirement ID in both full form
        (e.g. "REQ-p00001") and short form (e.g. "p00001").
    """
    # `re` is already imported at module level; no local import needed.
    req_ids: set = set()
    # Headers look like "# REQ-xxxx: title"; the captured group always
    # starts with "REQ-" by construction of the pattern.
    pattern = re.compile(r'^#\s*(REQ-[A-Za-z0-9-]+):', re.MULTILINE)

    for md_file in spec_dir.rglob("*.md"):
        content = md_file.read_text(encoding="utf-8")
        for match in pattern.finditer(content):
            full_id = match.group(1)
            req_ids.add(full_id)
            req_ids.add(full_id[len("REQ-"):])  # short form

    return req_ids
417
+
418
+
419
def batch_edit(
    spec_dir: Path,
    changes: List[Dict[str, Any]],
    dry_run: bool = False,
    validate_refs: bool = False,
) -> List[Dict[str, Any]]:
    """
    Apply batch edits from a list of change specifications.

    Args:
        spec_dir: Base spec directory
        changes: List of change dicts, each with req_id and one of:
            - implements: List[str]
            - status: str
            - move_to: str (relative path)
        dry_run: If True, don't actually modify files
        validate_refs: If True, validate that implements references exist

    Returns:
        List of result dicts, one per change, in input order
    """
    results = []

    # Collect the universe of known IDs once, up front, when validating.
    valid_refs: Optional[set] = None
    if validate_refs:
        valid_refs = collect_all_req_ids(spec_dir)

    for change in changes:
        req_id = change.get("req_id")
        if not req_id:
            results.append({"success": False, "error": "Missing req_id"})
            continue

        # Find the requirement
        location = find_requirement_in_files(spec_dir, req_id)
        if not location:
            results.append({
                "success": False,
                "req_id": req_id,
                "error": f"Requirement {req_id} not found",
            })
            continue

        file_path = location["file_path"]
        result: Dict[str, Any] = {"req_id": req_id, "success": True}

        # Validate implements references if enabled.
        # NOTE: compare against None (not truthiness) so an empty spec tree
        # still rejects unknown references instead of skipping validation.
        if valid_refs is not None and "implements" in change:
            invalid_refs = []
            for ref in change["implements"]:
                # Accept both short ("p00001") and full ("REQ-p00001") forms
                if ref not in valid_refs and f"REQ-{ref}" not in valid_refs:
                    invalid_refs.append(ref)
            if invalid_refs:
                results.append({
                    "req_id": req_id,
                    "success": False,
                    "error": f"Invalid implements references: {', '.join(invalid_refs)}",
                })
                continue

        # Apply implements change
        if "implements" in change:
            impl_result = modify_implements(
                file_path, req_id, change["implements"], dry_run=dry_run
            )
            if not impl_result["success"]:
                impl_result["req_id"] = req_id
                results.append(impl_result)
                continue
            result["implements"] = impl_result

        # Apply status change
        if "status" in change:
            status_result = modify_status(
                file_path, req_id, change["status"], dry_run=dry_run
            )
            if not status_result["success"]:
                status_result["req_id"] = req_id
                results.append(status_result)
                continue
            result["status"] = status_result

        # Apply move last: it changes which file holds the requirement.
        if "move_to" in change:
            dest_path = spec_dir / change["move_to"]
            move_result = move_requirement(
                file_path, dest_path, req_id, dry_run=dry_run
            )
            if not move_result["success"]:
                move_result["req_id"] = req_id
                results.append(move_result)
                continue
            result["move"] = move_result

        result["dry_run"] = dry_run
        results.append(result)

    return results
@@ -0,0 +1,174 @@
1
+ """
2
+ elspais.commands.hash_cmd - Hash management command.
3
+
4
+ Verify and update requirement hashes.
5
+ """
6
+
7
+ import argparse
8
+ import sys
9
+ from pathlib import Path
10
+
11
+ from elspais.config.defaults import DEFAULT_CONFIG
12
+ from elspais.config.loader import find_config_file, get_spec_directories, load_config
13
+ from elspais.core.hasher import calculate_hash, verify_hash
14
+ from elspais.core.models import Requirement
15
+ from elspais.core.parser import RequirementParser
16
+ from elspais.core.patterns import PatternConfig
17
+
18
+
19
def run(args: argparse.Namespace) -> int:
    """Dispatch the hash sub-command.

    Routes to ``run_verify`` or ``run_update`` based on
    ``args.hash_action``; prints usage when no action was given.

    Returns:
        Process exit code (0 on success, 1 on error/unknown action).
    """
    action = args.hash_action
    if not action:
        print("Usage: elspais hash {verify|update}")
        return 1

    if action == "verify":
        return run_verify(args)
    if action == "update":
        return run_update(args)

    # Unknown action: fail without output (argparse normally prevents this).
    return 1
31
+
32
+
33
def run_verify(args: argparse.Namespace) -> int:
    """Verify all requirement hashes.

    Reports requirements with missing hashes and those whose stored hash
    does not match the recomputed one.

    Returns:
        0 when no mismatches were found (missing hashes are reported but
        do not fail the command), 1 on mismatches or load failure.
    """
    config, requirements = load_requirements(args)
    if not requirements:
        return 1

    hash_length = config.get("validation", {}).get("hash_length", 8)
    algorithm = config.get("validation", {}).get("hash_algorithm", "sha256")

    mismatches = []
    missing = []

    for req_id, req in requirements.items():
        if not req.hash:
            missing.append(req_id)
        elif not verify_hash(req.body, req.hash, length=hash_length, algorithm=algorithm):
            # Compute the expected hash only when needed for reporting,
            # instead of unconditionally for every requirement.
            expected = calculate_hash(req.body, length=hash_length, algorithm=algorithm)
            mismatches.append((req_id, req.hash, expected))

    # Report results
    if missing:
        print(f"Missing hashes: {len(missing)}")
        for req_id in missing:
            print(f" - {req_id}")

    if mismatches:
        print(f"\nHash mismatches: {len(mismatches)}")
        for req_id, current, expected in mismatches:
            print(f" - {req_id}: {current} (expected: {expected})")

    if not missing and not mismatches:
        print(f"✓ All {len(requirements)} hashes verified")
        return 0

    return 1 if mismatches else 0
69
+
70
+
71
def run_update(args: argparse.Namespace) -> int:
    """Recompute requirement hashes and write back any that are stale.

    Honors ``args.req_id`` to restrict the update to one requirement and
    ``args.dry_run`` to preview without writing.

    Returns:
        Process exit code (0 on success, 1 on error).
    """
    config, requirements = load_requirements(args)
    if not requirements:
        return 1

    validation = config.get("validation", {})
    hash_length = validation.get("hash_length", 8)
    algorithm = validation.get("hash_algorithm", "sha256")

    # Narrow to a single requirement when one was requested.
    if args.req_id:
        target = requirements.get(args.req_id)
        if target is None:
            print(f"Requirement not found: {args.req_id}")
            return 1
        requirements = {args.req_id: target}

    # Collect every requirement whose stored hash differs from the
    # freshly computed one.
    updates = []
    for req_id, req in requirements.items():
        fresh = calculate_hash(req.body, length=hash_length, algorithm=algorithm)
        if fresh != req.hash:
            updates.append((req_id, req, fresh))

    if not updates:
        print("All hashes are up to date")
        return 0

    # Either preview or apply the updates.
    if args.dry_run:
        print(f"Would update {len(updates)} hashes:")
        for req_id, req, new_hash in updates:
            print(f" {req_id}: {req.hash or '(none)'} -> {new_hash}")
    else:
        print(f"Updating {len(updates)} hashes...")
        for req_id, req, new_hash in updates:
            update_hash_in_file(req, new_hash)
            print(f" ✓ {req_id}")

    return 0
111
+
112
+
113
def load_requirements(args: argparse.Namespace) -> tuple:
    """Load configuration and parse all requirements.

    Returns:
        A ``(config, requirements)`` tuple. ``requirements`` is an empty
        dict when spec directories are missing or parsing fails; an error
        is printed to stderr in those cases.
    """
    cfg_path = args.config or find_config_file(Path.cwd())
    config = load_config(cfg_path) if cfg_path and cfg_path.exists() else DEFAULT_CONFIG

    spec_dirs = get_spec_directories(args.spec_dir, config)
    if not spec_dirs:
        print("Error: No spec directories found", file=sys.stderr)
        return config, {}

    spec_config = config.get("spec", {})
    parser = RequirementParser(
        PatternConfig.from_dict(config.get("patterns", {})),
        no_reference_values=spec_config.get("no_reference_values"),
    )

    try:
        requirements = parser.parse_directories(
            spec_dirs, skip_files=spec_config.get("skip_files", [])
        )
    except Exception as e:
        print(f"Error parsing requirements: {e}", file=sys.stderr)
        return config, {}

    return config, requirements
139
+
140
+
141
def update_hash_in_file(req: Requirement, new_hash: str) -> None:
    """Update the hash in the requirement's source file.

    The replacement is scoped to the specific requirement's end marker
    (identified by title) to avoid accidentally updating other requirements
    in the same file that might have the same hash value. ``count=1``
    additionally guarantees at most one marker is rewritten even when two
    requirements happen to share the same title.
    """
    if not req.file_path:
        # No backing file (e.g. requirement constructed in memory).
        return

    content = req.file_path.read_text(encoding="utf-8")

    import re  # this module has no top-level `re` import

    if req.hash:
        # Replace the existing hash on this requirement's end marker:
        #   *End* *Title* | **Hash**: oldhash
        content = re.sub(
            rf"(\*End\*\s+\*{re.escape(req.title)}\*\s*\|\s*)\*\*Hash\*\*:\s*{re.escape(req.hash)}",
            rf"\1**Hash**: {new_hash}",
            content,
            count=1,
        )
    else:
        # No hash yet: append " | **Hash**: XXXX" to the bare end marker,
        # skipping markers that already carry a hash (negative lookahead).
        content = re.sub(
            rf"(\*End\*\s+\*{re.escape(req.title)}\*)(?!\s*\|\s*\*\*Hash\*\*)",
            rf"\1 | **Hash**: {new_hash}",
            content,
            count=1,
        )

    req.file_path.write_text(content, encoding="utf-8")