llm-secure-cli 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_cli/__init__.py +0 -0
- llm_cli/apps/__init__.py +0 -0
- llm_cli/apps/cli_common.py +145 -0
- llm_cli/apps/configure.py +433 -0
- llm_cli/apps/identity_tool.py +71 -0
- llm_cli/apps/mcp_server.py +168 -0
- llm_cli/apps/model_listing.py +323 -0
- llm_cli/apps/pqc_decrypt.py +66 -0
- llm_cli/apps/unified.py +193 -0
- llm_cli/clients/__init__.py +0 -0
- llm_cli/clients/base.py +408 -0
- llm_cli/clients/base_helpers.py +241 -0
- llm_cli/clients/claude.py +209 -0
- llm_cli/clients/command_handler.py +517 -0
- llm_cli/clients/completer.py +94 -0
- llm_cli/clients/config.py +203 -0
- llm_cli/clients/config_models.py +140 -0
- llm_cli/clients/exceptions.py +27 -0
- llm_cli/clients/gemini.py +477 -0
- llm_cli/clients/gemini_handlers.py +272 -0
- llm_cli/clients/grok.py +430 -0
- llm_cli/clients/managers.py +376 -0
- llm_cli/clients/mcp_manager.py +244 -0
- llm_cli/clients/ollama.py +200 -0
- llm_cli/clients/openai.py +494 -0
- llm_cli/clients/registry.py +85 -0
- llm_cli/clients/session.py +533 -0
- llm_cli/clients/session_ui.py +144 -0
- llm_cli/clients/tool_executor.py +455 -0
- llm_cli/consts.py +15 -0
- llm_cli/mamba_core/__init__.py +0 -0
- llm_cli/mamba_core/mamba_numpy.py +625 -0
- llm_cli/mamba_core/utils.py +102 -0
- llm_cli/mcp_lib.py +510 -0
- llm_cli/modules/__init__.py +0 -0
- llm_cli/modules/custom_markdown.py +46 -0
- llm_cli/modules/media_utils.py +179 -0
- llm_cli/modules/models.py +76 -0
- llm_cli/modules/tool_registry.py +312 -0
- llm_cli/modules/tools/__init__.py +0 -0
- llm_cli/modules/tools/explorer.py +94 -0
- llm_cli/modules/tools/file_ops.py +467 -0
- llm_cli/modules/tools/interpreter.py +303 -0
- llm_cli/modules/tools/web.py +89 -0
- llm_cli/security/__init__.py +4 -0
- llm_cli/security/audit.py +217 -0
- llm_cli/security/cass.py +100 -0
- llm_cli/security/identity.py +315 -0
- llm_cli/security/integrity.py +581 -0
- llm_cli/security/intent_analyzer.py +165 -0
- llm_cli/security/path_validator.py +69 -0
- llm_cli/security/policy.py +368 -0
- llm_cli/security/pqc.py +475 -0
- llm_cli/security/resource_manager.py +65 -0
- llm_cli/security/sentinel.py +362 -0
- llm_cli/security/static_analyzer.py +234 -0
- llm_cli/ui.py +33 -0
- llm_secure_cli-1.0.1.dist-info/METADATA +219 -0
- llm_secure_cli-1.0.1.dist-info/RECORD +63 -0
- llm_secure_cli-1.0.1.dist-info/WHEEL +5 -0
- llm_secure_cli-1.0.1.dist-info/entry_points.txt +5 -0
- llm_secure_cli-1.0.1.dist-info/licenses/LICENSE +201 -0
- llm_secure_cli-1.0.1.dist-info/top_level.txt +1 -0
llm_cli/__init__.py
ADDED
|
File without changes
|
llm_cli/apps/__init__.py
ADDED
|
File without changes
|
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
# llm_cli/apps/cli_common.py
|
|
2
|
+
|
|
3
|
+
"""Shared CLI entry point functionality for all LLM clients."""
|
|
4
|
+
|
|
5
|
+
import argparse
|
|
6
|
+
import sys
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from rich.markup import escape
|
|
11
|
+
|
|
12
|
+
from llm_cli.clients.base import BaseLlmClient, console
|
|
13
|
+
from llm_cli.clients.exceptions import ProviderSwitchRequest
|
|
14
|
+
from llm_cli.modules.tool_registry import registry
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class ClientConfig:
    """Configuration for CLI entry point.

    Bundles everything the shared argument parser and runner need to know
    about one concrete provider CLI.
    """

    # Concrete client class to instantiate (a BaseLlmClient subclass).
    client_class: type[BaseLlmClient]
    # Text shown in the argparse --help header.
    description: str
    # When True, a -p/--provider option is added to the parser.
    supports_provider_selection: bool = False
    # Explicit provider choices; None -> fall back to the client registry's aliases.
    provider_choices: list[str] | None = None
    # Extra (flag, argparse-kwargs) pairs appended to the standard parser.
    extra_args: list[tuple[str, dict[str, Any]]] = field(default_factory=list)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def create_standard_parser(config: ClientConfig) -> argparse.ArgumentParser:
    """Build the argument parser shared by every client entry point.

    Adds the positional sources, model selection, output/MCP flags, and
    any provider-specific extras declared on ``config``.
    """
    parser = argparse.ArgumentParser(
        description=config.description, formatter_class=argparse.RawTextHelpFormatter
    )

    parser.add_argument("sources", nargs="*", help="Sources (text, files, URLs).")
    parser.add_argument(
        "-m", "--model", default="default", help="Model alias (default: 'default')"
    )

    if config.supports_provider_selection:
        # Imported lazily so clients without provider switching skip the registry.
        from llm_cli.clients.registry import client_registry

        provider_choices = config.provider_choices or client_registry.list_aliases()
        parser.add_argument(
            "-p",
            "--provider",
            choices=provider_choices,
            help="Provider to use",
        )

    parser.add_argument(
        "-s", "--stdout", action="store_true", help="Print to stdout and exit"
    )
    parser.add_argument("--raw", action="store_true", help="Disable Markdown rendering")
    parser.add_argument("--mcp", action="store_true", help="Enable MCP integration")
    parser.add_argument(
        "--mcp-server", action="store_true", help="Run as an MCP server"
    )
    parser.add_argument("--session", help="Load a saved session JSON file on startup")

    # Provider-specific extras declared by the caller.
    for flag, options in config.extra_args:
        parser.add_argument(flag, **options)

    return parser
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def run_client_cli(config: ClientConfig) -> None:
    """Parse CLI arguments, construct the client, and run the main loop.

    Shared entry point used by every provider-specific CLI. Validates
    mutually exclusive flags, optionally starts an MCP server, then
    dispatches to one-shot source processing or the interactive loop.
    Always shuts the tool registry down on exit.
    """
    parser = create_standard_parser(config)
    args = parser.parse_args()

    if args.stdout and args.mcp:
        console.print("[red]Error: --stdout and --mcp cannot be used together.[/red]")
        sys.exit(1)

    if args.stdout and args.mcp_server:
        console.print(
            "[red]Error: --stdout and --mcp-server cannot be used together.[/red]"
        )
        sys.exit(1)

    # Check if MCP module is installed when --mcp or --mcp-server is used
    if args.mcp or args.mcp_server:
        # Custom MCP implementation used on Termux
        pass

    if args.mcp_server:
        try:
            from llm_cli.apps.mcp_server import main as run_mcp_server

            run_mcp_server()
            sys.exit(0)
        except Exception as e:
            # BUG FIX: `except (ImportError, Exception)` was redundant --
            # ImportError is already a subclass of Exception. SystemExit
            # from sys.exit(0) above still propagates (not an Exception).
            console.print(f"[red]Failed to start MCP server: {e}[/red]")
            sys.exit(1)

    # Piped input implies non-interactive (stdout) mode.
    stdout = args.stdout or not sys.stdin.isatty()

    initial_tools: list[str] | None = None
    enable_mcp = args.mcp

    if stdout:
        # Non-interactive output: disable MCP and tools entirely.
        enable_mcp = False
        initial_tools = []

    client_kwargs = {
        "initial_model_alias": args.model,
        "stdout": stdout,
        "render_markdown": not args.raw,
        "initial_tools": initial_tools,
        "disable_system_prompt": False,
        "enable_mcp": enable_mcp,
        "live_debug": False,
    }

    if config.supports_provider_selection and getattr(args, "provider", None):
        client_kwargs["initial_provider"] = args.provider

    client = config.client_class(**client_kwargs)

    if args.session:
        client.load_session(args.session)

    try:
        if not sys.stdin.isatty():
            # Piped stdin is treated as the first source, before CLI args.
            stdin_input = sys.stdin.read().strip()
            all_sources = ([stdin_input] if stdin_input else []) + args.sources
            if all_sources:
                client.process_sources(all_sources)
            else:
                client.talk()
        elif args.sources:
            client.process_sources(args.sources)
        else:
            if stdout:
                console.print("[red]Error: --stdout requires input[/red]")
                console.print("[red](from stdin or arguments).[/red]")
                sys.exit(1)
            client.talk()
    except ProviderSwitchRequest as e:
        console.print(
            f"[bold red]Switching to provider '{escape(e.provider)}' "
            "is not supported here.[/bold red]"
        )
        console.print("Use [bold cyan]llm-cli[/bold cyan] for switching.")
        sys.exit(0)
    finally:
        # Ensure all resources (like browser sessions) are cleaned up on exit.
        registry.shutdown()
|
|
@@ -0,0 +1,433 @@
|
|
|
1
|
+
# llm_cli/apps/configure.py
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import re
|
|
5
|
+
import shlex
|
|
6
|
+
import sys
|
|
7
|
+
import tomllib
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
import tomli_w
|
|
12
|
+
from prompt_toolkit import prompt
|
|
13
|
+
from prompt_toolkit.completion import PathCompleter
|
|
14
|
+
from prompt_toolkit.shortcuts import CompleteStyle
|
|
15
|
+
from rich import print
|
|
16
|
+
|
|
17
|
+
from llm_cli.consts import CONFIG_DIR, CONFIG_FILE_PATH
|
|
18
|
+
|
|
19
|
+
# Define the path for the configuration directory and file
CONFIG_FILE = CONFIG_FILE_PATH

# Load default values from the external TOML shipped next to this module.
# A missing defaults file is tolerated: DEFAULTS simply stays empty and
# callers fall back to their own hard-coded defaults.
DEFAULTS_FILE = Path(__file__).parent / "defaults.toml"
if DEFAULTS_FILE.exists():
    with DEFAULTS_FILE.open("rb") as f:
        DEFAULTS = tomllib.load(f)
else:
    DEFAULTS = {}
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def load_config() -> dict[str, Any]:
    """Load the user configuration from disk.

    Returns an empty dict when the file is absent or cannot be parsed;
    a parse failure also prints a warning.
    """
    if not CONFIG_FILE.exists():
        return {}
    with CONFIG_FILE.open("rb") as handle:
        try:
            return tomllib.load(handle)
        except Exception:
            print(
                f"Warning: Could not parse {CONFIG_FILE}. Starting with empty config."
            )
            return {}
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def save_config(config: dict[str, Any]) -> None:
    """Saves the configuration dictionary to the file."""
    CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    # Ensure secure permissions: 0o600 keeps API keys readable only by the
    # owner. NOTE(review): touch() applies the mode only when it creates the
    # file; a pre-existing config keeps whatever permissions it already has.
    CONFIG_FILE.touch(mode=0o600, exist_ok=True)
    with CONFIG_FILE.open("wb") as f:
        tomli_w.dump(config, f)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def prompt_input(
    prompt_text: str,
    current_value: Any = None,
    secret: bool = False,
    completer: Any = None,
) -> str:
    """Display a prompt and return the user's (stripped) input.

    If the user enters nothing, the stringified ``current_value`` is
    returned instead (or "" when there is no current value). Secret string
    values longer than 8 characters are displayed masked as ``...XXXX``.
    KeyboardInterrupt/EOFError propagate to the caller.
    """
    if current_value is not None:
        if secret and isinstance(current_value, str) and len(current_value) > 8:
            display_val = f"...{current_value[-4:]}"
        else:
            display_val = str(current_value)
        prompt_str = f"{prompt_text} [{display_val}]: "
    else:
        prompt_str = f"{prompt_text}: "

    # CLEANUP: the original wrapped this call in
    # `try: ... except (KeyboardInterrupt, EOFError): raise`, which is a
    # no-op handler -- removed; the exceptions still propagate unchanged.
    value = prompt(
        prompt_str,
        completer=completer,
        complete_style=CompleteStyle.READLINE_LIKE,
        is_password=secret,
    ).strip()

    return value if value else (str(current_value) if current_value is not None else "")
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def prompt_bool(prompt_text: str, current_value: bool = False) -> bool:
    """Prompt for a yes/no answer.

    Empty input keeps ``current_value``; any answer starting with "y"
    (case-insensitive) is True. KeyboardInterrupt/EOFError propagate.
    """
    default_str = "Y/n" if current_value else "y/N"
    # CLEANUP: removed the original no-op
    # `except (KeyboardInterrupt, EOFError): raise` wrapper.
    val = (
        prompt(
            f"{prompt_text} ({default_str}): ",
            complete_style=CompleteStyle.READLINE_LIKE,
        )
        .strip()
        .lower()
    )

    if not val:
        return current_value
    # BUG FIX: the original used `startswith(("y", "y"))` -- a duplicated
    # tuple element equivalent to a single "y" check.
    return val.startswith("y")
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def prompt_list(prompt_text: str, current_value: list[str] | None = None) -> list[str]:
    """Prompt for a comma-separated list; returns the cleaned items."""
    existing = ", ".join(current_value) if current_value else ""
    answer = prompt_input(prompt_text + " (comma-separated)", existing)
    if not answer:
        return []
    items: list[str] = []
    for piece in answer.split(","):
        piece = piece.strip()
        if piece:
            items.append(piece)
    return items
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def configure_provider(config: dict[str, Any], provider: str, name: str) -> None:
    """Interactively configures a specific LLM provider.

    Args:
        config: Full configuration mapping; mutated in place.
        provider: Config-section key (e.g. "google", "openai", "brave").
        name: Human-readable provider name used in the prompts.
    """
    print(f"\n--- {name} Configuration ---")
    # Default the yes/no answer to whether this provider is already configured.
    if not prompt_bool(f"Configure {name}?", provider in config):
        return

    p_config = config.setdefault(provider, {})

    if provider == "brave":
        p_config["api_key"] = prompt_input(
            "API Key", p_config.get("api_key"), secret=True
        )
        return  # Brave only needs API Key
    elif provider == "ollama":
        p_config["api_url"] = prompt_input(
            "Ollama API URL",
            p_config.get("api_url", "http://localhost:11434/v1/chat/completions"),
        )
        p_config["api_key"] = prompt_input(
            "API Key (optional)", p_config.get("api_key"), secret=True
        )
    else:
        p_config["api_key"] = prompt_input(
            "API Key", p_config.get("api_key"), secret=True
        )

    p_config["system_prompt"] = prompt_input(
        "System Prompt (Optional)", p_config.get("system_prompt")
    )
    p_config["disable_date_prompt"] = prompt_bool(
        "Disable automatic date prompt?", p_config.get("disable_date_prompt", False)
    )

    print(f"\nModel Aliases for {name} (Press Enter to keep default):")
    m_config = p_config.setdefault("models", {})

    # Configure default models and aliases
    provider_defaults = DEFAULTS.get(provider, {}).get("models", {})
    for alias, def_model in provider_defaults.items():
        current_val = m_config.get(alias, def_model)
        user_input = prompt_input(f"Model for alias '{alias}'", current_val)

        # If the input looks like a dictionary string (common when defaults have dicts),
        # try to convert it back to a real dictionary so tomli_w saves it correctly.
        if isinstance(user_input, str) and user_input.startswith("{"):
            try:
                import ast

                # literal_eval (not eval) keeps this safe on arbitrary input.
                parsed = ast.literal_eval(user_input)
                if isinstance(parsed, dict):
                    m_config[alias] = parsed
                else:
                    m_config[alias] = user_input
            except (ValueError, SyntaxError):
                # Not valid Python literal syntax -- store as plain string.
                m_config[alias] = user_input
        else:
            m_config[alias] = user_input
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def configure_general(config: dict[str, Any]) -> None:
    """Configures general application settings, including data paths."""
    print("\n--- General Settings ---")
    g_config = config.setdefault("general", {})

    providers = ["google", "openai", "anthropic", "xai", "ollama"]
    current_p = g_config.get("unified_default_provider", "google")
    print(f"Available providers: {', '.join(providers)}")
    # NOTE(review): the answer is not validated against `providers`.
    g_config["unified_default_provider"] = prompt_input("Default Provider", current_p)

    print("\nBehavior Settings:")
    # int() raises ValueError on non-numeric input, which aborts the wizard.
    g_config["request_timeout"] = int(
        prompt_input("Request Timeout (seconds)", g_config.get("request_timeout", 1800))
    )
    g_config["command_timeout"] = int(
        prompt_input(
            "Shell Command Timeout (seconds)",
            g_config.get("command_timeout", 300),
        )
    )
    g_config["max_command_memory_mb"] = int(
        prompt_input(
            "Max Command Memory (MB)",
            g_config.get("max_command_memory_mb", 1024),
        )
    )
    g_config["max_output_length"] = int(
        prompt_input(
            "Default Tool Output Max Length (chars)",
            g_config.get("max_output_length", 10000),
        )
    )
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
def configure_security(config: dict[str, Any]) -> None:
    """Configures security settings.

    Covers allowed env vars, missing-token policy, default roles,
    allowed/blocked paths, static-analysis strictness, and the optional
    dual-model intent analyzer. Mutates ``config`` in place.
    """
    print("\n--- Security Settings ---")
    s_config = config.setdefault("security", {})

    # Configure Allowed Environment Variables
    current_allowed_env = s_config.get("allowed_env_vars", [])
    print(f"Current allowed environment variables: {current_allowed_env}")
    if prompt_bool("Modify allowed environment variables?", False):
        new_allowed_env = prompt_list(
            "Allowed Environment Variables (e.g. 'PYTHONPATH, CUDA_VISIBLE_DEVICES')",
            current_allowed_env,
        )
        s_config["allowed_env_vars"] = new_allowed_env

    # Configure Missing Token Policy
    s_config["missing_token_policy"] = prompt_input(
        "Missing Token Policy (guest/deny)",
        s_config.get("missing_token_policy", "guest"),
    )

    # Configure Default Roles; fall back to packaged defaults when unset.
    current_roles = s_config.get("default_roles", [])
    if not current_roles:
        current_roles = DEFAULTS.get("security", {}).get("default_roles", ["user"])
    print(f"Current default roles: {current_roles}")
    print(
        "[yellow]Warning: If 'admin' is not included in the roles, "
        "some sensitive tools may be restricted.[/yellow]"
    )
    if prompt_bool("Modify default roles?", False):
        new_roles = prompt_list("Default Roles (e.g. 'admin, user')", current_roles)
        s_config["default_roles"] = new_roles

    # Configure Allowed Paths
    current_allowed_paths = s_config.get("allowed_paths", ["."])
    print(f"\nCurrent allowed paths: {current_allowed_paths}")
    if prompt_bool("Modify allowed paths?", False):
        new_allowed_paths = prompt_list(
            "Allowed Paths (e.g. '.', '~', '/mnt/data')", current_allowed_paths
        )
        s_config["allowed_paths"] = new_allowed_paths

    # Configure Blocked Paths; fall back to packaged defaults when unset.
    current_blocked_paths = s_config.get("blocked_paths", [])
    if not current_blocked_paths:
        current_blocked_paths = DEFAULTS.get("security", {}).get("blocked_paths", [])
    print(f"Current blocked paths: {current_blocked_paths}")
    if prompt_bool("Modify blocked paths?", False):
        new_blocked_paths = prompt_list(
            "Blocked Paths (e.g. '/etc', '/root', '/var')", current_blocked_paths
        )
        s_config["blocked_paths"] = new_blocked_paths

    # Configure Static Analysis Error setting
    current_sa_is_error = s_config.get(
        "static_analysis_is_error",
        DEFAULTS.get("security", {}).get("static_analysis_is_error", True),
    )
    s_config["static_analysis_is_error"] = prompt_bool(
        "Treat static analysis warnings as errors?", current_sa_is_error
    )

    # Configure Intent Analyzer (New!)
    print("\n--- Intent Analyzer (Dual-Model Guardrails) ---")
    current_ia_enabled = s_config.get("intent_analyzer_enabled", False)
    if prompt_bool("Enable Intent Analyzer?", current_ia_enabled):
        s_config["intent_analyzer_enabled"] = True

        current_ia_provider = s_config.get("intent_analyzer_provider", "google")
        s_config["intent_analyzer_provider"] = prompt_input(
            "Verifier Provider (e.g., google, openai)", current_ia_provider
        )

        current_ia_model = s_config.get(
            "intent_analyzer_model", "gemini-flash-lite-latest"
        )
        s_config["intent_analyzer_model"] = prompt_input(
            "Verifier Model Name", current_ia_model
        )
    else:
        s_config["intent_analyzer_enabled"] = False
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
def configure_mcp(config: dict[str, Any]) -> None:
    """Configures MCP servers.

    Runs an add/remove loop over the ``mcp_servers`` list. Each entry is
    a dict with "name", "command", "args" and optionally "zero_trust".
    """
    print("\n--- MCP (Model Context Protocol) Servers ---")
    if not prompt_bool("Configure MCP servers?", "mcp_servers" in config):
        return

    mcp_servers = config.get("mcp_servers", [])

    while True:
        if mcp_servers:
            print("\nCurrent MCP Servers:")
            # Display is 1-based to match the remove prompt below.
            for i, srv in enumerate(mcp_servers):
                print(f"{i + 1}. {srv.get('name')} ({srv.get('command')})")

        try:
            choice = prompt(
                "\nOptions: [a]dd server, [r]emove server, [d]one: ",
                complete_style=CompleteStyle.READLINE_LIKE,
            ).lower()
        except (KeyboardInterrupt, EOFError):
            raise

        if choice == "a":
            name = prompt(
                "Server Name: ", complete_style=CompleteStyle.READLINE_LIKE
            ).strip()
            cmd = prompt(
                "Command (e.g. ssh, docker, npx): ",
                complete_style=CompleteStyle.READLINE_LIKE,
                completer=PathCompleter(expanduser=True),
            ).strip()
            args_str = prompt(
                "Arguments (space separated): ",
                complete_style=CompleteStyle.READLINE_LIKE,
            ).strip()
            # shlex keeps quoted arguments together (e.g. paths with spaces).
            args = shlex.split(args_str)

            zt_enabled = prompt_bool("Enable Zero Trust (PQC Auth)?", False)

            server_entry: dict[str, Any] = {"name": name, "command": cmd, "args": args}
            if zt_enabled:
                server_entry["zero_trust"] = True

            mcp_servers.append(server_entry)
        elif choice == "r" and mcp_servers:
            try:
                idx_str = prompt(
                    "Server number to remove: ",
                    complete_style=CompleteStyle.READLINE_LIKE,
                )
                # Convert the 1-based display number; bad or out-of-range
                # input is silently ignored and the loop continues.
                idx = int(idx_str) - 1
                if 0 <= idx < len(mcp_servers):
                    mcp_servers.pop(idx)
            except ValueError:
                pass
            except (KeyboardInterrupt, EOFError):
                raise
        elif choice == "d" or not choice:
            # "d" or plain Enter finishes the loop.
            break

    config["mcp_servers"] = mcp_servers
|
|
350
|
+
|
|
351
|
+
|
|
352
|
+
def mask_secrets(data: Any) -> Any:
    """Recursively mask sensitive information in configuration data.

    Any "api_key" value is shortened to its last four characters (or
    replaced with "***" if short); GitHub fine-grained PATs embedded in
    plain strings are likewise shortened. Everything else passes through.
    """
    if isinstance(data, dict):
        masked = {}
        for key, value in data.items():
            if key == "api_key":
                if isinstance(value, str) and len(value) > 8:
                    masked[key] = f"...{value[-4:]}"
                else:
                    masked[key] = "***"
            else:
                masked[key] = mask_secrets(value)
        return masked
    if isinstance(data, list):
        return [mask_secrets(entry) for entry in data]
    if isinstance(data, str):
        # Match github_pat_ followed by at least 10 allowed characters.
        def _shorten(match: "re.Match[str]") -> str:
            token = match.group(0)
            return f"{token[:11]}...{token[-4:]}"

        return re.sub(r"github_pat_[a-zA-Z0-9_]{10,}", _shorten, data)
    return data
|
|
371
|
+
|
|
372
|
+
|
|
373
|
+
def configure_sentinel(config: dict[str, Any]) -> None:
    """Configures the Reasoning Sentinel settings."""
    print("\n--- Reasoning Sentinel (Mamba-SSM Guard) ---")
    sentinel_cfg = config.setdefault("sentinel", {})

    enabled = prompt_bool(
        "Enable Reasoning Sentinel monitoring?", sentinel_cfg.get("enabled", True)
    )
    sentinel_cfg["enabled"] = enabled
    if not enabled:
        return

    sentinel_cfg["mode"] = prompt_input(
        "Sentinel Mode (detect/collect)", sentinel_cfg.get("mode", "collect")
    )
    print(
        "Note: Sentinel anomaly thresholds are now self-calibrating "
        "based on model loss."
    )
|
|
390
|
+
|
|
391
|
+
|
|
392
|
+
def main() -> None:
    """Interactive configuration wizard entry point.

    Walks through provider, general, security, sentinel and MCP sections,
    shows a secret-masked summary, then saves on confirmation. Ctrl+C or
    EOF at any prompt aborts without writing anything.
    """
    try:
        print("========================================")
        print(" llm-cli Interactive Configuration ")
        print("========================================")
        print("Press Ctrl+C at any time to quit and discard changes.")
        print(f"Config file: {CONFIG_FILE}\n")

        config = load_config()

        # Provider configurations
        configure_provider(config, "google", "Google Gemini")
        configure_provider(config, "openai", "OpenAI")
        configure_provider(config, "anthropic", "Anthropic Claude")
        configure_provider(config, "xai", "xAI Grok")
        configure_provider(config, "brave", "Brave Search")
        configure_provider(config, "ollama", "Ollama (Local)")

        # General and Security
        configure_general(config)
        configure_security(config)
        configure_sentinel(config)
        configure_mcp(config)

        print("\nSummary of changes:")
        # Mask API keys/tokens so secrets never hit the terminal.
        display_config = mask_secrets(config)

        print(json.dumps(display_config, indent=2, ensure_ascii=False))

        # Check if user wants to save
        if prompt_bool("Save configuration?", True):
            save_config(config)
            print(f"\nConfiguration saved to {CONFIG_FILE}")
        else:
            print("\nConfiguration NOT saved.")
    except (KeyboardInterrupt, EOFError):
        print("\n\nConfiguration cancelled.")
        sys.exit(0)
|
|
430
|
+
|
|
431
|
+
|
|
432
|
+
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import logging
|
|
3
|
+
import sys
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
from llm_cli.security.identity import IdentityManager
|
|
7
|
+
from llm_cli.security.integrity import IntegrityVerifier
|
|
8
|
+
|
|
9
|
+
logging.basicConfig(level=logging.INFO)
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def main() -> None:
    """CLI dispatcher for identity/integrity maintenance commands.

    Subcommands:
        keygen      -- generate RSA + PQC (ML-DSA / ML-KEM) key pairs.
        manifest    -- rebuild and sign the integrity manifest.
        decrypt-log -- decrypt a PQC-encrypted (ML-KEM) audit log.
    """
    parser = argparse.ArgumentParser(
        description="LLM-CLI Identity and Integrity Management Tool"
    )
    subparsers = parser.add_subparsers(dest="command", help="Commands")

    # keygen command
    subparsers.add_parser("keygen", help="Generate RSA and PQC key pairs")

    # manifest command
    subparsers.add_parser("manifest", help="Generate/Update integrity manifest")

    # decrypt-log command
    decrypt_parser = subparsers.add_parser(
        "decrypt-log", help="Decrypt PQC-encrypted (ML-KEM) audit logs"
    )
    decrypt_parser.add_argument(
        "input", help="Path to the encrypted audit log (.jsonl)"
    )
    decrypt_parser.add_argument("-o", "--output", help="Path to save the decrypted log")

    args = parser.parse_args()

    if args.command == "keygen":
        print("🛡️ Generating Identity Keys...")
        # NOTE(review): relies on IdentityManager private members
        # (_ensure_keys, _KEY_DIR, ...) -- fragile against refactors of
        # llm_cli.security.identity; confirm these are intended entry points.
        IdentityManager._ensure_keys(force=True)
        print(f"✅ Keys generated in {IdentityManager._KEY_DIR}")
        print(
            f"RSA Public Key: {IdentityManager._PRIVATE_KEY_PATH.with_suffix('.pub')}"
        )
        print(f"ML-DSA Public Key: {IdentityManager._PQC_PUBLIC_KEY_PATH}")
        print(f"ML-KEM Public Key: {IdentityManager._PQC_KEM_PUBLIC_KEY_PATH}")
        print(
            "\n[Action Required] Copy the PQC Public Key to your remote "
            "servers if using Strict Zero Trust."
        )

    elif args.command == "manifest":
        print("🛡️ Generating Integrity Manifest...")
        # Path to project root (three levels up from this module).
        root_path = Path(__file__).resolve().parent.parent.parent
        verifier = IntegrityVerifier(root_path)
        if verifier.rebuild_manifest():
            print(f"✅ Integrity manifest signed and saved to {verifier.MANIFEST_PATH}")
        else:
            print("❌ Failed to generate manifest.")
            sys.exit(1)

    elif args.command == "decrypt-log":
        # Imported lazily so other commands skip the PQC dependency cost.
        from llm_cli.apps.pqc_decrypt import decrypt_log_file

        print(f"🛡️ Decrypting log file: {args.input}...")
        decrypt_log_file(Path(args.input), Path(args.output) if args.output else None)
    else:
        # No subcommand given.
        parser.print_help()
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|