hey-cli-python 1.0.0__tar.gz → 1.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/PKG-INFO +2 -4
  2. hey_cli_python-1.0.2/hey_cli/llm.py +162 -0
  3. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli/models.py +5 -3
  4. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli_python.egg-info/PKG-INFO +2 -4
  5. hey_cli_python-1.0.2/hey_cli_python.egg-info/requires.txt +1 -0
  6. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/pyproject.toml +2 -4
  7. hey_cli_python-1.0.0/hey_cli/llm.py +0 -130
  8. hey_cli_python-1.0.0/hey_cli_python.egg-info/requires.txt +0 -3
  9. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/LICENSE +0 -0
  10. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/README.md +0 -0
  11. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli/__init__.py +0 -0
  12. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli/cli.py +0 -0
  13. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli/governance.py +0 -0
  14. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli/history.py +0 -0
  15. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli/runner.py +0 -0
  16. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli/skills.py +0 -0
  17. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli_python.egg-info/SOURCES.txt +0 -0
  18. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli_python.egg-info/dependency_links.txt +0 -0
  19. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli_python.egg-info/entry_points.txt +0 -0
  20. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/hey_cli_python.egg-info/top_level.txt +0 -0
  21. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/setup.cfg +0 -0
  22. {hey_cli_python-1.0.0 → hey_cli_python-1.0.2}/tests/test_cli.py +0 -0
@@ -1,8 +1,8 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hey-cli-python
3
- Version: 1.0.0
3
+ Version: 1.0.2
4
4
  Summary: A secure, zero-bloat CLI companion that turns natural language and error logs into executable commands.
5
- Author: Mohit S.
5
+ Author: Mohit Singh Sinsniwal
6
6
  Project-URL: Homepage, https://github.com/sinsniwal/hey-cli
7
7
  Project-URL: Repository, https://github.com/sinsniwal/hey-cli
8
8
  Project-URL: Issues, https://github.com/sinsniwal/hey-cli/issues
@@ -21,8 +21,6 @@ Classifier: Programming Language :: Python :: 3.12
21
21
  Requires-Python: >=3.9
22
22
  Description-Content-Type: text/markdown
23
23
  License-File: LICENSE
24
- Requires-Dist: pydantic>=2.0.0
25
- Requires-Dist: ollama>=0.1.0
26
24
  Requires-Dist: rich>=13.0.0
27
25
  Dynamic: license-file
28
26
 
@@ -0,0 +1,162 @@
1
+ import json
2
+ import os
3
+ import platform
4
+ import urllib.request
5
+ import urllib.error
6
+ from .models import CommandResponse, TroubleshootResponse
7
+
8
# Default Ollama model tag; every public function accepts a model_name override.
DEFAULT_MODEL = "gpt-oss:20b-cloud"

# Base system prompt sent on every chat call. It pins the model to strict JSON
# matching CommandResponse ("command" / "explanation" / "needs_context") and
# defines the agentic protocol: needs_context=true means "run this command
# silently and feed the output back", needs_context=false means final answer.
SYSTEM_PROMPT = r"""You are hey-cli, an autonomous, minimalist CLI companion and terminal expert.
Your primary goal is to turn natural language objectives and error logs into actionable shell commands.
Your user intends to execute the command you provide.
Do NOT output markdown blocks or conversational text outside the required JSON schema.
Only output valid JSON matching the requested schema exactly.
You MUST provide "command", "explanation", and "needs_context" fields in your JSON output.
WARNING: Ensure any quotes inside your command (e.g. echo 'text') are single quotes, or properly escaped double quotes, to maintain valid JSON string structure.
CRITICAL PARSING RULE: If the user provides a specific filename, directory name, string, or port, you MUST preserve it EXACTLY as written. Do not autocorrect spelling, abbreviate, or drop extensions (e.g., if asked to make 'temporarily', do not output 'temporay').

IMPORTANT AGENTIC INSTRUCTION:
If the user asks ANY question about their system state, files, or environment (e.g., "is docker running?", "what is my IP?", "explain this folder"), you MUST set `needs_context = true` and target a bash command to silently gather the data.
ONLY set `needs_context = false` when you are providing the FINAL answer.
If your final answer is an explanation or simply answering a question, leave the `command` field empty `""` and put a high-quality Markdown response in the `explanation` field. Do NOT write bash `echo` or `printf` statements.
If your final answer requires an action to be ran (e.g., "start docker", "delete the folder"), put the executable bash string in `command`.
CRITICAL JSON REQUIREMENT: If your bash command contains any backslashes (e.g. for regex like `\.` or escaping spaces), you MUST double-escape them (`\\\\.`) so the output remains valid JSON!
"""
26
+
27
+ from .skills import get_compiled_skills
28
+
29
def get_system_context() -> str:
    """Describe the host OS, current shell, and compiled skills for the system prompt."""
    shell = os.environ.get("SHELL", "unknown")
    platform_line = (
        f"Operating System: {platform.system()} {platform.release()} "
        f"({platform.machine()})"
    )
    # Two blank lines separate the environment summary from the skills block.
    return f"{platform_line}\nCurrent Shell: {shell}\n\n{get_compiled_skills()}"
38
+
39
# Appended to SYSTEM_PROMPT by generate_troubleshoot_step: switches the model
# into an iterative fix loop driven by TroubleshootResponse (command /
# explanation / is_resolved), with an explicit instruction to stop gracefully.
TROUBLESHOOT_PROMPT = r"""You are acting as an iterative troubleshooter.
You will be provided with an objective, the previous commands attempted, and the stdout/stderr.
Determine the next command to run to resolve the issue, OR if the issue is resolved, indicate it.
Keep your explanation brief and chill. If a file or tests do not exist, do not try to aggressively brute-force create configurations. Just explain the situation and set is_resolved=True to gracefully stop.
"""
44
+
45
def generate_command(prompt: str, context: str = "", model_name: str = DEFAULT_MODEL, history: list = None) -> CommandResponse:
    """Ask the local Ollama server to turn a natural-language objective into a shell command.

    Args:
        prompt: The user's natural-language objective.
        context: Optional error logs / piped data prepended to the objective.
        model_name: Ollama model tag to chat with.
        history: Optional prior chat messages (role/content dicts) for multi-turn use.

    Returns:
        A CommandResponse parsed from the model's JSON reply. This function
        never raises: after three failed attempts it returns a CommandResponse
        with an empty command and the error in `explanation`.
    """
    content = prompt
    if context:
        content = f"Context (e.g. error logs or piped data):\n{context}\n\nObjective:\n{prompt}"

    sys_context = f"--- ENVIRONMENT ---\n{get_system_context()}\n-------------------\n"
    msgs = [{"role": "system", "content": SYSTEM_PROMPT + "\n\n" + sys_context}]
    if history:
        msgs.extend(history)
    msgs.append({"role": "user", "content": content})

    # Honor OLLAMA_HOST (as the official ollama client did before it was
    # dropped in 1.0.2) instead of hard-coding localhost, so remote or
    # containerized servers keep working. Default is unchanged.
    base_url = os.environ.get("OLLAMA_HOST", "http://localhost:11434").rstrip("/")

    max_retries = 3
    last_error = None
    raw_val = "None"

    for attempt in range(max_retries):
        try:
            payload = {
                "model": model_name,
                "messages": msgs,
                "format": "json",
                "stream": False,
                "options": {"temperature": 0.0},  # deterministic command generation
            }
            req = urllib.request.Request(
                f"{base_url}/api/chat",
                data=json.dumps(payload).encode('utf-8'),
                headers={"Content-Type": "application/json"}
            )
            with urllib.request.urlopen(req, timeout=30) as resp:
                response = json.loads(resp.read().decode('utf-8'))

            raw_val = response["message"]["content"]

            # Strip an optional markdown code fence without corrupting the
            # body: the previous [7:-3] slice silently chopped three trailing
            # characters whenever the closing fence was missing.
            content_str = raw_val.strip()
            if content_str.startswith("```"):
                content_str = content_str.removeprefix("```json").removeprefix("```")
                content_str = content_str.removesuffix("```").strip()

            data = json.loads(content_str)
            return CommandResponse(**data)

        except Exception as e:  # retry boundary: network, JSON, and schema errors
            last_error = e
            # Heuristic: schema failures caused by a safety refusal are not
            # worth retrying -- surface the raw refusal to the user instead.
            if "refusal" in raw_val.lower() or "sorry" in raw_val.lower():
                return CommandResponse(
                    command="",
                    explanation=f"LLM Safety Trigger: The model refused to generate this command.\n\nRaw output: {raw_val.strip()}",
                    needs_context=False
                )

            # Feed the validation error back so the model can repair its JSON.
            msgs.append({"role": "assistant", "content": raw_val})
            msgs.append({"role": "user", "content": f"Your JSON output failed validation: {str(e)}\nPlease strictly follow the schema and output ONLY valid JSON without markdown wrapping."})

    # All retries exhausted: report the last error instead of raising.
    return CommandResponse(
        command="",
        explanation=f"Error generating command from LLM after {max_retries} retries: {str(last_error)}\nRaw Output:\n{raw_val}"
    )
104
+
105
def generate_troubleshoot_step(objective: str, history: list, model_name: str = DEFAULT_MODEL) -> TroubleshootResponse:
    """Ask the LLM for the next command in an iterative troubleshooting session.

    Args:
        objective: The user's overall goal for this session.
        history: Executed steps so far; each dict must carry 'cmd',
            'exit_code', and 'output' keys.
        model_name: Ollama model tag to chat with.

    Returns:
        A TroubleshootResponse with the next command to run, or with
        is_resolved set when the session should stop. Never raises: after
        three failed attempts an error TroubleshootResponse is returned.
    """
    history_text = "\n".join([
        f"Cmd: {h['cmd']}\nExit: {h['exit_code']}\nOut/Err:\n{h['output']}"
        for h in history
    ])

    content = f"Objective:\n{objective}\n\nHistory of execution:\n{history_text}\n\nAnalyze the specific error and provide the NEXT logical command to test or fix. Re-read logs carefully."

    sys_context = f"--- ENVIRONMENT ---\n{get_system_context()}\n-------------------\n"
    msgs = [
        {"role": "system", "content": SYSTEM_PROMPT + "\n" + TROUBLESHOOT_PROMPT + "\n\n" + sys_context},
        {"role": "user", "content": content}
    ]

    # Honor OLLAMA_HOST (matching the removed ollama client's behavior) with
    # the same localhost default as before.
    base_url = os.environ.get("OLLAMA_HOST", "http://localhost:11434").rstrip("/")

    max_retries = 3
    last_error = None
    raw_val = "None"

    for attempt in range(max_retries):
        try:
            payload = {
                "model": model_name,
                "messages": msgs,
                "format": "json",
                "stream": False,
                "options": {"temperature": 0.0},  # deterministic troubleshooting steps
            }
            req = urllib.request.Request(
                f"{base_url}/api/chat",
                data=json.dumps(payload).encode('utf-8'),
                headers={"Content-Type": "application/json"}
            )
            with urllib.request.urlopen(req, timeout=30) as resp:
                response = json.loads(resp.read().decode('utf-8'))

            raw_val = response["message"]["content"].strip()
            if not raw_val:
                # Raising here routes the empty reply through the retry loop.
                raise ValueError("LLM returned empty JSON object.")

            # Strip an optional markdown code fence safely: the previous
            # [7:-3] slice corrupted replies that lacked a closing fence.
            content_str = raw_val
            if content_str.startswith("```"):
                content_str = content_str.removeprefix("```json").removeprefix("```")
                content_str = content_str.removesuffix("```").strip()

            data = json.loads(content_str)
            return TroubleshootResponse(**data)

        except Exception as e:  # retry boundary: network, JSON, and schema errors
            last_error = e
            # Feed the validation error back so the model can repair its JSON.
            msgs.append({"role": "assistant", "content": raw_val})
            msgs.append({"role": "user", "content": f"Your JSON output failed validation: {str(e)}\nFix the syntax and output ONLY strict JSON schema."})

    # All retries exhausted: report the last error instead of raising.
    return TroubleshootResponse(
        command=None,
        explanation=f"Error analyzing execution after {max_retries} retries: {str(last_error)}\nRaw Output:\n{raw_val}",
        is_resolved=False
    )
@@ -1,12 +1,14 @@
1
- from pydantic import BaseModel, ConfigDict
1
+ from dataclasses import dataclass
2
2
  from typing import Optional
3
3
 
4
- class CommandResponse(BaseModel):
4
@dataclass
class CommandResponse:
    """Structured LLM reply for a single command-generation turn."""

    # Executable shell string; empty when the reply is explanation-only.
    command: str
    # Markdown explanation, or the final answer shown to the user.
    explanation: str = ""
    # True when the model wants the CLI to run `command` silently and feed
    # the output back for another turn (agentic context gathering).
    needs_context: bool = False
8
9
 
9
- class TroubleshootResponse(BaseModel):
10
@dataclass
class TroubleshootResponse:
    """Structured LLM reply for one step of an iterative troubleshooting loop."""

    # Next shell command to attempt; None when there is nothing left to run.
    command: Optional[str] = None
    # Brief reasoning for this step (or the final summary).
    explanation: str = ""
    # True once the objective is met or gracefully abandoned; stops the loop.
    is_resolved: bool = False
@@ -1,8 +1,8 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hey-cli-python
3
- Version: 1.0.0
3
+ Version: 1.0.2
4
4
  Summary: A secure, zero-bloat CLI companion that turns natural language and error logs into executable commands.
5
- Author: Mohit S.
5
+ Author: Mohit Singh Sinsniwal
6
6
  Project-URL: Homepage, https://github.com/sinsniwal/hey-cli
7
7
  Project-URL: Repository, https://github.com/sinsniwal/hey-cli
8
8
  Project-URL: Issues, https://github.com/sinsniwal/hey-cli/issues
@@ -21,8 +21,6 @@ Classifier: Programming Language :: Python :: 3.12
21
21
  Requires-Python: >=3.9
22
22
  Description-Content-Type: text/markdown
23
23
  License-File: LICENSE
24
- Requires-Dist: pydantic>=2.0.0
25
- Requires-Dist: ollama>=0.1.0
26
24
  Requires-Dist: rich>=13.0.0
27
25
  Dynamic: license-file
28
26
 
@@ -0,0 +1 @@
1
+ rich>=13.0.0
@@ -4,12 +4,12 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "hey-cli-python"
7
- version = "1.0.0"
7
+ version = "1.0.2"
8
8
  description = "A secure, zero-bloat CLI companion that turns natural language and error logs into executable commands."
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.9"
11
11
  authors = [
12
- {name = "Mohit S."}
12
+ {name = "Mohit Singh Sinsniwal"}
13
13
  ]
14
14
  keywords = ["cli", "llm", "bash", "terminal", "ollama", "sysadmin"]
15
15
  classifiers = [
@@ -26,8 +26,6 @@ classifiers = [
26
26
  "Programming Language :: Python :: 3.12",
27
27
  ]
28
28
  dependencies = [
29
- "pydantic>=2.0.0",
30
- "ollama>=0.1.0",
31
29
  "rich>=13.0.0"
32
30
  ]
33
31
  urls.Homepage = "https://github.com/sinsniwal/hey-cli"
@@ -1,130 +0,0 @@
1
- import json
2
- import os
3
- import platform
4
- import ollama
5
- from .models import CommandResponse, TroubleshootResponse
6
-
7
- DEFAULT_MODEL = "gpt-oss:20b-cloud"
8
-
9
- SYSTEM_PROMPT = r"""You are hey-cli, an autonomous, minimalist CLI companion and terminal expert.
10
- Your primary goal is to turn natural language objectives and error logs into actionable shell commands.
11
- Your user intends to execute the command you provide.
12
- Do NOT output markdown blocks or conversational text outside the required JSON schema.
13
- Only output valid JSON matching the requested schema exactly.
14
- You MUST provide "command", "explanation", and "needs_context" fields in your JSON output.
15
- WARNING: Ensure any quotes inside your command (e.g. echo 'text') are single quotes, or properly escaped double quotes, to maintain valid JSON string structure.
16
- CRITICAL PARSING RULE: If the user provides a specific filename, directory name, string, or port, you MUST preserve it EXACTLY as written. Do not autocorrect spelling, abbreviate, or drop extensions (e.g., if asked to make 'temporarily', do not output 'temporay').
17
-
18
- IMPORTANT AGENTIC INSTRUCTION:
19
- If the user asks ANY question about their system state, files, or environment (e.g., "is docker running?", "what is my IP?", "explain this folder"), you MUST set `needs_context = true` and target a bash command to silently gather the data.
20
- ONLY set `needs_context = false` when you are providing the FINAL answer.
21
- If your final answer is an explanation or simply answering a question, leave the `command` field empty `""` and put a high-quality Markdown response in the `explanation` field. Do NOT write bash `echo` or `printf` statements.
22
- If your final answer requires an action to be ran (e.g., "start docker", "delete the folder"), put the executable bash string in `command`.
23
- CRITICAL JSON REQUIREMENT: If your bash command contains any backslashes (e.g. for regex like `\.` or escaping spaces), you MUST double-escape them (`\\\\.`) so the output remains valid JSON!
24
- """
25
-
26
- from .skills import get_compiled_skills
27
-
28
- def get_system_context() -> str:
29
- os_name = platform.system()
30
- os_release = platform.release()
31
- arch = platform.machine()
32
- shell = os.environ.get("SHELL", "unknown")
33
-
34
- skills_block = f"\n\n{get_compiled_skills()}"
35
-
36
- return f"Operating System: {os_name} {os_release} ({arch})\nCurrent Shell: {shell}{skills_block}"
37
-
38
- TROUBLESHOOT_PROMPT = r"""You are acting as an iterative troubleshooter.
39
- You will be provided with an objective, the previous commands attempted, and the stdout/stderr.
40
- Determine the next command to run to resolve the issue, OR if the issue is resolved, indicate it.
41
- Keep your explanation brief and chill. If a file or tests do not exist, do not try to aggressively brute-force create configurations. Just explain the situation and set is_resolved=True to gracefully stop.
42
- """
43
-
44
- def generate_command(prompt: str, context: str = "", model_name: str = DEFAULT_MODEL, history: list = None) -> CommandResponse:
45
- content = prompt
46
- if context:
47
- content = f"Context (e.g. error logs or piped data):\n{context}\n\nObjective:\n{prompt}"
48
-
49
- try:
50
- sys_context = f"--- ENVIRONMENT ---\n{get_system_context()}\n-------------------\n"
51
- msgs = [{"role": "system", "content": SYSTEM_PROMPT + "\n\n" + sys_context}]
52
- if history:
53
- msgs.extend(history)
54
- msgs.append({"role": "user", "content": content})
55
-
56
- response = ollama.chat(
57
- model=model_name,
58
- messages=msgs,
59
- format="json",
60
- options={"temperature": 0.0}
61
- )
62
-
63
- content = response["message"]["content"]
64
- # In case ollama returns markdown format code block for JSON
65
- if content.startswith("```json"):
66
- content = content[7:-3].strip()
67
- elif content.startswith("```"):
68
- content = content[3:-3].strip()
69
-
70
- data = json.loads(content)
71
- return CommandResponse(**data)
72
- except Exception as e:
73
- raw_val = content if 'content' in locals() else "None"
74
-
75
- # Check if the error was caused by a safety refusal schema validation failure
76
- if "refusal" in raw_val.lower() or "sorry" in raw_val.lower():
77
- return CommandResponse(
78
- command="",
79
- explanation=f"LLM Safety Trigger: The model refused to generate this command.\n\nRaw output: {raw_val.strip()}",
80
- needs_context=False
81
- )
82
-
83
- # Fallback empty block on failure
84
- return CommandResponse(
85
- command="",
86
- explanation=f"Error generating command from LLM: {str(e)}\nRaw Output:\n{raw_val}"
87
- )
88
-
89
- def generate_troubleshoot_step(objective: str, history: list, model_name: str = DEFAULT_MODEL) -> TroubleshootResponse:
90
- history_text = "\n".join([
91
- f"Cmd: {h['cmd']}\nExit: {h['exit_code']}\nOut/Err:\n{h['output']}"
92
- for h in history
93
- ])
94
-
95
- content = f"Objective:\n{objective}\n\nHistory of execution:\n{history_text}\n\nAnalyze the specific error and provide the NEXT logical command to test or fix. Re-read logs carefully."
96
-
97
- try:
98
- sys_context = f"--- ENVIRONMENT ---\n{get_system_context()}\n-------------------\n"
99
- response = ollama.chat(
100
- model=model_name,
101
- messages=[
102
- {"role": "system", "content": SYSTEM_PROMPT + "\n" + TROUBLESHOOT_PROMPT + "\n\n" + sys_context},
103
- {"role": "user", "content": content}
104
- ],
105
- format="json",
106
- options={"temperature": 0.0}
107
- )
108
-
109
- content = response["message"]["content"].strip()
110
- if not content:
111
- return TroubleshootResponse(
112
- command=None,
113
- explanation="LLM returned empty JSON object.",
114
- is_resolved=False
115
- )
116
-
117
- if content.startswith("```json"):
118
- content = content[7:-3].strip()
119
- elif content.startswith("```"):
120
- content = content[3:-3].strip()
121
-
122
- data = json.loads(content)
123
- return TroubleshootResponse(**data)
124
- except Exception as e:
125
- raw_val = content if 'content' in locals() else "None"
126
- return TroubleshootResponse(
127
- command=None,
128
- explanation=f"Error analyzing execution: {str(e)}\nRaw Output:\n{raw_val}",
129
- is_resolved=False
130
- )
@@ -1,3 +0,0 @@
1
- pydantic>=2.0.0
2
- ollama>=0.1.0
3
- rich>=13.0.0
File without changes
File without changes
File without changes