hey-cli-python 1.0.0__tar.gz → 1.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20)
  1. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/PKG-INFO +1 -2
  2. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli/llm.py +83 -68
  3. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli/models.py +5 -3
  4. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli_python.egg-info/PKG-INFO +1 -2
  5. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli_python.egg-info/requires.txt +0 -1
  6. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/pyproject.toml +1 -2
  7. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/LICENSE +0 -0
  8. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/README.md +0 -0
  9. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli/__init__.py +0 -0
  10. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli/cli.py +0 -0
  11. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli/governance.py +0 -0
  12. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli/history.py +0 -0
  13. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli/runner.py +0 -0
  14. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli/skills.py +0 -0
  15. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli_python.egg-info/SOURCES.txt +0 -0
  16. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli_python.egg-info/dependency_links.txt +0 -0
  17. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli_python.egg-info/entry_points.txt +0 -0
  18. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/hey_cli_python.egg-info/top_level.txt +0 -0
  19. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/setup.cfg +0 -0
  20. {hey_cli_python-1.0.0 → hey_cli_python-1.0.1}/tests/test_cli.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hey-cli-python
3
- Version: 1.0.0
3
+ Version: 1.0.1
4
4
  Summary: A secure, zero-bloat CLI companion that turns natural language and error logs into executable commands.
5
5
  Author: Mohit S.
6
6
  Project-URL: Homepage, https://github.com/sinsniwal/hey-cli
@@ -21,7 +21,6 @@ Classifier: Programming Language :: Python :: 3.12
21
21
  Requires-Python: >=3.9
22
22
  Description-Content-Type: text/markdown
23
23
  License-File: LICENSE
24
- Requires-Dist: pydantic>=2.0.0
25
24
  Requires-Dist: ollama>=0.1.0
26
25
  Requires-Dist: rich>=13.0.0
27
26
  Dynamic: license-file
@@ -46,45 +46,52 @@ def generate_command(prompt: str, context: str = "", model_name: str = DEFAULT_M
46
46
  if context:
47
47
  content = f"Context (e.g. error logs or piped data):\n{context}\n\nObjective:\n{prompt}"
48
48
 
49
- try:
50
- sys_context = f"--- ENVIRONMENT ---\n{get_system_context()}\n-------------------\n"
51
- msgs = [{"role": "system", "content": SYSTEM_PROMPT + "\n\n" + sys_context}]
52
- if history:
53
- msgs.extend(history)
54
- msgs.append({"role": "user", "content": content})
55
-
56
- response = ollama.chat(
57
- model=model_name,
58
- messages=msgs,
59
- format="json",
60
- options={"temperature": 0.0}
61
- )
62
-
63
- content = response["message"]["content"]
64
- # In case ollama returns markdown format code block for JSON
65
- if content.startswith("```json"):
66
- content = content[7:-3].strip()
67
- elif content.startswith("```"):
68
- content = content[3:-3].strip()
69
-
70
- data = json.loads(content)
71
- return CommandResponse(**data)
72
- except Exception as e:
73
- raw_val = content if 'content' in locals() else "None"
74
-
75
- # Check if the error was caused by a safety refusal schema validation failure
76
- if "refusal" in raw_val.lower() or "sorry" in raw_val.lower():
77
- return CommandResponse(
78
- command="",
79
- explanation=f"LLM Safety Trigger: The model refused to generate this command.\n\nRaw output: {raw_val.strip()}",
80
- needs_context=False
49
+ sys_context = f"--- ENVIRONMENT ---\n{get_system_context()}\n-------------------\n"
50
+ msgs = [{"role": "system", "content": SYSTEM_PROMPT + "\n\n" + sys_context}]
51
+ if history:
52
+ msgs.extend(history)
53
+ msgs.append({"role": "user", "content": content})
54
+
55
+ max_retries = 3
56
+ last_error = None
57
+ raw_val = "None"
58
+
59
+ for attempt in range(max_retries):
60
+ try:
61
+ response = ollama.chat(
62
+ model=model_name,
63
+ messages=msgs,
64
+ format="json",
65
+ options={"temperature": 0.0}
81
66
  )
82
67
 
83
- # Fallback empty block on failure
84
- return CommandResponse(
85
- command="",
86
- explanation=f"Error generating command from LLM: {str(e)}\nRaw Output:\n{raw_val}"
87
- )
68
+ raw_val = response["message"]["content"]
69
+ content_str = raw_val
70
+
71
+ if content_str.startswith("```json"):
72
+ content_str = content_str[7:-3].strip()
73
+ elif content_str.startswith("```"):
74
+ content_str = content_str[3:-3].strip()
75
+
76
+ data = json.loads(content_str)
77
+ return CommandResponse(**data)
78
+
79
+ except Exception as e:
80
+ last_error = e
81
+ if "refusal" in raw_val.lower() or "sorry" in raw_val.lower():
82
+ return CommandResponse(
83
+ command="",
84
+ explanation=f"LLM Safety Trigger: The model refused to generate this command.\n\nRaw output: {raw_val.strip()}",
85
+ needs_context=False
86
+ )
87
+
88
+ msgs.append({"role": "assistant", "content": raw_val})
89
+ msgs.append({"role": "user", "content": f"Your JSON output failed validation: {str(e)}\nPlease strictly follow the schema and output ONLY valid JSON without markdown wrapping."})
90
+
91
+ return CommandResponse(
92
+ command="",
93
+ explanation=f"Error generating command from LLM after {max_retries} retries: {str(last_error)}\nRaw Output:\n{raw_val}"
94
+ )
88
95
 
89
96
  def generate_troubleshoot_step(objective: str, history: list, model_name: str = DEFAULT_MODEL) -> TroubleshootResponse:
90
97
  history_text = "\n".join([
@@ -94,37 +101,45 @@ def generate_troubleshoot_step(objective: str, history: list, model_name: str =
94
101
 
95
102
  content = f"Objective:\n{objective}\n\nHistory of execution:\n{history_text}\n\nAnalyze the specific error and provide the NEXT logical command to test or fix. Re-read logs carefully."
96
103
 
97
- try:
98
- sys_context = f"--- ENVIRONMENT ---\n{get_system_context()}\n-------------------\n"
99
- response = ollama.chat(
100
- model=model_name,
101
- messages=[
102
- {"role": "system", "content": SYSTEM_PROMPT + "\n" + TROUBLESHOOT_PROMPT + "\n\n" + sys_context},
103
- {"role": "user", "content": content}
104
- ],
105
- format="json",
106
- options={"temperature": 0.0}
107
- )
108
-
109
- content = response["message"]["content"].strip()
110
- if not content:
111
- return TroubleshootResponse(
112
- command=None,
113
- explanation="LLM returned empty JSON object.",
114
- is_resolved=False
104
+ sys_context = f"--- ENVIRONMENT ---\n{get_system_context()}\n-------------------\n"
105
+ msgs = [
106
+ {"role": "system", "content": SYSTEM_PROMPT + "\n" + TROUBLESHOOT_PROMPT + "\n\n" + sys_context},
107
+ {"role": "user", "content": content}
108
+ ]
109
+
110
+ max_retries = 3
111
+ last_error = None
112
+ raw_val = "None"
113
+
114
+ for attempt in range(max_retries):
115
+ try:
116
+ response = ollama.chat(
117
+ model=model_name,
118
+ messages=msgs,
119
+ format="json",
120
+ options={"temperature": 0.0}
115
121
  )
116
122
 
117
- if content.startswith("```json"):
118
- content = content[7:-3].strip()
119
- elif content.startswith("```"):
120
- content = content[3:-3].strip()
123
+ raw_val = response["message"]["content"].strip()
124
+ if not raw_val:
125
+ raise ValueError("LLM returned empty JSON object.")
126
+
127
+ content_str = raw_val
128
+ if content_str.startswith("```json"):
129
+ content_str = content_str[7:-3].strip()
130
+ elif content_str.startswith("```"):
131
+ content_str = content_str[3:-3].strip()
132
+
133
+ data = json.loads(content_str)
134
+ return TroubleshootResponse(**data)
121
135
 
122
- data = json.loads(content)
123
- return TroubleshootResponse(**data)
124
- except Exception as e:
125
- raw_val = content if 'content' in locals() else "None"
126
- return TroubleshootResponse(
127
- command=None,
128
- explanation=f"Error analyzing execution: {str(e)}\nRaw Output:\n{raw_val}",
129
- is_resolved=False
130
- )
136
+ except Exception as e:
137
+ last_error = e
138
+ msgs.append({"role": "assistant", "content": raw_val})
139
+ msgs.append({"role": "user", "content": f"Your JSON output failed validation: {str(e)}\nFix the syntax and output ONLY strict JSON schema."})
140
+
141
+ return TroubleshootResponse(
142
+ command=None,
143
+ explanation=f"Error analyzing execution after {max_retries} retries: {str(last_error)}\nRaw Output:\n{raw_val}",
144
+ is_resolved=False
145
+ )
@@ -1,12 +1,14 @@
1
- from pydantic import BaseModel, ConfigDict
1
+ from dataclasses import dataclass
2
2
  from typing import Optional
3
3
 
4
- class CommandResponse(BaseModel):
4
+ @dataclass
5
+ class CommandResponse:
5
6
  command: str
6
7
  explanation: str = ""
7
8
  needs_context: bool = False
8
9
 
9
- class TroubleshootResponse(BaseModel):
10
+ @dataclass
11
+ class TroubleshootResponse:
10
12
  command: Optional[str] = None
11
13
  explanation: str = ""
12
14
  is_resolved: bool = False
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hey-cli-python
3
- Version: 1.0.0
3
+ Version: 1.0.1
4
4
  Summary: A secure, zero-bloat CLI companion that turns natural language and error logs into executable commands.
5
5
  Author: Mohit S.
6
6
  Project-URL: Homepage, https://github.com/sinsniwal/hey-cli
@@ -21,7 +21,6 @@ Classifier: Programming Language :: Python :: 3.12
21
21
  Requires-Python: >=3.9
22
22
  Description-Content-Type: text/markdown
23
23
  License-File: LICENSE
24
- Requires-Dist: pydantic>=2.0.0
25
24
  Requires-Dist: ollama>=0.1.0
26
25
  Requires-Dist: rich>=13.0.0
27
26
  Dynamic: license-file
@@ -1,3 +1,2 @@
1
- pydantic>=2.0.0
2
1
  ollama>=0.1.0
3
2
  rich>=13.0.0
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "hey-cli-python"
7
- version = "1.0.0"
7
+ version = "1.0.1"
8
8
  description = "A secure, zero-bloat CLI companion that turns natural language and error logs into executable commands."
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.9"
@@ -26,7 +26,6 @@ classifiers = [
26
26
  "Programming Language :: Python :: 3.12",
27
27
  ]
28
28
  dependencies = [
29
- "pydantic>=2.0.0",
30
29
  "ollama>=0.1.0",
31
30
  "rich>=13.0.0"
32
31
  ]
File without changes
File without changes
File without changes