smart-code-assistant 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,41 @@
1
+ Metadata-Version: 2.4
2
+ Name: smart-code-assistant
3
+ Version: 0.1.0
4
+ Summary: A CLI tool to explain, debug, test and optimize code using LLM
5
+ Author-email: Deepak Kambala <deepakkambala21@gmail.com>
6
+ Requires-Python: >=3.8
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: typer
9
+ Requires-Dist: ollama
10
+
11
+ # Smart Code Assistant CLI
12
+
13
+ ## Short Description
14
+ Smart Code Assistant CLI helps beginners debug, optimize, explain, and generate tests for Python code instantly, powered by an offline **Llama 3.2 1B** model served through Ollama. It keeps developers focused by providing AI-powered solutions directly in the project directory.
15
+
16
+ ## Features
17
+ - **Code Debugger** – Automatically detects issues in your code and generates a text file with suggestions.
18
+ - **Code Optimizer** – Analyzes your code and suggests more efficient alternatives in a separate file.
19
+ - **Code Explainer** – Provides clear explanations of your code logic up to a specific point.
20
+ - **Test Generator** – Generates sample test cases to validate correctness and handle edge cases.
21
+ - **Offline Support** – Powered by Ollama 3.21B, works entirely offline without internet dependency.
22
+ - **Fast Execution** – Commands execute in 2–3 seconds (network optional) for instant feedback.
23
+
24
+ ## How It Works
25
+ 1. Execute a custom command (e.g., `debug`) in your terminal.
26
+ 2. Your code is sent to the **Ollama LLM** for processing.
27
+ 3. The generated output (debugged code, optimized version, explanation, or tests) is saved as a text file in your project folder.
28
+ 4. No need to switch tools—stay focused and reduce attention residue.
29
+
30
+ ## Supported Commands
31
+ | Command | Description |
32
+ |----------------|-------------|
33
+ | `debug` | Debugs your code and writes suggestions to a file. |
34
+ | `optimize` | Suggests improvements for efficiency. |
35
+ | `explain` | Explains your code logic clearly. |
36
+ | `test`        | Creates sample test cases for validation. |
37
+
38
+ ## Tech Stack
39
+ - Python
40
+ - Ollama 3.21B LLM (offline)
41
+ - Windows PowerShell (custom CLI commands)
@@ -0,0 +1,31 @@
1
+ # Smart Code Assistant CLI
2
+
3
+ ## Short Description
4
+ Smart Code Assistant CLI helps beginners debug, optimize, explain, and generate tests for Python code instantly, powered by an offline **Llama 3.2 1B** model served through Ollama. It keeps developers focused by providing AI-powered solutions directly in the project directory.
5
+
6
+ ## Features
7
+ - **Code Debugger** – Automatically detects issues in your code and generates a text file with suggestions.
8
+ - **Code Optimizer** – Analyzes your code and suggests more efficient alternatives in a separate file.
9
+ - **Code Explainer** – Provides clear explanations of your code logic up to a specific point.
10
+ - **Test Generator** – Generates sample test cases to validate correctness and handle edge cases.
11
+ - **Offline Support** – Powered by Ollama 3.21B, works entirely offline without internet dependency.
12
+ - **Fast Execution** – Commands execute in 2–3 seconds (network optional) for instant feedback.
13
+
14
+ ## How It Works
15
+ 1. Execute a custom command (e.g., `debug`) in your terminal.
16
+ 2. Your code is sent to the **Ollama LLM** for processing.
17
+ 3. The generated output (debugged code, optimized version, explanation, or tests) is saved as a text file in your project folder.
18
+ 4. No need to switch tools—stay focused and reduce attention residue.
19
+
20
+ ## Supported Commands
21
+ | Command | Description |
22
+ |----------------|-------------|
23
+ | `debug` | Debugs your code and writes suggestions to a file. |
24
+ | `optimize` | Suggests improvements for efficiency. |
25
+ | `explain` | Explains your code logic clearly. |
26
+ | `test`        | Creates sample test cases for validation. |
27
+
28
+ ## Tech Stack
29
+ - Python
30
+ - Ollama 3.21B LLM (offline)
31
+ - Windows PowerShell (custom CLI commands)
@@ -0,0 +1,20 @@
1
+ [project]
2
+ name = "smart-code-assistant"
3
+ version = "0.1.0"
4
+ description = "A CLI tool to explain, debug, test and optimize code using LLM"
5
+ authors = [
6
+ { name="Deepak Kambala", email="deepakkambala21@gmail.com" }
7
+ ]
8
+ readme = "README.md"
9
+ requires-python = ">=3.8"
10
+ dependencies = [
11
+ "typer",
12
+ "ollama"
13
+ ]
14
+
15
+ [project.scripts]
16
+ smart-assist = "smart_code_assistant.cli:app"
17
+
18
+ [build-system]
19
+ requires = ["setuptools", "wheel"]
20
+ build-backend = "setuptools.build_meta"
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,7 @@
1
+
2
+ #smart_code_assistant/      # Package folder (matches the installed layout)
+ #├── __init__.py            # Package marker
+ #├── cli.py                 # Main CLI entry point (registers all commands)
+ #├── debug.py               # `debug` command
+ #├── explainer.py           # `explain` command
+ #├── optimize.py            # `optimize` command
+ #└── test.py                # `test` command (edge-case generator)
@@ -0,0 +1,15 @@
1
import typer
from smart_code_assistant.explainer import explain
from smart_code_assistant.test import generate
from smart_code_assistant.debug import debug
from smart_code_assistant.optimize import optimize

# Root Typer application aggregating every sub-command; exposed as the
# `smart-assist` console script via pyproject.toml.
app = typer.Typer()

# Register the plain functions as commands. `generate` is exposed under
# the shorter CLI name `test`; the others keep their function names.
app.command()(explain)
app.command(name="test")(generate)
app.command()(debug)
app.command()(optimize)

if __name__ == "__main__":
    app()
@@ -0,0 +1,121 @@
1
#!/usr/bin/env python3
"""CLI command that asks a local Ollama model to debug a code file."""
import subprocess
from pathlib import Path
import sys
import typer

app = typer.Typer()

# Ensure UTF-8 encoding for Windows terminals. Guarded because the
# standard streams may be None or replaced (e.g. under pythonw, or when
# output is captured), in which case reconfigure() is unavailable and an
# unconditional call would raise AttributeError at import time.
for _stream in (sys.stdin, sys.stdout):
    if _stream is not None and hasattr(_stream, "reconfigure"):
        _stream.reconfigure(encoding="utf-8")
13
+
14
def generate_debug_report(file_path: Path, model_name: str = "llama3.2:1b") -> "str | None":
    """
    Generate a structured debug report for the given code file using Ollama.

    Args:
        file_path: Code file to analyze; read as UTF-8.
        model_name: Ollama model tag to run.

    Returns:
        The model's report text, or None when the file cannot be read or
        the Ollama process fails (the error is echoed, not raised).

    Note: the return annotation is quoted because the bare ``str | None``
    form is evaluated at definition time and fails on Python 3.8/3.9,
    which this package's ``requires-python >= 3.8`` still supports.
    """

    try:
        code_content = file_path.read_text(encoding="utf-8")

        prompt = f"""You are a senior software engineer and debugging expert.

Carefully analyze the following code.

Your tasks:

1. Detect syntax errors.
2. Detect logical errors.
3. Detect runtime errors.
4. Identify bad practices or inefficiencies.
5. Clearly explain each issue.
6. Provide step-by-step solutions.
7. Provide a fully corrected version of the entire code at the end.

Format your response EXACTLY like this:

=== ERRORS FOUND ===
(List all issues clearly)

=== SOLUTIONS ===
(Explain how to fix them)

=== CORRECTED CODE ===
(Provide full corrected code)

Here is the code to debug:

{code_content}
"""

        typer.echo(f"🔍 Debugging {file_path} using model '{model_name}'...")

        process = subprocess.Popen(
            ["ollama", "run", model_name],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )

        stdout, stderr = process.communicate(input=prompt.encode("utf-8"))

        # Model output is not guaranteed to be clean UTF-8; never crash on it.
        stdout = stdout.decode("utf-8", errors="replace")
        stderr = stderr.decode("utf-8", errors="replace")

        if process.returncode != 0:
            typer.secho(f"\n❌ Error running Ollama:\n{stderr}", fg=typer.colors.RED)
            return None

        return stdout

    except Exception as e:
        typer.secho(f"\n❌ An unexpected error occurred: {str(e)}", fg=typer.colors.RED)
        return None
75
+
76
+
77
def save_debug_report(file_path: Path, report: str) -> Path:
    """
    Save the debug report to a text file next to the analyzed file.

    Returns the path of the ``<stem>_debug.txt`` file written.
    """
    # Path.with_stem() only exists on Python 3.9+, but the package
    # declares requires-python >= 3.8; with_name() is equivalent here
    # (same "<stem>_debug.txt" result) and works on 3.8.
    output_file = file_path.with_name(file_path.stem + "_debug.txt")
    output_file.write_text(report, encoding="utf-8")

    typer.secho(f"\n✅ Debug report saved to: {output_file}", fg=typer.colors.GREEN)
    return output_file
86
+
87
+
88
@app.command()
def debug(
    file: Path = typer.Argument(..., exists=True, readable=True, help="The code file to debug."),
    model: str = typer.Option("llama3.2:1b", help="Ollama model to use.")
):
    """
    Debug FILE with an Ollama model and save a report next to it.

    Runs the model over the file contents and, on success, writes a
    ``<name>_debug.txt`` report beside the original file; prints a
    warning when the model run fails.
    """
    # NOTE(review): the previous docstring here was a pasted LLM prompt
    # (including a literal `{code_content}` placeholder) and was shown
    # verbatim as the command's --help text.
    report = generate_debug_report(file, model)

    if report:
        save_debug_report(file, report)
    else:
        typer.secho("⚠ Debugging failed.", fg=typer.colors.YELLOW)
118
+
119
+
120
# Allow running this module directly, outside the installed `smart-assist` CLI.
if __name__ == "__main__":
    app()
@@ -0,0 +1,44 @@
1
"""CLI command that asks a local Ollama model to explain a code file."""
import os
import subprocess
import typer

# Typer sub-application exposing the `explain` command; the function is
# also registered on the root CLI app in cli.py.
app = typer.Typer()
6
+
7
@app.command()
def explain(file: str, model: str = "llama3.2:1b"):
    """Generate explanation for the code file.

    Writes the model's explanation to ``<name>_explanation.txt`` next to
    the input file. Exits with status 1 when Ollama or file handling fails.
    """
    try:
        with open(file, 'r', encoding='utf-8') as f:
            code = f.read()

        prompt = f"""Please explain the following code in detail.
Describe its purpose, main functions, and key components.
Be concise but thorough. Here's the code:

{code}
"""

        process = subprocess.Popen(
            ["ollama", "run", model],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )

        stdout, stderr = process.communicate(input=prompt.encode('utf-8'))
        # Model output is not guaranteed to be clean UTF-8; never crash
        # while decoding it (matches debug.py / optimize.py behavior).
        stdout = stdout.decode('utf-8', errors='replace')
        stderr = stderr.decode('utf-8', errors='replace')

        if process.returncode != 0:
            typer.echo(f"Error: {stderr}")
            raise typer.Exit(1)

        output_file = os.path.splitext(file)[0] + "_explanation.txt"
        with open(output_file, 'w', encoding='utf-8') as out:
            out.write(stdout)

        typer.echo(f"Explanation saved to {output_file}")

    except typer.Exit:
        # Deliberate exit above — the broad handler below used to catch
        # it and print a bogus "Exception: 1" before re-exiting.
        raise
    except Exception as e:
        typer.echo(f"Exception: {e}")
        raise typer.Exit(1)
@@ -0,0 +1,122 @@
1
#!/usr/bin/env python3
"""CLI command that asks a local Ollama model for optimization advice."""
import subprocess
from pathlib import Path
from typing import Optional
import typer

# Initialize Typer app; `optimize` below is also registered on the root
# CLI app in cli.py.
app = typer.Typer()
9
+
10
+
11
+ # ----------------------------------
12
+ # Core Ollama Runner
13
+ # ----------------------------------
14
def run_ollama(prompt: str, model_name: str) -> Optional[str]:
    """
    Run Ollama with the given prompt and return the output.

    Returns the model's decoded stdout, or None when the process exits
    non-zero or cannot be started (errors are echoed to stderr, not raised).
    """
    try:
        process = subprocess.Popen(
            ["ollama", "run", model_name],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        stdout, stderr = process.communicate(input=prompt.encode("utf-8"))

        if process.returncode != 0:
            # stderr may contain non-UTF-8 bytes; a strict decode() here
            # could raise UnicodeDecodeError while reporting the error.
            typer.echo(f"[ERROR] Ollama error:\n{stderr.decode(errors='replace')}", err=True)
            return None

        return stdout.decode("utf-8", errors="replace")

    except Exception as e:
        typer.echo(f"[EXCEPTION] {str(e)}", err=True)
        return None
37
+
38
+
39
+ # ----------------------------------
40
+ # Optimization Logic
41
+ # ----------------------------------
42
def generate_optimization(file_path: Path, model_name: str) -> Optional[str]:
    """
    Analyze and optimize the given code.

    Reads *file_path* as UTF-8, builds an optimization-focused prompt and
    sends it to Ollama via run_ollama(). Returns the model's response
    text, or None when reading the file or running the model fails
    (the error is echoed to stderr, not raised).
    """

    try:
        typer.echo(f"Reading file: {file_path}")
        code_content = file_path.read_text(encoding="utf-8")

        prompt = f"""You are a senior performance engineer and algorithm expert.

Analyze the following code strictly from an optimization perspective.

Your tasks:

1. Identify inefficiencies in time complexity.
2. Identify memory inefficiencies.
3. Suggest better algorithms if applicable.
4. Suggest better data structures.
5. Suggest more Pythonic or cleaner implementations.
6. Suggest design improvements if relevant.
7. Compare time and space complexity (before vs after).
8. Provide a fully optimized version of the code.

Do NOT explain basic functionality unless necessary.
Focus only on performance, scalability, and clean design.

Here is the code:

{code_content}
"""

        typer.echo(f"Optimizing using model: {model_name}...")
        return run_ollama(prompt, model_name)

    except Exception as e:
        typer.echo(f"[EXCEPTION] {str(e)}", err=True)
        return None
80
+
81
+
82
def save_output(file_path: Path, content: str) -> Path:
    """Write *content* beside *file_path* as ``<stem>_optimized.txt``.

    Returns the path of the file that was written.
    """
    target = file_path.with_name(f"{file_path.stem}_optimized.txt")
    target.write_text(content, encoding="utf-8")
    typer.echo(f"[✓] Optimization saved to: {target}")
    return target
91
+
92
+
93
+ # ----------------------------------
94
+ # CLI Command
95
+ # ----------------------------------
96
@app.command()
def optimize(
    file: str = typer.Argument(..., help="The code file to optimize"),
    model: str = typer.Option("llama3.2:1b", help="Ollama model to use"),
):
    """
    Optimize a code file by suggesting better algorithms,
    data structures, and performance improvements.

    On success the suggestions are saved next to the input file as
    ``<name>_optimized.txt``; on failure the command exits with status 1.
    """

    resolved_path = Path(file).resolve()

    if not resolved_path.is_file():
        typer.echo(f"[ERROR] File not found: {resolved_path}", err=True)
        raise typer.Exit(1)

    optimization = generate_optimization(resolved_path, model)

    if optimization:
        save_output(resolved_path, optimization)
    else:
        # A failed model run previously exited 0, hiding the failure from
        # scripts and CI; surface it through the exit status instead.
        raise typer.Exit(1)
116
+
117
+
118
# ----------------------------------
# Entry Point
# ----------------------------------
# Allow running this module directly, outside the installed `smart-assist` CLI.
if __name__ == "__main__":
    app()
@@ -0,0 +1,122 @@
1
#!/usr/bin/env python3
"""Edge-case test generator command backed by a local Ollama model."""
import os
import subprocess
import typer
from typing import Optional

# Typer sub-application; `generate` below is registered on the root CLI
# app in cli.py under the command name `test`.
app = typer.Typer(help="Edge Case Generator using Ollama")
8
+
9
def generate_edge_cases(file_path: str, model_name: str = "llama3.2:1b") -> Optional[str]:
    """
    Generate comprehensive edge test cases using Ollama.

    Reads *file_path* and prompts the model for an edge-case analysis.

    Returns:
        The model's raw response text, or None when the Ollama run fails.

    Raises:
        typer.Exit: with status 1 when *file_path* does not exist.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            code_content = file.read()

        prompt = f"""Analyze the following code and generate comprehensive edge test cases.
For each test case, include:
1. Description of what edge case it tests
2. Input values that would trigger this edge case
3. Expected output/behavior
4. Why this case is important to test

Focus on:
- Boundary conditions
- Invalid inputs
- Unusual scenarios
- Race conditions (if applicable)
- Memory edge cases
- Error handling paths

Format each test case clearly with numbered sections.
Here's the code:

{code_content}
"""
        # typer.echo's `color` parameter is a bool (force/disable styling),
        # not a color name, and Rich markup like [bold] is printed
        # literally — use secho with a foreground color instead.
        typer.secho(f"🔍 Generating edge cases for {file_path}...", fg=typer.colors.BLUE)

        process = subprocess.Popen(
            ["ollama", "run", model_name],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            encoding='utf-8'
        )

        stdout, stderr = process.communicate(input=prompt)

        if process.returncode != 0:
            typer.secho(f"❌ Error generating edge cases:\n{stderr}", fg=typer.colors.RED, err=True)
            return None

        return stdout

    except FileNotFoundError:
        typer.secho(f"❌ Error: File '{file_path}' not found", fg=typer.colors.RED, err=True)
        raise typer.Exit(1)
    except Exception as e:
        typer.secho(f"❌ An error occurred: {str(e)}", fg=typer.colors.RED, err=True)
        return None
62
+
63
def save_edge_cases(file_path: str, edge_cases: str) -> str:
    """Save edge cases to a Markdown file next to the analyzed file.

    Prepends a heading naming the analyzed file and returns the path of
    the ``<name>_edge_cases.md`` file written.
    """
    base_name = os.path.splitext(file_path)[0]
    output_file = f"{base_name}_edge_cases.md"

    with open(output_file, 'w', encoding='utf-8') as file:
        file.write(f"# Edge Case Analysis for {os.path.basename(file_path)}\n\n")
        file.write(edge_cases)

    # Rich markup tags like [green] are not interpreted by typer.echo and
    # were printed literally; secho with fg= gives the intended color.
    typer.secho(f"✅ Edge cases saved to {output_file}", fg=typer.colors.GREEN)
    return output_file
74
+
75
@app.command()
def generate(
    file: str = typer.Argument(..., help="Path to the code file to analyze"),
    model: str = typer.Option(
        "llama3.2:1b",
        "--model",
        help="Ollama model to use",
        show_default=True
    ),
    show: bool = typer.Option(
        False,
        "--show",
        help="Display edge cases in console",
        show_default=True
    ),
    markdown: bool = typer.Option(
        True,
        # The previous single name "--no-markdown" defined a flag that SET
        # the value to True, so Markdown output could never be disabled
        # and the .txt branch below was unreachable. The paired form makes
        # --no-markdown actually pass False.
        "--markdown/--no-markdown",
        help="Write Markdown output (use --no-markdown for plain text)",
        show_default=True
    )
):
    """
    Generate comprehensive edge test cases for code analysis.

    Features:
    - Boundary condition testing
    - Invalid input scenarios
    - Unusual usage patterns
    - Detailed explanations for each case
    """
    edge_cases = generate_edge_cases(file, model)

    if edge_cases:
        if not markdown:
            # Plain-text output requested via --no-markdown.
            base_name = os.path.splitext(file)[0]
            output_file = f"{base_name}_edge_cases.txt"
            with open(output_file, 'w', encoding='utf-8') as f:
                f.write(edge_cases)
        else:
            output_file = save_edge_cases(file, edge_cases)

        if show:
            # secho with fg= instead of echo(color="cyan"): `color` is a
            # bool toggle, not a color name.
            typer.secho("\n🧪 Generated Edge Cases:", fg=typer.colors.CYAN)
            typer.echo(edge_cases)
120
+
121
# Allow running this module directly, outside the installed `smart-assist` CLI.
if __name__ == "__main__":
    app()
@@ -0,0 +1,41 @@
1
+ Metadata-Version: 2.4
2
+ Name: smart-code-assistant
3
+ Version: 0.1.0
4
+ Summary: A CLI tool to explain, debug, test and optimize code using LLM
5
+ Author-email: Deepak Kambala <deepakkambala21@gmail.com>
6
+ Requires-Python: >=3.8
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: typer
9
+ Requires-Dist: ollama
10
+
11
+ # Smart Code Assistant CLI
12
+
13
+ ## Short Description
14
+ Smart Code Assistant CLI helps beginners debug, optimize, explain, and generate tests for Python code instantly, powered by an offline **Llama 3.2 1B** model served through Ollama. It keeps developers focused by providing AI-powered solutions directly in the project directory.
15
+
16
+ ## Features
17
+ - **Code Debugger** – Automatically detects issues in your code and generates a text file with suggestions.
18
+ - **Code Optimizer** – Analyzes your code and suggests more efficient alternatives in a separate file.
19
+ - **Code Explainer** – Provides clear explanations of your code logic up to a specific point.
20
+ - **Test Generator** – Generates sample test cases to validate correctness and handle edge cases.
21
+ - **Offline Support** – Powered by Ollama 3.21B, works entirely offline without internet dependency.
22
+ - **Fast Execution** – Commands execute in 2–3 seconds (network optional) for instant feedback.
23
+
24
+ ## How It Works
25
+ 1. Execute a custom command (e.g., `debug`) in your terminal.
26
+ 2. Your code is sent to the **Ollama LLM** for processing.
27
+ 3. The generated output (debugged code, optimized version, explanation, or tests) is saved as a text file in your project folder.
28
+ 4. No need to switch tools—stay focused and reduce attention residue.
29
+
30
+ ## Supported Commands
31
+ | Command | Description |
32
+ |----------------|-------------|
33
+ | `debug` | Debugs your code and writes suggestions to a file. |
34
+ | `optimize` | Suggests improvements for efficiency. |
35
+ | `explain` | Explains your code logic clearly. |
36
+ | `test`        | Creates sample test cases for validation. |
37
+
38
+ ## Tech Stack
39
+ - Python
40
+ - Ollama 3.21B LLM (offline)
41
+ - Windows PowerShell (custom CLI commands)
@@ -0,0 +1,14 @@
1
+ README.md
2
+ pyproject.toml
3
+ smart_code_assistant/__init__.py
4
+ smart_code_assistant/cli.py
5
+ smart_code_assistant/debug.py
6
+ smart_code_assistant/explainer.py
7
+ smart_code_assistant/optimize.py
8
+ smart_code_assistant/test.py
9
+ smart_code_assistant.egg-info/PKG-INFO
10
+ smart_code_assistant.egg-info/SOURCES.txt
11
+ smart_code_assistant.egg-info/dependency_links.txt
12
+ smart_code_assistant.egg-info/entry_points.txt
13
+ smart_code_assistant.egg-info/requires.txt
14
+ smart_code_assistant.egg-info/top_level.txt
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ smart-assist = smart_code_assistant.cli:app
@@ -0,0 +1 @@
1
+ smart_code_assistant