norbok 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
1
+ .aider*
norbok-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,8 @@
1
+ Metadata-Version: 2.4
2
+ Name: norbok
3
+ Version: 0.1.0
4
+ Summary: A terminal-based AI Python tutor that teaches coding through local AI projects
5
+ Requires-Python: >=3.10
6
+ Requires-Dist: ollama
7
+ Requires-Dist: psutil
8
+ Requires-Dist: rich
norbok-0.1.0/README.md ADDED
File without changes
norbok-0.1.0/SPEC.md ADDED
@@ -0,0 +1,92 @@
1
+ # Norbok — AI Python Tutor
2
+
3
+ ## What it is
4
+ A terminal-based interactive Python tutor that teaches coding through
5
+ AI-focused projects. Runs entirely locally, no cloud, no browser,
6
+ just a terminal. The student learns Python by building real AI projects
7
+ from day one using Ollama and local models.
8
+
9
+ ## How it works
10
+ User runs `python norbok.py` (eventually `norbok` after pip install).
11
+ App starts, checks Ollama is running, launches a menu.
12
+ User picks a project, works through milestones one at a time,
13
+ and can ask Norbok for help at any point during any milestone.
14
+
15
+ ## Teaching philosophy
16
+ Project-first and AI-first. Every project uses AI in some way, even
17
+ the beginner ones. Concepts are introduced exactly when the student
18
+ needs them, not as prerequisites. A total beginner builds a working
19
+ AI chatbot on day one and feels like a genius immediately.
20
+
21
+ ## Models
22
+ - qwen2.5-coder:7b — Norbok's brain, handles all teaching,
23
+ explanations and coaching throughout the app
24
+ - dolphin-llama3 — used by the student in their projects,
25
+ not installed by Norbok. Students pull it themselves as
26
+ part of the first project milestone. Recommended for all
27
+ creative and personality-driven projects.
28
+ - Qwen — also taught to students for technical projects
29
+ like the AI code reviewer
30
+
31
+ ## Tech stack
32
+ - Python
33
+ - Rich (terminal UI and output)
34
+ - Ollama (local AI runner)
35
+ - qwen2.5-coder:7b (Norbok's brain)
36
+
37
+ ## File structure
38
+ - norbok.py — entry point, launches the app
39
+ - ollama_manager.py — Ollama connection and model management
40
+ - chat.py — chat loop and Norbok message handling
41
+ - ui.py — all Rich terminal display and menus
42
+ - projects.py — all project and milestone content
43
+ - hardware.py — hardware detection, model recommendation
44
+ - SPEC.md — this file
45
+
46
+ ## Core features
47
+ - Checks Ollama is running on launch, guides user if not
48
+ - Project selection menu organized by difficulty level
49
+ - Milestone walkthrough with Norbok coaching at every step
50
+ - Free chat mode to ask Norbok anything
51
+ - Student asks questions mid-milestone and gets instant help
52
+ - Project-first AI curriculum, beginner to advanced
53
+
54
+ ## Curriculum structure
55
+ Three difficulty levels; all projects are AI-focused.
56
+ Placeholder projects will be filled out over time.
57
+
58
+ BEGINNER:
59
+ 1. AI Chatbot with Custom Personality (fully built, first project)
60
+ - teaches: Ollama, Dolphin, system prompts, while loops, Rich
61
+ 2. AI Magic 8 Ball (placeholder)
62
+ 3. AI Joke Generator (placeholder)
63
+ 4. AI Story Starter (placeholder)
64
+ 5. AI Word Game (placeholder)
65
+
66
+ INTERMEDIATE:
67
+ 6. AI Quiz Generator (placeholder)
68
+ 7. AI Dungeon Game (placeholder)
69
+ 8. AI Movie Recommender (placeholder)
70
+ 9. AI Code Reviewer with Qwen (placeholder)
71
+
72
+ ADVANCED:
73
+ 10. AI Web Scraper with Summaries (placeholder)
74
+ 11. AI Flask Chatbot (placeholder)
75
+ 12. AI Discord Bot (placeholder)
76
+ 13. AI Flask Dashboard (placeholder)
77
+
78
+ ## First project detail
79
+ Name: AI Chatbot with Custom Personality
80
+ The student installs Dolphin, writes a script that talks to it,
81
+ adds a while loop, gives it a personality via system prompt,
82
+ lets the user pick from a personality list, then styles it with Rich.
83
+ By milestone 2 they have a working AI chatbot in 8 lines of Python.
84
+
85
+ ## Distribution (planned)
86
+ pip install norbok
87
+
88
+ ## What Norbok is NOT
89
+ - Not a web app
90
+ - Not a cloud service
91
+ - Not a censored corporate tutor
92
+ - Not a drill-first boring curriculum
File without changes
@@ -0,0 +1,7 @@
1
+ from . import ui
2
+
3
def main():
    """Console-script entry point: hand control to the Rich-based UI."""
    ui.main()


if __name__ == "__main__":
    main()
@@ -0,0 +1,84 @@
1
+ import ollama
2
+ from rich.console import Console
3
+ from rich.prompt import Prompt
4
+ from rich.panel import Panel
5
+
6
+ console = Console()
7
+
8
+
9
def stream_teacher_response(user_input):
    """Stream the teacher's reply for *user_input* to the console token-by-token.

    Streaming removes the perceived delay of waiting for a full completion.
    If the model is not present locally, it is pulled once and the request
    retried.

    Args:
        user_input: The student's question or message.

    Raises:
        RuntimeError: if the automatic model pull fails (chained to the
            underlying error).
        Exception: any other Ollama client error is re-raised unchanged.
    """
    messages = [
        {
            "role": "system",
            "content": (
                "you are a friendly, considerate web development teacher. "
                "you do simple analogies and then explain it technically. "
                "talk like you are teaching a total beginner, step by step guides. "
                "start with making a flask app webpage."
            )
        },
        {
            "role": "user",
            "content": user_input,
        },
    ]
    # Cap replies at 256 tokens so the terminal stays responsive.
    options = {
        "num_predict": 256,
        "temperature": 0.7,
    }

    def _stream():
        # Single place to build the streaming request so the retry path
        # below issues an identical call.
        return ollama.chat(
            model='qwen2.5-coder:7b',
            messages=messages,
            options=options,
            stream=True,
        )

    try:
        stream = _stream()
    except Exception as e:
        # Auto-pull the model on a "not found" error, then retry once.
        if 'model' in str(e).lower() and 'not found' in str(e).lower():
            console.print("[yellow]Model 'qwen2.5-coder:7b' not found. Pulling...[/]")
            try:
                ollama.pull('qwen2.5-coder:7b')
                console.print("[green]Pull complete. Retrying request...[/]")
                stream = _stream()
            except Exception as pull_err:
                # Chain the cause so the traceback shows why the pull failed.
                raise RuntimeError(f"Pull failed: {pull_err}") from pull_err
        else:
            raise

    console.print("[bold green]Teacher:[/bold green]")
    for chunk in stream:
        token = chunk['message']['content']
        console.print(token, end='', style="green", highlight=False)
    console.print()  # newline after the stream finishes
58
+
59
+
60
def run_free_chat():
    """Run the free-chat loop; returns when the user types 'exit' or 'quit'."""
    console.rule("[bold cyan]Free Chat with Norbok[/bold cyan]")
    console.print("Type your message ('exit' or 'quit' to return to main menu)")
    chatting = True
    while chatting:
        try:
            user_input = Prompt.ask("[bold blue]You[/bold blue]")
            if user_input.strip().lower() in ("exit", "quit"):
                console.print("[yellow]Returning to main menu...[/yellow]")
                chatting = False
            else:
                # Echo the user's message in a panel, then stream the reply.
                console.print(Panel.fit(user_input, title="You", border_style="blue"))
                stream_teacher_response(user_input)

        except KeyboardInterrupt:
            console.print("\n[yellow]Interrupted. Returning to menu.[/yellow]")
            chatting = False

        except Exception as e:
            # Stay in the loop so the user can retry or type 'exit'.
            error_msg = f"Error: {e}. Ensure Ollama is running and model is pulled."
            console.print(Panel.fit(error_msg, title="Error", border_style="red"))
@@ -0,0 +1,15 @@
1
def get_recommended_model(ram_gb=None):
    """Return the qwen2.5-coder model tag best suited to the machine's RAM.

    Args:
        ram_gb: Total system RAM in gigabytes. When None (the default,
            preserving the original no-argument behavior), RAM is detected
            with psutil; if psutil is unavailable, enough RAM for the
            default model is assumed.

    Returns:
        str: an Ollama model tag — "qwen2.5-coder:7b" (>= 16 GB),
        "qwen2.5-coder:1.5b" (>= 8 GB) or "qwen2.5-coder:0.5b" (otherwise).
    """
    if ram_gb is None:
        try:
            import psutil
            mem = psutil.virtual_memory()
            ram_gb = mem.total / (1024 ** 3)
        except ImportError:
            # If psutil isn't available, assume enough RAM for the default model
            ram_gb = 32

    if ram_gb >= 16:
        return "qwen2.5-coder:7b"
    elif ram_gb >= 8:
        return "qwen2.5-coder:1.5b"
    else:
        return "qwen2.5-coder:0.5b"
@@ -0,0 +1,4 @@
1
+ from . import ui
2
+
3
# Support `python -m norbok` by launching the terminal UI directly.
if __name__ == '__main__':
    ui.main()
@@ -0,0 +1,162 @@
1
+ import subprocess
2
+ import time
3
+ import sys
4
+ import shutil
5
+ import ollama
6
+ from rich.console import Console
7
+
8
+ console = Console()
9
+
10
+ # Will be set to True once the Ollama API becomes reachable
11
+ ollama_available = False
12
+
13
+
14
def start_ollama():
    """Ensure an Ollama server is reachable, launching `ollama serve` if needed.

    On success, sets the module-global `ollama_available` flag to True and
    warms up the teaching model via preload_model(). Failures are reported
    on the console and leave `ollama_available` False. Waits up to ~30
    seconds for a freshly launched server to answer.
    """
    global ollama_available

    # 1) Check if server is already running
    try:
        ollama.list()
        ollama_available = True
        console.print("[green]Ollama server already running.[/]")
        # Preload model to avoid first-query lag
        preload_model()
        return
    except Exception:
        # Server not reachable – try to start it
        console.print("[yellow]Ollama server not running, trying to start...[/]")

    # 2) Make sure the ollama command is available
    if shutil.which("ollama") is None:
        console.print("[red]Could not find 'ollama' command. Is Ollama installed and in your PATH?[/]")
        return

    # 3) Launch the server in the background, capturing stderr
    proc = subprocess.Popen(
        ["ollama", "serve"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.PIPE,  # capture errors so we can show them
    )
    # Give it a moment to start, then check if it terminated immediately
    time.sleep(1)
    if proc.poll() is not None:
        # The process ended very quickly – probably an error
        try:
            # Read remaining output so we can show the reason
            _, stderr = proc.communicate(timeout=2)
            err_msg = stderr.decode().strip()
        except Exception:
            err_msg = "(could not capture stderr)"

        # "address already in use" means Ollama is already running – treat as success
        if "address already in use" in err_msg.lower():
            console.print("[yellow]Port 11434 is already in use – Ollama may already be running.[/]")
            try:
                ollama.list()
            except Exception:
                console.print("[red]Could not connect to running Ollama server. Please check manually.[/]")
                return
            # Connection succeeded – Ollama is definitely running
            ollama_available = True
            console.print("[green]Ollama server is already running (port in use).[/]")
            preload_model()
            return
        else:
            console.print(f"[red]Ollama server exited immediately. Output: {err_msg}[/]")
            console.print("[yellow]Try running 'ollama serve' manually in another terminal and then restart Norbok.[/]")
            return

    # 4) Wait up to 30 seconds for the server to become ready
    ready = False
    for attempt in range(60):  # 60 * 0.5 s = 30 s
        time.sleep(0.5)
        try:
            ollama.list()
            ready = True
            break
        except Exception:
            # Only announce the wait once, on the first failed probe.
            if attempt == 0:
                console.print("[cyan]Waiting for Ollama server to start...[/]")

    if ready:
        ollama_available = True
        console.print("[green]Ollama server started.[/]")
        preload_model()
    else:
        console.print("[red]Warning: Ollama server could not be started within timeout.[/]")
        # Attempt to clean up the background process
        try:
            proc.terminate()
        except Exception:
            pass
        return
93
+
94
+
95
def preload_model():
    """Warm the teaching model so the student's first question answers fast.

    If the model is not present locally the warm-up is skipped entirely,
    so startup is never blocked by a long pull.
    """
    model_tag = 'qwen2.5-coder:7b'

    # Only warm up a model that already exists locally.
    try:
        ollama.show(model_tag)
    except Exception:
        console.print("[yellow]Model 'qwen2.5-coder:7b' not found locally, first question will trigger pull/load.[/]")
        return

    console.print("[cyan]Warming up the AI model (this may take a few seconds)...[/]")
    try:
        # A one-token request forces the weights to load; keep_alive=-1
        # pins them in memory for subsequent questions.
        ollama.chat(
            model=model_tag,
            messages=[{"role": "system", "content": "You are an AI assistant."}],
            options={"num_predict": 1, "temperature": 0.0},
            keep_alive=-1,
            stream=False,
        )
        console.print("[green]Model ready.[/]")
    except Exception as e:
        console.print(f"[yellow]Warm-up request failed, first query will load the model: {e}[/]")
116
+
117
+
118
def chat_with_model(user_input):
    """Return the teacher's complete (non-streaming) reply to *user_input*.

    If the model is not present locally, it is pulled once and the request
    retried.

    Args:
        user_input: The student's question or message.

    Returns:
        str: the assistant message content.

    Raises:
        RuntimeError: if the automatic model pull fails (chained to the
            underlying error).
        Exception: any other Ollama client error is re-raised unchanged.
    """
    messages = [
        {
            "role": "system",
            "content": (
                "you are a friendly, considerate web development teacher. "
                "you do simple analogies and then explain it technically. "
                "talk like you are teaching a total beginner, step by step guides. "
                "start with making a flask app webpage."
            )
        },
        {
            "role": "user",
            "content": user_input,
        },
    ]
    # Cap replies at 256 tokens so the terminal stays responsive.
    options = {
        "num_predict": 256,
        "temperature": 0.7,
    }

    def _request():
        # Single place to issue the call so the retry path is identical.
        response = ollama.chat(
            model='qwen2.5-coder:7b',
            messages=messages,
            options=options,
        )
        return response['message']['content']

    try:
        return _request()
    except Exception as e:
        # If the model is missing, pull it and retry once
        if 'model' in str(e).lower() and 'not found' in str(e).lower():
            console.print("[yellow]Model 'qwen2.5-coder:7b' not found. Pulling...[/]")
            try:
                ollama.pull('qwen2.5-coder:7b')
                console.print("[green]Pull complete. Retrying request...[/]")
                return _request()
            except Exception as pull_err:
                # Chain the cause so the traceback shows why the pull failed.
                raise RuntimeError(f"Pull failed: {pull_err}") from pull_err
        else:
            raise
@@ -0,0 +1,112 @@
1
+ from rich.console import Console
2
+ from rich.prompt import Prompt
3
+ from rich.panel import Panel
4
+ from . import chat
5
+
6
+ console = Console()
7
+
8
+
9
# Beginner project catalog consumed by start_project() / walkthrough_milestones().
# Each entry carries a display "name", a one-line "description", and an
# ordered "milestones" list the student works through step by step.
# NOTE(review): SPEC.md describes an AI-first curriculum (AI chatbot first),
# but these entries are plain Flask projects — confirm which catalog is intended.
BEGINNER_PROJECTS = [
    {
        "name": "Hello World Flask App",
        "description": "Create your first Flask web application.",
        "milestones": [
            "Install Flask using pip (pip install flask).",
            "Create a new Python file (app.py) and write a route that returns 'Hello, World!'.",
            "Run the app and visit http://127.0.0.1:5000 in your browser.",
            "Add a second route that displays your name.",
        ]
    },
    {
        "name": "Personal Blog",
        "description": "Build a simple blog where you can create and view posts.",
        "milestones": [
            "Set up Flask project structure: create app.py, /templates and /static folders.",
            "Create an index page template that lists blog posts (hardcoded).",
            "Add a page with a form for creating new posts.",
            "Store new posts in a global list and display them on the index page.",
            "Add the ability to delete a post.",
        ]
    },
    {
        "name": "To-Do List App",
        "description": "A classic to-do web app to learn CRUD operations.",
        "milestones": [
            "Create a Flask app with a route that renders an HTML list of tasks.",
            "Use a Python list to keep tasks in memory.",
            "Add a form on the page to add new tasks.",
            "Add a delete link to remove tasks.",
            "Bonus: mark tasks as completed.",
        ]
    },
]
43
+
44
+
45
def walkthrough_milestones(project):
    """Guide the student through a project's milestones one at a time.

    At each milestone the student may press Enter to advance, type a
    question (answered by Norbok in milestone context), or type 'quit'
    to abandon the walkthrough.
    """
    title = project["name"]
    steps = project["milestones"]
    total = len(steps)

    console.rule(f"[bold cyan]{title}[/bold cyan]")
    console.print(project["description"])

    for step_no, goal in enumerate(steps, 1):
        console.rule(f"[bold yellow]Milestone {step_no}/{total}[/bold yellow]")
        console.print(f"[bold]{goal}[/bold]")
        advancing = False
        while not advancing:
            answer = Prompt.ask(
                "[bold]You[/bold] (press Enter to mark as done, ask a question, or 'quit' to return)"
            ).strip()
            if answer.lower() == 'quit':
                return  # abandon the walkthrough entirely
            if not answer:
                advancing = True  # blank input marks this milestone done
            else:
                # Anything else is a question about the current milestone.
                contextual = f"Current milestone: {goal}\nUser question: {answer}"
                try:
                    chat.stream_teacher_response(contextual)
                except KeyboardInterrupt:
                    console.print("\n[yellow]Interrupted.[/yellow]")
                    advancing = True  # move to the next milestone after an interrupt
                except Exception as e:
                    console.print(Panel.fit(str(e), title="Error", border_style="red"))

    console.print("[green]All milestones completed! Returning to project selection.[/green]")
76
+
77
+
78
def start_project():
    """Show the beginner project menu and run the chosen project's walkthrough."""
    while True:
        console.rule("[bold cyan]Start a Project[/bold cyan]")
        for idx, proj in enumerate(BEGINNER_PROJECTS, 1):
            console.print(f"{idx}. [bold]{proj['name']}[/bold] - {proj['description']}")
        console.print("Type [bold]0[/bold] or [bold]back[/bold] to return to the main menu.")

        choice = Prompt.ask("[bold]Select a project[/bold]").strip().lower()
        if choice in ("0", "back"):
            return

        try:
            number = int(choice)
        except ValueError:
            # Not a number — fall back to case-insensitive name matching.
            selected = next(
                (p for p in BEGINNER_PROJECTS if p["name"].lower() == choice),
                None,
            )
            if selected is None:
                console.print("[red]Project not found. Please try again.[/red]")
                continue
        else:
            if not (1 <= number <= len(BEGINNER_PROJECTS)):
                console.print("[red]Invalid project number.[/red]")
                continue
            selected = BEGINNER_PROJECTS[number - 1]

        walkthrough_milestones(selected)
        # After the walkthrough returns, loop back and show the list again.
@@ -0,0 +1,52 @@
1
+ import sys
2
+ import time
3
+ from rich.console import Console
4
+ from rich.prompt import Prompt
5
+ from rich.panel import Panel
6
+ from . import ollama_manager
7
+ from . import chat
8
+ from . import projects
9
+
10
+ console = Console()
11
+
12
+
13
def main():
    """Launch Norbok: show the banner, ensure Ollama is up, run the main menu."""
    # ASCII banner shown once at startup.
    logo = (
        "███╗ ██╗ ██████╗ ██████╗ ██████╗ ██████╗ ██╗ ██╗\n"
        "████╗ ██║██╔═══██╗██╔══██╗██╔══██╗██╔═══██╗██║ ██╔╝\n"
        "██╔██╗ ██║██║ ██║██████╔╝██████╔╝██║ ██║█████╔╝ \n"
        "██║╚██╗██║██║ ██║██╔══██╗██╔══██╗██║ ██║██╔═██╗ \n"
        "██║ ╚████║╚██████╔╝██║ ██║██████╔╝╚██████╔╝██║ ██╗\n"
        "╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝╚═════╝ ╚═════╝ ╚═╝ ╚═╝\n"
        " AI WebDev Tutor"
    )
    console.print(Panel(logo, title="Welcome to Norbok", border_style="bold cyan"))

    console.print("[bold cyan]AI WebDev Tutor (terminal)[/bold cyan]")
    console.print("Ask me anything about starting your Flask app... (type 'exit' to quit)")
    ollama_manager.start_ollama()

    # Without a reachable Ollama server the tutor cannot answer anything.
    if not ollama_manager.ollama_available:
        console.print("[red]Ollama server is not available. Exiting.[/red]")
        sys.exit(1)

    # Main menu loop: dispatch on the user's selection until they quit.
    while True:
        console.rule("[bold cyan]Main Menu[/bold cyan]")
        for entry in ("1. Start a project", "2. Free chat with Norbok", "3. Quit"):
            console.print(entry)
        selection = Prompt.ask("[bold]Choose an option[/bold]").strip().lower()

        if selection in ("1", "start a project"):
            projects.start_project()
        elif selection in ("2", "free chat with norbok", "chat"):
            chat.run_free_chat()
        elif selection in ("3", "quit", "exit"):
            console.print("[bold yellow]Goodbye![/bold yellow]")
            sys.exit(0)
        else:
            console.print("[red]Invalid option, please try again.[/red]")
            time.sleep(1)
@@ -0,0 +1,29 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "norbok"
7
+ version = "0.1.0"
8
+ description = "A terminal-based AI Python tutor that teaches coding through local AI projects"
9
+ requires-python = ">=3.10"
10
+ dependencies = [
11
+ "ollama",
12
+ "rich",
13
+ "psutil",
14
+ ]
15
+
16
+ [project.scripts]
17
+ norbok = "norbok.__main__:main"
18
+
19
+ [tool.hatch.build.targets.sdist]
20
+ exclude = [
21
+ "venv/",
22
+ "venv/**",
23
+ "**/__pycache__/",
24
+ ]
25
+
26
+ # Author information placeholder – fill in when ready.
27
+ # Note: PEP 621 declares authors as an array of tables under [project], e.g.:
28
+ # authors = [{ name = "Your Name", email = "you@example.com" }]
29
+ # (a [project.authors] table section is not valid pyproject metadata)