nlos 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,206 @@
1
#!/usr/bin/env bash
# kernel-boot-lm-studio.sh - Boot NL-OS kernel via LM Studio
#
# Generates a system prompt file for import into LM Studio.
# Copy the output to LM Studio's System Prompt field or use the API.

set -euo pipefail

# Print CLI help. A heredoc replaces the original `head -22 "$0" | tail -17`,
# which silently printed the wrong lines whenever the header comment changed.
usage() {
  cat <<'EOF'
Usage: ./scripts/kernel-boot-lm-studio.sh [--full] [--output FILE] [--json] [--api]

Options:
  --full           Load full tier including personalities and command map
  --output FILE    Output file (default: /tmp/capturebox-lm-studio-prompt.txt)
  --json           Output as JSON for LM Studio API (OpenAI-compatible format)
  --api            Send directly to LM Studio local API (http://localhost:1234)
  --help, -h       Show this help message

Examples:
  ./scripts/kernel-boot-lm-studio.sh          # Generate system prompt
  ./scripts/kernel-boot-lm-studio.sh --full   # Full kernel context
  ./scripts/kernel-boot-lm-studio.sh --json   # JSON format for API
  ./scripts/kernel-boot-lm-studio.sh --api    # Direct API call
EOF
}

# Resolve capturebox root directory (the parent of this script's directory).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CAPTUREBOX_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Defaults
FULL_BOOT=false
OUTPUT_FILE="/tmp/capturebox-lm-studio-prompt.txt"
JSON_OUTPUT=false
API_MODE=false
LM_STUDIO_URL="http://localhost:1234/v1/chat/completions"

# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Parse arguments
while [[ $# -gt 0 ]]; do
  case $1 in
    --full)
      FULL_BOOT=true
      shift
      ;;
    --output)
      # Validate the value exists: under `set -u` a bare "$2" would abort
      # with an unhelpful "unbound variable" error instead of a usage message.
      if [[ $# -lt 2 ]]; then
        echo -e "${RED}--output requires a FILE argument${NC}" >&2
        exit 1
      fi
      OUTPUT_FILE="$2"
      shift 2
      ;;
    --json)
      JSON_OUTPUT=true
      shift
      ;;
    --api)
      API_MODE=true
      JSON_OUTPUT=true  # the API always takes a JSON payload
      shift
      ;;
    --help|-h)
      usage
      exit 0
      ;;
    *)
      echo -e "${RED}Unknown option: $1${NC}" >&2
      echo "Use --help for usage information" >&2
      exit 1
      ;;
  esac
done

# Verify required files exist before interpolating them into the prompt.
echo -e "${BLUE}Verifying kernel files...${NC}"

MANDATORY_FILES=(
  "$CAPTUREBOX_ROOT/memory.md"
  "$CAPTUREBOX_ROOT/AGENTS.md"
  "$CAPTUREBOX_ROOT/axioms.yaml"
)

for file in "${MANDATORY_FILES[@]}"; do
  if [[ ! -f "$file" ]]; then
    echo -e "${RED}CRITICAL: Missing mandatory file: $file${NC}" >&2
    exit 1
  fi
done

# Build kernel payload: a boot preamble followed by each kernel file under
# a labelled markdown heading, separated by horizontal rules.
echo -e "${BLUE}Building kernel payload for LM Studio...${NC}"

SYSTEM_PROMPT="You are booting into Capturebox NL-OS. The following kernel context defines your operational parameters. Read and internalize these instructions before responding.

After processing this context, acknowledge with: \"Kernel loaded. Ready for capturebox operations.\"

---

# Capturebox NL-OS Kernel Context

## memory.md (Behavioral Directives)

$(cat "$CAPTUREBOX_ROOT/memory.md")

---

## AGENTS.md (Hard Invariants)

$(cat "$CAPTUREBOX_ROOT/AGENTS.md")

---

## axioms.yaml (Canonical Definitions)

$(cat "$CAPTUREBOX_ROOT/axioms.yaml")
"

if [[ "$FULL_BOOT" == true ]]; then
  echo -e "${BLUE}Including lazy tier files...${NC}"

  SYSTEM_PROMPT+="

---

## personalities.md (Voice Presets)

$(cat "$CAPTUREBOX_ROOT/personalities.md")

---

## COMMAND-MAP.md (Command Registry)

$(cat "$CAPTUREBOX_ROOT/.cursor/commands/COMMAND-MAP.md")
"
fi

# Approximate token count (rough heuristic: ~4 characters per token).
CHAR_COUNT=${#SYSTEM_PROMPT}
TOKEN_ESTIMATE=$((CHAR_COUNT / 4))

echo -e "${GREEN}Kernel payload built: ~$TOKEN_ESTIMATE tokens${NC}"
echo -e "${BLUE}Tier: $(if [[ "$FULL_BOOT" == true ]]; then echo "FULL"; else echo "MANDATORY"; fi)${NC}"

# Output format
if [[ "$JSON_OUTPUT" == true ]]; then
  # JSON escaping is delegated to python3; fail early with a clear message
  # rather than mid-pipeline if it is not installed.
  if ! command -v python3 >/dev/null 2>&1; then
    echo -e "${RED}Error: python3 is required for --json/--api output${NC}" >&2
    exit 1
  fi

  # Escape the system prompt for JSON. printf '%s' (not echo) avoids adding
  # a spurious trailing newline to the escaped prompt and sidesteps echo's
  # option/backslash pitfalls.
  ESCAPED_PROMPT=$(printf '%s' "$SYSTEM_PROMPT" | python3 -c 'import json,sys; print(json.dumps(sys.stdin.read()))')

  JSON_PAYLOAD="{
  \"model\": \"local-model\",
  \"messages\": [
    {
      \"role\": \"system\",
      \"content\": $ESCAPED_PROMPT
    },
    {
      \"role\": \"user\",
      \"content\": \"Please acknowledge that you have loaded the Capturebox NL-OS kernel.\"
    }
  ],
  \"temperature\": 0.7,
  \"max_tokens\": -1,
  \"stream\": false
}"

  if [[ "$API_MODE" == true ]]; then
    echo -e "${YELLOW}Sending to LM Studio API...${NC}"

    # Reachability probe only: we just need the TCP connect to succeed;
    # the endpoint will reject a body-less GET, which is fine.
    if ! curl -s --connect-timeout 2 "$LM_STUDIO_URL" > /dev/null 2>&1; then
      echo -e "${RED}Error: LM Studio API not reachable at $LM_STUDIO_URL${NC}" >&2
      echo "Make sure LM Studio is running with 'Start Server' enabled" >&2
      exit 1
    fi

    # Send request
    RESPONSE=$(curl -s "$LM_STUDIO_URL" \
      -H "Content-Type: application/json" \
      -d "$JSON_PAYLOAD")

    echo -e "${GREEN}Response from LM Studio:${NC}"
    printf '%s' "$RESPONSE" | python3 -c 'import json,sys; r=json.load(sys.stdin); print(r.get("choices",[{}])[0].get("message",{}).get("content","No response"))'
  else
    printf '%s\n' "$JSON_PAYLOAD" > "$OUTPUT_FILE"
    echo -e "${GREEN}JSON payload saved to: $OUTPUT_FILE${NC}"
    echo ""
    echo -e "${YELLOW}To use with LM Studio API:${NC}"
    echo "  curl http://localhost:1234/v1/chat/completions \\"
    echo "    -H 'Content-Type: application/json' \\"
    echo "    -d @$OUTPUT_FILE"
  fi
else
  # Plain text output
  printf '%s\n' "$SYSTEM_PROMPT" > "$OUTPUT_FILE"
  echo -e "${GREEN}System prompt saved to: $OUTPUT_FILE${NC}"
  echo ""
  echo -e "${YELLOW}To use in LM Studio:${NC}"
  echo "1. Open LM Studio"
  echo "2. Load a model"
  echo "3. Click 'System Prompt' in the chat settings"
  echo "4. Paste the contents of: $OUTPUT_FILE"
  echo "5. Start chatting - the model will be in NL-OS mode"
  echo ""
  echo "Or copy to clipboard (macOS):"
  echo "  cat $OUTPUT_FILE | pbcopy"
fi
@@ -0,0 +1,214 @@
1
#!/usr/bin/env bash
# kernel-boot-ollama.sh - Boot NL-OS kernel via Ollama
#
# Generates a system prompt from the kernel files and launches an interactive
# Ollama session. The model "boots" into Capturebox NL-OS mode with full
# kernel context.

set -euo pipefail

# Print CLI help. A heredoc replaces the original `head -30 "$0" | tail -25`,
# which silently printed the wrong lines whenever the header comment changed.
usage() {
  cat <<'EOF'
Usage: ./scripts/kernel-boot-ollama.sh [--model MODEL] [--full] [--profile PROFILE] [--dry-run]

Options:
  --model MODEL    Specify model (default: qwen2.5:3b from model-catalog.yaml)
  --full           Load full tier including personalities and command map
  --profile PROF   Use operational profile: speed, balanced, quality, memory_constrained
  --dry-run        Print system prompt without launching Ollama
  --help, -h       Show this help message

Examples:
  ./scripts/kernel-boot-ollama.sh                       # Boot with default model
  ./scripts/kernel-boot-ollama.sh --model llama3.1:8b   # Boot with specific model
  ./scripts/kernel-boot-ollama.sh --full                # Load full kernel context
  ./scripts/kernel-boot-ollama.sh --profile quality     # Use quality profile
  ./scripts/kernel-boot-ollama.sh --dry-run             # Preview system prompt
EOF
}

# Resolve capturebox root directory (the parent of this script's directory).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CAPTUREBOX_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Defaults
MODEL="qwen2.5:3b"
FULL_BOOT=false
DRY_RUN=false
PROFILE=""

# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Parse arguments
while [[ $# -gt 0 ]]; do
  case $1 in
    --model)
      # Validate the value exists: under `set -u` a bare "$2" would abort
      # with an unhelpful "unbound variable" error instead of a usage message.
      if [[ $# -lt 2 ]]; then
        echo -e "${RED}--model requires a MODEL argument${NC}" >&2
        exit 1
      fi
      MODEL="$2"
      shift 2
      ;;
    --full)
      FULL_BOOT=true
      shift
      ;;
    --profile)
      if [[ $# -lt 2 ]]; then
        echo -e "${RED}--profile requires a PROFILE argument${NC}" >&2
        exit 1
      fi
      PROFILE="$2"
      shift 2
      ;;
    --dry-run)
      DRY_RUN=true
      shift
      ;;
    --help|-h)
      usage
      exit 0
      ;;
    *)
      echo -e "${RED}Unknown option: $1${NC}" >&2
      echo "Use --help for usage information" >&2
      exit 1
      ;;
  esac
done

# A profile, when given, overrides any --model choice.
if [[ -n "$PROFILE" ]]; then
  case $PROFILE in
    speed)
      MODEL="qwen2.5:3b"
      ;;
    balanced)
      MODEL="mistral:7b"
      ;;
    quality)
      MODEL="llama3.1:8b"
      ;;
    memory_constrained)
      MODEL="qwen2.5:3b"
      ;;
    *)
      echo -e "${RED}Unknown profile: $PROFILE${NC}" >&2
      echo "Valid profiles: speed, balanced, quality, memory_constrained" >&2
      exit 1
      ;;
  esac
fi

# Verify required files exist before interpolating them into the prompt.
echo -e "${BLUE}Verifying kernel files...${NC}"

MANDATORY_FILES=(
  "$CAPTUREBOX_ROOT/memory.md"
  "$CAPTUREBOX_ROOT/AGENTS.md"
  "$CAPTUREBOX_ROOT/axioms.yaml"
)

LAZY_FILES=(
  "$CAPTUREBOX_ROOT/personalities.md"
  "$CAPTUREBOX_ROOT/.cursor/commands/COMMAND-MAP.md"
)

for file in "${MANDATORY_FILES[@]}"; do
  if [[ ! -f "$file" ]]; then
    echo -e "${RED}CRITICAL: Missing mandatory file: $file${NC}" >&2
    exit 1
  fi
done

# With --full, the lazy tier becomes mandatory too. Previously LAZY_FILES
# was declared but never checked, so a missing file surfaced only as a raw
# `cat` error in the middle of the payload build.
if [[ "$FULL_BOOT" == true ]]; then
  for file in "${LAZY_FILES[@]}"; do
    if [[ ! -f "$file" ]]; then
      echo -e "${RED}CRITICAL: Missing lazy tier file: $file${NC}" >&2
      exit 1
    fi
  done
fi

# Build kernel payload: a boot preamble followed by each kernel file under
# a labelled markdown heading, separated by horizontal rules.
echo -e "${BLUE}Building kernel payload...${NC}"

PAYLOAD="# Capturebox NL-OS Kernel Context

You are booting into Capturebox NL-OS. The following kernel context defines your operational parameters. Read and internalize these instructions before responding.

After processing this context, acknowledge with: \"Kernel loaded. Ready for capturebox operations.\"

---

## memory.md (Behavioral Directives)

$(cat "$CAPTUREBOX_ROOT/memory.md")

---

## AGENTS.md (Hard Invariants)

$(cat "$CAPTUREBOX_ROOT/AGENTS.md")

---

## axioms.yaml (Canonical Definitions)

$(cat "$CAPTUREBOX_ROOT/axioms.yaml")
"

if [[ "$FULL_BOOT" == true ]]; then
  echo -e "${BLUE}Including lazy tier files...${NC}"

  PAYLOAD+="

---

## personalities.md (Voice Presets)

$(cat "$CAPTUREBOX_ROOT/personalities.md")

---

## COMMAND-MAP.md (Command Registry)

$(cat "$CAPTUREBOX_ROOT/.cursor/commands/COMMAND-MAP.md")
"
fi

# Approximate token count (rough heuristic: ~4 characters per token).
CHAR_COUNT=${#PAYLOAD}
TOKEN_ESTIMATE=$((CHAR_COUNT / 4))

echo -e "${GREEN}Kernel payload built: ~$TOKEN_ESTIMATE tokens${NC}"

# Dry run - just print the payload
if [[ "$DRY_RUN" == true ]]; then
  echo -e "${YELLOW}=== DRY RUN: System Prompt ===${NC}"
  printf '%s\n' "$PAYLOAD"
  echo -e "${YELLOW}=== END DRY RUN ===${NC}"
  exit 0
fi

# Check that Ollama is installed and its server is running.
if ! command -v ollama &> /dev/null; then
  echo -e "${RED}Error: Ollama is not installed or not in PATH${NC}" >&2
  echo "Install Ollama from: https://ollama.ai" >&2
  exit 1
fi

if ! ollama list &> /dev/null; then
  echo -e "${RED}Error: Ollama is not running${NC}" >&2
  echo "Start Ollama with: ollama serve" >&2
  exit 1
fi

# Check if the model is available locally. The NAME column is compared as a
# literal prefix via awk; the original `grep -q "^$MODEL"` treated the model
# name as a regex, so the '.' in names like qwen2.5:3b matched any character.
if ! ollama list | awk -v m="$MODEL" 'index($1, m) == 1 { found = 1 } END { exit !found }'; then
  echo -e "${YELLOW}Model $MODEL not found locally. Pulling...${NC}"
  ollama pull "$MODEL"
fi

# Launch Ollama with kernel context
echo -e "${GREEN}Booting Capturebox NL-OS via Ollama ($MODEL)...${NC}"
echo -e "${BLUE}Tier: $(if [[ "$FULL_BOOT" == true ]]; then echo "FULL"; else echo "MANDATORY"; fi)${NC}"
echo ""

# Launch the interactive session. The payload is multi-line, so it is wrapped
# in Ollama's triple-quote multiline syntax; without it the REPL only takes
# the first line as the system prompt. (The original also created an unused
# mktemp file here; that dead code leaked a temp file and has been removed.)
ollama run "$MODEL" <<< "/set system \"\"\"$PAYLOAD\"\"\"

Acknowledge that you have loaded the Capturebox NL-OS kernel and are ready for operations."