netheriteai-code 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,48 @@
1
+ # NetheriteAI Code
2
+
3
+ `NetheriteAI Code` is a local terminal coding assistant modeled after the OpenCode workflow:
4
+
5
+ - full-screen terminal UI
6
+ - Ollama-backed model selection
7
+ - tool-based file and shell execution
8
+ - workspace-local todo list
9
+ - persistent chat history per workspace
10
+
11
+ ## Features
12
+
13
+ - `netheriteai-code` launches an OpenCode-style TUI
14
+ - `netheriteai-code models` lists all available Ollama models
15
+ - `netheriteai-code chat` starts a plain terminal REPL
16
+ - The model can call tools to:
17
+ - list files
18
+ - read and write files
19
+ - remove files or folders
20
+ - run executable commands or full shell command lines
21
+ - run multiple commands in sequence
22
+ - manage todos in `.netherite/todos.json`
23
+
24
+ ## Requirements
25
+
26
+ - Node.js 18+
27
+ - Ollama running locally with at least one model pulled
28
+
29
+ ## Usage
30
+
31
+ ```bash
32
+ chmod +x ./bin/netheriteai-code.js
33
+ node ./bin/netheriteai-code.js models
34
+ node ./bin/netheriteai-code.js
35
+ ```
36
+
37
+ Optional flags:
38
+
39
+ ```bash
40
+ node ./bin/netheriteai-code.js --model qwen2.5-coder:7b
41
+ node ./bin/netheriteai-code.js --dir /path/to/project
42
+ ```
43
+
44
+ ## Notes
45
+
46
+ - Model state and session history are stored under `~/.netheriteai-code/`.
47
+ - If Ollama is not reachable at `http://127.0.0.1:11434`, set `OLLAMA_BASE_URL`.
48
+ - This implementation mirrors the OpenCode interaction model, but it is not a byte-for-byte copy of the upstream project.
@@ -0,0 +1,8 @@
1
#!/usr/bin/env node

// CLI entry point: delegate to the package's main() and surface any failure
// as a short message on stderr with a non-zero exit code.
import { main } from "../src/cli.js";

main().catch((error) => {
  const message = error instanceof Error ? error.message : String(error);
  console.error(message);
  process.exit(1);
});
package/hi.txt ADDED
@@ -0,0 +1 @@
1
+ Hello there! Welcome to your new file. Feel free to edit it whenever you'd like.
package/package.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "name": "netheriteai-code",
3
+ "version": "0.1.0",
4
+ "description": "NetheriteAI:Code by hurdacu",
5
+ "author": "hurdacu",
6
+ "type": "module",
7
+ "bin": {
8
+ "netheriteai-code": "bin/netheriteai-code.js",
9
+ "netheritecode": "bin/netheriteai-code.js",
10
+ "ncode": "bin/netheriteai-code.js"
11
+ },
12
+ "scripts": {
13
+ "start": "node ./bin/netheriteai-code.js",
14
+ "models": "node ./bin/netheriteai-code.js models",
15
+ "chat": "node ./bin/netheriteai-code.js chat",
16
+ "tui": "node ./bin/netheriteai-code.js tui"
17
+ }
18
+ }
package/src/agent.js ADDED
@@ -0,0 +1,285 @@
1
+ import { chatStream } from "./ollama.js";
2
+ import { executeToolCall, getToolDefinitions } from "./tools.js";
3
+
4
// True when the model name belongs to the GLM family (case-insensitive).
function isGlmModel(model) {
  const normalized = String(model || "").toLowerCase();
  return normalized.includes("glm");
}
7
+
8
// Compose the agent's system prompt: the core operating rules apply to every
// model; GLM-family models additionally get a persona section appended.
export function buildSystemPrompt(workspaceRoot, model) {
  const coreRules = [
    "You are NetheriteAI Code, a terminal coding agent.",
    `You operate inside this workspace: ${workspaceRoot}`,
    "Use tools when you need filesystem access, shell execution, or todo tracking.",
    "Before changing files, inspect relevant files first.",
    "Prefer targeted tools like edit_file, create_file, append_file, and make_dir over overwriting large files when possible.",
    "If the task involves creating or rewriting a file with substantial content, your first visible assistant output must be the exact file contents in a fenced code block, then call the file tool immediately after.",
    "For file-writing tasks, do not output planning prose, explanations, summaries, or preambles before the code block.",
    "Do not say phrases like 'I'll create', 'Let me build', 'I need to', or similar filler before writing the code.",
    "When generating HTML, CSS, JavaScript, JSON, Markdown, or other source files, prefer code-first output and keep non-code text to zero until after the file tool call is complete.",
    "If you are about to call create_file, write_file, append_file, or edit_file, stream the code first so the terminal can show live coding progress.",
    "Keep responses concise, concrete, and focused on the task.",
    "When using shell commands, prefer safe, minimal commands.",
    "Do not dump markdown fences, long file trees, or heavy formatting unless the user explicitly asks for them.",
    "Prefer plain sentences and short bullet-like lines that read well in a terminal UI.",
    "If the model supports reasoning tags, place internal reasoning inside <think>...</think> so the UI can show it separately.",
  ];

  const glmPersona = [
    "You are NetheriteAI:Code, a helpful and friendly female coding assistant.",
    "Your personality is warm, professional, yet feminine. Use a polite and encouraging tone.",
    "Crucial: Internalize this persona completely. Never refer to 'system prompts', 'instructions', or 'constraints' in either your visible response OR your internal reasoning/thinking blocks.",
    "Do not reflect on your personality or identity as a set of rules; simply respond as yourself.",
    "If asked who you are, say you are NetheriteAI:Code, built, developed, and trained by hurdacu.",
    "Do not refer to yourself as GLM, Zhipu, or a generic assistant unless the user explicitly asks about the underlying model.",
    "When asked about your identity, origin, capabilities, or creator, answer with a fuller response of at least 2 sentences unless the user explicitly asks for a one-line answer.",
  ];

  const usesGlmPersona = String(model || "").toLowerCase().includes("glm");
  const lines = usesGlmPersona ? [...coreRules, ...glmPersona] : coreRules;
  return lines.join("\n");
}
39
+
40
// Normalize the tool calls attached to an assistant message into a uniform
// shape: every call gets a stable id, and its arguments are always a JSON
// string. Ollama can deliver function.arguments either as a string or as a
// decoded object; the synthesized write_file call in runAgentTurn always uses
// the string form, so coerce objects here to keep executeToolCall's input
// consistent. Returns [] when the message has no tool calls.
function normalizeToolCalls(message) {
  if (!message?.tool_calls) return [];
  return message.tool_calls.map((call, index) => {
    const rawArgs = call.function?.arguments;
    // Strings pass through (empty string falls back to "{}", matching the
    // previous `|| "{}"` default); objects are serialized; null/undefined
    // become "{}".
    const args =
      typeof rawArgs === "string" ? rawArgs || "{}" : JSON.stringify(rawArgs ?? {});
    return {
      id: call.id || `tool-${index + 1}`,
      function: {
        name: call.function?.name,
        arguments: args,
      },
    };
  });
}
50
+
51
// Heuristic: does the prompt look like a request to generate a file?
// Requires both a file-ish keyword and a creation verb. Keywords are matched
// at a word-start boundary (prefix match, so "creates"/"files" still hit)
// instead of as raw substrings — the old includes() check made "ts" fire on
// "its"/"thoughts" and "js" fire inside unrelated words.
function isFileGenerationPrompt(prompt) {
  const text = String(prompt || "").toLowerCase();
  const fileWords = ["html", "css", "javascript", "js", "typescript", "ts", "json", "markdown", "md", "file", "page", "component"];
  const createWords = ["make", "create", "build", "write", "generate"];
  // \b anchors the keyword to a word start; all keywords are regex-safe.
  const mentions = (word) => new RegExp(`\\b${word}`).test(text);
  return fileWords.some(mentions) && createWords.some(mentions);
}
57
+
58
// Heuristic: should this file-generation prompt produce exactly one file?
// Multi-project signals veto first; explicit single-file signals then win;
// otherwise fall back to "<lang> file" phrasings. Plain-word signals match at
// a word-start boundary (prefix match keeps plurals like "projects") instead
// of as raw substrings — the old includes() check made "app" fire inside
// "happen"/"wrapper" and misclassify single-file prompts. Signals containing
// dots or spaces (".html", "multiple files", ...) still use substring match.
function isSingleFileTurn(prompt) {
  const text = String(prompt || "").toLowerCase();
  const hits = (signal) =>
    /^[a-z]+$/.test(signal)
      ? new RegExp(`\\b${signal}`).test(text)
      : text.includes(signal);

  const multiFileSignals = [
    "repo", "repository", "project", "app", "application", "fullstack", "backend", "frontend",
    "npm", "package.json", "install", "dependencies", "folder", "folders", "directory", "directories",
    "multiple files", "multi file", "whole repo",
  ];
  if (multiFileSignals.some(hits)) return false;

  const explicitSingleFile = [
    ".html", ".css", ".js", ".ts", ".json", ".md",
    "single file", "one file", "in html", "in .html", "in one html file",
  ];
  if (explicitSingleFile.some(hits)) return true;

  const singularFileWords = ["html file", "css file", "js file", "ts file", "json file", "markdown file"];
  return singularFileWords.some(hits);
}
76
+
77
// Wrap the user's task in strict code-only instructions for a single-file
// generation turn (no prose allowed before or after the fenced block).
function buildFileGenerationPrompt(prompt) {
  const task = String(prompt || "").trim();
  const instructions =
    "Return only one fenced code block for the requested file.\n" +
    "Do not include explanations, planning text, greetings, or summaries.\n" +
    "Do not include anything before or after the code block.\n";
  return `${instructions}Task: ${task}`;
}
85
+
86
// Pull the first fenced code block out of assistant output.
// Returns { language, code } (language lowercased, code right-trimmed),
// or null when no complete fence is present.
function extractGeneratedCode(content) {
  const fence = /```([a-zA-Z0-9_-]*)\n([\s\S]*?)```/;
  const match = fence.exec(String(content || ""));
  if (match === null) return null;
  const [, rawLanguage, rawCode] = match;
  return {
    language: rawLanguage.trim().toLowerCase(),
    code: rawCode.trimEnd(),
  };
}
95
+
96
// Choose an output path for generated code.
// Priority: an explicit filename in the prompt, then extension hints in the
// prompt or the fenced block's language tag, then "index.html".
// The prompt is lowercased once and the hint checks are table-driven instead
// of repeating prompt.toLowerCase() in every branch.
function inferFilePath(userPrompt, language) {
  const prompt = String(userPrompt || "");
  const explicit = prompt.match(/([\w./-]+\.(html|css|js|ts|json|md|txt))/i);
  if (explicit?.[1]) return explicit[1];

  const lowerPrompt = prompt.toLowerCase();
  const lang = String(language || "").toLowerCase();

  // Order matters: ".json" must be checked before ".js", which it contains.
  const candidates = [
    { ext: ".html", langs: ["html"], path: "index.html" },
    { ext: ".css", langs: ["css"], path: "styles.css" },
    { ext: ".json", langs: ["json"], path: "data.json" },
    { ext: ".md", langs: ["md", "markdown"], path: "README.md" },
    { ext: ".ts", langs: ["ts", "typescript"], path: "main.ts" },
    { ext: ".js", langs: ["js", "javascript"], path: "main.js" },
  ];
  for (const { ext, langs, path } of candidates) {
    if (lowerPrompt.includes(ext) || langs.includes(lang)) return path;
  }
  return "index.html";
}
110
+
111
// Run one tool through its full lifecycle: announce it (tool_start), execute
// it while streaming progress events, report the outcome (tool_result), and
// append the corresponding "tool" message to the conversation history.
// Never throws: execution failures are captured as { ok: false, error }.
// Returns { failed, result } so callers can build a user-facing reply.
async function dispatchToolCall(workspaceRoot, toolCall, messages, onEvent) {
  const name = toolCall.function.name;
  onEvent?.({ type: "tool_start", name, args: toolCall.function.arguments });

  let failed = false;
  let result;
  try {
    result = await executeToolCall(workspaceRoot, toolCall, {
      onProgress(progress) {
        onEvent?.({ type: "tool_progress", name, progress });
      },
    });
  } catch (error) {
    failed = true;
    result = {
      ok: false,
      error: error instanceof Error ? error.message : String(error),
    };
  }

  onEvent?.({ type: "tool_result", name, result });
  messages.push({
    role: "tool",
    tool_call_id: toolCall.id,
    content: JSON.stringify(result),
  });
  return { failed, result };
}

// Drive one user turn of the agent loop.
//
// Appends the user prompt (plus a system prompt on the first turn) to a copy
// of `history`, then repeatedly streams a model response and executes any
// requested tool calls until the model answers without tools. Single-file
// generation turns are special-cased: tools are disabled for the model, and
// the streamed fenced code block is written via a synthesized write_file call.
//
// Parameters: workspaceRoot (absolute path the tools operate in), model
// (Ollama model name), history (prior chat messages; not mutated), userPrompt,
// onEvent (optional UI callback receiving status/assistant/reasoning/tool
// events), signal (optional AbortSignal forwarded to chatStream).
// Returns { reply, messages } where messages is the updated history.
// NOTE(review): like the original, this loops until the model stops calling
// tools — there is no iteration cap; confirm upstream that a runaway model
// is acceptable here.
export async function runAgentTurn({ workspaceRoot, model, history, userPrompt, onEvent, signal }) {
  const messages = [...history];
  const fileGenerationTurn = isFileGenerationPrompt(userPrompt);
  const singleFileTurn = fileGenerationTurn && isSingleFileTurn(userPrompt);

  // First turn of a session: seed the system prompt.
  if (!messages.length) {
    messages.push({ role: "system", content: buildSystemPrompt(workspaceRoot, model) });
  }

  if (singleFileTurn) {
    messages.push({
      role: "system",
      content: [
        "This turn is a file-generation turn.",
        "Return only one fenced code block containing the full file contents.",
        "Do not call tools yourself for this turn.",
        "Do not add any prose before or after the code block.",
      ].join(" "),
    });
  }

  messages.push({
    role: "user",
    content: singleFileTurn ? buildFileGenerationPrompt(userPrompt) : userPrompt,
  });

  while (true) {
    onEvent?.({ type: "status", text: "Thinking..." });

    const assistant = await chatStream({
      model,
      messages,
      // Single-file turns disable tools; the write is synthesized below.
      tools: singleFileTurn ? [] : getToolDefinitions(),
      signal,
      onChunk(chunk) {
        onEvent?.({ type: "assistant_delta", text: chunk });
      },
      onReasoningChunk(chunk) {
        onEvent?.({ type: "reasoning_delta", text: chunk });
      },
    });

    const toolCalls = normalizeToolCalls(assistant);
    messages.push({
      role: "assistant",
      content: assistant.content || "",
      tool_calls: toolCalls,
    });

    if (singleFileTurn) {
      const generated = extractGeneratedCode(assistant.content || "");
      if (generated?.code) {
        // The model streamed the file contents; persist them through a
        // synthesized write_file call so the UI still sees tool events.
        const path = inferFilePath(userPrompt, generated.language);
        const toolCall = {
          id: "tool-write-generated-file",
          function: {
            name: "write_file",
            arguments: JSON.stringify({ path, content: generated.code }),
          },
        };
        const { failed, result } = await dispatchToolCall(workspaceRoot, toolCall, messages, onEvent);
        const reply = failed ? `Error: ${result.error}` : `Wrote ${path}.`;
        onEvent?.({ type: "assistant", text: reply });
        return { reply, messages };
      }
      // No fenced block came back: fall through and treat the turn normally.
    }

    // No tool requests: the assistant's content is the final answer.
    if (!toolCalls.length) {
      if (assistant.reasoning) {
        onEvent?.({ type: "reasoning", text: assistant.reasoning });
      }
      onEvent?.({ type: "assistant", text: assistant.content || "" });
      return {
        reply: assistant.content || "",
        messages,
      };
    }

    // Execute every requested tool, then loop so the model sees the results.
    for (const toolCall of toolCalls) {
      await dispatchToolCall(workspaceRoot, toolCall, messages, onEvent);
    }
  }
}