gwendoline 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +37 -0
  2. package/build/index.js +69 -0
  3. package/package.json +39 -0
package/README.md ADDED
@@ -0,0 +1,37 @@
+ # Gwendoline
+
+ Gwendoline is a CLI-based tool for interacting with language models directly from your terminal, allowing you to send prompts and receive responses via standard input and output.
+
+ It uses Ollama with the following default models:
+
+ - `qwen3:4b` for local usage
+ - `gpt-oss:120b-cloud` for usage with a cloud model
+
+ ## Dependencies
+
+ Gwendoline depends on [Ollama](https://ollama.com/) as a local runtime for language models. By default, it uses the `qwen3:4b` model for local processing via Ollama and the `gpt-oss:120b-cloud` model for cloud-based requests. Both models are preconfigured but need to be installed with Ollama first.
+
+ An **alternative model** can be specified as a CLI parameter to override the defaults.
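Assuming a standard Ollama install with the `ollama` CLI on your PATH, the preconfigured models can typically be made available like this (the cloud model additionally requires an authenticated Ollama account; the exact sign-in step may vary by Ollama version):

```sh
# Local default model
ollama pull qwen3:4b

# Cloud default model -- requires an Ollama account; `ollama signin` is the
# sign-in command in recent Ollama releases and may differ in older ones
ollama signin
ollama pull gpt-oss:120b-cloud
```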
+
+ ## Installation
+
+ ```sh
+ npm install -g gwendoline
+ ```
+
+ ## Usage
+
+ Run it from the command line as `gwendoline` or `gwen`.
+
+ Some examples of how to run it:
+
+ ```sh
+ gwen
+
+ echo "Why is the sky blue?" | gwen
+
+ cat prompt.md | gwen
+ cat prompt.md | gwen --cloud
+ cat prompt.md | gwen --model "gpt-oss:120b-cloud"
+ cat prompt.md | gwen --model "gpt-oss:120b-cloud" > output.md
+ ```
package/build/index.js ADDED
@@ -0,0 +1,69 @@
+ #!/usr/bin/env node
+ import { argv } from "node:process";
+ import readline from "node:readline";
+ import { Ollama } from "ollama";
+
+ const LLM_MODEL_LOCAL = "qwen3:4b";
+ const LLM_MODEL_CLOUD = "gpt-oss:120b-cloud";
+ const isCloudLLM = argv.includes("--cloud");
+ const hasLLMSpecified = argv.includes("--model");
+
+ // Pick up an explicit model name passed via `--model <name>`.
+ let customModelName = "";
+ if (hasLLMSpecified) {
+   argv.forEach((val, index) => {
+     if (val === "--model") {
+       customModelName = argv[index + 1];
+       console.log(`Using model ${customModelName}`);
+     }
+   });
+ }
+
+ async function main() {
+   // Collect piped input from stdin (e.g. `cat prompt.md | gwen`).
+   let input = "";
+   process.stdin.setEncoding("utf8");
+   process.stdin.on("data", (chunk) => {
+     input += chunk;
+   });
+   process.stdin.on("end", async () => {
+     const content = await runLLMRequest(input.trim());
+     process.stdout.write(content);
+   });
+   // Without piped input, ask for a single prompt interactively.
+   if (process.stdin.isTTY) {
+     const rl = readline.createInterface({
+       input: process.stdin,
+       output: process.stdout,
+     });
+     rl.question(`Type your prompt!\n\n`, async (prompt) => {
+       rl.close();
+       if (prompt === "/bye") {
+         process.stdout.write("Bye!");
+         process.exit(0);
+       }
+       const content = await runLLMRequest(prompt);
+       process.stdout.write(content);
+       process.exit(0);
+     });
+   }
+ }
+
+ main().catch((error) => {
+   console.error("Fatal error in main():", error);
+   process.exit(1);
+ });
+
+ async function runLLMRequest(prompt = "") {
+   // Use the cloud default when --cloud was passed, otherwise the local default;
+   // an explicit --model value overrides both.
+   const LLM_MODEL = isCloudLLM ? LLM_MODEL_CLOUD : LLM_MODEL_LOCAL;
+   try {
+     const ollama = new Ollama({
+       // host: "http://127.0.0.1:11434",
+       headers: {
+         // Authorization: "Bearer <api key>",
+         // "X-Custom-Header": "custom-value",
+         "User-Agent": "Gwendoline/0.0",
+       },
+     });
+     const response = await ollama.chat({
+       model: customModelName || LLM_MODEL,
+       messages: [{ role: "user", content: prompt }],
+     });
+     return response.message.content;
+   } catch (e) {
+     return `Error: ${e}`;
+   }
+ }
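Based on the interactive branch in `main()` above, invoking the CLI without piped input asks for a single prompt and writes the model's reply to stdout; a session might look roughly like this (reply abbreviated):

```sh
$ gwen
Type your prompt!

Why is the sky blue?
# ...model reply is printed here; typing /bye instead prints "Bye!" and exits
```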
package/package.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "name": "gwendoline",
+   "version": "0.1.0",
+   "description": "",
+   "type": "module",
+   "bin": {
+     "gwen": "./build/index.js",
+     "gwendoline": "./build/index.js"
+   },
+   "engines": {
+     "node": ">=21"
+   },
+   "scripts": {
+     "preinstall": "echo \"Make sure to install Ollama first!\"",
+     "build": "tsc && chmod 755 build/index.js",
+     "test": "echo \"Error: no test specified\" && exit 1",
+     "build:watch": "tsc --watch"
+   },
+   "files": [
+     "build"
+   ],
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/marcusbaer/gwendoline.git"
+   },
+   "author": "Marcus Baer",
+   "license": "ISC",
+   "bugs": {
+     "url": "https://github.com/marcusbaer/gwendoline/issues"
+   },
+   "homepage": "https://github.com/marcusbaer/gwendoline#readme",
+   "dependencies": {
+     "ollama": "^0.6.3"
+   },
+   "devDependencies": {
+     "@types/node": "^24.10.1",
+     "typescript": "^5.9.3"
+   }
+ }