gwendoline 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -32,6 +32,24 @@ echo "Why is the sky blue?" | gwen
32
32
 
33
33
  cat prompt.md | gwen
34
34
  cat prompt.md | gwen --cloud
35
- cat prompt.md | gwen --model "gpt-oss:120b-cloud"
36
- cat prompt.md | gwen --model "gpt-oss:120b-cloud" > output.md
35
+ cat prompt.md | gwen --model gpt-oss:120b-cloud
36
 + cat prompt.md | gwen --model gpt-oss:120b-cloud > output.md
37
+ cat input.json | gwen --chat > output.json
38
+ ```
39
+
40
+ ## Chat Mode Usage
41
+
42
 + Create a file with the input messages first, or pipe them in. Then run with the `--chat` parameter.
43
 + In chat mode, Gwendoline expects the input to already be a list of chat messages. The list must include at least the message you want to ask now. The output will also be a list of chat messages, including the response from the LLM.
44
+
45
+ For example, create file `chat.json` with the content:
46
+
47
+ ```json
48
+ [{ "role": "user", "content": "Why is the sky blue?" }]
49
+ ```
50
+
51
+ Run command:
52
+
53
+ ```sh
54
 + cat chat.json | gwendoline --chat --model gpt-oss:120b-cloud > chat-output.json
37
55
  ```
package/build/index.js CHANGED
@@ -6,6 +6,7 @@ const LLM_MODEL_LOCAL = "qwen3:4b";
6
6
  const LLM_MODEL_CLOUD = "gpt-oss:120b-cloud";
7
7
  const isCloudLLM = argv.includes("--cloud");
8
8
  const hasLLMSpecified = argv.includes("--model");
9
+ const isChatMode = argv.includes("--chat");
9
10
  let customModelName = "";
10
11
  if (hasLLMSpecified) {
11
12
  argv.forEach((val, index) => {
@@ -22,8 +23,20 @@ async function main() {
22
23
  input += chunk;
23
24
  });
24
25
  process.stdin.on("end", async () => {
25
- const content = await runLLMRequest(input.trim());
26
- process.stdout.write(content);
26
+ if (isChatMode) {
27
+ try {
28
+ const inputMessages = JSON.parse(input.trim() || "[]");
29
+ const content = await runLLMRequest(inputMessages, isChatMode);
30
+ process.stdout.write(content);
31
+ }
32
+ catch (error) {
33
+ throw Error("Could not parse input of chat messages", error || "");
34
+ }
35
+ }
36
+ else {
37
+ const content = await runLLMRequest([{ role: "user", content: input.trim() }], isChatMode);
38
+ process.stdout.write(content);
39
+ }
27
40
  });
28
41
  if (process.stdin.isTTY) {
29
42
  const rl = readline.createInterface({
@@ -36,7 +49,7 @@ async function main() {
36
49
  process.stdout.write("Bye!");
37
50
  process.exit(1);
38
51
  }
39
- const content = await runLLMRequest(prompt);
52
+ const content = await runLLMRequest([{ role: "user", content: prompt }], false);
40
53
  process.stdout.write(content);
41
54
  process.exit(1);
42
55
  });
@@ -46,7 +59,7 @@ main().catch((error) => {
46
59
  console.error("Fatal error in main():", error);
47
60
  process.exit(1);
48
61
  });
49
- async function runLLMRequest(prompt = "") {
62
+ async function runLLMRequest(messages, returnChat = false) {
50
63
  const LLM_MODEL = isCloudLLM ? LLM_MODEL_CLOUD : LLM_MODEL_LOCAL;
51
64
  try {
52
65
  const ollama = new Ollama({
@@ -59,8 +72,16 @@ async function runLLMRequest(prompt = "") {
59
72
  });
60
73
  const response = await ollama.chat({
61
74
  model: customModelName || LLM_MODEL,
62
- messages: [{ role: "user", content: prompt }],
75
+ messages,
63
76
  });
77
+ if (returnChat) {
78
+ messages.push({
79
+ role: "assistant",
80
+ content: response.message.content,
81
+ });
82
+ const messagesStr = JSON.stringify(messages);
83
+ return messagesStr.trim();
84
+ }
64
85
  return response.message.content;
65
86
  }
66
87
  catch (e) {
package/package.json CHANGED
@@ -1,7 +1,13 @@
1
1
  {
2
2
  "name": "gwendoline",
3
- "version": "0.1.0",
4
- "description": "",
3
+ "version": "0.1.2",
4
+ "description": "CLI based tool for interacting with language models directly from terminal",
5
+ "keywords": [
6
+ "cli",
7
+ "llm",
8
+ "ollama",
9
+ "chat"
10
+ ],
5
11
  "type": "module",
6
12
  "bin": {
7
13
  "gwen": "./build/index.js",