gaunt-sloth-assistant 0.0.7 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,68 +1,87 @@
- import {
-   END,
-   MemorySaver,
-   MessagesAnnotation,
-   START,
-   StateGraph,
- } from "@langchain/langgraph";
- import { writeFileSync } from "node:fs";
- import path from "node:path";
- import {initConfig, slothContext} from "./config.js";
- import { display, displayError, displaySuccess } from "./consoleUtils.js";
- import { fileSafeLocalDate, toFileSafeString } from "./utils.js";
-
- await initConfig();
-
- export async function askQuestion(source, preamble, content) {
-   // This node receives the current state (messages) and invokes the LLM
-   const callModel = async (state) => {
-     // state.messages will contain the list including the system preamble and user diff
-     const response = await slothContext.config.llm.invoke(state.messages);
-     // MessagesAnnotation expects the node to return the new message(s) to be added to the state.
-     // Wrap the response in an array if it's a single message object.
-     return { messages: response };
-   };
-
-   // Define the graph structure with MessagesAnnotation state
-   const workflow = new StateGraph(MessagesAnnotation)
-     // Define the node and edge
-     .addNode("model", callModel)
-     .addEdge(START, "model") // Start at the 'model' node
-     .addEdge("model", END); // End after the 'model' node completes
-
-   // Set up memory (optional but good practice for potential future multi-turn interactions)
-   const memory = new MemorySaver();
-
-   // Compile the workflow into a runnable app
-   const app = workflow.compile({ checkpointer: memory });
-
-   // Construct the initial the messages including the preamble as a system message
-   const messages = [
-     {
-       role: "system",
-       content: preamble, // The preamble goes here
-     },
-     {
-       role: "user",
-       content, // The code diff goes here
-     },
-   ];
-
-   display("Thinking...");
-   const output = await app.invoke({messages}, slothContext.session);
-   // FIXME this looks ugly, there should be other way
-   const outputContent = output.messages[output.messages.length - 1].content;
-   const filePath = path.resolve(process.cwd(), toFileSafeString(source)+'-'+fileSafeLocalDate()+".md");
-   display(`writing ${filePath}`);
-   // TODO highlight LLM output with something like Prism.JS
-   display(outputContent);
-   try {
-     writeFileSync(filePath, outputContent);
-     displaySuccess(`This report can be found in ${filePath}`);
-   } catch (error) {
-     displayError(`Failed to write review to file: ${filePath}`);
-     displayError(error.message);
-     // Consider if you want to exit or just log the error
-     // process.exit(1);
-   }
- }
+ import {
+   END,
+   MemorySaver,
+   MessagesAnnotation,
+   START,
+   StateGraph,
+ } from "@langchain/langgraph";
+ import { writeFileSync } from "node:fs";
+ import * as path from "node:path";
+ import { slothContext } from "../config.js";
+ import { display, displayError, displaySuccess } from "../consoleUtils.js";
+ import { fileSafeLocalDate, toFileSafeString, ProgressIndicator, extractLastMessageContent } from "../utils.js";
+
+ /**
+  * Ask a question and get an answer from the LLM
+  * @param {string} source - The source of the question (used for file naming)
+  * @param {string} preamble - The preamble to send to the LLM
+  * @param {string} content - The content of the question
+  */
+ export async function askQuestion(source, preamble, content) {
+   const progressIndicator = new ProgressIndicator("Thinking.");
+   const outputContent = await askQuestionInner(slothContext, () => progressIndicator.indicate(), preamble, content);
+   const filePath = path.resolve(process.cwd(), toFileSafeString(source)+'-'+fileSafeLocalDate()+".md");
+   display(`\nwriting ${filePath}`);
+   // TODO highlight LLM output with something like Prism.JS
+   display('\n' + outputContent);
+   try {
+     writeFileSync(filePath, outputContent);
+     displaySuccess(`This report can be found in ${filePath}`);
+   } catch (error) {
+     displayError(`Failed to write answer to file: ${filePath}`);
+     displayError(error.message);
+     // Consider if you want to exit or just log the error
+     // process.exit(1);
+   }
+ }
+
+ /**
+  * Inner function to ask a question and get an answer from the LLM
+  * @param {Object} context - The context object
+  * @param {Function} indicateProgress - Function to indicate progress
+  * @param {string} preamble - The preamble to send to the LLM
+  * @param {string} content - The content of the question
+  * @returns {string} The answer from the LLM
+  */
+ export async function askQuestionInner(context, indicateProgress, preamble, content) {
+   // This node receives the current state (messages) and invokes the LLM
+   const callModel = async (state) => {
+     // state.messages will contain the list including the system preamble and user diff
+     const response = await context.config.llm.invoke(state.messages);
+     // MessagesAnnotation expects the node to return the new message(s) to be added to the state.
+     // Wrap the response in an array if it's a single message object.
+     return { messages: response };
+   };
+
+   // Define the graph structure with MessagesAnnotation state
+   const workflow = new StateGraph(MessagesAnnotation)
+     // Define the node and edge
+     .addNode("model", callModel)
+     .addEdge(START, "model") // Start at the 'model' node
+     .addEdge("model", END); // End after the 'model' node completes
+
+   // Set up memory (optional but good practice for potential future multi-turn interactions)
+   const memory = new MemorySaver();
+
+   // Compile the workflow into a runnable app
+   const app = workflow.compile({ checkpointer: memory });
+
+   // Construct the initial the messages including the preamble as a system message
+   const messages = [
+     {
+       role: "system",
+       content: preamble, // The preamble goes here
+     },
+     {
+       role: "user",
+       content, // The question goes here
+     },
+   ];
+
+   indicateProgress();
+   // TODO create proper progress indicator for async tasks.
+   const progress = setInterval(() => indicateProgress(), 1000);
+   const output = await app.invoke({messages}, context.session);
+   clearInterval(progress);
+   return extractLastMessageContent(output);
+ }
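
The 0.1.0 refactor above splits `askQuestion` into a thin file-writing wrapper and `askQuestionInner`, which takes the context and a progress callback as parameters instead of reading the global `slothContext`. That makes the LangGraph pipeline testable in isolation. A minimal sketch, assuming the module path and that `context.session` carries a LangGraph thread id (neither is named in this diff); any stub whose `config.llm.invoke(messages)` resolves to a message-like object will do:

// test-sketch.js -- illustrative only; the import path is an assumption.
import { askQuestionInner } from "./questionAnswering.js";

// Stub LLM: the single "model" node only calls llm.invoke(state.messages),
// so an object with an async invoke method is enough.
const fakeContext = {
  config: {
    llm: { invoke: async () => ({ role: "assistant", content: "stub answer" }) },
  },
  // MemorySaver checkpointers require a thread id in the invoke config.
  session: { configurable: { thread_id: "test-thread" } },
};

const answer = await askQuestionInner(
  fakeContext,
  () => {}, // no-op progress callback keeps test output quiet
  "You are a helpful assistant.",
  "What does this sketch demonstrate?"
);
console.log(answer); // "stub answer"
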
@@ -7,17 +7,35 @@ import {
  } from "@langchain/langgraph";
  import { writeFileSync } from "node:fs";
  import path from "node:path";
- import {initConfig, slothContext} from "./config.js";
- import { display, displayError, displaySuccess } from "./consoleUtils.js";
- import { fileSafeLocalDate, toFileSafeString } from "./utils.js";
-
- await initConfig();
+ import { initConfig, slothContext } from "../config.js";
+ import { display, displayError, displaySuccess } from "../consoleUtils.js";
+ import { fileSafeLocalDate, toFileSafeString, ProgressIndicator, extractLastMessageContent } from "../utils.js";
 
  export async function review(source, preamble, diff) {
+   const progressIndicator = new ProgressIndicator("Reviewing.");
+   const outputContent = await reviewInner(slothContext, () => progressIndicator.indicate(), preamble, diff);
+   const filePath = path.resolve(process.cwd(), toFileSafeString(source)+'-'+fileSafeLocalDate()+".md");
+   process.stdout.write("\n");
+   display(`writing ${filePath}`);
+   process.stdout.write("\n");
+   // TODO highlight LLM output with something like Prism.JS (maybe system emoj are enough ✅⚠️❌)
+   display(outputContent);
+   try {
+     writeFileSync(filePath, outputContent);
+     displaySuccess(`This report can be found in ${filePath}`);
+   } catch (error) {
+     displayError(`Failed to write review to file: ${filePath}`);
+     displayError(error.message);
+     // Consider if you want to exit or just log the error
+     // process.exit(1);
+   }
+ }
+
+ export async function reviewInner(context, indicateProgress, preamble, diff) {
    // This node receives the current state (messages) and invokes the LLM
    const callModel = async (state) => {
      // state.messages will contain the list including the system preamble and user diff
-     const response = await slothContext.config.llm.invoke(state.messages);
+     const response = await context.config.llm.invoke(state.messages);
      // MessagesAnnotation expects the node to return the new message(s) to be added to the state.
      // Wrap the response in an array if it's a single message object.
      return { messages: response };
@@ -48,26 +66,10 @@ export async function review(source, preamble, diff) {
      },
    ];
 
-   process.stdout.write("Reviewing.");
+   indicateProgress();
    // TODO create proper progress indicator for async tasks.
-   const progress = setInterval(() => process.stdout.write('.'), 1000);
-   const output = await app.invoke({messages}, slothContext.session);
-   const filePath = path.resolve(process.cwd(), toFileSafeString(source)+'-'+fileSafeLocalDate()+".md");
-   process.stdout.write("\n");
-   display(`writing ${filePath}`);
-   // FIXME this looks ugly, there should be other way
-   const outputContent = output.messages[output.messages.length - 1].content;
+   const progress = setInterval(() => indicateProgress(), 1000);
+   const output = await app.invoke({messages}, context.session);
    clearInterval(progress);
-   process.stdout.write("\n");
-   // TODO highlight LLM output with something like Prism.JS (maybe system emoj are enough ✅⚠️❌)
-   display(outputContent);
-   try {
-     writeFileSync(filePath, outputContent);
-     displaySuccess(`This report can be found in ${filePath}`);
-   } catch (error) {
-     displayError(`Failed to write review to file: ${filePath}`);
-     displayError(error.message);
-     // Consider if you want to exit or just log the error
-     // process.exit(1);
-   }
+   return extractLastMessageContent(output);
  }
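
One detail worth noting in both `askQuestionInner` and `reviewInner`: `clearInterval(progress)` only runs if `app.invoke` resolves, so a rejected invocation would leave the one-second ticker running. A hedged sketch of the same progress pattern with a `try/finally` guard (not what the package ships):

// Tick once immediately, then every second while an async task runs;
// the finally block clears the timer even when the task rejects.
async function withProgress(task, indicate) {
  indicate();
  const timer = setInterval(indicate, 1000);
  try {
    return await task();
  } finally {
    clearInterval(timer);
  }
}

// e.g. const output = await withProgress(() => app.invoke({messages}, context.session), indicateProgress);
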
package/src/prompt.js CHANGED
@@ -1,6 +1,7 @@
  import {resolve} from "node:path";
  import {SLOTH_INTERNAL_PREAMBLE, slothContext} from "./config.js";
  import {readFileSyncWithMessages, spawnCommand} from "./utils.js";
+ import { displayError } from "./consoleUtils.js";
 
  export function readInternalPreamble() {
    const filePath = resolve(slothContext.installDir, SLOTH_INTERNAL_PREAMBLE);
@@ -18,8 +19,15 @@ export function readPreamble(preambleFilename) {
 
  /**
   * This function expects https://cli.github.com/ to be installed and authenticated.
+  * It does something like `gh pr diff 42`
   */
  export async function getPrDiff(pr) {
    // TODO makes sense to check if gh is available and authenticated
-   return spawnCommand('gh', ['pr', 'diff', pr], 'Loading PR diff...', 'Loaded PR diff.');
+   try {
+     return await spawnCommand('gh', ['pr', 'diff', pr], 'Loading PR diff...', 'Loaded PR diff.');
+   } catch (e) {
+     displayError(e.toString());
+     displayError(`Failed to call "gh pr diff ${pr}", see message above for details.`);
+     process.exit();
+   }
  }
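
`getPrDiff` now reports `gh` failures instead of letting the rejection escape, but bare `process.exit()` terminates with code 0 (or `process.exitCode` if set), so a shell caller still sees success. A sketch of a variant that signals the failure; the name `getPrDiffStrict` is hypothetical and this is illustrative, not what 0.1.0 ships:

export async function getPrDiffStrict(pr) {
  try {
    return await spawnCommand('gh', ['pr', 'diff', pr], 'Loading PR diff...', 'Loaded PR diff.');
  } catch (e) {
    displayError(e.toString());
    displayError(`Failed to call "gh pr diff ${pr}", see message above for details.`);
    process.exit(1); // non-zero, so `gsloth ... && next-step` short-circuits
  }
}
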
@@ -0,0 +1,11 @@
+ import {spawnCommand} from "../utils.js";
+ import {displayWarning} from "../consoleUtils.js";
+
+ export async function get(_, pr) {
+   // TODO makes sense to check if gh is available and authenticated
+   if (!pr) {
+     displayWarning("No PR provided, skipping PR diff fetching.")
+     return "";
+   }
+   return spawnCommand('gh', ['pr', 'diff', pr], 'Loading PR diff...', 'Loaded PR diff.');
+ }
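
This new provider (the diff does not name its file) skips fetching when no PR number is given, but unlike `getPrDiff` in `prompt.js` it does not catch `spawnCommand`'s rejection, so a failing `gh` call propagates to the caller. A hedged usage sketch:

// The first argument is unused by this provider (hence the underscore).
let diff = '';
try {
  diff = await get(undefined, '42');
} catch (message) {
  // spawnCommand rejects with a plain string (stdout + stderr), not an Error
  console.error(`gh pr diff failed: ${message}`);
}
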
@@ -0,0 +1,81 @@
+ import {display, displayWarning} from "../consoleUtils.js";
+
+ export async function get(config, prId) {
+   const issueData = await getJiraIssue(config, prId);
+   return `## ${prId} Requirements - ${issueData.fields?.summary}\n\n${issueData.fields?.description}`
+ }
+
+ /**
+  * Fetches a Jira issue using the Atlassian REST API v2.
+  *
+  * @async
+  * @param {object} config - Configuration object.
+  * @param {string} config.username - Your Jira email address or username used for authentication.
+  * @param {string} config.token - Your Jira API token (legacy access token).
+  * @param {string} config.baseUrl - The base URL of your Jira instance API (e.g., 'https://your-domain.atlassian.net/rest/api/2/issue/').
+  * @param {string} jiraKey - The Jira issue key (e.g., 'UI-1005').
+  * @returns {Promise<object>} A promise that resolves with the Jira issue data as a JSON object.
+  * @throws {Error} Throws an error if the fetch fails, authentication is wrong, the issue is not found, or the response status is not OK.
+  */
+ async function getJiraIssue(config, jiraKey) {
+   const { username, token, baseUrl } = config;
+   if (!jiraKey) {
+     displayWarning("No jiraKey provided, skipping Jira issue fetching.")
+     return "";
+   }
+
+   // Validate essential inputs
+   if (!username || !token || !baseUrl) {
+     throw new Error('Missing required parameters in config (username, token, baseUrl) or missing jiraKey.');
+   }
+
+   // Ensure baseUrl doesn't end with a slash to avoid double slashes in the URL
+   const cleanBaseUrl = baseUrl.endsWith('/') ? baseUrl.slice(0, -1) : baseUrl;
+
+   // Construct the full API URL
+   const apiUrl = `${cleanBaseUrl}/${jiraKey}`;
+
+   // Encode credentials for Basic Authentication header
+   const credentials = `${username}:${token}`;
+   const encodedCredentials = Buffer.from(credentials).toString('base64');
+   const authHeader = `Basic ${encodedCredentials}`;
+
+   // Define request headers
+   const headers = {
+     'Authorization': authHeader,
+     'Accept': 'application/json', // Tell the server we expect JSON back
+     // 'Content-Type': 'application/json' // Usually not needed for GET requests
+   };
+
+   display(`Fetching Jira issue: ${apiUrl}`);
+
+   try {
+     const response = await fetch(apiUrl, {
+       method: 'GET',
+       headers: headers,
+     });
+
+     // Check if the response status code indicates success (e.g., 200 OK)
+     if (!response.ok) {
+       let errorBody = 'Could not read error body.';
+       try {
+         // Attempt to get more details from the response body for non-OK statuses
+         errorBody = await response.text();
+       } catch (e) {
+         // Silent fail - we already have a generic error message
+       }
+       // Throw a detailed error including status, status text, URL, and body if available
+       throw new Error(`HTTP error! Status: ${response.status} ${response.statusText}. URL: ${apiUrl}. Response Body: ${errorBody}`);
+     }
+
+     // Parse the JSON response body if the request was successful
+     const issueData = await response.json();
+     return issueData;
+
+   } catch (error) {
+     // Handle network errors (e.g., DNS resolution failure, connection refused)
+     // or errors thrown from the non-OK response check above
+     // Re-throw the error so the caller can handle it appropriately
+     throw error;
+   }
+ }
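
Taken together, the provider builds a Basic auth header from `username:token` and issues a plain GET against `<baseUrl>/<jiraKey>`. A usage sketch with placeholder credentials, following the JSDoc above:

const config = {
  username: 'me@example.com',                                   // Jira account email
  token: 'your-jira-api-token',                                 // legacy API token
  baseUrl: 'https://your-domain.atlassian.net/rest/api/2/issue/',
};

const requirements = await get(config, 'UI-1005');
// => "## UI-1005 Requirements - <issue summary>\n\n<issue description>"
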
@@ -0,0 +1,6 @@
+ /**
+  * Simple provider returning text as it is.
+  */
+ export async function get(_, text) {
+   return text;
+ }
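
The three new files above share one shape: `export async function get(config, id)` resolving to a string, which reads like a pluggable content-provider convention. The dispatcher is not part of this diff; a sketch of how one might look (module paths are guesses, not from the package):

const providers = {
  'github': () => import('./providers/ghPrDiffProvider.js'),       // path assumed
  'jira':   () => import('./providers/jiraIssueLegacyProvider.js'), // path assumed
  'text':   () => import('./providers/textProvider.js'),           // path assumed
};

async function fetchContent(name, config, id) {
  const load = providers[name] ?? providers['text'];
  const provider = await load();
  return provider.get(config, id); // every provider resolves to a string
}
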
package/src/utils.js CHANGED
@@ -50,39 +50,51 @@ export function readFileSyncWithMessages(filePath, errorMessageIn, noFileMessage
  }
 
  export function readStdin(program) {
-   if(process.stdin.isTTY) {
-     program.parse();
-   } else {
-     // Support piping diff into gsloth
-     process.stdout.write('reading STDIN.');
-     process.stdin.on('readable', function() {
-       const chunk = this.read();
-       process.stdout.write('.');
-       if (chunk !== null) {
-         slothContext.stdin += chunk;
-       }
-     });
-     process.stdin.on('end', function() {
-       process.stdout.write('.\n');
-       program.parse(process.argv);
-     });
-   }
+   return new Promise((resolve) => {
+     if(process.stdin.isTTY) {
+       program.parseAsync().then(resolve);
+     } else {
+       // Support piping diff into gsloth
+       process.stdout.write('reading STDIN.');
+       process.stdin.on('readable', function() {
+         const chunk = this.read();
+         process.stdout.write('.');
+         if (chunk !== null) {
+           slothContext.stdin += chunk;
+         }
+       });
+       process.stdin.on('end', function() {
+         process.stdout.write('.\n');
+         program.parseAsync(process.argv).then(resolve);
+       });
+     }
+   });
  }
 
  export async function spawnCommand(command, args, progressMessage, successMessage) {
    return new Promise((resolve, reject) => {
-     const out = {stdout: ''};
+     // TODO use progress indicator
+     const out = {stdout: '', stderr: ''};
      const spawned = spawn(command, args);
-     spawned.stdout.on('data', async (stdoutChunk) => {
+     spawned.stdout.on('data', async (stdoutChunk, dd) => {
        display(progressMessage);
        out.stdout += stdoutChunk.toString();
      });
+     spawned.stderr.on('data', (err) => {
+       display(progressMessage);
+       out.stderr += err.toString();
+     })
      spawned.on('error', (err) => {
-       reject(err);
+       reject(err.toString());
      })
      spawned.on('close', (code) => {
-       display(successMessage);
-       resolve(out.stdout);
+       if (code === 0) {
+         display(successMessage);
+         resolve(out.stdout);
+       } else {
+         displayError(`Failed to spawn command with code ${code}`);
+         reject(out.stdout + ' ' + out.stderr);
+       }
      });
    });
  }
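
The reworked `spawnCommand` now collects stderr and turns a non-zero exit code into a rejection; note that it rejects with plain strings (`err.toString()` or `stdout + ' ' + stderr`) rather than `Error` objects. A usage sketch:

try {
  const log = await spawnCommand('git', ['log', '-1'], 'Loading log...', 'Loaded log.');
  console.log(log);
} catch (message) {
  // message is a string, per the implementation above
  console.error(`command failed: ${message}`);
}
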
@@ -92,3 +104,34 @@ export function getSlothVersion() {
    const projectJson = readFileSync(jsonPath, { encoding: 'utf8' });
    return JSON.parse(projectJson).version;
  }
+
+
+ export class ProgressIndicator {
+
+   constructor(initialMessage) {
+     this.hasBeenCalled = false;
+     this.initialMessage = initialMessage;
+   }
+
+   indicate() {
+     if (this.hasBeenCalled) {
+       process.stdout.write('.');
+     } else {
+       this.hasBeenCalled = true;
+       process.stdout.write(this.initialMessage);
+     }
+   }
+
+ }
+
+ /**
+  * Extracts the content of the last message from an LLM response
+  * @param {Object} output - The output from the LLM containing messages
+  * @returns {string} The content of the last message
+  */
+ export function extractLastMessageContent(output) {
+   if (!output || !output.messages || !output.messages.length) {
+     return '';
+   }
+   return output.messages[output.messages.length - 1].content;
+ }
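
Both helpers are small enough to show end to end. `ProgressIndicator` prints its initial message once and a dot on every later call, and `extractLastMessageContent` replaces the inlined `output.messages[output.messages.length - 1].content` lookup flagged with a FIXME in 0.0.7, returning an empty string instead of throwing on malformed output:

const progress = new ProgressIndicator('Working.');
progress.indicate(); // prints "Working."
progress.indicate(); // prints "."
progress.indicate(); // prints "."

extractLastMessageContent({ messages: [{ content: 'a' }, { content: 'b' }] }); // => 'b'
extractLastMessageContent({ messages: [] });                                   // => ''
extractLastMessageContent(null);                                               // => ''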