gaunt-sloth-assistant 0.1.4 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128)
  1. package/.prettierrc.json +9 -0
  2. package/README.md +177 -158
  3. package/ROADMAP.md +1 -1
  4. package/dist/commands/askCommand.d.ts +6 -0
  5. package/dist/commands/askCommand.js +26 -0
  6. package/dist/commands/askCommand.js.map +1 -0
  7. package/dist/commands/initCommand.d.ts +6 -0
  8. package/dist/commands/initCommand.js +16 -0
  9. package/dist/commands/initCommand.js.map +1 -0
  10. package/dist/commands/reviewCommand.d.ts +3 -0
  11. package/dist/commands/reviewCommand.js +128 -0
  12. package/dist/commands/reviewCommand.js.map +1 -0
  13. package/dist/config.d.ts +80 -0
  14. package/dist/config.js +178 -0
  15. package/dist/config.js.map +1 -0
  16. package/dist/configs/anthropic.d.ts +5 -0
  17. package/{src → dist}/configs/anthropic.js +45 -48
  18. package/dist/configs/anthropic.js.map +1 -0
  19. package/dist/configs/fake.d.ts +3 -0
  20. package/{src → dist}/configs/fake.js +11 -14
  21. package/dist/configs/fake.js.map +1 -0
  22. package/dist/configs/groq.d.ts +4 -0
  23. package/{src → dist}/configs/groq.js +10 -13
  24. package/dist/configs/groq.js.map +1 -0
  25. package/dist/configs/types.d.ts +14 -0
  26. package/dist/configs/types.js +2 -0
  27. package/dist/configs/types.js.map +1 -0
  28. package/dist/configs/vertexai.d.ts +4 -0
  29. package/{src → dist}/configs/vertexai.js +44 -47
  30. package/dist/configs/vertexai.js.map +1 -0
  31. package/dist/consoleUtils.d.ts +6 -0
  32. package/{src → dist}/consoleUtils.js +10 -15
  33. package/dist/consoleUtils.js.map +1 -0
  34. package/dist/index.d.ts +1 -0
  35. package/dist/index.js +17 -0
  36. package/dist/index.js.map +1 -0
  37. package/dist/modules/questionAnsweringModule.d.ts +18 -0
  38. package/{src → dist}/modules/questionAnsweringModule.js +72 -82
  39. package/dist/modules/questionAnsweringModule.js.map +1 -0
  40. package/dist/modules/reviewModule.d.ts +4 -0
  41. package/{src → dist}/modules/reviewModule.js +25 -35
  42. package/dist/modules/reviewModule.js.map +1 -0
  43. package/dist/modules/types.d.ts +18 -0
  44. package/dist/modules/types.js +2 -0
  45. package/dist/modules/types.js.map +1 -0
  46. package/dist/prompt.d.ts +7 -0
  47. package/dist/prompt.js +32 -0
  48. package/dist/prompt.js.map +1 -0
  49. package/dist/providers/file.d.ts +8 -0
  50. package/dist/providers/file.js +20 -0
  51. package/dist/providers/file.js.map +1 -0
  52. package/dist/providers/ghPrDiffProvider.d.ts +8 -0
  53. package/dist/providers/ghPrDiffProvider.js +16 -0
  54. package/dist/providers/ghPrDiffProvider.js.map +1 -0
  55. package/dist/providers/jiraIssueLegacyAccessTokenProvider.d.ts +8 -0
  56. package/dist/providers/jiraIssueLegacyAccessTokenProvider.js +62 -0
  57. package/dist/providers/jiraIssueLegacyAccessTokenProvider.js.map +1 -0
  58. package/dist/providers/jiraIssueLegacyProvider.d.ts +8 -0
  59. package/dist/providers/jiraIssueLegacyProvider.js +74 -0
  60. package/dist/providers/jiraIssueLegacyProvider.js.map +1 -0
  61. package/dist/providers/jiraIssueProvider.d.ts +11 -0
  62. package/dist/providers/jiraIssueProvider.js +96 -0
  63. package/dist/providers/jiraIssueProvider.js.map +1 -0
  64. package/dist/providers/text.d.ts +8 -0
  65. package/dist/providers/text.js +10 -0
  66. package/dist/providers/text.js.map +1 -0
  67. package/dist/providers/types.d.ts +21 -0
  68. package/dist/providers/types.js +2 -0
  69. package/dist/providers/types.js.map +1 -0
  70. package/dist/systemUtils.d.ts +22 -0
  71. package/dist/systemUtils.js +36 -0
  72. package/dist/systemUtils.js.map +1 -0
  73. package/dist/utils.d.ts +49 -0
  74. package/{src → dist}/utils.js +73 -60
  75. package/dist/utils.js.map +1 -0
  76. package/docs/CONFIGURATION.md +95 -6
  77. package/docs/RELEASE-HOWTO.md +1 -1
  78. package/eslint.config.js +99 -21
  79. package/index.js +10 -27
  80. package/package.json +26 -15
  81. package/src/commands/askCommand.ts +34 -0
  82. package/src/commands/initCommand.ts +19 -0
  83. package/src/commands/reviewCommand.ts +209 -0
  84. package/src/config.ts +266 -0
  85. package/src/configs/anthropic.ts +55 -0
  86. package/src/configs/fake.ts +15 -0
  87. package/src/configs/groq.ts +54 -0
  88. package/src/configs/vertexai.ts +53 -0
  89. package/src/consoleUtils.ts +33 -0
  90. package/src/index.ts +21 -0
  91. package/src/modules/questionAnsweringModule.ts +97 -0
  92. package/src/modules/reviewModule.ts +81 -0
  93. package/src/modules/types.ts +23 -0
  94. package/src/prompt.ts +39 -0
  95. package/src/providers/file.ts +24 -0
  96. package/src/providers/ghPrDiffProvider.ts +20 -0
  97. package/src/providers/jiraIssueLegacyProvider.ts +103 -0
  98. package/src/providers/jiraIssueProvider.ts +133 -0
  99. package/src/providers/text.ts +14 -0
  100. package/src/providers/types.ts +24 -0
  101. package/src/systemUtils.ts +52 -0
  102. package/src/utils.ts +225 -0
  103. package/tsconfig.json +24 -0
  104. package/vitest.config.ts +13 -0
  105. package/.eslint.config.mjs +0 -72
  106. package/.github/dependabot.yml +0 -11
  107. package/.github/workflows/ci.yml +0 -33
  108. package/spec/.gsloth.config.js +0 -22
  109. package/spec/.gsloth.config.json +0 -25
  110. package/spec/askCommand.spec.js +0 -92
  111. package/spec/config.spec.js +0 -421
  112. package/spec/initCommand.spec.js +0 -55
  113. package/spec/predefinedConfigs.spec.js +0 -100
  114. package/spec/questionAnsweringModule.spec.js +0 -137
  115. package/spec/reviewCommand.spec.js +0 -222
  116. package/spec/reviewModule.spec.js +0 -28
  117. package/spec/support/jasmine.mjs +0 -14
  118. package/src/commands/askCommand.js +0 -27
  119. package/src/commands/initCommand.js +0 -17
  120. package/src/commands/reviewCommand.js +0 -154
  121. package/src/config.js +0 -177
  122. package/src/prompt.js +0 -34
  123. package/src/providers/file.js +0 -19
  124. package/src/providers/ghPrDiffProvider.js +0 -11
  125. package/src/providers/jiraIssueLegacyAccessTokenProvider.js +0 -84
  126. package/src/providers/text.js +0 -6
  127. package/src/systemUtils.js +0 -32
  128. package/{.gsloth.preamble.internal.md → .gsloth.backstory.md} +0 -0
package/src/config.ts ADDED
@@ -0,0 +1,266 @@
+ import path from 'node:path/posix';
+ import { v4 as uuidv4 } from 'uuid';
+ import { displayDebug, displayError, displayInfo, displayWarning } from '#src/consoleUtils.js';
+ import { importExternalFile, writeFileIfNotExistsWithMessages } from '#src/utils.js';
+ import { existsSync, readFileSync } from 'node:fs';
+ import { error, exit, getCurrentDir } from '#src/systemUtils.js';
+ import { LanguageModelLike } from '@langchain/core/language_models/base';
+
+ export interface SlothConfig extends BaseSlothConfig {
+   llm: LanguageModelLike; // FIXME this is still bad keeping instance in config is probably not best choice
+   contentProvider: string;
+   requirementsProvider: string;
+   commands: {
+     pr: {
+       contentProvider: string;
+     };
+   };
+ }
+
+ /**
+  * Raw, unprocessed sloth config
+  */
+ export interface RawSlothConfig extends BaseSlothConfig {
+   llm: LLMConfig;
+ }
+
+ /**
+  * Do not export this one
+  */
+ interface BaseSlothConfig {
+   llm: unknown;
+   contentProvider?: string;
+   requirementsProvider?: string;
+   commands?: {
+     pr: {
+       contentProvider: string;
+     };
+   };
+   review?: {
+     requirementsProvider?: string;
+     contentProvider?: string;
+   };
+   pr?: {
+     requirementsProvider?: string;
+   };
+   requirementsProviderConfig?: Record<string, unknown>;
+   contentProviderConfig?: Record<string, unknown>;
+ }
+
+ /**
+  * @deprecated
+  * this object has blurred responsibility lines and bad name.
+  */
+ export interface SlothContext {
+   config: SlothConfig;
+   stdin: string;
+   session: {
+     configurable: {
+       thread_id: string;
+     };
+   };
+ }
+
+ export interface LLMConfig extends Record<string, unknown> {
+   type: string;
+   model: string;
+ }
+
+ export const USER_PROJECT_CONFIG_JS = '.gsloth.config.js';
+ export const USER_PROJECT_CONFIG_JSON = '.gsloth.config.json';
+ export const USER_PROJECT_CONFIG_MJS = '.gsloth.config.mjs';
+ export const GSLOTH_BACKSTORY = '.gsloth.backstory.md';
+ export const USER_PROJECT_REVIEW_PREAMBLE = '.gsloth.preamble.review.md';
+
+ export const availableDefaultConfigs = ['vertexai', 'anthropic', 'groq'] as const;
+ export type ConfigType = (typeof availableDefaultConfigs)[number];
+
+ export const DEFAULT_CONFIG: Partial<SlothConfig> = {
+   llm: undefined,
+   contentProvider: 'file',
+   requirementsProvider: 'file',
+   commands: {
+     pr: {
+       contentProvider: 'gh',
+     },
+   },
+ };
+
+ /**
+  * @deprecated
+  * this object has blurred responsibility lines and bad name.
+  * TODO this should be reworked to something more robust
+  */
+ export const slothContext = {
+   config: DEFAULT_CONFIG,
+   stdin: '',
+   session: { configurable: { thread_id: uuidv4() } },
+ } as Partial<SlothContext> as SlothContext;
+
+ export async function initConfig(): Promise<void> {
+   const currentDir = getCurrentDir();
+   const jsonConfigPath = path.join(currentDir, USER_PROJECT_CONFIG_JSON);
+   const jsConfigPath = path.join(currentDir, USER_PROJECT_CONFIG_JS);
+   const mjsConfigPath = path.join(currentDir, USER_PROJECT_CONFIG_MJS);
+
+   // Try loading JSON config file first
+   if (existsSync(jsonConfigPath)) {
+     try {
+       const jsonConfig = JSON.parse(readFileSync(jsonConfigPath, 'utf8')) as RawSlothConfig;
+       // If the config has an LLM with a type, create the appropriate LLM instance
+       if (jsonConfig.llm && typeof jsonConfig.llm === 'object' && 'type' in jsonConfig.llm) {
+         await tryJsonConfig(jsonConfig);
+       } else {
+         error(`${jsonConfigPath} is not in valid format. Should at least define llm.type`);
+         exit(1);
+       }
+     } catch (e) {
+       displayDebug(e instanceof Error ? e : String(e));
+       displayError(
+         `Failed to read config from ${USER_PROJECT_CONFIG_JSON}, will try other formats.`
+       );
+       // Continue to try other formats
+       return tryJsConfig();
+     }
+   } else {
+     // JSON config not found, try JS
+     return tryJsConfig();
+   }
+
+   // Helper function to try loading JS config
+   async function tryJsConfig(): Promise<void> {
+     if (existsSync(jsConfigPath)) {
+       return importExternalFile(jsConfigPath)
+         .then((i: { configure: (module: string) => Promise<Partial<SlothConfig>> }) =>
+           i.configure(jsConfigPath)
+         )
+         .then((config) => {
+           slothContext.config = { ...slothContext.config, ...config };
+         })
+         .catch((e) => {
+           displayDebug(e instanceof Error ? e : String(e));
+           displayError(
+             `Failed to read config from ${USER_PROJECT_CONFIG_JS}, will try other formats.`
+           );
+           // Continue to try other formats
+           return tryMjsConfig();
+         });
+     } else {
+       // JS config not found, try MJS
+       return tryMjsConfig();
+     }
+   }
+
+   // Helper function to try loading MJS config
+   async function tryMjsConfig(): Promise<void> {
+     if (existsSync(mjsConfigPath)) {
+       return importExternalFile(mjsConfigPath)
+         .then((i: { configure: (module: string) => Promise<Partial<SlothConfig>> }) =>
+           i.configure(mjsConfigPath)
+         )
+         .then((config) => {
+           slothContext.config = { ...slothContext.config, ...config };
+         })
+         .catch((e) => {
+           displayDebug(e instanceof Error ? e : String(e));
+           displayError(`Failed to read config from ${USER_PROJECT_CONFIG_MJS}.`);
+           displayError(`No valid configuration found. Please create a valid configuration file.`);
+           exit();
+         });
+     } else {
+       // No config files found
+       displayError(
+         'No configuration file found. Please create one of: ' +
+           `${USER_PROJECT_CONFIG_JSON}, ${USER_PROJECT_CONFIG_JS}, or ${USER_PROJECT_CONFIG_MJS} ` +
+           'in your project directory.'
+       );
+       exit();
+     }
+   }
+ }
+
+ // Process JSON LLM config by creating the appropriate LLM instance
+ export async function tryJsonConfig(jsonConfig: RawSlothConfig): Promise<void> {
+   const llmConfig = jsonConfig?.llm;
+   const llmType = llmConfig?.type?.toLowerCase();
+
+   // Check if the LLM type is in availableDefaultConfigs
+   if (!llmType || !availableDefaultConfigs.includes(llmType as ConfigType)) {
+     displayError(
+       `Unsupported LLM type: ${llmType}. Available types are: ${availableDefaultConfigs.join(', ')}`
+     );
+     exit(1);
+     return;
+   }
+
+   try {
+     // Import the appropriate config module based on the LLM type
+     try {
+       const configModule = await import(`./configs/${llmType}.js`);
+       if (configModule.processJsonConfig) {
+         const llm = (await configModule.processJsonConfig(llmConfig)) as LanguageModelLike;
+         slothContext.config = { ...slothContext.config, ...jsonConfig, llm };
+       } else {
+         displayWarning(`Config module for ${llmType} does not have processJsonConfig function.`);
+         exit(1);
+       }
+     } catch (importError) {
+       displayDebug(importError instanceof Error ? importError : String(importError));
+       displayWarning(`Could not import config module for ${llmType}.`);
+       exit(1);
+     }
+   } catch (error) {
+     displayDebug(error instanceof Error ? error : String(error));
+     displayError(`Error creating LLM instance for type ${llmType}.`);
+     exit(1);
+   }
+ }
+
+ export async function createProjectConfig(configType: string): Promise<void> {
+   displayInfo(`Setting up your project\n`);
+   writeProjectReviewPreamble();
+   displayWarning(
+     `Make sure you add as much detail as possible to your ${USER_PROJECT_REVIEW_PREAMBLE}.\n`
+   );
+
+   // Check if the config type is in availableDefaultConfigs
+   if (!availableDefaultConfigs.includes(configType as ConfigType)) {
+     displayError(
+       `Unsupported config type: ${configType}. Available types are: ${availableDefaultConfigs.join(', ')}`
+     );
+     exit(1);
+     return;
+   }
+
+   displayInfo(`Creating project config for ${configType}`);
+   const vendorConfig = await import(`./configs/${configType}.js`);
+   vendorConfig.init(USER_PROJECT_CONFIG_JSON, slothContext);
+ }
+
+ export function writeProjectReviewPreamble(): void {
+   const currentDir = getCurrentDir();
+   const reviewPreamblePath = path.join(currentDir, USER_PROJECT_REVIEW_PREAMBLE);
+   writeFileIfNotExistsWithMessages(
+     reviewPreamblePath,
+     'You are doing generic code review.\n' +
+       ' Important! Please remind user to prepare proper AI preamble in' +
+       USER_PROJECT_REVIEW_PREAMBLE +
+       ' for this project. Use decent amount of ⚠️ to highlight lack of config.' +
+       ' Explicitly mention `' +
+       USER_PROJECT_REVIEW_PREAMBLE +
+       '`.'
+   );
+ }
+
+ /**
+  * @deprecated test only
+  * TODO should be gone together with slothContext itself
+  */
+ export function reset() {
+   Object.keys(slothContext).forEach((key) => {
+     delete (slothContext as unknown as Record<string, unknown>)[key];
+   });
+   slothContext.config = DEFAULT_CONFIG as SlothConfig;
+   slothContext.stdin = '';
+   slothContext.session = { configurable: { thread_id: uuidv4() } };
+ }
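Editor's note: for orientation, the JSON-first path above can be exercised directly. A minimal sketch of hypothetical usage (not part of the package; assumes the bundled groq vendor module and its default model):

import { slothContext, tryJsonConfig } from '#src/config.js';

// Dynamically imports ./configs/groq.js, instantiates the LangChain chat model,
// and merges it into slothContext.config.llm; unsupported llm.type values exit(1).
await tryJsonConfig({
  llm: { type: 'groq', model: 'deepseek-r1-distill-llama-70b' },
});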
package/src/configs/anthropic.ts ADDED
@@ -0,0 +1,55 @@
+ import path from 'node:path';
+ import { displayWarning } from '#src/consoleUtils.js';
+ import { env, getCurrentDir } from '#src/systemUtils.js';
+ import { writeFileIfNotExistsWithMessages } from '#src/utils.js';
+ import { LanguageModelLike } from '@langchain/core/language_models/base';
+ import type { AnthropicInput } from '@langchain/anthropic';
+ import type { BaseChatModelParams } from '@langchain/core/language_models/chat_models';
+
+ // Function to process JSON config and create Anthropic LLM instance
+ export async function processJsonConfig(
+   llmConfig: AnthropicInput & BaseChatModelParams
+ ): Promise<LanguageModelLike> {
+   const anthropic = await import('@langchain/anthropic');
+   // Use environment variable if available, otherwise use the config value
+   const anthropicApiKey = env.ANTHROPIC_API_KEY || llmConfig.apiKey;
+   return new anthropic.ChatAnthropic({
+     ...llmConfig,
+     apiKey: anthropicApiKey,
+     model: llmConfig.model || 'claude-3-7-sonnet-20250219',
+   });
+ }
+
+ const jsContent = `/* eslint-disable */
+ export async function configure(importFunction, global) {
+   // this is going to be imported from sloth dependencies,
+   // but can potentially be pulled from global node modules or from this project
+   // At a moment only google-vertexai and anthropic packaged with Sloth, but you can install support for any other langchain llms
+   const anthropic = await importFunction('@langchain/anthropic');
+   return {
+     llm: new anthropic.ChatAnthropic({
+       apiKey: process.env.ANTHROPIC_API_KEY, // Default value, but you can provide the key in many different ways, even as literal
+       model: "claude-3-7-sonnet-20250219" // Don't forget to check new models availability.
+     })
+   };
+ }
+ `;
+
+ const jsonContent = `{
+   "llm": {
+     "type": "anthropic",
+     "apiKey": "your-api-key-here",
+     "model": "claude-3-7-sonnet-20250219"
+   }
+ }`;
+
+ export function init(configFileName: string): void {
+   const currentDir = getCurrentDir();
+   path.join(currentDir, configFileName);
+
+   // Determine which content to use based on file extension
+   const content = configFileName.endsWith('.json') ? jsonContent : jsContent;
+
+   writeFileIfNotExistsWithMessages(configFileName, content);
+   displayWarning(`You need to update your ${configFileName} to add your Anthropic API key.`);
+ }
package/src/configs/fake.ts ADDED
@@ -0,0 +1,15 @@
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+ import { displayWarning } from '#src/consoleUtils.js';
+ import type { FakeChatInput } from '@langchain/core/utils/testing';
+
+ // Function to process JSON config and create Fake LLM instance for testing
+ export async function processJsonConfig(llmConfig: FakeChatInput): Promise<BaseChatModel | null> {
+   if (llmConfig.responses) {
+     const test = await import('@langchain/core/utils/testing');
+     return new test.FakeListChatModel(llmConfig);
+   }
+   displayWarning("Fake LLM requires 'responses' array in config");
+   return null;
+ }
+
+ // No init function needed for fake LLM as it's only used for testing
package/src/configs/groq.ts ADDED
@@ -0,0 +1,54 @@
+ import path from 'node:path';
+ import { displayInfo, displayWarning } from '#src/consoleUtils.js';
+ import { env, getCurrentDir } from '#src/systemUtils.js';
+ import { writeFileIfNotExistsWithMessages } from '#src/utils.js';
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+ import { ChatGroqInput } from '@langchain/groq';
+
+ // Function to process JSON config and create Groq LLM instance
+ export async function processJsonConfig(llmConfig: ChatGroqInput): Promise<BaseChatModel> {
+   const groq = await import('@langchain/groq');
+   // Use environment variable if available, otherwise use the config value
+   const groqApiKey = env.GROQ_API_KEY || llmConfig.apiKey;
+   return new groq.ChatGroq({
+     ...llmConfig,
+     apiKey: groqApiKey,
+     model: llmConfig.model || 'deepseek-r1-distill-llama-70b',
+   });
+ }
+
+ const jsContent = `/* eslint-disable */
+ export async function configure(importFunction, global) {
+   // this is going to be imported from sloth dependencies,
+   // but can potentially be pulled from global node modules or from this project
+   const groq = await importFunction('@langchain/groq');
+   return {
+     llm: new groq.ChatGroq({
+       model: "deepseek-r1-distill-llama-70b", // Check other models available
+       apiKey: process.env.GROQ_API_KEY, // Default value, but you can provide the key in many different ways, even as literal
+     })
+   };
+ }
+ `;
+
+ const jsonContent = `{
+   "llm": {
+     "type": "groq",
+     "model": "deepseek-r1-distill-llama-70b",
+     "apiKey": "your-api-key-here"
+   }
+ }`;
+
+ export function init(configFileName: string): void {
+   const currentDir = getCurrentDir();
+   path.join(currentDir, configFileName);
+
+   // Determine which content to use based on file extension
+   const content = configFileName.endsWith('.json') ? jsonContent : jsContent;
+
+   writeFileIfNotExistsWithMessages(configFileName, content);
+   displayInfo(
+     `You can define GROQ_API_KEY environment variable with your Groq API key and it will work with default model.`
+   );
+   displayWarning(`You need to edit your ${configFileName} to configure model.`);
+ }
package/src/configs/vertexai.ts ADDED
@@ -0,0 +1,53 @@
+ import { LanguageModelLike } from '@langchain/core/language_models/base';
+ import path from 'node:path';
+ import { displayWarning } from '#src/consoleUtils.js';
+ import { getCurrentDir } from '#src/systemUtils.js';
+ import { writeFileIfNotExistsWithMessages } from '#src/utils.js';
+ import { ChatVertexAIInput } from '@langchain/google-vertexai';
+
+ const jsContent = `/* eslint-disable */
+ export async function configure(importFunction, global) {
+   // this is going to be imported from sloth dependencies,
+   // but can potentially be pulled from global node modules or from this project
+   const vertexAi = await importFunction('@langchain/google-vertexai');
+   return {
+     llm: new vertexAi.ChatVertexAI({
+       model: "gemini-2.5-pro-preview-05-06", // Consider checking for latest recommended model versions
+       // temperature: 0,
+       // Other parameters might be relevant depending on Vertex AI API updates
+       // The project is not in the interface, but it is in documentation
+       // project: 'your-cool-gcloud-project'
+     })
+   }
+ }
+ `;
+
+ const jsonContent = `{
+   "llm": {
+     "type": "vertexai",
+     "model": "gemini-2.5-pro-preview-05-06",
+     "temperature": 0
+   }
+ }`;
+
+ export function init(configFileName: string): void {
+   const currentDir = getCurrentDir();
+   path.join(currentDir, configFileName);
+
+   // Determine which content to use based on file extension
+   const content = configFileName.endsWith('.json') ? jsonContent : jsContent;
+
+   writeFileIfNotExistsWithMessages(configFileName, content);
+   displayWarning(
+     'For Google VertexAI you likely to need to do `gcloud auth login` and `gcloud auth application-default login`.'
+   );
+ }
+
+ // Function to process JSON config and create VertexAI LLM instance
+ export async function processJsonConfig(llmConfig: ChatVertexAIInput): Promise<LanguageModelLike> {
+   const vertexAi = await import('@langchain/google-vertexai');
+   return new vertexAi.ChatVertexAI({
+     ...llmConfig,
+     model: llmConfig.model || 'gemini-2.5-pro-preview-05-06',
+   });
+ }
package/src/consoleUtils.ts ADDED
@@ -0,0 +1,33 @@
+ import chalk from 'chalk';
+ import { debug as systemDebug, error as systemError, log } from '#src/systemUtils.js';
+
+ // TODO it seems like commander supports coloured output, maybe dependency to chalk can be removed
+
+ export function displayError(message: string): void {
+   systemError(chalk.red(message));
+ }
+
+ export function displayWarning(message: string): void {
+   systemError(chalk.yellow(message));
+ }
+
+ export function displaySuccess(message: string): void {
+   systemError(chalk.green(message));
+ }
+
+ export function displayInfo(message: string): void {
+   systemError(chalk.blue(message));
+ }
+
+ export function display(message: string): void {
+   log(message);
+ }
+
+ export function displayDebug(message: string | Error | undefined): void {
+   // TODO make it controlled by config
+   if (message instanceof Error) {
+     systemDebug(message.stack || '');
+   } else if (message !== undefined) {
+     systemDebug(message);
+   }
+ }
package/src/index.ts ADDED
@@ -0,0 +1,21 @@
+ import { Command } from 'commander';
+ import { askCommand } from '#src/commands/askCommand.js';
+ import { initCommand } from '#src/commands/initCommand.js';
+ import { reviewCommand } from '#src/commands/reviewCommand.js';
+ import { slothContext } from '#src/config.js';
+ import { getSlothVersion, readStdin } from '#src/utils.js';
+
+ const program = new Command();
+
+ program
+   .name('gsloth')
+   .description('Gaunt Sloth Assistant reviewing your PRs')
+   .version(getSlothVersion());
+
+ initCommand(program);
+ reviewCommand(program, slothContext);
+ askCommand(program);
+
+ // TODO add general interactive chat command
+
+ await readStdin(program);
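Editor's note: illustrative invocations of this entry point, inferred from the command wiring above (hypothetical, not documented output of the package):

// gsloth init anthropic               -- initCommand: scaffold a .gsloth.config.json
// gsloth ask "What does this do?"     -- askCommand
// git --no-pager diff | gsloth review -- reviewCommand; readStdin(program) presumably
// buffers any piped input (slothContext.stdin) before commander parses argv.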
package/src/modules/questionAnsweringModule.ts ADDED
@@ -0,0 +1,97 @@
+ import type { SlothContext } from '#src/config.js';
+ import { slothContext } from '#src/config.js';
+ import { display, displayError, displaySuccess } from '#src/consoleUtils.js';
+ import type { Message, ProgressCallback, State } from '#src/modules/types.js';
+ import { getCurrentDir } from '#src/systemUtils.js';
+ import { fileSafeLocalDate, ProgressIndicator, toFileSafeString } from '#src/utils.js';
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+ import { AIMessageChunk, HumanMessage, SystemMessage } from '@langchain/core/messages';
+ import { END, MemorySaver, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';
+ import { writeFileSync } from 'node:fs';
+ import * as path from 'node:path';
+
+ /**
+  * Ask a question and get an answer from the LLM
+  * @param source - The source of the question (used for file naming)
+  * @param preamble - The preamble to send to the LLM
+  * @param content - The content of the question
+  */
+ export async function askQuestion(
+   source: string,
+   preamble: string,
+   content: string
+ ): Promise<void> {
+   const progressIndicator = new ProgressIndicator('Thinking.');
+   const outputContent = await askQuestionInner(
+     slothContext,
+     () => progressIndicator.indicate(),
+     preamble,
+     content
+   );
+   const filePath = path.resolve(
+     getCurrentDir(),
+     toFileSafeString(source) + '-' + fileSafeLocalDate() + '.md'
+   );
+   display(`\nwriting ${filePath}`);
+   // TODO highlight LLM output with something like Prism.JS
+   display('\n' + outputContent);
+   try {
+     writeFileSync(filePath, outputContent);
+     displaySuccess(`This report can be found in ${filePath}`);
+   } catch (error) {
+     displayError(`Failed to write answer to file: ${filePath}`);
+     displayError(error instanceof Error ? error.message : String(error));
+     // TODO Consider if we want to exit or just log the error
+     // exit(1);
+   }
+ }
+
+ /**
+  * Inner function to ask a question and get an answer from the LLM
+  * @param context - The context object
+  * @param indicateProgress - Function to indicate progress
+  * @param preamble - The preamble to send to the LLM
+  * @param content - The content of the question
+  * @returns The answer from the LLM
+  */
+ export async function askQuestionInner(
+   context: SlothContext,
+   indicateProgress: ProgressCallback,
+   preamble: string,
+   content: string
+ ): Promise<string> {
+   // This node receives the current state (messages) and invokes the LLM
+   const callModel = async (state: State): Promise<{ messages: AIMessageChunk }> => {
+     // state.messages will contain the list including the system preamble and user diff
+     const response = await (context.config.llm as BaseChatModel).invoke(state.messages);
+     // MessagesAnnotation expects the node to return the new message(s) to be added to the state.
+     // Wrap the response in an array if it's a single message object.
+     return { messages: response };
+   };
+
+   // Define the graph structure with MessagesAnnotation state
+   const workflow = new StateGraph(MessagesAnnotation)
+     // Define the node and edge
+     .addNode('model', callModel)
+     .addEdge(START, 'model') // Start at the 'model' node
+     .addEdge('model', END); // End after the 'model' node completes
+
+   // Set up memory (optional but good practice for potential future multi-turn interactions)
+   const memory = new MemorySaver();
+
+   // Compile the workflow into a runnable app
+   const app = workflow.compile({ checkpointer: memory });
+
+   // Construct the initial messages, including the preamble as a system message
+   const messages: Message[] = [new SystemMessage(preamble), new HumanMessage(content)];
+
+   indicateProgress();
+   // TODO create proper progress indicator for async tasks.
+   const progress = setInterval(() => indicateProgress(), 1000);
+   const output = await app.invoke({ messages }, context.session);
+   clearInterval(progress);
+   const lastMessage = output.messages[output.messages.length - 1];
+   return typeof lastMessage.content === 'string'
+     ? lastMessage.content
+     : JSON.stringify(lastMessage.content);
+ }
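Editor's note: because askQuestionInner takes the context (and thus the LLM) explicitly, it can be driven without a real provider. A minimal test-style sketch (hypothetical usage; relies on the FakeListChatModel that the fake config above already imports):

import { FakeListChatModel } from '@langchain/core/utils/testing';
import { slothContext } from '#src/config.js';
import { askQuestionInner } from '#src/modules/questionAnsweringModule.js';

// Stub the LLM with canned responses so the graph runs without network access.
slothContext.config.llm = new FakeListChatModel({ responses: ['Hello!'] });
const answer = await askQuestionInner(slothContext, () => {}, 'You are a test.', 'Say hello');
// answer === 'Hello!'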
package/src/modules/reviewModule.ts ADDED
@@ -0,0 +1,81 @@
+ import type { SlothContext } from '#src/config.js';
+ import { slothContext } from '#src/config.js';
+ import { display, displayDebug, displayError, displaySuccess } from '#src/consoleUtils.js';
+ import type { Message, ProgressCallback, State } from '#src/modules/types.js';
+ import { getCurrentDir, stdout } from '#src/systemUtils.js';
+ import { fileSafeLocalDate, ProgressIndicator, toFileSafeString } from '#src/utils.js';
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+ import { AIMessageChunk, HumanMessage, SystemMessage } from '@langchain/core/messages';
+ import { END, MemorySaver, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';
+ import { writeFileSync } from 'node:fs';
+ import path from 'node:path';
+
+ export async function review(source: string, preamble: string, diff: string): Promise<void> {
+   const progressIndicator = new ProgressIndicator('Reviewing.');
+   const outputContent = await reviewInner(
+     slothContext,
+     () => progressIndicator.indicate(),
+     preamble,
+     diff
+   );
+   const filePath = path.resolve(
+     getCurrentDir(),
+     toFileSafeString(source) + '-' + fileSafeLocalDate() + '.md'
+   );
+   stdout.write('\n');
+   display(`writing ${filePath}`);
+   stdout.write('\n');
+   // TODO highlight LLM output with something like Prism.JS (maybe system emoji are enough ✅⚠️❌)
+   display(outputContent);
+   try {
+     writeFileSync(filePath, outputContent);
+     displaySuccess(`This report can be found in ${filePath}`);
+   } catch (error) {
+     displayDebug(error instanceof Error ? error : String(error));
+     displayError(`Failed to write review to file: ${filePath}`);
+     // Consider if you want to exit or just log the error
+     // exit(1);
+   }
+ }
+
+ export async function reviewInner(
+   context: SlothContext,
+   indicateProgress: ProgressCallback,
+   preamble: string,
+   diff: string
+ ): Promise<string> {
+   // This node receives the current state (messages) and invokes the LLM
+   const callModel = async (state: State): Promise<{ messages: AIMessageChunk }> => {
+     // state.messages will contain the list including the system preamble and user diff
+     const response = await (context.config.llm as BaseChatModel).invoke(state.messages);
+     // MessagesAnnotation expects the node to return the new message(s) to be added to the state.
+     // Wrap the response in an array if it's a single message object.
+     return { messages: response };
+   };
+
+   // Define the graph structure with MessagesAnnotation state
+   const workflow = new StateGraph(MessagesAnnotation)
+     // Define the node and edge
+     .addNode('model', callModel)
+     .addEdge(START, 'model') // Start at the 'model' node
+     .addEdge('model', END); // End after the 'model' node completes
+
+   // Set up memory (optional but good practice for potential future multi-turn interactions)
+   const memory = new MemorySaver(); // TODO extract to config
+
+   // Compile the workflow into a runnable app
+   const app = workflow.compile({ checkpointer: memory });
+
+   // Construct the initial messages, including the preamble as a system message
+   const messages: Message[] = [new SystemMessage(preamble), new HumanMessage(diff)];
+
+   indicateProgress();
+   // TODO create proper progress indicator for async tasks.
+   const progress = setInterval(() => indicateProgress(), 1000);
+   const output = await app.invoke({ messages }, context.session);
+   clearInterval(progress);
+   const lastMessage = output.messages[output.messages.length - 1];
+   return typeof lastMessage.content === 'string'
+     ? lastMessage.content
+     : JSON.stringify(lastMessage.content);
+ }