threadlines 0.4.0 → 0.6.0

@@ -97,29 +97,66 @@ async function checkCommand(options) {
  logger_1.logger.error('Not a git repository. Threadline requires a git repository.');
  process.exit(1);
  }
- // Pre-flight check: Validate OpenAI API key is set (required for local processing)
+ // Determine which LLM provider to use based on configuration
+ // Explicit provider selection (not a fallback pattern)
+ const bedrockConfig = (0, config_1.getBedrockConfig)(config);
  const openAIConfig = (0, config_1.getOpenAIConfig)(config);
- if (!openAIConfig) {
- logger_1.logger.error('Missing required environment variable: OPENAI_API_KEY');
+ let provider;
+ let bedrockConfigToUse;
+ let openAIConfigToUse;
+ // Explicit provider selection: handle all cases clearly
+ if (bedrockConfig && openAIConfig) {
+ // Both configured: use Bedrock (explicit priority)
+ logger_1.logger.warn('Both Bedrock and OpenAI are configured. Using Bedrock (priority provider).');
+ provider = 'bedrock';
+ bedrockConfigToUse = bedrockConfig;
+ openAIConfigToUse = undefined;
+ (0, config_1.logBedrockConfig)(bedrockConfig);
+ }
+ else if (bedrockConfig) {
+ // Only Bedrock configured: use Bedrock
+ provider = 'bedrock';
+ bedrockConfigToUse = bedrockConfig;
+ openAIConfigToUse = undefined;
+ (0, config_1.logBedrockConfig)(bedrockConfig);
+ }
+ else if (openAIConfig) {
+ // Only OpenAI configured: use OpenAI
+ provider = 'openai';
+ bedrockConfigToUse = undefined;
+ openAIConfigToUse = openAIConfig;
+ (0, config_1.logOpenAIConfig)(openAIConfig);
+ }
+ else {
+ // Neither configured: fail loudly
+ logger_1.logger.error('Missing required LLM provider configuration');
  logger_1.logger.output('');
- logger_1.logger.output(chalk_1.default.yellow('To fix this:'));
+ logger_1.logger.output(chalk_1.default.yellow('You need to configure either Bedrock or OpenAI:'));
  logger_1.logger.output('');
- logger_1.logger.output(chalk_1.default.white(' Local development:'));
- logger_1.logger.output(chalk_1.default.gray(' 1. Create a .env.local file in your project root'));
- logger_1.logger.output(chalk_1.default.gray(' 2. Add: OPENAI_API_KEY=your-openai-api-key'));
- logger_1.logger.output(chalk_1.default.gray(' 3. Make sure .env.local is in your .gitignore'));
+ logger_1.logger.output(chalk_1.default.white(' Option 1: Amazon Bedrock'));
+ logger_1.logger.output(chalk_1.default.gray(' Local development:'));
+ logger_1.logger.output(chalk_1.default.gray(' 1. Create a .env.local file in your project root'));
+ logger_1.logger.output(chalk_1.default.gray(' 2. Add: BEDROCK_ACCESS_KEY_ID=your-access-key-id'));
+ logger_1.logger.output(chalk_1.default.gray(' 3. Add: BEDROCK_SECRET_ACCESS_KEY=your-secret-access-key'));
+ logger_1.logger.output(chalk_1.default.gray(' 4. Ensure .threadlinerc contains: bedrock_model and bedrock_region'));
+ logger_1.logger.output(chalk_1.default.gray(' CI/CD: Add BEDROCK_ACCESS_KEY_ID and BEDROCK_SECRET_ACCESS_KEY as secrets'));
+ logger_1.logger.output(chalk_1.default.gray(' Ensure .threadlinerc contains: bedrock_model and bedrock_region'));
  logger_1.logger.output('');
- logger_1.logger.output(chalk_1.default.white(' CI/CD:'));
- logger_1.logger.output(chalk_1.default.gray(' GitHub Actions: Settings → Secrets → Add OPENAI_API_KEY'));
- logger_1.logger.output(chalk_1.default.gray(' GitLab CI: Settings CI/CD Variables Add OPENAI_API_KEY'));
- logger_1.logger.output(chalk_1.default.gray(' Bitbucket Pipelines: Repository settings → Repository variables → Add OPENAI_API_KEY'));
- logger_1.logger.output(chalk_1.default.gray(' Vercel: Settings Environment Variables Add OPENAI_API_KEY'));
+ logger_1.logger.output(chalk_1.default.white(' Option 2: OpenAI'));
+ logger_1.logger.output(chalk_1.default.gray(' Local development:'));
+ logger_1.logger.output(chalk_1.default.gray(' 1. Create a .env.local file in your project root'));
+ logger_1.logger.output(chalk_1.default.gray(' 2. Add: OPENAI_API_KEY=your-openai-api-key'));
+ logger_1.logger.output(chalk_1.default.gray(' 3. Ensure .threadlinerc contains: openai_model and openai_service_tier'));
+ logger_1.logger.output(chalk_1.default.gray(' CI/CD:'));
+ logger_1.logger.output(chalk_1.default.gray(' GitHub Actions: Settings → Secrets → Add OPENAI_API_KEY'));
+ logger_1.logger.output(chalk_1.default.gray(' GitLab CI: Settings → CI/CD → Variables → Add OPENAI_API_KEY'));
+ logger_1.logger.output(chalk_1.default.gray(' Bitbucket Pipelines: Repository settings → Repository variables → Add OPENAI_API_KEY'));
+ logger_1.logger.output(chalk_1.default.gray(' Vercel: Settings → Environment Variables → Add OPENAI_API_KEY'));
+ logger_1.logger.output(chalk_1.default.gray(' Ensure .threadlinerc contains: openai_model and openai_service_tier'));
  logger_1.logger.output('');
  logger_1.logger.output(chalk_1.default.gray('Get your OpenAI API key at: https://platform.openai.com/api-keys'));
  process.exit(1);
  }
- // Log OpenAI configuration
- (0, config_1.logOpenAIConfig)(openAIConfig);
  // 1. Find and validate threadlines
  logger_1.logger.info('Finding threadlines...');
  const threadlines = await (0, experts_1.findThreadlines)(cwd, gitRoot);
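
The new pre-flight block reduces to a small decision table: Bedrock wins whenever both providers are configured, each provider is used when it alone is configured, and neither configured is a hard failure. A condensed sketch of that rule (the `selectProvider` helper is hypothetical and omits the config logging, it is not part of the package):

    // Hypothetical condensed form of the branch logic above.
    // Bedrock takes priority when both providers are configured.
    function selectProvider(bedrockConfig, openAIConfig, logger) {
        if (bedrockConfig && openAIConfig) {
            logger.warn('Both Bedrock and OpenAI are configured. Using Bedrock (priority provider).');
        }
        if (bedrockConfig) {
            return { provider: 'bedrock', bedrockConfigToUse: bedrockConfig, openAIConfigToUse: undefined };
        }
        if (openAIConfig) {
            return { provider: 'openai', bedrockConfigToUse: undefined, openAIConfigToUse: openAIConfig };
        }
        throw new Error('Missing required LLM provider configuration');
    }
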
@@ -263,7 +300,7 @@ async function checkCommand(options) {
  contextContent
  };
  });
- // 5. Process threadlines locally using OpenAI
+ // 5. Process threadlines locally using configured LLM provider
  logger_1.logger.info('Running threadline checks...');
  const processResponse = await (0, expert_1.processThreadlines)({
  threadlines: threadlinesWithContext.map(t => ({
@@ -276,9 +313,18 @@ async function checkCommand(options) {
  })),
  diff: gitDiff.diff,
  files: gitDiff.changedFiles,
- apiKey: openAIConfig.apiKey,
- model: openAIConfig.model,
- serviceTier: openAIConfig.serviceTier,
+ provider,
+ bedrockConfig: bedrockConfigToUse ? {
+ accessKeyId: bedrockConfigToUse.accessKeyId,
+ secretAccessKey: bedrockConfigToUse.secretAccessKey,
+ model: bedrockConfigToUse.model,
+ region: bedrockConfigToUse.region
+ } : undefined,
+ openaiConfig: openAIConfigToUse ? {
+ apiKey: openAIConfigToUse.apiKey,
+ model: openAIConfigToUse.model,
+ serviceTier: openAIConfigToUse.serviceTier
+ } : undefined,
  contextLinesForLLM: config.diff_context_lines
  });
  // Convert ProcessThreadlinesResponse to ReviewResponse format for displayResults
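
The call site now passes a discriminating `provider` field plus exactly one populated config object. Summarized as a JSDoc typedef (illustrative only; the package's own type declarations are not shown in this diff):

    /**
     * Illustrative shape of the processThreadlines request after this change.
     * Exactly one of bedrockConfig/openaiConfig is set, matching `provider`.
     * @typedef {Object} ProcessThreadlinesRequest
     * @property {Array} threadlines
     * @property {string} diff
     * @property {string[]} files
     * @property {'bedrock'|'openai'} provider
     * @property {{accessKeyId: string, secretAccessKey: string, model: string, region: string}} [bedrockConfig]
     * @property {{apiKey: string, model: string, serviceTier: string}} [openaiConfig]
     * @property {number} contextLinesForLLM
     */
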
@@ -93,6 +93,8 @@ async function initCommand() {
  "api_url": "https://devthreadline.com",
  "openai_model": "gpt-5.2",
  "openai_service_tier": "Flex",
+ "bedrock_model": "us.anthropic.claude-sonnet-4-5-20250929-v1:0",
+ "bedrock_region": "us-east-1",
  "diff_context_lines": 10
  }`;
  fs.writeFileSync(configFile, configContent, 'utf-8');
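
Parsed, the template written by `init` now carries both providers' settings side by side; the fields visible in this hunk come out as the object below (the generated file may contain more fields above the ones shown):

    // Fields of the generated .threadlinerc visible in this hunk, as a parsed object:
    const threadlinerc = {
        api_url: 'https://devthreadline.com',
        openai_model: 'gpt-5.2',
        openai_service_tier: 'Flex',
        bedrock_model: 'us.anthropic.claude-sonnet-4-5-20250929-v1:0',
        bedrock_region: 'us-east-1',
        diff_context_lines: 10
    };
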
@@ -116,7 +118,12 @@ async function initCommand() {
  logger_1.logger.output(chalk_1.default.white(' To use threadlines check, you need:'));
  logger_1.logger.output('');
  logger_1.logger.output(chalk_1.default.white(' Create a .env.local file in your project root with:'));
+ logger_1.logger.output(chalk_1.default.gray(' // For OpenAI (one of these is required):'));
  logger_1.logger.output(chalk_1.default.gray(' OPENAI_API_KEY=your-openai-api-key'));
+ logger_1.logger.output(chalk_1.default.gray(' // For Bedrock (one of these is required):'));
+ logger_1.logger.output(chalk_1.default.gray(' BEDROCK_ACCESS_KEY_ID=your-access-key-id'));
+ logger_1.logger.output(chalk_1.default.gray(' BEDROCK_SECRET_ACCESS_KEY=your-secret-access-key'));
+ logger_1.logger.output(chalk_1.default.gray(' // For Threadlines sync (required if mode is "online"):'));
  logger_1.logger.output(chalk_1.default.gray(' THREADLINE_API_KEY=your-api-key-here'));
  logger_1.logger.output(chalk_1.default.gray(' THREADLINE_ACCOUNT=your-email@example.com'));
  logger_1.logger.output('');
package/dist/git/local.js CHANGED
@@ -95,58 +95,38 @@ async function getLocalContext(repoRoot, commitSha) {
  * or review unstaged changes if nothing is staged.
  */
  async function getDiff(repoRoot) {
- // Get git status in porcelain format to determine what changes exist
- // Porcelain format: XY filename
- // X = staged status, Y = unstaged status
- // ' ' = no change, 'M' = modified, 'A' = added, 'D' = deleted, etc.
- // '?' = untracked (only in Y position, X is always '?' too)
+ // Use git diff commands as source of truth (more reliable than git status --porcelain)
+ // git status --porcelain can be inconsistent in some edge cases
+ // Check staged files first (source of truth)
+ const stagedFilesOutput = (0, child_process_1.execSync)('git diff --cached --name-only', {
+ encoding: 'utf-8',
+ cwd: repoRoot
+ }).trim();
+ const actualStagedFiles = stagedFilesOutput ? stagedFilesOutput.split('\n') : [];
+ // Check unstaged files (source of truth)
+ const unstagedFilesOutput = (0, child_process_1.execSync)('git diff --name-only', {
+ encoding: 'utf-8',
+ cwd: repoRoot
+ }).trim();
+ const actualUnstagedFiles = unstagedFilesOutput ? unstagedFilesOutput.split('\n') : [];
+ // Get untracked files from git status --porcelain (only reliable way to get untracked)
  const statusOutput = (0, child_process_1.execSync)('git status --porcelain', {
  encoding: 'utf-8',
  cwd: repoRoot
  }).trim();
  const lines = statusOutput ? statusOutput.split('\n') : [];
- const staged = [];
- const unstaged = [];
  const untracked = [];
  for (const line of lines) {
  const stagedStatus = line[0];
  const unstagedStatus = line[1];
- // Collect untracked files separately (they need special handling)
+ // Collect untracked files (only reliable way to detect them)
  if (stagedStatus === '?' && unstagedStatus === '?') {
- // Format: "?? filename" - skip 3 characters
  const file = line.slice(3);
  untracked.push(file);
- continue;
- }
- // For tracked files, the format can be:
- // - "M filename" (staged, no leading space) - skip 2 characters
- // - " M filename" (unstaged, leading space) - skip 3 characters
- // - "MM filename" (both staged and unstaged) - skip 3 characters
- let file;
- if (stagedStatus !== ' ' && unstagedStatus === ' ') {
- // Staged only: "M filename" - skip 2 characters (M + space)
- file = line.slice(2);
- }
- else {
- // Unstaged or both: " M filename" or "MM filename" - skip 3 characters
- file = line.slice(3);
- }
- if (stagedStatus !== ' ') {
- staged.push(file);
- }
- if (unstagedStatus !== ' ' && unstagedStatus !== '?') {
- unstaged.push(file);
  }
  }
  let diff;
  let changedFiles;
- // Check if there are actually staged files (use git diff as source of truth)
- // git status parsing can be inconsistent, so we verify with git diff
- const stagedFilesOutput = (0, child_process_1.execSync)('git diff --cached --name-only', {
- encoding: 'utf-8',
- cwd: repoRoot
- }).trim();
- const actualStagedFiles = stagedFilesOutput ? stagedFilesOutput.split('\n') : [];
  // Workflow A: Developer has staged files - check ONLY staged files
  // (Ignore unstaged and untracked - developer explicitly chose to check staged)
  if (actualStagedFiles.length > 0) {
@@ -168,27 +148,22 @@ async function getDiff(repoRoot) {
  };
  }
  // No staged files - log clearly and continue to unstaged/untracked
- if (staged.length > 0) {
- // git status showed staged files but git diff doesn't - they were likely unstaged
- logger_1.logger.info(`No staged files detected (files may have been unstaged), checking unstaged/untracked files instead.`);
+ if (actualUnstagedFiles.length > 0 || untracked.length > 0) {
+ logger_1.logger.info(`No staged files, checking unstaged/untracked files.`);
  }
  else {
- logger_1.logger.info(`No staged files, checking unstaged/untracked files.`);
+ logger_1.logger.info(`No staged files detected.`);
  }
  // Workflow B: Developer hasn't staged files - check unstaged + untracked files
  // (Untracked files are conceptually "unstaged" - files being worked on but not committed)
- if (unstaged.length > 0 || untracked.length > 0) {
+ if (actualUnstagedFiles.length > 0 || untracked.length > 0) {
  // Get unstaged diff if there are unstaged files
- if (unstaged.length > 0) {
+ if (actualUnstagedFiles.length > 0) {
  diff = (0, child_process_1.execSync)('git diff -U200', {
  encoding: 'utf-8',
  cwd: repoRoot
  });
- const changedFilesOutput = (0, child_process_1.execSync)('git diff --name-only', {
- encoding: 'utf-8',
- cwd: repoRoot
- }).trim();
- changedFiles = changedFilesOutput ? changedFilesOutput.split('\n') : [];
+ changedFiles = actualUnstagedFiles;
  }
  else {
  diff = '';
@@ -222,6 +197,14 @@ async function getDiff(repoRoot) {
  ? (diff ? diff + '\n' : '') + untrackedDiffs.join('\n')
  : diff;
  const allChangedFiles = [...changedFiles, ...untrackedFileList];
+ // Validate that we actually have changes to review
+ // This can happen if:
+ // 1. git status showed files but git diff returns empty (files were staged/unstaged between commands)
+ // 2. All untracked items are directories (skipped)
+ // 3. Parsing incorrectly categorized files
+ if (allChangedFiles.length === 0 || !combinedDiff || combinedDiff.trim() === '') {
+ throw new Error('No changes detected. Stage files with "git add" or modify files to run threadlines.');
+ }
  const unstagedCount = changedFiles.length;
  const untrackedCount = untrackedFileList.length;
  if (unstagedCount > 0 && untrackedCount > 0) {
@@ -230,7 +213,7 @@ async function getDiff(repoRoot) {
  else if (unstagedCount > 0) {
  logger_1.logger.info(`Checking UNSTAGED changes (${unstagedCount} file(s))`);
  }
- else {
+ else if (untrackedCount > 0) {
  logger_1.logger.info(`Checking UNTRACKED files (${untrackedCount} file(s))`);
  }
  return {
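
The refactor keeps `git status --porcelain` only for the one thing `git diff` cannot report: untracked files, which porcelain marks as `?? <path>`. A minimal standalone sketch of the retained parsing:

    // Minimal sketch of the retained porcelain parsing: untracked entries are
    // the "?? <path>" lines; staged/unstaged file lists now come from git diff.
    const { execSync } = require('child_process');
    function listUntracked(repoRoot) {
        const out = execSync('git status --porcelain', { encoding: 'utf-8', cwd: repoRoot }).trim();
        if (!out) return [];
        return out.split('\n')
            .filter(line => line.startsWith('??'))
            .map(line => line.slice(3));
    }
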
@@ -5,9 +5,18 @@ const single_expert_1 = require("./single-expert");
  const logger_1 = require("../utils/logger");
  const EXPERT_TIMEOUT = 40000; // 40 seconds
  async function processThreadlines(request) {
- const { threadlines, diff, files, apiKey, model, serviceTier, contextLinesForLLM } = request;
+ const { threadlines, diff, files, provider, bedrockConfig, openaiConfig, contextLinesForLLM } = request;
  // Determine LLM model (same for all threadlines in this check)
- const llmModel = `${model} ${serviceTier}`;
+ let llmModel;
+ if (provider === 'bedrock' && bedrockConfig) {
+ llmModel = bedrockConfig.model;
+ }
+ else if (provider === 'openai' && openaiConfig) {
+ llmModel = `${openaiConfig.model} ${openaiConfig.serviceTier}`;
+ }
+ else {
+ throw new Error('Invalid provider configuration');
+ }
  // Create promises with timeout
  const promises = threadlines.map(threadline => {
  let timeoutId = null;
@@ -35,7 +44,7 @@ async function processThreadlines(request) {
  }
  }, EXPERT_TIMEOUT);
  });
- const actualPromise = (0, single_expert_1.processThreadline)(threadline, diff, files, apiKey, model, serviceTier, contextLinesForLLM).then(result => {
+ const actualPromise = (0, single_expert_1.processThreadline)(threadline, diff, files, provider, bedrockConfig, openaiConfig, contextLinesForLLM).then(result => {
  // Mark as resolved and clear timeout if it hasn't fired yet
  resolved = true;
  if (timeoutId) {
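
Each `processThreadline` call is raced against the 40-second `EXPERT_TIMEOUT` timer, and the timer is cleared once the real call settles so it does not keep the event loop alive. The generic pattern looks like this (a simplified stand-in for the inline version in the hunk; the package's exact race wiring is not fully visible here):

    // Generic promise-with-timeout sketch: settle with a fallback value if the
    // wrapped promise has not settled within `ms` milliseconds.
    function withTimeout(promise, ms, timeoutValue) {
        let timeoutId = null;
        const timer = new Promise(resolve => {
            timeoutId = setTimeout(() => resolve(timeoutValue), ms);
        });
        const guarded = promise.finally(() => {
            if (timeoutId) clearTimeout(timeoutId); // avoid keeping the process alive
        });
        return Promise.race([guarded, timer]);
    }
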
@@ -94,10 +103,15 @@ async function processThreadlines(request) {
  });
  }
  }
- // Use actual model from OpenAI response, append service tier
+ // Use actual model from response
  let modelToStore;
  if (actualModelFromResponse) {
- modelToStore = `${actualModelFromResponse} ${serviceTier}`;
+ if (provider === 'openai' && openaiConfig) {
+ modelToStore = `${actualModelFromResponse} ${openaiConfig.serviceTier}`;
+ }
+ else {
+ modelToStore = actualModelFromResponse;
+ }
  }
  else {
  // All calls failed - log prominently and preserve requested model for debugging
@@ -1,11 +1,44 @@
  "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function (o) {
+ var ar = [];
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ })();
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.processThreadline = processThreadline;
  const prompt_builder_1 = require("../llm/prompt-builder");
  const diff_filter_1 = require("../utils/diff-filter");
  const slim_diff_1 = require("../utils/slim-diff");
  const logger_1 = require("../utils/logger");
- async function processThreadline(threadline, diff, files, apiKey, model, serviceTier, contextLinesForLLM) {
+ async function processThreadline(threadline, diff, files, provider, bedrockConfig, openaiConfig, contextLinesForLLM) {
  // Filter files that match threadline patterns
  const relevantFiles = files.filter(file => threadline.patterns.some(pattern => matchesPattern(file, pattern)));
  // If no files match, return not_relevant
@@ -40,8 +73,9 @@ async function processThreadline(threadline, diff, files, apiKey, model, service
  }
  // Build prompt with trimmed diff (full filtered diff is still stored for UI)
  const prompt = (0, prompt_builder_1.buildPrompt)(threadline, trimmedDiffForLLM, filesInFilteredDiff);
+ const modelName = provider === 'bedrock' ? bedrockConfig?.model : openaiConfig?.model;
  logger_1.logger.debug(` 📝 Processing ${threadline.id}: ${relevantFiles.length} relevant files, ${filesInFilteredDiff.length} files in filtered diff`);
- logger_1.logger.debug(` 🤖 Calling LLM (${model}) for ${threadline.id}...`);
+ logger_1.logger.debug(` 🤖 Calling LLM (${provider}, ${modelName}) for ${threadline.id}...`);
  // Capture timing for LLM call
  const llmCallStartedAt = new Date().toISOString();
  let llmCallFinishedAt;
@@ -50,98 +84,25 @@ async function processThreadline(threadline, diff, files, apiKey, model, service
  let llmCallStatus = 'success';
  let llmCallErrorMessage = null;
  try {
- // Build request body for OpenAI API (direct HTTP call - zero dependencies)
- const requestBody = {
- model,
- messages: [
- {
- role: 'system',
- content: 'You are a code quality checker. Analyze code changes against the threadline guidelines. Be precise - only flag actual violations. Return only valid JSON, no other text.'
- },
- {
- role: 'user',
- content: prompt
- }
- ],
- response_format: { type: 'json_object' },
- temperature: 0.1
- };
- // Add service_tier if not 'standard'
- const normalizedServiceTier = serviceTier.toLowerCase();
- if (normalizedServiceTier !== 'standard' && (normalizedServiceTier === 'auto' || normalizedServiceTier === 'default' || normalizedServiceTier === 'flex')) {
- requestBody.service_tier = normalizedServiceTier;
+ let actualModel;
+ let content;
+ if (provider === 'bedrock' && bedrockConfig) {
+ const bedrockResult = await callBedrockAPI(bedrockConfig, prompt);
+ actualModel = bedrockResult.model;
+ content = bedrockResult.content;
+ llmCallTokens = bedrockResult.tokens;
  }
- // Direct HTTP call to OpenAI API (native fetch - zero dependencies)
- // Use AbortController for timeout (higher-level timeout in expert.ts is 40s, use 45s here as safety margin)
- const controller = new AbortController();
- const timeoutId = setTimeout(() => controller.abort(), 45000);
- let httpResponse;
- try {
- httpResponse = await fetch('https://api.openai.com/v1/chat/completions', {
- method: 'POST',
- headers: {
- 'Authorization': `Bearer ${apiKey}`,
- 'Content-Type': 'application/json',
- },
- body: JSON.stringify(requestBody),
- signal: controller.signal,
- });
- clearTimeout(timeoutId);
- }
- catch (fetchError) {
- clearTimeout(timeoutId);
- // Handle AbortError from timeout
- if (fetchError instanceof Error && fetchError.name === 'AbortError') {
- throw new Error('Request timeout');
- }
- throw fetchError;
+ else if (provider === 'openai' && openaiConfig) {
+ const openaiResult = await callOpenAIAPI(openaiConfig, prompt);
+ actualModel = openaiResult.model;
+ content = openaiResult.content;
+ llmCallTokens = openaiResult.tokens;
  }
- if (!httpResponse.ok) {
- const errorText = await httpResponse.text();
- let errorMessage = `HTTP ${httpResponse.status}: ${errorText}`;
- // Try to parse OpenAI error structure
- try {
- const errorData = JSON.parse(errorText);
- if (errorData.error) {
- errorMessage = errorData.error.message || errorText;
- // Create error object matching SDK error structure for compatibility
- const errorObj = new Error(errorMessage);
- errorObj.status = httpResponse.status;
- errorObj.error = {
- type: errorData.error.type,
- code: errorData.error.code,
- param: errorData.error.param,
- };
- throw errorObj;
- }
- }
- catch (parseError) {
- // If it's already our structured error, re-throw it
- const structuredError = parseError;
- if (structuredError.status) {
- throw parseError;
- }
- // Otherwise create a basic error
- throw new Error(errorMessage);
- }
+ else {
+ throw new Error(`Invalid provider configuration: ${provider}`);
  }
- const response = await httpResponse.json();
- // Capture the actual model returned by OpenAI (may differ from requested)
- const actualModel = response.model;
  llmCallFinishedAt = new Date().toISOString();
  llmCallResponseTimeMs = new Date(llmCallFinishedAt).getTime() - new Date(llmCallStartedAt).getTime();
- // Capture token usage if available
- if (response.usage) {
- llmCallTokens = {
- prompt_tokens: response.usage.prompt_tokens || 0,
- completion_tokens: response.usage.completion_tokens || 0,
- total_tokens: response.usage.total_tokens || 0
- };
- }
- const content = response.choices?.[0]?.message?.content;
- if (!content) {
- throw new Error('No response from LLM');
- }
  const parsed = JSON.parse(content);
  logger_1.logger.debug(` ✅ ${threadline.id}: ${parsed.status}`);
  // Extract file references - rely entirely on LLM to provide them
@@ -193,11 +154,11 @@ async function processThreadline(threadline, diff, files, apiKey, model, service
  const errorMessage = error instanceof Error ? error.message : 'Unknown error';
  llmCallErrorMessage = errorMessage;
  // Log full error for debugging
- logger_1.logger.error(` ❌ OpenAI error: ${JSON.stringify(error, null, 2)}`);
- // Extract OpenAI error details from the error object
+ logger_1.logger.error(` ❌ ${provider.toUpperCase()} error: ${JSON.stringify(error, null, 2)}`);
+ // Extract error details from the error object
  // Handle both SDK-style errors and HTTP errors
  const errorObj = error;
- const openAIError = errorObj?.error || {};
+ const apiError = errorObj?.error || {};
  const rawErrorResponse = {
  status: errorObj?.status,
  headers: errorObj?.headers,
@@ -221,8 +182,8 @@ async function processThreadline(threadline, diff, files, apiKey, model, service
  reasoning: `Error: ${errorMessage}`,
  error: {
  message: errorMessage,
- type: openAIError?.type || errorObj?.type,
- code: openAIError?.code || errorObj?.code,
+ type: apiError?.type || errorObj?.type,
+ code: apiError?.code || errorObj?.code,
  rawResponse: rawErrorResponse
  },
  fileReferences: [],
@@ -240,6 +201,288 @@ async function processThreadline(threadline, diff, files, apiKey, model, service
  };
  }
  }
+ async function callBedrockAPI(config, prompt) {
+ // Dynamic import - only loads aws4 when Bedrock is configured
+ // aws4 is a required dependency, so it should always be available
+ const aws4Module = await Promise.resolve(`${'aws4'}`).then(s => __importStar(require(s)));
+ const aws4 = aws4Module.default || aws4Module;
+ // Define JSON schema for structured output via Tool Use
+ // This ensures Claude returns properly structured JSON without markdown wrapping
+ const toolSchema = {
+ type: 'object',
+ properties: {
+ status: {
+ type: 'string',
+ enum: ['compliant', 'attention', 'not_relevant'],
+ },
+ reasoning: {
+ type: 'string',
+ },
+ file_references: {
+ type: 'array',
+ items: {
+ type: 'string',
+ },
+ },
+ },
+ required: ['status', 'reasoning', 'file_references'],
+ };
+ // System message focused on analysis, not JSON format (tool enforces structure)
+ const systemMessage = 'You are a code quality checker. Analyze code changes against the threadline guidelines. Be precise - only flag actual violations.';
+ logger_1.logger.debug(` 🔧 Bedrock: Using Tool Use for structured JSON output`);
+ // Prepare the Converse API request body with Tool Use configuration
+ const body = JSON.stringify({
+ modelId: config.model,
+ system: [
+ {
+ text: systemMessage,
+ },
+ ],
+ messages: [
+ {
+ role: 'user',
+ content: [{ text: prompt }],
+ },
+ ],
+ toolConfig: {
+ tools: [
+ {
+ toolSpec: {
+ name: 'return_analysis_result',
+ description: 'Returns the code quality analysis result as structured JSON',
+ inputSchema: {
+ json: toolSchema,
+ },
+ },
+ },
+ ],
+ },
+ toolChoice: {
+ type: 'tool',
+ tool: {
+ name: 'return_analysis_result',
+ },
+ },
+ inferenceConfig: {
+ maxTokens: 4000, // Match OpenAI's typical max
+ },
+ });
+ // Prepare request options for aws4 signing
+ // Bedrock Converse API endpoint: POST /model/{modelId}/converse
+ const requestOptions = {
+ hostname: `bedrock-runtime.${config.region}.amazonaws.com`,
+ path: `/model/${config.model}/converse`,
+ method: 'POST',
+ service: 'bedrock',
+ region: config.region,
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: body,
+ };
+ // Sign the request with AWS SigV4
+ aws4.sign(requestOptions, {
+ accessKeyId: config.accessKeyId,
+ secretAccessKey: config.secretAccessKey,
+ });
+ // Use AbortController for timeout (higher-level timeout in expert.ts is 40s, use 45s here as safety margin)
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), 45000);
+ let httpResponse;
+ try {
+ // Make the HTTP request using native fetch (Node 18+)
+ const url = `https://${requestOptions.hostname}${requestOptions.path}`;
+ httpResponse = await fetch(url, {
+ method: requestOptions.method,
+ headers: requestOptions.headers,
+ body: requestOptions.body,
+ signal: controller.signal,
+ });
+ clearTimeout(timeoutId);
+ }
+ catch (fetchError) {
+ clearTimeout(timeoutId);
+ // Handle AbortError from timeout
+ if (fetchError instanceof Error && fetchError.name === 'AbortError') {
+ throw new Error('Request timeout');
+ }
+ throw fetchError;
+ }
+ if (!httpResponse.ok) {
+ const errorText = await httpResponse.text();
+ let errorMessage = `HTTP ${httpResponse.status}: ${errorText}`;
+ // Try to parse Bedrock error structure
+ try {
+ const errorData = JSON.parse(errorText);
+ if (errorData.message) {
+ errorMessage = errorData.message;
+ // Create error object matching SDK error structure for compatibility
+ const errorObj = new Error(errorMessage);
+ errorObj.status = httpResponse.status;
+ errorObj.error = {
+ type: errorData.__type,
+ };
+ throw errorObj;
+ }
+ }
+ catch (parseError) {
+ // If it's already our structured error, re-throw it
+ const structuredError = parseError;
+ if (structuredError.status) {
+ throw parseError;
+ }
+ // Otherwise create a basic error
+ throw new Error(errorMessage);
+ }
+ }
+ const responseData = await httpResponse.json();
+ logger_1.logger.debug(` 🔧 Bedrock: Parsing Tool Use response`);
+ // Extract structured JSON from Tool Use response
+ // With toolChoice set, Claude should always return a toolUse block
+ let toolUseInput = null;
+ if (responseData.output?.message?.content) {
+ const contentBlocks = responseData.output.message.content;
+ logger_1.logger.debug(` 🔧 Bedrock: Found ${contentBlocks.length} content block(s)`);
+ for (let i = 0; i < contentBlocks.length; i++) {
+ const block = contentBlocks[i];
+ if (block.toolUse) {
+ logger_1.logger.debug(` 🔧 Bedrock: Found toolUse block ${i + 1}: name="${block.toolUse.name}", id="${block.toolUse.id}"`);
+ if (block.toolUse.name !== 'return_analysis_result') {
+ throw new Error(`Unexpected tool name: ${block.toolUse.name}. Expected: return_analysis_result`);
+ }
+ if (!block.toolUse.input) {
+ throw new Error(`Tool Use block missing input field. Tool ID: ${block.toolUse.id}`);
+ }
+ toolUseInput = block.toolUse.input;
+ break; // Use first matching toolUse block
+ }
+ else if (block.text) {
+ logger_1.logger.debug(` 🔧 Bedrock: Found text block ${i + 1} (unexpected when toolChoice is set)`);
+ }
+ }
+ }
+ // Hard error if no toolUse block found (shouldn't happen with toolChoice)
+ if (!toolUseInput) {
+ logger_1.logger.error(` ❌ Bedrock: No toolUse block found in response`);
+ logger_1.logger.error(` ❌ Bedrock: Response structure: ${JSON.stringify(responseData, null, 2)}`);
+ throw new Error('Bedrock Tool Use failed: No toolUse block found in response. Claude did not use the required tool.');
+ }
+ logger_1.logger.debug(` 🔧 Bedrock: Successfully extracted tool input`);
+ // Convert tool input (already a JSON object) to string for consistency with OpenAI path
+ const content = JSON.stringify(toolUseInput);
+ // Map Bedrock token structure to OpenAI format
+ let tokens = null;
+ if (responseData.usage) {
+ tokens = {
+ prompt_tokens: responseData.usage.inputTokens || 0,
+ completion_tokens: responseData.usage.outputTokens || 0,
+ total_tokens: responseData.usage.totalTokens || 0
+ };
+ }
+ return {
+ model: config.model,
+ content,
+ tokens
+ };
+ }
+ async function callOpenAIAPI(config, prompt) {
+ // Build request body for OpenAI API (direct HTTP call - zero dependencies)
+ const requestBody = {
+ model: config.model,
+ messages: [
+ {
+ role: 'system',
+ content: 'You are a code quality checker. Analyze code changes against the threadline guidelines. Be precise - only flag actual violations. Return only valid JSON, no other text.'
+ },
+ {
+ role: 'user',
+ content: prompt
+ }
+ ],
+ response_format: { type: 'json_object' },
+ temperature: 0.1
+ };
+ // Add service_tier if not 'standard'
+ const normalizedServiceTier = config.serviceTier.toLowerCase();
+ if (normalizedServiceTier !== 'standard' && (normalizedServiceTier === 'auto' || normalizedServiceTier === 'default' || normalizedServiceTier === 'flex')) {
+ requestBody.service_tier = normalizedServiceTier;
+ }
+ // Direct HTTP call to OpenAI API (native fetch - zero dependencies)
+ // Use AbortController for timeout (higher-level timeout in expert.ts is 40s, use 45s here as safety margin)
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), 45000);
+ let httpResponse;
+ try {
+ httpResponse = await fetch('https://api.openai.com/v1/chat/completions', {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${config.apiKey}`,
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(requestBody),
+ signal: controller.signal,
+ });
+ clearTimeout(timeoutId);
+ }
+ catch (fetchError) {
+ clearTimeout(timeoutId);
+ // Handle AbortError from timeout
+ if (fetchError instanceof Error && fetchError.name === 'AbortError') {
+ throw new Error('Request timeout');
+ }
+ throw fetchError;
+ }
+ if (!httpResponse.ok) {
+ const errorText = await httpResponse.text();
+ let errorMessage = `HTTP ${httpResponse.status}: ${errorText}`;
+ // Try to parse OpenAI error structure
+ try {
+ const errorData = JSON.parse(errorText);
+ if (errorData.error) {
+ errorMessage = errorData.error.message || errorText;
+ // Create error object matching SDK error structure for compatibility
+ const errorObj = new Error(errorMessage);
+ errorObj.status = httpResponse.status;
+ errorObj.error = {
+ type: errorData.error.type,
+ code: errorData.error.code,
+ param: errorData.error.param,
+ };
+ throw errorObj;
+ }
+ }
+ catch (parseError) {
+ // If it's already our structured error, re-throw it
+ const structuredError = parseError;
+ if (structuredError.status) {
+ throw parseError;
+ }
+ // Otherwise create a basic error
+ throw new Error(errorMessage);
+ }
+ }
+ const response = await httpResponse.json();
+ // Capture the actual model returned by OpenAI (may differ from requested)
+ const actualModel = response.model || config.model;
+ // Capture token usage if available
+ let tokens = null;
+ if (response.usage) {
+ tokens = {
+ prompt_tokens: response.usage.prompt_tokens || 0,
+ completion_tokens: response.usage.completion_tokens || 0,
+ total_tokens: response.usage.total_tokens || 0
+ };
+ }
+ const content = response.choices?.[0]?.message?.content;
+ if (!content) {
+ throw new Error('No response from LLM');
+ }
+ return {
+ model: actualModel,
+ content,
+ tokens
+ };
+ }
  function matchesPattern(filePath, pattern) {
  // Convert glob pattern to regex
  // Handle ** first (before single *), escape it to avoid double replacement
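
Both new call paths share the same transport strategy: plain HTTPS via native fetch, no provider SDKs. For Bedrock that means signing the Converse request with SigV4 using the small `aws4` package, which mutates the request options in place to add `Authorization` and `X-Amz-Date` headers. Reduced to a standalone sketch (the `converse` helper name and placeholder arguments are illustrative):

    // Standalone sketch of the SigV4-signed Converse call in callBedrockAPI above.
    const aws4 = require('aws4');

    async function converse(region, modelId, credentials, requestBody) {
        const opts = {
            hostname: `bedrock-runtime.${region}.amazonaws.com`,
            path: `/model/${modelId}/converse`,
            method: 'POST',
            service: 'bedrock',
            region,
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(requestBody)
        };
        aws4.sign(opts, credentials); // { accessKeyId, secretAccessKey }; adds auth headers to opts
        const res = await fetch(`https://${opts.hostname}${opts.path}`, {
            method: opts.method,
            headers: opts.headers,
            body: opts.body
        });
        if (!res.ok) throw new Error(`HTTP ${res.status}: ${await res.text()}`);
        return res.json();
    }

Forcing `toolChoice` to `return_analysis_result` is what lets the Bedrock path treat `block.toolUse.input` as already-structured JSON instead of scraping it out of free text, mirroring the OpenAI path's `response_format: { type: 'json_object' }`.
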
@@ -7,12 +7,11 @@ exports.getThreadlineApiKey = getThreadlineApiKey;
  exports.getThreadlineAccount = getThreadlineAccount;
  exports.getOpenAIConfig = getOpenAIConfig;
  exports.logOpenAIConfig = logOpenAIConfig;
+ exports.getBedrockConfig = getBedrockConfig;
+ exports.logBedrockConfig = logBedrockConfig;
  exports.isDirectModeAvailable = isDirectModeAvailable;
  const chalk_1 = __importDefault(require("chalk"));
  const logger_1 = require("./logger");
- // Default values for OpenAI configuration
- const OPENAI_MODEL_DEFAULT = 'gpt-5.2';
- const OPENAI_SERVICE_TIER_DEFAULT = 'Flex';
  /**
  * Gets THREADLINE_API_KEY from environment.
  *
@@ -49,15 +48,19 @@ function getThreadlineAccount() {
  * Gets OpenAI configuration from environment variables and config file.
  *
  * Required:
- * - OPENAI_API_KEY: Your OpenAI API key (from environment)
- *
- * Model and service tier come from ThreadlineConfig (.threadlinerc file).
- * Falls back to environment variables if not in config, then to defaults.
+ * - OPENAI_API_KEY: Your OpenAI API key (from environment - secret)
+ * - openai_model: Model name (from .threadlinerc - required)
+ * - openai_service_tier: Service tier (from .threadlinerc - required)
  *
  * Returns undefined if OPENAI_API_KEY is not set.
+ * Throws an error if model or service tier are missing from .threadlinerc.
  *
  * Note: .env.local is automatically loaded at CLI startup (see index.ts).
  * In CI/CD, environment variables are injected directly into process.env.
+ *
+ * Configuration philosophy:
+ * - Secrets (API keys) -> environment variables
+ * - Config (model, service tier) -> .threadlinerc file (required, no fallbacks)
  */
  function getOpenAIConfig(config) {
  const apiKey = process.env.OPENAI_API_KEY;
@@ -66,31 +69,21 @@ function getOpenAIConfig(config) {
  return undefined;
  }
  logger_1.logger.debug('OPENAI_API_KEY: found (value hidden for security)');
- // Priority: config file > environment variable > default
- const model = config?.openai_model || process.env.OPENAI_MODEL || OPENAI_MODEL_DEFAULT;
- const serviceTier = config?.openai_service_tier || process.env.OPENAI_SERVICE_TIER || OPENAI_SERVICE_TIER_DEFAULT;
- if (config?.openai_model) {
- logger_1.logger.debug(`OPENAI_MODEL: ${model} (from .threadlinerc)`);
- }
- else if (process.env.OPENAI_MODEL) {
- logger_1.logger.debug(`OPENAI_MODEL: ${model} (from environment)`);
- }
- else {
- logger_1.logger.debug(`OPENAI_MODEL: ${model} (using default)`);
- }
- if (config?.openai_service_tier) {
- logger_1.logger.debug(`OPENAI_SERVICE_TIER: ${serviceTier} (from .threadlinerc)`);
- }
- else if (process.env.OPENAI_SERVICE_TIER) {
- logger_1.logger.debug(`OPENAI_SERVICE_TIER: ${serviceTier} (from environment)`);
+ // Require config values from .threadlinerc - no fallbacks
+ if (!config?.openai_model) {
+ throw new Error('Missing required configuration: openai_model must be set in .threadlinerc file.\n' +
+ 'Add "openai_model": "gpt-5.2" (or your preferred model) to your .threadlinerc file.');
  }
- else {
- logger_1.logger.debug(`OPENAI_SERVICE_TIER: ${serviceTier} (using default)`);
+ if (!config?.openai_service_tier) {
+ throw new Error('Missing required configuration: openai_service_tier must be set in .threadlinerc file.\n' +
+ 'Add "openai_service_tier": "Flex" (or your preferred tier) to your .threadlinerc file.');
  }
+ logger_1.logger.debug(`OPENAI_MODEL: ${config.openai_model} (from .threadlinerc)`);
+ logger_1.logger.debug(`OPENAI_SERVICE_TIER: ${config.openai_service_tier} (from .threadlinerc)`);
  return {
  apiKey,
- model,
- serviceTier
+ model: config.openai_model,
+ serviceTier: config.openai_service_tier
  };
  }
  /**
@@ -99,8 +92,64 @@ function getOpenAIConfig(config) {
  */
  function logOpenAIConfig(config) {
  logger_1.logger.output(chalk_1.default.blue('OpenAI Direct Mode:'));
- logger_1.logger.output(chalk_1.default.gray(` Model: ${config.model}${config.model === OPENAI_MODEL_DEFAULT ? ' (default)' : ''}`));
- logger_1.logger.output(chalk_1.default.gray(` Service Tier: ${config.serviceTier}${config.serviceTier === OPENAI_SERVICE_TIER_DEFAULT ? ' (default)' : ''}`));
+ logger_1.logger.output(chalk_1.default.gray(` Model: ${config.model}`));
+ logger_1.logger.output(chalk_1.default.gray(` Service Tier: ${config.serviceTier}`));
+ logger_1.logger.output('');
+ }
+ /**
+ * Gets Bedrock configuration from environment variables and config file.
+ *
+ * Required:
+ * - BEDROCK_ACCESS_KEY_ID: Your AWS access key ID (from environment - secret)
+ * - BEDROCK_SECRET_ACCESS_KEY: Your AWS secret access key (from environment - secret)
+ * - bedrock_model: Model name (from .threadlinerc - required)
+ * - bedrock_region: AWS region (from .threadlinerc - required)
+ *
+ * Returns undefined if BEDROCK_ACCESS_KEY_ID or BEDROCK_SECRET_ACCESS_KEY is not set.
+ * Throws an error if model or region are missing from .threadlinerc.
+ *
+ * Note: .env.local is automatically loaded at CLI startup (see index.ts).
+ * In CI/CD, environment variables are injected directly into process.env.
+ *
+ * Configuration philosophy:
+ * - Secrets (access keys) -> environment variables
+ * - Config (model, region) -> .threadlinerc file (required, no fallbacks)
+ */
+ function getBedrockConfig(config) {
+ const accessKeyId = process.env.BEDROCK_ACCESS_KEY_ID;
+ const secretAccessKey = process.env.BEDROCK_SECRET_ACCESS_KEY;
+ if (!accessKeyId || !secretAccessKey) {
+ logger_1.logger.debug('BEDROCK_ACCESS_KEY_ID or BEDROCK_SECRET_ACCESS_KEY: not set (Bedrock mode unavailable)');
+ return undefined;
+ }
+ logger_1.logger.debug('BEDROCK_ACCESS_KEY_ID: found (value hidden for security)');
+ logger_1.logger.debug('BEDROCK_SECRET_ACCESS_KEY: found (value hidden for security)');
+ // Require config values from .threadlinerc - no fallbacks
+ if (!config?.bedrock_model) {
+ throw new Error('Missing required configuration: bedrock_model must be set in .threadlinerc file.\n' +
+ 'Add "bedrock_model": "us.anthropic.claude-sonnet-4-5-20250929-v1:0" (or your preferred model) to your .threadlinerc file.');
+ }
+ if (!config?.bedrock_region) {
+ throw new Error('Missing required configuration: bedrock_region must be set in .threadlinerc file.\n' +
+ 'Add "bedrock_region": "us-east-1" (or your preferred AWS region) to your .threadlinerc file.');
+ }
+ logger_1.logger.debug(`BEDROCK_MODEL: ${config.bedrock_model} (from .threadlinerc)`);
+ logger_1.logger.debug(`BEDROCK_REGION: ${config.bedrock_region} (from .threadlinerc)`);
+ return {
+ accessKeyId,
+ secretAccessKey,
+ model: config.bedrock_model,
+ region: config.bedrock_region
+ };
+ }
+ /**
+ * Logs the Bedrock configuration being used.
+ * Call this when starting Bedrock mode to inform the user.
+ */
+ function logBedrockConfig(config) {
+ logger_1.logger.output(chalk_1.default.blue('Amazon Bedrock Direct Mode:'));
+ logger_1.logger.output(chalk_1.default.gray(` Model: ${config.model}`));
+ logger_1.logger.output(chalk_1.default.gray(` Region: ${config.region}`));
  logger_1.logger.output('');
  }
  /**
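
Both getters enforce the same split: secrets come only from the environment, while model and region (or model and service tier) must be present in the parsed `.threadlinerc`, with no env or default fallback. A hypothetical usage illustration (values are placeholders):

    // Hypothetical usage: env vars hold the secrets, the parsed .threadlinerc
    // object holds the settings.
    process.env.BEDROCK_ACCESS_KEY_ID = 'AKIA...';
    process.env.BEDROCK_SECRET_ACCESS_KEY = '...';
    const rc = {
        bedrock_model: 'us.anthropic.claude-sonnet-4-5-20250929-v1:0',
        bedrock_region: 'us-east-1'
    };
    const bedrock = getBedrockConfig(rc);
    // -> { accessKeyId, secretAccessKey, model, region }
    // With the env vars set but rc missing model/region, the getter throws;
    // with the env vars unset, it returns undefined and Bedrock mode is skipped.
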
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "threadlines",
- "version": "0.4.0",
+ "version": "0.6.0",
  "description": "Threadlines CLI - AI-powered linter based on your natural language documentation",
  "main": "dist/index.js",
  "bin": {
@@ -48,6 +48,7 @@
  "node": ">=18.0.0"
  },
  "dependencies": {
+ "aws4": "^1.13.2",
  "chalk": "^4.1.2",
  "commander": "^12.1.0",
  "dotenv": "^16.4.7",
@@ -55,6 +56,7 @@
  "js-yaml": "^4.1.0"
  },
  "devDependencies": {
+ "@types/aws4": "^1.11.4",
  "@types/glob": "^8.1.0",
  "@types/js-yaml": "^4.0.9",
  "@types/node": "^22.10.2",