threadlines 0.2.25 → 0.3.0

This diff shows the published contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.
@@ -41,7 +41,6 @@ const fs = __importStar(require("fs"));
  const path = __importStar(require("path"));
  const chalk_1 = __importDefault(require("chalk"));
  const logger_1 = require("../utils/logger");
- const config_file_1 = require("../utils/config-file");
  const TEMPLATE = `---
  id: example-threadline
  version: 1.0.0
@@ -82,39 +81,49 @@ async function initCommand() {
  // Create threadlines directory if it doesn't exist
  if (!fs.existsSync(threadlinesDir)) {
  fs.mkdirSync(threadlinesDir, { recursive: true });
- console.log(chalk_1.default.green(`✓ Created /threadlines directory`));
+ logger_1.logger.output(chalk_1.default.green(`✓ Created /threadlines directory`));
  }
  // Create .threadlinerc if it doesn't exist
  if (!fs.existsSync(configFile)) {
- const configContent = JSON.stringify(config_file_1.DEFAULT_CONFIG, null, 2);
+ // Generate config with comment explaining mode
+ const configContent = `{
+ // mode: "online" syncs results to web app (requires THREADLINE_API_KEY and THREADLINE_ACCOUNT)
+ // mode: "offline" processes locally only, no sync
+ "mode": "online",
+ "api_url": "https://devthreadline.com",
+ "openai_model": "gpt-5.2",
+ "openai_service_tier": "Flex",
+ "diff_context_lines": 10
+ }`;
  fs.writeFileSync(configFile, configContent, 'utf-8');
- console.log(chalk_1.default.green(`✓ Created .threadlinerc`));
+ logger_1.logger.output(chalk_1.default.green(`✓ Created .threadlinerc`));
  }
  // Check if example file already exists
  if (fs.existsSync(exampleFile)) {
- console.log(chalk_1.default.yellow(`⚠️ ${exampleFile} already exists`));
- console.log(chalk_1.default.gray(' Edit it to create your threadline, or delete it and run init again.'));
+ logger_1.logger.warn(`${exampleFile} already exists`);
+ logger_1.logger.output(chalk_1.default.gray(' Edit it to create your threadline, or delete it and run init again.'));
  return;
  }
  // Write template file
  fs.writeFileSync(exampleFile, TEMPLATE, 'utf-8');
- console.log(chalk_1.default.green(`✓ Created ${exampleFile}`));
- console.log('');
- console.log(chalk_1.default.blue('Next steps:'));
- console.log(chalk_1.default.gray(' 1. Edit threadlines/example.md with your coding standards'));
- console.log(chalk_1.default.gray(' 2. Rename it to something descriptive (e.g., error-handling.md)'));
- console.log('');
- console.log(chalk_1.default.yellow('⚠️ IMPORTANT: Configuration Required'));
- console.log(chalk_1.default.white(' To use threadlines check, you need:'));
- console.log('');
- console.log(chalk_1.default.white(' Create a .env.local file in your project root with:'));
- console.log(chalk_1.default.gray(' THREADLINE_API_KEY=your-api-key-here'));
- console.log(chalk_1.default.gray(' THREADLINE_ACCOUNT=your-email@example.com'));
- console.log('');
- console.log(chalk_1.default.white(' Make sure .env.local is in your .gitignore file!'));
- console.log('');
- console.log(chalk_1.default.gray(' 3. Run: npx threadlines check'));
- console.log(chalk_1.default.gray(' (Use npx --yes threadlines check in non-interactive environments)'));
+ logger_1.logger.output(chalk_1.default.green(`✓ Created ${exampleFile}`));
+ logger_1.logger.output('');
+ logger_1.logger.output(chalk_1.default.blue('Next steps:'));
+ logger_1.logger.output(chalk_1.default.gray(' 1. Edit threadlines/example.md with your coding standards'));
+ logger_1.logger.output(chalk_1.default.gray(' 2. Rename it to something descriptive (e.g., error-handling.md)'));
+ logger_1.logger.output('');
+ logger_1.logger.output(chalk_1.default.yellow('⚠️ IMPORTANT: Configuration Required'));
+ logger_1.logger.output(chalk_1.default.white(' To use threadlines check, you need:'));
+ logger_1.logger.output('');
+ logger_1.logger.output(chalk_1.default.white(' Create a .env.local file in your project root with:'));
+ logger_1.logger.output(chalk_1.default.gray(' OPENAI_API_KEY=your-openai-api-key'));
+ logger_1.logger.output(chalk_1.default.gray(' THREADLINE_API_KEY=your-api-key-here'));
+ logger_1.logger.output(chalk_1.default.gray(' THREADLINE_ACCOUNT=your-email@example.com'));
+ logger_1.logger.output('');
+ logger_1.logger.output(chalk_1.default.white(' Make sure .env.local is in your .gitignore file!'));
+ logger_1.logger.output('');
+ logger_1.logger.output(chalk_1.default.gray(' 3. Run: npx threadlines check'));
+ logger_1.logger.output(chalk_1.default.gray(' (Use npx --yes threadlines check in non-interactive environments)'));
  }
  catch (error) {
  const errorMessage = error instanceof Error ? error.message : 'Unknown error';
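The substantive change in this hunk is that every user-facing console.log call is routed through logger_1.logger.output (and warnings through logger.warn), and the generated .threadlinerc template now embeds // comments plus an OPENAI_API_KEY line in the setup instructions. The logger module itself is not part of this diff; the sketch below shows what a logger with the output/warn/error/debug surface implied by these call sites might look like. Everything here beyond those four method names is an assumption, not package source.

// Minimal sketch, assuming a logger with the surface implied by the call sites
// above. The real ../utils/logger is not shown in this diff.
const logger = {
  output: (msg = '') => console.log(msg),   // user-facing output
  warn: (msg) => console.warn(`⚠️ ${msg}`), // warnings
  error: (msg) => console.error(msg),       // errors
  debug: (msg) => {
    if (process.env.THREADLINE_DEBUG) console.log(msg); // hypothetical opt-in flag
  },
};
module.exports = { logger };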
@@ -0,0 +1,72 @@
+ "use strict";
+ /**
+ * Prompt Builder for LLM Threadline Checks
+ *
+ * Builds prompts for OpenAI API calls to check code changes against threadline guidelines.
+ */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.buildPrompt = buildPrompt;
+ function buildPrompt(threadline, diff, matchingFiles) {
+ // Build context files section if available
+ const contextFilesSection = threadline.contextContent && Object.keys(threadline.contextContent).length > 0
+ ? `Context Files:\n${Object.entries(threadline.contextContent)
+ .map(([file, content]) => `\n--- ${file} ---\n${content}`)
+ .join('\n')}\n\n`
+ : '';
+ return `You are a code quality checker focused EXCLUSIVELY on: ${threadline.id}
+
+ CRITICAL: You must ONLY check for violations of THIS SPECIFIC threadline. Do NOT flag other code quality issues, style problems, or unrelated concerns.
+ If the code does not violate THIS threadline's specific rules, return "compliant" even if other issues exist.
+
+ Threadline Guidelines:
+ ${threadline.content}
+
+ ${contextFilesSection}Code Changes (Git Diff Format):
+ ${diff}
+
+ Changed Files:
+ ${matchingFiles.join('\n')}
+
+ Review the code changes AGAINST ONLY THE THREADLINE GUIDELINES ABOVE.
+
+ YOUR OBJECTIVES:
+ 1. Detect new violations being introduced in the code changes
+ 2. Review whether engineers have successfully addressed earlier violations
+
+ This is why it's important to look very carefully at the diff structure. You'll come across diffs that introduce new violations. You will also come across some that address earlier violations. The diff structure should allow you to tell which is which, because lines starting with '-' are removed in favour of lines with '+'.
+
+ CRITICAL CHECK BEFORE FLAGGING VIOLATIONS:
+ Before commenting on or flagging a violation in any line, look at the FIRST CHARACTER of that line:
+ * If it's a "-", the code is deleted.
+ → Only flag violations in lines starting with "+" (new code being added)
+ * If the first character is "+", this is NEW code being added - flag violations here if they violate the threadline
+ * If the line doesn't start with "+" or "-" (context lines), these are UNCHANGED - do NOT flag violations here
+ * Some violations may not be line-specific (e.g., file-level patterns, overall structure) - include those in your reasoning as well
+
+
+ IMPORTANT:
+ - Only flag violations of the specific rules defined in this threadline
+ - Ignore all other code quality issues, style problems, or unrelated concerns
+ - Focus on understanding the diff structure to distinguish between new violations and fixes
+
+ Return JSON only with this exact structure:
+ {
+ "status": "compliant" | "attention" | "not_relevant",
+ "reasoning": "explanation with file paths and line numbers embedded in the text (e.g., 'app/api/checks/route.ts:8 - The addition of...')",
+ "file_references": [file paths where violations occur - MUST match files from the diff, include ONLY files with violations]
+ }
+
+ CRITICAL: For each violation, you MUST:
+ 1. Embed the file path and line number(s) directly in your reasoning text (e.g., "app/api/checks/route.ts:8 - The addition of 'c.files_changed_counts' violates...")
+ 2. For line-specific violations, include the line number (e.g., "file.ts:42")
+ 3. For file-level or pattern violations, just include the file path (e.g., "file.ts")
+ 4. Include ONLY files that actually contain violations in "file_references" array
+ 5. Do NOT include files that don't have violations, even if they appear in the diff
+ 6. The "file_references" array should be a simple list of file paths - no line numbers needed there since they're in the reasoning
+
+ Status meanings:
+ - "compliant": Code follows THIS threadline's guidelines, no violations found (even if other issues exist)
+ - "attention": Code DIRECTLY violates THIS threadline's specific guidelines
+ - "not_relevant": This threadline doesn't apply to these files/changes (e.g., wrong file type, no matching code patterns)
+ `;
+ }
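This new prompt-builder module is pure string assembly, so its contract is easy to show. A usage sketch follows; the threadline fields (id, content, contextContent) come from how buildPrompt reads them above, while the require path and all sample values are illustrative assumptions:

// Usage sketch. Field names follow buildPrompt's own reads above; the path and
// sample values are illustrative, not taken from the package.
const { buildPrompt } = require('./llm/prompt-builder');
const threadline = {
  id: 'error-handling',
  content: 'Async route handlers must wrap awaited calls in try/catch.',
  contextContent: { 'docs/errors.md': '# Error policy\n...' }, // optional section
};
const diff = [
  '--- a/app/api/route.ts',
  '+++ b/app/api/route.ts',
  '+ const data = await fetchData();',
].join('\n');
const prompt = buildPrompt(threadline, diff, ['app/api/route.ts']);
// `prompt` instructs the model to return JSON: { status, reasoning, file_references }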
@@ -0,0 +1,120 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.processThreadlines = processThreadlines;
+ const single_expert_1 = require("./single-expert");
+ const logger_1 = require("../utils/logger");
+ const EXPERT_TIMEOUT = 40000; // 40 seconds
+ async function processThreadlines(request) {
+ const { threadlines, diff, files, apiKey, model, serviceTier, contextLinesForLLM } = request;
+ // Determine LLM model (same for all threadlines in this check)
+ const llmModel = `${model} ${serviceTier}`;
+ // Create promises with timeout
+ const promises = threadlines.map(threadline => {
+ let timeoutId = null;
+ let resolved = false;
+ const timeoutPromise = new Promise((resolve) => {
+ timeoutId = setTimeout(() => {
+ // Only log and resolve if we haven't already resolved
+ if (!resolved) {
+ logger_1.logger.error(`Request timed out after ${EXPERT_TIMEOUT / 1000}s for threadline: ${threadline.id}`);
+ resolved = true;
+ resolve({
+ expertId: threadline.id,
+ status: 'error',
+ reasoning: `Error: Request timed out after ${EXPERT_TIMEOUT / 1000}s`,
+ error: {
+ message: `Request timed out after ${EXPERT_TIMEOUT / 1000}s`,
+ type: 'timeout'
+ },
+ fileReferences: [],
+ relevantFiles: [],
+ filteredDiff: '',
+ filesInFilteredDiff: [],
+ actualModel: undefined
+ });
+ }
+ }, EXPERT_TIMEOUT);
+ });
+ const actualPromise = (0, single_expert_1.processThreadline)(threadline, diff, files, apiKey, model, serviceTier, contextLinesForLLM).then(result => {
+ // Mark as resolved and clear timeout if it hasn't fired yet
+ resolved = true;
+ if (timeoutId) {
+ clearTimeout(timeoutId);
+ }
+ return result;
+ });
+ return Promise.race([actualPromise, timeoutPromise]);
+ });
+ // Wait for all (some may timeout)
+ const results = await Promise.allSettled(promises);
+ // Process results
+ const expertResults = [];
+ let completed = 0;
+ let timedOut = 0;
+ let errors = 0;
+ let actualModelFromResponse;
+ for (let i = 0; i < results.length; i++) {
+ const result = results[i];
+ const threadline = threadlines[i];
+ if (result.status === 'fulfilled') {
+ const expertResult = result.value;
+ // Check status directly - errors and timeouts are now 'error' status
+ if (expertResult.status === 'error') {
+ // Check if it's a timeout (has error.type === 'timeout')
+ if ('error' in expertResult && expertResult.error?.type === 'timeout') {
+ timedOut++;
+ }
+ else {
+ errors++;
+ }
+ }
+ else {
+ completed++;
+ }
+ expertResults.push(expertResult);
+ // Capture actual model from first successful result (all threadlines use same model)
+ if (!actualModelFromResponse && 'actualModel' in expertResult && expertResult.actualModel) {
+ actualModelFromResponse = expertResult.actualModel;
+ }
+ }
+ else {
+ errors++;
+ expertResults.push({
+ expertId: threadline.id,
+ status: 'error',
+ reasoning: `Error: ${result.reason?.message || 'Unknown error'}`,
+ error: {
+ message: result.reason?.message || 'Unknown error',
+ rawResponse: result.reason
+ },
+ fileReferences: [],
+ relevantFiles: [],
+ filteredDiff: '',
+ filesInFilteredDiff: []
+ });
+ }
+ }
+ // Use actual model from OpenAI response, append service tier
+ let modelToStore;
+ if (actualModelFromResponse) {
+ modelToStore = `${actualModelFromResponse} ${serviceTier}`;
+ }
+ else {
+ // All calls failed - log prominently and preserve requested model for debugging
+ logger_1.logger.error(`No successful LLM responses received. Requested model: ${llmModel}`);
+ logger_1.logger.error(`Completed: ${completed}, Timed out: ${timedOut}, Errors: ${errors}`);
+ // Store requested model so we can debug what was attempted
+ modelToStore = `${llmModel} (no successful responses)`;
+ }
+ // Return all results - CLI will handle filtering/display
+ return {
+ results: expertResults,
+ metadata: {
+ totalThreadlines: threadlines.length,
+ completed,
+ timedOut,
+ errors,
+ llmModel: modelToStore
+ }
+ };
+ }
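The pattern in this new orchestrator module deserves a note: each threadline races its real LLM call against a timer that resolves (never rejects) with an error-shaped result, and a resolved flag plus clearTimeout prevents the timer from firing after the real call wins. A standalone sketch of the same pattern, with illustrative names:

// Standalone sketch of the race-with-timeout pattern used above (names illustrative).
function withTimeout(promise, ms, onTimeoutResult) {
  let timeoutId = null;
  let resolved = false;
  const timeout = new Promise((resolve) => {
    timeoutId = setTimeout(() => {
      if (!resolved) {            // skip if the real promise already won
        resolved = true;
        resolve(onTimeoutResult); // resolve, don't reject: callers treat it as a result
      }
    }, ms);
  });
  const wrapped = promise.then((result) => {
    resolved = true;
    if (timeoutId) clearTimeout(timeoutId); // stop the timer once the real call finishes
    return result;
  });
  return Promise.race([wrapped, timeout]);
}

Resolving instead of rejecting means Promise.allSettled sees every promise as fulfilled, so one slow or failing threadline never aborts the batch; the counting loop above then classifies results by their status and error.type fields.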
@@ -0,0 +1,197 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+ return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.processThreadline = processThreadline;
+ const openai_1 = __importDefault(require("openai"));
+ const prompt_builder_1 = require("../llm/prompt-builder");
+ const diff_filter_1 = require("../utils/diff-filter");
+ const slim_diff_1 = require("../utils/slim-diff");
+ const logger_1 = require("../utils/logger");
+ async function processThreadline(threadline, diff, files, apiKey, model, serviceTier, contextLinesForLLM) {
+ const openai = new openai_1.default({ apiKey });
+ // Filter files that match threadline patterns
+ const relevantFiles = files.filter(file => threadline.patterns.some(pattern => matchesPattern(file, pattern)));
+ // If no files match, return not_relevant
+ if (relevantFiles.length === 0) {
+ logger_1.logger.debug(` ⚠️ ${threadline.id}: No files matched patterns ${threadline.patterns.join(', ')}`);
+ logger_1.logger.debug(` Files checked: ${files.slice(0, 5).join(', ')}${files.length > 5 ? '...' : ''}`);
+ return {
+ expertId: threadline.id,
+ status: 'not_relevant',
+ reasoning: `No files match threadline patterns: ${threadline.patterns.join(', ')}`,
+ fileReferences: [],
+ relevantFiles: [],
+ filteredDiff: '',
+ filesInFilteredDiff: []
+ };
+ }
+ // Filter diff to only include relevant files
+ const filteredDiff = (0, diff_filter_1.filterDiffByFiles)(diff, relevantFiles);
+ // Extract files actually present in the filtered diff
+ const filesInFilteredDiff = (0, diff_filter_1.extractFilesFromDiff)(filteredDiff);
+ // Trim diff for LLM to reduce token costs (keep full diff for storage/UI)
+ // The CLI sends diffs with -U200 (200 lines context), which can be expensive.
+ // This trims the diff to only N context lines before sending to LLM.
+ // Note: Full filtered diff is still stored in DB for UI viewing.
+ const trimmedDiffForLLM = (0, slim_diff_1.createSlimDiff)(filteredDiff, contextLinesForLLM);
+ // Log diff trimming if it occurred
+ const originalLines = filteredDiff.split('\n').length;
+ const trimmedLines = trimmedDiffForLLM.split('\n').length;
+ if (trimmedLines < originalLines) {
+ const reductionPercent = Math.round(((originalLines - trimmedLines) / originalLines) * 100);
+ logger_1.logger.debug(` ✂️ Trimmed diff for LLM: ${originalLines} → ${trimmedLines} lines (${reductionPercent}% reduction, ${contextLinesForLLM} context lines)`);
+ }
+ // Build prompt with trimmed diff (full filtered diff is still stored for UI)
+ const prompt = (0, prompt_builder_1.buildPrompt)(threadline, trimmedDiffForLLM, filesInFilteredDiff);
+ logger_1.logger.debug(` 📝 Processing ${threadline.id}: ${relevantFiles.length} relevant files, ${filesInFilteredDiff.length} files in filtered diff`);
+ logger_1.logger.debug(` 🤖 Calling LLM (${model}) for ${threadline.id}...`);
+ // Capture timing for LLM call
+ const llmCallStartedAt = new Date().toISOString();
+ let llmCallFinishedAt;
+ let llmCallResponseTimeMs;
+ let llmCallTokens = null;
+ let llmCallStatus = 'success';
+ let llmCallErrorMessage = null;
+ try {
+ const requestParams = {
+ model,
+ messages: [
+ {
+ role: 'system',
+ content: 'You are a code quality checker. Analyze code changes against the threadline guidelines. Be precise - only flag actual violations. Return only valid JSON, no other text.'
+ },
+ {
+ role: 'user',
+ content: prompt
+ }
+ ],
+ response_format: { type: 'json_object' },
+ temperature: 0.1
+ };
+ // Add service_tier if not 'standard'
+ const normalizedServiceTier = serviceTier.toLowerCase();
+ if (normalizedServiceTier !== 'standard' && (normalizedServiceTier === 'auto' || normalizedServiceTier === 'default' || normalizedServiceTier === 'flex')) {
+ requestParams.service_tier = normalizedServiceTier;
+ }
+ const response = await openai.chat.completions.create(requestParams);
+ // Capture the actual model returned by OpenAI (may differ from requested)
+ const actualModel = response.model;
+ llmCallFinishedAt = new Date().toISOString();
+ llmCallResponseTimeMs = new Date(llmCallFinishedAt).getTime() - new Date(llmCallStartedAt).getTime();
+ // Capture token usage if available
+ if (response.usage) {
+ llmCallTokens = {
+ prompt_tokens: response.usage.prompt_tokens,
+ completion_tokens: response.usage.completion_tokens,
+ total_tokens: response.usage.total_tokens
+ };
+ }
+ const content = response.choices[0]?.message?.content;
+ if (!content) {
+ throw new Error('No response from LLM');
+ }
+ const parsed = JSON.parse(content);
+ logger_1.logger.debug(` ✅ ${threadline.id}: ${parsed.status}`);
+ // Extract file references - rely entirely on LLM to provide them
+ let fileReferences = [];
+ if (parsed.file_references && Array.isArray(parsed.file_references) && parsed.file_references.length > 0) {
+ // LLM provided file references - validate they're in filesInFilteredDiff
+ fileReferences = parsed.file_references.filter((file) => filesInFilteredDiff.includes(file));
+ if (parsed.file_references.length !== fileReferences.length) {
+ logger_1.logger.debug(` ⚠️ Warning: LLM provided ${parsed.file_references.length} file references, but only ${fileReferences.length} match the files sent to LLM`);
+ }
+ }
+ else {
+ // LLM did not provide file_references
+ const status = parsed.status || 'not_relevant';
+ if (status === 'attention') {
+ // This is a problem - we have violations but don't know which files
+ logger_1.logger.error(` ❌ Error: LLM returned "attention" status but no file_references for threadline ${threadline.id}`);
+ logger_1.logger.error(` Cannot accurately report violations without file references. This may indicate a prompt/LLM issue.`);
+ // Return empty file references - better than guessing
+ fileReferences = [];
+ }
+ // For "compliant" or "not_relevant" status, file references are optional
+ }
+ return {
+ expertId: threadline.id,
+ status: parsed.status || 'not_relevant',
+ reasoning: parsed.reasoning,
+ fileReferences: fileReferences,
+ relevantFiles: relevantFiles,
+ filteredDiff: filteredDiff,
+ filesInFilteredDiff: filesInFilteredDiff,
+ actualModel: actualModel,
+ llmCallMetrics: {
+ startedAt: llmCallStartedAt,
+ finishedAt: llmCallFinishedAt,
+ responseTimeMs: llmCallResponseTimeMs,
+ tokens: llmCallTokens,
+ status: llmCallStatus,
+ errorMessage: llmCallErrorMessage
+ }
+ };
+ }
+ catch (error) {
+ // Capture error timing
+ llmCallFinishedAt = new Date().toISOString();
+ llmCallResponseTimeMs = new Date(llmCallFinishedAt).getTime() - new Date(llmCallStartedAt).getTime();
+ llmCallStatus = 'error';
+ // Extract error details safely
+ const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+ llmCallErrorMessage = errorMessage;
+ // Log full error for debugging
+ logger_1.logger.error(` ❌ OpenAI error: ${JSON.stringify(error, null, 2)}`);
+ // Extract OpenAI error details from the error object
+ const errorObj = error;
+ const openAIError = errorObj?.error || {};
+ const rawErrorResponse = {
+ status: errorObj?.status,
+ headers: errorObj?.headers,
+ request_id: errorObj?.request_id,
+ error: errorObj?.error,
+ code: errorObj?.code,
+ param: errorObj?.param,
+ type: errorObj?.type
+ };
+ // Return error result with metrics instead of throwing
+ // This allows metrics to be captured even when LLM call fails
+ // Use 'error' status - errors are errors, not attention items
+ return {
+ expertId: threadline.id,
+ status: 'error',
+ reasoning: `Error: ${errorMessage}`,
+ error: {
+ message: errorMessage,
+ type: openAIError?.type || errorObj?.type,
+ code: openAIError?.code || errorObj?.code,
+ rawResponse: rawErrorResponse
+ },
+ fileReferences: [],
+ relevantFiles: relevantFiles,
+ filteredDiff: filteredDiff,
+ filesInFilteredDiff: filesInFilteredDiff,
+ llmCallMetrics: {
+ startedAt: llmCallStartedAt,
+ finishedAt: llmCallFinishedAt,
+ responseTimeMs: llmCallResponseTimeMs,
+ tokens: llmCallTokens,
+ status: llmCallStatus,
+ errorMessage: llmCallErrorMessage
+ }
+ };
+ }
+ }
+ function matchesPattern(filePath, pattern) {
+ // Convert glob pattern to regex
+ // Handle ** first (before single *), escape it to avoid double replacement
+ let regexPattern = pattern
+ .replace(/\*\*/g, '__DOUBLE_STAR__')
+ .replace(/\*/g, '[^/]*')
+ .replace(/__DOUBLE_STAR__/g, '.*')
+ .replace(/\?/g, '.')
+ const regex = new RegExp(`^${regexPattern}$`);
+ return regex.test(filePath);
+ }
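One caveat on matchesPattern at the end of this file: it is a hand-rolled glob-to-regex conversion rather than a library such as minimatch, and its semantics differ from standard globs in two ways, shown below. The toRegex helper simply restates the conversion for illustration, since matchesPattern is module-private:

// Restates the conversion above for illustration (matchesPattern is not exported).
function toRegex(pattern) {
  const regexPattern = pattern
    .replace(/\*\*/g, '__DOUBLE_STAR__')
    .replace(/\*/g, '[^/]*')
    .replace(/__DOUBLE_STAR__/g, '.*')
    .replace(/\?/g, '.');
  return new RegExp(`^${regexPattern}$`);
}
console.log(toRegex('src/**/*.ts').test('src/app/file.ts')); // true
console.log(toRegex('src/**/*.ts').test('src/file.ts'));     // false: '**' becomes '.*'
                                                             // but the '/' around it must match
console.log(toRegex('*.ts').test('fonts'));                  // true: the unescaped '.' in the
                                                             // pattern matches any character

So src/**/*.ts requires at least one directory level under src/ here, whereas most glob libraries let ** match zero path segments.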
@@ -42,7 +42,7 @@ const fs = __importStar(require("fs"));
  const path = __importStar(require("path"));
  const simple_git_1 = __importDefault(require("simple-git"));
  exports.DEFAULT_CONFIG = {
- mode: 'online',
+ mode: 'online', // Default: sync enabled. Set to "offline" for local-only processing.
  api_url: 'https://devthreadline.com',
  openai_model: 'gpt-5.2',
  openai_service_tier: 'Flex',
@@ -103,15 +103,24 @@ async function loadConfig(startDir) {
  }
  // If config file found, parse and merge
  if (configPath) {
+ let configContent = fs.readFileSync(configPath, 'utf-8');
+ // Strip single-line comments (// ...) before parsing JSON
+ // This allows comments in .threadlinerc for documentation
+ // Only match comments at the start of a line (after whitespace) to avoid matching // inside strings
+ // Also remove empty lines left after comment removal
+ configContent = configContent
+ .replace(/^\s*\/\/.*$/gm, '') // Remove comments (only at start of line after whitespace)
+ .replace(/^\s*[\r\n]/gm, ''); // Remove empty lines
  try {
- const configContent = fs.readFileSync(configPath, 'utf-8');
  const fileConfig = JSON.parse(configContent);
  // Merge file config into defaults (file overrides defaults)
  Object.assign(config, fileConfig);
  }
  catch (error) {
- // If file exists but can't be parsed, log warning but continue with defaults
- console.warn(`Warning: Failed to parse .threadlinerc at ${configPath}: ${error instanceof Error ? error.message : 'Unknown error'}`);
+ // If file exists but can't be parsed, fail loudly - this is a user error that needs fixing
+ const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+ throw new Error(`Failed to parse .threadlinerc at ${configPath}: ${errorMessage}\n` +
+ `Please fix the syntax error in your .threadlinerc file.`);
  }
  }
  return config;
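This stripping step pairs with the init change above: the generated .threadlinerc embeds // comments, which strict JSON.parse rejects. A small sketch of what the two regexes do, and their main limitation (the sample input is illustrative):

// Illustrative input mirroring the template that init now writes.
const raw = `{
// mode: "online" syncs results to the web app
"mode": "online",
"diff_context_lines": 10
}`;
const cleaned = raw
  .replace(/^\s*\/\/.*$/gm, '')  // drop lines that start with // (after whitespace)
  .replace(/^\s*[\r\n]/gm, '');  // drop the blank lines left behind
console.log(JSON.parse(cleaned)); // { mode: 'online', diff_context_lines: 10 }
// Limitation: only whole-line comments are stripped. A trailing comment such as
// `"mode": "online", // note` would survive and still break JSON.parse.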
@@ -46,36 +46,42 @@ function getThreadlineAccount() {
  return account;
  }
  /**
- * Gets OpenAI configuration from environment variables.
+ * Gets OpenAI configuration from environment variables and config file.
  *
  * Required:
- * - OPENAI_API_KEY: Your OpenAI API key
+ * - OPENAI_API_KEY: Your OpenAI API key (from environment)
  *
- * Optional (with defaults):
- * - OPENAI_MODEL: Model to use (default: gpt-5.2)
- * - OPENAI_SERVICE_TIER: Service tier (default: Flex)
+ * Model and service tier come from ThreadlineConfig (.threadlinerc file).
+ * Falls back to environment variables if not in config, then to defaults.
  *
  * Returns undefined if OPENAI_API_KEY is not set.
  *
  * Note: .env.local is automatically loaded at CLI startup (see index.ts).
  * In CI/CD, environment variables are injected directly into process.env.
  */
- function getOpenAIConfig() {
+ function getOpenAIConfig(config) {
  const apiKey = process.env.OPENAI_API_KEY;
  if (!apiKey) {
  logger_1.logger.debug('OPENAI_API_KEY: not set (direct mode unavailable)');
  return undefined;
  }
  logger_1.logger.debug('OPENAI_API_KEY: found (value hidden for security)');
- const model = process.env.OPENAI_MODEL || OPENAI_MODEL_DEFAULT;
- const serviceTier = process.env.OPENAI_SERVICE_TIER || OPENAI_SERVICE_TIER_DEFAULT;
- if (process.env.OPENAI_MODEL) {
+ // Priority: config file > environment variable > default
+ const model = config?.openai_model || process.env.OPENAI_MODEL || OPENAI_MODEL_DEFAULT;
+ const serviceTier = config?.openai_service_tier || process.env.OPENAI_SERVICE_TIER || OPENAI_SERVICE_TIER_DEFAULT;
+ if (config?.openai_model) {
+ logger_1.logger.debug(`OPENAI_MODEL: ${model} (from .threadlinerc)`);
+ }
+ else if (process.env.OPENAI_MODEL) {
  logger_1.logger.debug(`OPENAI_MODEL: ${model} (from environment)`);
  }
  else {
  logger_1.logger.debug(`OPENAI_MODEL: ${model} (using default)`);
  }
- if (process.env.OPENAI_SERVICE_TIER) {
+ if (config?.openai_service_tier) {
+ logger_1.logger.debug(`OPENAI_SERVICE_TIER: ${serviceTier} (from .threadlinerc)`);
+ }
+ else if (process.env.OPENAI_SERVICE_TIER) {
  logger_1.logger.debug(`OPENAI_SERVICE_TIER: ${serviceTier} (from environment)`);
  }
  else {
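The net effect of this hunk is a three-level precedence for model and service tier: the .threadlinerc value, then the environment variable, then the built-in default, each implemented as an || chain. Condensed into a sketch (resolveModel is a hypothetical helper; the default value mirrors DEFAULT_CONFIG in this release):

// Condensed sketch of the same precedence; resolveModel is a hypothetical helper.
const OPENAI_MODEL_DEFAULT = 'gpt-5.2'; // mirrors DEFAULT_CONFIG in this release
function resolveModel(config) {
  return config?.openai_model       // 1. .threadlinerc wins
      || process.env.OPENAI_MODEL   // 2. then the environment
      || OPENAI_MODEL_DEFAULT;      // 3. then the built-in default
}
console.log(resolveModel({ openai_model: 'gpt-4o' })); // 'gpt-4o' (config wins)
console.log(resolveModel(undefined));                  // env value, else 'gpt-5.2'
// Caveat: || treats '' as unset, so an empty string in .threadlinerc falls through.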
@@ -92,10 +98,10 @@ function getOpenAIConfig() {
  * Call this when starting direct LLM mode to inform the user.
  */
  function logOpenAIConfig(config) {
- console.log(chalk_1.default.blue('OpenAI Direct Mode:'));
- console.log(chalk_1.default.gray(` Model: ${config.model}${config.model === OPENAI_MODEL_DEFAULT ? ' (default)' : ''}`));
- console.log(chalk_1.default.gray(` Service Tier: ${config.serviceTier}${config.serviceTier === OPENAI_SERVICE_TIER_DEFAULT ? ' (default)' : ''}`));
- console.log('');
+ logger_1.logger.output(chalk_1.default.blue('OpenAI Direct Mode:'));
+ logger_1.logger.output(chalk_1.default.gray(` Model: ${config.model}${config.model === OPENAI_MODEL_DEFAULT ? ' (default)' : ''}`));
+ logger_1.logger.output(chalk_1.default.gray(` Service Tier: ${config.serviceTier}${config.serviceTier === OPENAI_SERVICE_TIER_DEFAULT ? ' (default)' : ''}`));
+ logger_1.logger.output('');
  }
  /**
  * Checks if direct OpenAI mode is available (OPENAI_API_KEY is set).