@eldrforge/ai-service 0.1.1

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/.github/dependabot.yml +12 -0
  2. package/.github/workflows/npm-publish.yml +48 -0
  3. package/.github/workflows/test.yml +33 -0
  4. package/LICENSE +190 -0
  5. package/README.md +48 -0
  6. package/dist/index.js +816 -0
  7. package/dist/instructions/commit.md +133 -0
  8. package/dist/instructions/release.md +188 -0
  9. package/dist/instructions/review.md +169 -0
  10. package/dist/personas/releaser.md +24 -0
  11. package/dist/personas/you.md +55 -0
  12. package/dist/src/ai.d.ts.map +1 -0
  13. package/dist/src/index.d.ts.map +1 -0
  14. package/dist/src/interactive.d.ts.map +1 -0
  15. package/dist/src/logger.d.ts.map +1 -0
  16. package/dist/src/prompts/commit.d.ts.map +1 -0
  17. package/dist/src/prompts/index.d.ts.map +1 -0
  18. package/dist/src/prompts/release.d.ts.map +1 -0
  19. package/dist/src/prompts/review.d.ts.map +1 -0
  20. package/dist/src/types.d.ts.map +1 -0
  21. package/eslint.config.mjs +84 -0
  22. package/package.json +75 -0
  23. package/src/ai.ts +421 -0
  24. package/src/index.ts +14 -0
  25. package/src/interactive.ts +562 -0
  26. package/src/logger.ts +69 -0
  27. package/src/prompts/commit.ts +85 -0
  28. package/src/prompts/index.ts +28 -0
  29. package/src/prompts/instructions/commit.md +133 -0
  30. package/src/prompts/instructions/release.md +188 -0
  31. package/src/prompts/instructions/review.md +169 -0
  32. package/src/prompts/personas/releaser.md +24 -0
  33. package/src/prompts/personas/you.md +55 -0
  34. package/src/prompts/release.ts +118 -0
  35. package/src/prompts/review.ts +72 -0
  36. package/src/types.ts +112 -0
  37. package/tests/ai-complete-coverage.test.ts +241 -0
  38. package/tests/ai-create-completion.test.ts +288 -0
  39. package/tests/ai-edge-cases.test.ts +221 -0
  40. package/tests/ai-openai-error.test.ts +35 -0
  41. package/tests/ai-transcribe.test.ts +169 -0
  42. package/tests/ai.test.ts +139 -0
  43. package/tests/interactive-editor.test.ts +253 -0
  44. package/tests/interactive-secure-temp.test.ts +264 -0
  45. package/tests/interactive-user-choice.test.ts +173 -0
  46. package/tests/interactive-user-text.test.ts +174 -0
  47. package/tests/interactive.test.ts +94 -0
  48. package/tests/logger-noop.test.ts +40 -0
  49. package/tests/logger.test.ts +122 -0
  50. package/tests/prompts.test.ts +179 -0
  51. package/tsconfig.json +35 -0
  52. package/vite.config.ts +69 -0
  53. package/vitest.config.ts +25 -0
package/dist/index.js ADDED
@@ -0,0 +1,816 @@
+ import { OpenAI } from "openai";
+ import { safeJsonParse } from "@eldrforge/git-tools";
+ import fs$1 from "fs";
+ import { spawnSync } from "child_process";
+ import * as path from "path";
+ import path__default from "path";
+ import * as os from "os";
+ import * as fs from "fs/promises";
+ import { recipe } from "@riotprompt/riotprompt";
+ import { fileURLToPath } from "url";
+ let logger;
+ function setLogger(customLogger) {
+   logger = customLogger;
+ }
+ function createNoOpLogger() {
+   return {
+     info: () => {
+     },
+     error: () => {
+     },
+     warn: () => {
+     },
+     debug: () => {
+     }
+   };
+ }
+ function tryLoadWinston() {
+   try {
+     const winston = require("winston");
+     if (winston && winston.createLogger) {
+       return winston.createLogger({
+         level: "info",
+         format: winston.format.simple(),
+         transports: [new winston.transports.Console()]
+       });
+     }
+   } catch {
+   }
+   return null;
+ }
+ function getLogger() {
+   if (logger) {
+     return logger;
+   }
+   const winstonLogger = tryLoadWinston();
+   if (winstonLogger) {
+     logger = winstonLogger;
+     return winstonLogger;
+   }
+   return createNoOpLogger();
+ }
+ function getModelForCommand(config, commandName) {
+   let commandModel;
+   switch (commandName) {
+     case "commit":
+     case "audio-commit":
+       commandModel = config.commands?.commit?.model;
+       break;
+     case "release":
+       commandModel = config.commands?.release?.model;
+       break;
+     case "review":
+     case "audio-review":
+       commandModel = config.commands?.review?.model;
+       break;
+   }
+   return commandModel || config.model || "gpt-4o-mini";
+ }
+ function getOpenAIReasoningForCommand(config, commandName) {
+   let commandReasoning;
+   switch (commandName) {
+     case "commit":
+     case "audio-commit":
+       commandReasoning = config.commands?.commit?.reasoning;
+       break;
+     case "release":
+       commandReasoning = config.commands?.release?.reasoning;
+       break;
+     case "review":
+     case "audio-review":
+       commandReasoning = config.commands?.review?.reasoning;
+       break;
+   }
+   return commandReasoning || config.reasoning || "low";
+ }
+ class OpenAIError extends Error {
+   constructor(message, isTokenLimitError2 = false) {
+     super(message);
+     this.isTokenLimitError = isTokenLimitError2;
+     this.name = "OpenAIError";
+   }
+ }
+ function isTokenLimitError(error) {
+   if (!error?.message) return false;
+   const message = error.message.toLowerCase();
+   return message.includes("maximum context length") || message.includes("context_length_exceeded") || message.includes("token limit") || message.includes("too many tokens") || message.includes("reduce the length");
+ }
+ function isRateLimitError(error) {
+   if (!error?.message && !error?.code && !error?.status) return false;
+   if (error.status === 429 || error.code === "rate_limit_exceeded") {
+     return true;
+   }
+   if (error.message) {
+     const message = error.message.toLowerCase();
+     return message.includes("rate limit exceeded") || message.includes("too many requests") || message.includes("quota exceeded") || message.includes("rate") && message.includes("limit");
+   }
+   return false;
+ }
+ async function createCompletion(messages, options = { model: "gpt-4o-mini" }) {
+   const logger2 = options.logger || getLogger();
+   let openai = null;
+   try {
+     const apiKey = process.env.OPENAI_API_KEY;
+     if (!apiKey) {
+       throw new OpenAIError("OPENAI_API_KEY environment variable is not set");
+     }
+     const timeoutMs = parseInt(process.env.OPENAI_TIMEOUT_MS || "300000");
+     openai = new OpenAI({
+       apiKey,
+       timeout: timeoutMs
+     });
+     const modelToUse = options.model || "gpt-4o-mini";
+     const requestSize = JSON.stringify(messages).length;
+     const requestSizeKB = (requestSize / 1024).toFixed(2);
+     const reasoningInfo = options.openaiReasoning ? ` | Reasoning: ${options.openaiReasoning}` : "";
+     logger2.info("🤖 Making request to OpenAI");
+     logger2.info(" Model: %s%s", modelToUse, reasoningInfo);
+     logger2.info(" Request size: %s KB (%s bytes)", requestSizeKB, requestSize.toLocaleString());
+     logger2.debug("Sending prompt to OpenAI: %j", messages);
+     const maxCompletionTokens = options.openaiMaxOutputTokens ?? options.maxTokens ?? 1e4;
+     if (options.debug && (options.debugRequestFile || options.debugFile) && options.storage) {
+       const requestData = {
+         model: modelToUse,
+         messages,
+         max_completion_tokens: maxCompletionTokens,
+         response_format: options.responseFormat,
+         reasoning_effort: options.openaiReasoning
+       };
+       const debugFile = options.debugRequestFile || options.debugFile;
+       await options.storage.writeTemp(debugFile, JSON.stringify(requestData, null, 2));
+       logger2.debug("Wrote request debug file to %s", debugFile);
+     }
+     const apiOptions = {
+       model: modelToUse,
+       messages,
+       max_completion_tokens: maxCompletionTokens,
+       response_format: options.responseFormat
+     };
+     if (options.openaiReasoning && (modelToUse.includes("gpt-5") || modelToUse.includes("o3"))) {
+       apiOptions.reasoning_effort = options.openaiReasoning;
+     }
+     const startTime = Date.now();
+     const completionPromise = openai.chat.completions.create(apiOptions);
+     let timeoutId = null;
+     const timeoutPromise = new Promise((_, reject) => {
+       const timeoutMs2 = parseInt(process.env.OPENAI_TIMEOUT_MS || "300000");
+       timeoutId = setTimeout(() => reject(new OpenAIError(`OpenAI API call timed out after ${timeoutMs2 / 1e3} seconds`)), timeoutMs2);
+     });
+     let completion;
+     try {
+       completion = await Promise.race([completionPromise, timeoutPromise]);
+     } finally {
+       if (timeoutId !== null) {
+         clearTimeout(timeoutId);
+       }
+     }
+     const elapsedTime = Date.now() - startTime;
+     if (options.debug && (options.debugResponseFile || options.debugFile) && options.storage) {
+       const debugFile = options.debugResponseFile || options.debugFile;
+       await options.storage.writeTemp(debugFile, JSON.stringify(completion, null, 2));
+       logger2.debug("Wrote response debug file to %s", debugFile);
+     }
+     const response = completion.choices[0]?.message?.content?.trim();
+     if (!response) {
+       throw new OpenAIError("No response received from OpenAI");
+     }
+     const responseSize = response.length;
+     const responseSizeKB = (responseSize / 1024).toFixed(2);
+     logger2.info(" Response size: %s KB (%s bytes)", responseSizeKB, responseSize.toLocaleString());
+     const elapsedTimeFormatted = elapsedTime >= 1e3 ? `${(elapsedTime / 1e3).toFixed(1)}s` : `${elapsedTime}ms`;
+     logger2.info(" Time: %s", elapsedTimeFormatted);
+     if (completion.usage) {
+       logger2.info(
+         " Token usage: %s prompt + %s completion = %s total",
+         completion.usage.prompt_tokens?.toLocaleString() || "?",
+         completion.usage.completion_tokens?.toLocaleString() || "?",
+         completion.usage.total_tokens?.toLocaleString() || "?"
+       );
+     }
+     logger2.debug("Received response from OpenAI: %s...", response.substring(0, 30));
+     if (options.responseFormat) {
+       return safeJsonParse(response, "OpenAI API response");
+     } else {
+       return response;
+     }
+   } catch (error) {
+     logger2.error("Error calling OpenAI API: %s %s", error.message, error.stack);
+     const isTokenError = isTokenLimitError(error);
+     throw new OpenAIError(`Failed to create completion: ${error.message}`, isTokenError);
+   } finally {
+   }
+ }
+ async function createCompletionWithRetry(messages, options = { model: "gpt-4o-mini" }, retryCallback) {
+   const logger2 = options.logger || getLogger();
+   const maxRetries = 3;
+   for (let attempt = 1; attempt <= maxRetries; attempt++) {
+     try {
+       const messagesToSend = attempt === 1 ? messages : retryCallback ? await retryCallback(attempt) : messages;
+       return await createCompletion(messagesToSend, options);
+     } catch (error) {
+       if (error instanceof OpenAIError && error.isTokenLimitError && attempt < maxRetries && retryCallback) {
+         logger2.warn("Token limit exceeded on attempt %d/%d, retrying with reduced content...", attempt, maxRetries);
+         const backoffMs = Math.min(1e3 * Math.pow(2, attempt - 1), 1e4);
+         await new Promise((resolve) => setTimeout(resolve, backoffMs));
+         continue;
+       } else if (isRateLimitError(error) && attempt < maxRetries) {
+         const backoffMs = Math.min(2e3 * Math.pow(2, attempt - 1), 15e3);
+         logger2.warn(`Rate limit hit on attempt ${attempt}/${maxRetries}, waiting ${backoffMs}ms before retry...`);
+         await new Promise((resolve) => setTimeout(resolve, backoffMs));
+         continue;
+       }
+       throw error;
+     }
+   }
+   throw new OpenAIError("Max retries exceeded");
+ }
+ async function transcribeAudio(filePath, options = { model: "whisper-1" }) {
+   const logger2 = options.logger || getLogger();
+   let openai = null;
+   let audioStream = null;
+   let streamClosed = false;
+   const closeAudioStream = () => {
+     if (audioStream && !streamClosed) {
+       try {
+         if (typeof audioStream.destroy === "function" && !audioStream.destroyed) {
+           audioStream.destroy();
+         }
+         streamClosed = true;
+         logger2.debug("Audio stream closed successfully");
+       } catch (streamErr) {
+         logger2.debug("Failed to destroy audio read stream: %s", streamErr.message);
+         streamClosed = true;
+       }
+     }
+   };
+   try {
+     const apiKey = process.env.OPENAI_API_KEY;
+     if (!apiKey) {
+       throw new OpenAIError("OPENAI_API_KEY environment variable is not set");
+     }
+     openai = new OpenAI({
+       apiKey
+     });
+     logger2.debug("Transcribing audio file: %s", filePath);
+     if (options.debug && (options.debugRequestFile || options.debugFile) && options.storage) {
+       const requestData = {
+         model: options.model || "whisper-1",
+         file: filePath,
+         // Can't serialize the stream, so just save the file path
+         response_format: "json"
+       };
+       const debugFile = options.debugRequestFile || options.debugFile;
+       await options.storage.writeTemp(debugFile, JSON.stringify(requestData, null, 2));
+       logger2.debug("Wrote request debug file to %s", debugFile);
+     }
+     audioStream = fs$1.createReadStream(filePath);
+     if (audioStream && typeof audioStream.on === "function") {
+       audioStream.on("error", (streamError) => {
+         logger2.error("Audio stream error: %s", streamError.message);
+         closeAudioStream();
+       });
+     }
+     let transcription;
+     try {
+       transcription = await openai.audio.transcriptions.create({
+         model: options.model || "whisper-1",
+         file: audioStream,
+         response_format: "json"
+       });
+       closeAudioStream();
+     } catch (apiError) {
+       closeAudioStream();
+       throw apiError;
+     }
+     if (options.debug && (options.debugResponseFile || options.debugFile) && options.storage) {
+       const debugFile = options.debugResponseFile || options.debugFile;
+       await options.storage.writeTemp(debugFile, JSON.stringify(transcription, null, 2));
+       logger2.debug("Wrote response debug file to %s", debugFile);
+     }
+     const response = transcription;
+     if (!response) {
+       throw new OpenAIError("No transcription received from OpenAI");
+     }
+     logger2.debug("Received transcription from OpenAI: %s", response);
+     if (options.onArchive) {
+       try {
+         await options.onArchive(filePath, response.text);
+       } catch (archiveError) {
+         logger2.warn("Failed to archive audio file: %s", archiveError.message);
+       }
+     }
+     return response;
+   } catch (error) {
+     logger2.error("Error transcribing audio file: %s %s", error.message, error.stack);
+     throw new OpenAIError(`Failed to transcribe audio: ${error.message}`);
+   } finally {
+     closeAudioStream();
+   }
+ }
+ async function getUserChoice(prompt, choices, options = {}) {
+   const logger2 = options.logger || getLogger();
+   logger2.info(prompt);
+   choices.forEach((choice) => {
+     logger2.info(` [${choice.key}] ${choice.label}`);
+   });
+   logger2.info("");
+   if (!process.stdin.isTTY) {
+     logger2.error("⚠️ STDIN is piped but interactive mode is enabled");
+     logger2.error(" Interactive prompts cannot be used when input is piped");
+     logger2.error(" Solutions:");
+     logger2.error(" • Use terminal input instead of piping");
+     if (options.nonTtyErrorSuggestions) {
+       options.nonTtyErrorSuggestions.forEach((suggestion) => {
+         logger2.error(` • ${suggestion}`);
+       });
+     }
+     return "s";
+   }
+   return new Promise((resolve, reject) => {
+     let isResolved = false;
+     let dataHandler = null;
+     let errorHandler = null;
+     const cleanup = () => {
+       if (dataHandler) {
+         process.stdin.removeListener("data", dataHandler);
+       }
+       if (errorHandler) {
+         process.stdin.removeListener("error", errorHandler);
+       }
+       try {
+         if (process.stdin.setRawMode) {
+           process.stdin.setRawMode(false);
+         }
+         process.stdin.pause();
+         if (typeof process.stdin.unref === "function") {
+           process.stdin.unref();
+         }
+       } catch {
+       }
+     };
+     const safeResolve = (value) => {
+       if (!isResolved) {
+         isResolved = true;
+         cleanup();
+         resolve(value);
+       }
+     };
+     const safeReject = (error) => {
+       if (!isResolved) {
+         isResolved = true;
+         cleanup();
+         reject(error);
+       }
+     };
+     try {
+       if (typeof process.stdin.ref === "function") {
+         process.stdin.ref();
+       }
+       process.stdin.setRawMode(true);
+       process.stdin.resume();
+       dataHandler = (key) => {
+         try {
+           const keyStr = key.toString().toLowerCase();
+           const choice = choices.find((c) => c.key === keyStr);
+           if (choice) {
+             logger2.info(`Selected: ${choice.label}
+ `);
+             safeResolve(choice.key);
+           }
+         } catch (error) {
+           safeReject(error instanceof Error ? error : new Error("Unknown error processing input"));
+         }
+       };
+       errorHandler = (error) => {
+         safeReject(error);
+       };
+       process.stdin.on("data", dataHandler);
+       process.stdin.on("error", errorHandler);
+     } catch (error) {
+       safeReject(error instanceof Error ? error : new Error("Failed to setup input handlers"));
+     }
+   });
+ }
+ class SecureTempFile {
+   fd = null;
+   filePath;
+   isCleanedUp = false;
+   logger;
+   constructor(filePath, fd, logger2) {
+     this.filePath = filePath;
+     this.fd = fd;
+     this.logger = logger2 || getLogger();
+   }
+   /**
+    * Create a secure temporary file with proper permissions and atomic operations
+    * @param prefix Prefix for the temporary filename
+    * @param extension File extension (e.g., '.txt', '.md')
+    * @param logger Optional logger instance
+    * @returns Promise resolving to SecureTempFile instance
+    */
+   static async create(prefix = "ai-service", extension = ".txt", logger2) {
+     const tmpDir = os.tmpdir();
+     const log = logger2 || getLogger();
+     if (!process.env.VITEST) {
+       try {
+         await fs.access(tmpDir, fs.constants.W_OK);
+       } catch (error) {
+         try {
+           await fs.mkdir(tmpDir, { recursive: true, mode: 448 });
+         } catch (mkdirError) {
+           throw new Error(`Temp directory not writable: ${tmpDir} - ${error.message}. Failed to create: ${mkdirError.message}`);
+         }
+       }
+     }
+     const tmpFilePath = path.join(tmpDir, `${prefix}_${Date.now()}_${Math.random().toString(36).substring(7)}${extension}`);
+     let fd;
+     try {
+       fd = await fs.open(tmpFilePath, "wx", 384);
+     } catch (error) {
+       if (error.code === "EEXIST") {
+         throw new Error(`Temporary file already exists: ${tmpFilePath}`);
+       }
+       throw new Error(`Failed to create temporary file: ${error.message}`);
+     }
+     return new SecureTempFile(tmpFilePath, fd, log);
+   }
+   /**
+    * Get the file path (use with caution in external commands)
+    */
+   get path() {
+     if (this.isCleanedUp) {
+       throw new Error("Temp file has been cleaned up");
+     }
+     return this.filePath;
+   }
+   /**
+    * Write content to the temporary file
+    */
+   async writeContent(content) {
+     if (!this.fd || this.isCleanedUp) {
+       throw new Error("Temp file is not available for writing");
+     }
+     await this.fd.writeFile(content, "utf8");
+   }
+   /**
+    * Read content from the temporary file
+    */
+   async readContent() {
+     if (!this.fd || this.isCleanedUp) {
+       throw new Error("Temp file is not available for reading");
+     }
+     const content = await this.fd.readFile("utf8");
+     return content;
+   }
+   /**
+    * Close the file handle
+    */
+   async close() {
+     if (this.fd && !this.isCleanedUp) {
+       await this.fd.close();
+       this.fd = null;
+     }
+   }
+   /**
+    * Securely cleanup the temporary file - prevents TOCTOU by using file descriptor
+    */
+   async cleanup() {
+     if (this.isCleanedUp) {
+       return;
+     }
+     try {
+       if (this.fd) {
+         await this.fd.close();
+         this.fd = null;
+       }
+       await fs.unlink(this.filePath);
+     } catch (error) {
+       if (error.code !== "ENOENT") {
+         this.logger.warn(`Failed to cleanup temp file ${this.filePath}: ${error.message}`);
+       }
+     } finally {
+       this.isCleanedUp = true;
+     }
+   }
+ }
+ async function createSecureTempFile(prefix = "ai-service", extension = ".txt", logger2) {
+   const secureTempFile = await SecureTempFile.create(prefix, extension, logger2);
+   await secureTempFile.close();
+   return secureTempFile.path;
+ }
+ async function cleanupTempFile(filePath, logger2) {
+   const log = logger2 || getLogger();
+   try {
+     await fs.unlink(filePath);
+   } catch (error) {
+     if (error.code !== "ENOENT") {
+       log.warn(`Failed to cleanup temp file ${filePath}: ${error.message}`);
+     }
+   }
+ }
+ async function editContentInEditor(content, templateLines = [], fileExtension = ".txt", editor, logger2) {
+   const log = logger2 || getLogger();
+   const editorCmd = editor || process.env.EDITOR || process.env.VISUAL || "vi";
+   const secureTempFile = await SecureTempFile.create("ai-service_edit", fileExtension, log);
+   try {
+     const templateContent = [
+       ...templateLines,
+       ...templateLines.length > 0 ? [""] : [],
+       // Add separator if we have template lines
+       content,
+       ""
+     ].join("\n");
+     await secureTempFile.writeContent(templateContent);
+     await secureTempFile.close();
+     log.info(`📝 Opening ${editorCmd} to edit content...`);
+     const result = spawnSync(editorCmd, [secureTempFile.path], { stdio: "inherit" });
+     if (result.error) {
+       throw new Error(`Failed to launch editor '${editorCmd}': ${result.error.message}`);
+     }
+     const fileContent = (await fs.readFile(secureTempFile.path, "utf8")).split("\n").filter((line) => !line.trim().startsWith("#")).join("\n").trim();
+     if (!fileContent) {
+       throw new Error("Content is empty after editing");
+     }
+     log.info("✅ Content updated successfully");
+     return {
+       content: fileContent,
+       wasEdited: fileContent !== content.trim()
+     };
+   } finally {
+     await secureTempFile.cleanup();
+   }
+ }
+ const STANDARD_CHOICES = {
+   CONFIRM: { key: "c", label: "Confirm and proceed" },
+   EDIT: { key: "e", label: "Edit in editor" },
+   SKIP: { key: "s", label: "Skip and abort" },
+   IMPROVE: { key: "i", label: "Improve with LLM feedback" }
+ };
+ async function getUserTextInput(prompt, options = {}) {
+   const logger2 = options.logger || getLogger();
+   if (!process.stdin.isTTY) {
+     logger2.error("⚠️ STDIN is piped but interactive text input is required");
+     logger2.error(" Interactive text input cannot be used when input is piped");
+     logger2.error(" Solutions:");
+     logger2.error(" • Use terminal input instead of piping");
+     if (options.nonTtyErrorSuggestions) {
+       options.nonTtyErrorSuggestions.forEach((suggestion) => {
+         logger2.error(` • ${suggestion}`);
+       });
+     }
+     throw new Error("Interactive text input requires a terminal");
+   }
+   logger2.info(prompt);
+   logger2.info("(Press Enter when done, or type Ctrl+C to cancel)");
+   logger2.info("");
+   return new Promise((resolve, reject) => {
+     let inputBuffer = "";
+     let isResolved = false;
+     let dataHandler = null;
+     let errorHandler = null;
+     const cleanup = () => {
+       if (dataHandler) {
+         process.stdin.removeListener("data", dataHandler);
+       }
+       if (errorHandler) {
+         process.stdin.removeListener("error", errorHandler);
+       }
+       try {
+         process.stdin.pause();
+         if (typeof process.stdin.unref === "function") {
+           process.stdin.unref();
+         }
+       } catch {
+       }
+     };
+     const safeResolve = (value) => {
+       if (!isResolved) {
+         isResolved = true;
+         cleanup();
+         resolve(value);
+       }
+     };
+     const safeReject = (error) => {
+       if (!isResolved) {
+         isResolved = true;
+         cleanup();
+         reject(error);
+       }
+     };
+     try {
+       if (typeof process.stdin.ref === "function") {
+         process.stdin.ref();
+       }
+       process.stdin.setEncoding("utf8");
+       process.stdin.resume();
+       dataHandler = (chunk) => {
+         try {
+           inputBuffer += chunk;
+           if (inputBuffer.includes("\n")) {
+             const userInput = inputBuffer.replace(/\n$/, "").trim();
+             if (userInput === "") {
+               logger2.warn("Empty input received. Please provide feedback text.");
+               safeReject(new Error("Empty input received"));
+             } else {
+               logger2.info(`✅ Received feedback: "${userInput}"
+ `);
+               safeResolve(userInput);
+             }
+           }
+         } catch (error) {
+           safeReject(error instanceof Error ? error : new Error("Unknown error processing input"));
+         }
+       };
+       errorHandler = (error) => {
+         safeReject(error);
+       };
+       process.stdin.on("data", dataHandler);
+       process.stdin.on("error", errorHandler);
+     } catch (error) {
+       safeReject(error instanceof Error ? error : new Error("Failed to setup input handlers"));
+     }
+   });
+ }
+ async function getLLMFeedbackInEditor(contentType, currentContent, editor, logger2) {
+   const templateLines = [
+     "# Provide Your Instructions and Guidance for a Revision Here",
+     "#",
+     "# Type your guidance above this line. Be specific about what you want changed,",
+     "# added, or improved. You can also edit the original content below directly",
+     "# to provide examples or show desired changes.",
+     "#",
+     '# Lines starting with "#" will be ignored.',
+     "",
+     "### YOUR FEEDBACK AND GUIDANCE:",
+     "",
+     "# (Type your improvement instructions here)",
+     "",
+     `### ORIGINAL ${contentType.toUpperCase()}:`,
+     ""
+   ];
+   const result = await editContentInEditor(
+     currentContent,
+     templateLines,
+     ".md",
+     editor,
+     logger2
+   );
+   const lines = result.content.split("\n");
+   const originalSectionIndex = lines.findIndex(
+     (line) => line.trim().toLowerCase().startsWith("### original")
+   );
+   let feedback;
+   if (originalSectionIndex >= 0) {
+     feedback = lines.slice(0, originalSectionIndex).join("\n").trim();
+   } else {
+     feedback = result.content.trim();
+   }
+   feedback = feedback.replace(/^### YOUR FEEDBACK AND GUIDANCE:\s*/i, "").trim();
+   if (!feedback) {
+     throw new Error("No feedback provided. Please provide improvement instructions.");
+   }
+   return feedback;
+ }
+ function requireTTY(errorMessage = "Interactive mode requires a terminal. Use --dry-run instead.", logger2) {
+   const log = logger2 || getLogger();
+   if (!process.stdin.isTTY) {
+     log.error("❌ Interactive mode requires a terminal (TTY)");
+     log.error(" Solutions:");
+     log.error(" • Run without piping input");
+     log.error(" • Use --dry-run to see the generated content");
+     throw new Error(errorMessage);
+   }
+ }
+ const __filename$3 = fileURLToPath(import.meta.url);
+ const __dirname$3 = path__default.dirname(__filename$3);
+ const createCommitPrompt = async ({ overridePaths: _overridePaths, overrides: _overrides }, { diffContent, userDirection, isFileContent, githubIssuesContext }, { logContext, context, directories } = {}) => {
+   const basePath = __dirname$3;
+   const contentItems = [];
+   const contextItems = [];
+   if (userDirection) {
+     contentItems.push({ content: userDirection, title: "User Direction" });
+   }
+   if (diffContent) {
+     const contentTitle = isFileContent ? "Project Files" : "Diff";
+     contentItems.push({ content: diffContent, title: contentTitle });
+   }
+   if (githubIssuesContext) {
+     contentItems.push({ content: githubIssuesContext, title: "Recent GitHub Issues" });
+   }
+   if (logContext) {
+     contextItems.push({ content: logContext, title: "Log Context" });
+   }
+   if (context) {
+     contextItems.push({ content: context, title: "User Context" });
+   }
+   if (directories && directories.length > 0) {
+     contextItems.push({ directories, title: "Directories" });
+   }
+   return recipe(basePath).persona({ path: "personas/you.md" }).instructions({ path: "instructions/commit.md" }).overridePaths(_overridePaths ?? []).overrides(_overrides ?? true).content(...contentItems).context(...contextItems).cook();
+ };
+ const __filename$2 = fileURLToPath(import.meta.url);
+ const __dirname$2 = path__default.dirname(__filename$2);
+ const analyzeReleaseSize = (logContent, diffContent, milestoneIssues) => {
+   const logLines = logContent.split("\n").length;
+   const diffLines = diffContent ? diffContent.split("\n").length : 0;
+   const milestoneLines = milestoneIssues ? milestoneIssues.split("\n").length : 0;
+   const totalContentLength = logContent.length + (diffContent?.length || 0) + (milestoneIssues?.length || 0);
+   const isLarge = logLines > 60 || diffLines > 500 || milestoneLines > 50 || totalContentLength > 5e4;
+   if (isLarge) {
+     return { isLarge: true, maxTokens: 25e3 };
+   } else {
+     return { isLarge: false, maxTokens: 1e4 };
+   }
+ };
+ const createReleasePrompt = async ({ overrides: _overrides, overridePaths: _overridePaths }, { releaseFocus, logContent, diffContent, milestoneIssues }, { context, directories } = {}) => {
+   const basePath = __dirname$2;
+   const { isLarge: isLargeRelease, maxTokens } = analyzeReleaseSize(logContent, diffContent, milestoneIssues);
+   const contentItems = [];
+   const contextItems = [];
+   if (diffContent) {
+     contentItems.push({ content: diffContent, title: "Diff" });
+   }
+   if (logContent) {
+     contentItems.push({ content: logContent, title: "Log Context" });
+   }
+   if (milestoneIssues) {
+     contentItems.push({ content: milestoneIssues, title: "Resolved Issues from Milestone" });
+   }
+   if (releaseFocus) {
+     contentItems.push({ content: releaseFocus, title: "Release Focus" });
+   }
+   if (isLargeRelease) {
+     contextItems.push({
+       content: `This appears to be a LARGE RELEASE with significant changes. Please provide comprehensive, detailed release notes that thoroughly document all major changes, improvements, and fixes. Don't summarize - dive deep into the details.`,
+       title: "Release Size Context"
+     });
+   }
+   if (context) {
+     contextItems.push({ content: context, title: "User Context" });
+   }
+   if (directories && directories.length > 0) {
+     contextItems.push({ directories, title: "Directories" });
+   }
+   const prompt = await recipe(basePath).persona({ path: "personas/releaser.md" }).instructions({ path: "instructions/release.md" }).overridePaths(_overridePaths ?? []).overrides(_overrides ?? true).content(...contentItems).context(...contextItems).cook();
+   return {
+     prompt,
+     maxTokens,
+     isLargeRelease
+   };
+ };
+ const __filename$1 = fileURLToPath(import.meta.url);
+ const __dirname$1 = path__default.dirname(__filename$1);
+ const createReviewPrompt = async ({ overridePaths: _overridePaths, overrides: _overrides }, { notes }, { logContext, diffContext, releaseNotesContext, issuesContext, context, directories } = {}) => {
+   const basePath = __dirname$1;
+   const contentItems = [];
+   const contextItems = [];
+   if (notes) {
+     contentItems.push({ content: notes, title: "Review Notes" });
+   }
+   if (logContext) {
+     contextItems.push({ content: logContext, title: "Log Context" });
+   }
+   if (diffContext) {
+     contextItems.push({ content: diffContext, title: "Diff Context" });
+   }
+   if (releaseNotesContext) {
+     contextItems.push({ content: releaseNotesContext, title: "Release Notes Context" });
+   }
+   if (issuesContext) {
+     contextItems.push({ content: issuesContext, title: "Issues Context" });
+   }
+   if (context) {
+     contextItems.push({ content: context, title: "User Context" });
+   }
+   if (directories && directories.length > 0) {
+     contextItems.push({ directories, title: "Directories" });
+   }
+   return recipe(basePath).persona({ path: "personas/you.md" }).instructions({ path: "instructions/review.md" }).overridePaths(_overridePaths ?? []).overrides(_overrides ?? true).content(...contentItems).context(...contextItems).cook();
+ };
+ export {
+   OpenAIError,
+   STANDARD_CHOICES,
+   SecureTempFile,
+   cleanupTempFile,
+   createCommitPrompt,
+   createCompletion,
+   createCompletionWithRetry,
+   createNoOpLogger,
+   createReleasePrompt,
+   createReviewPrompt,
+   createSecureTempFile,
+   editContentInEditor,
+   getLLMFeedbackInEditor,
+   getLogger,
+   getModelForCommand,
+   getOpenAIReasoningForCommand,
+   getUserChoice,
+   getUserTextInput,
+   isRateLimitError,
+   isTokenLimitError,
+   requireTTY,
+   setLogger,
+   transcribeAudio,
+   tryLoadWinston
+ };
+ //# sourceMappingURL=index.js.map
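
Usage note: for orientation, a minimal consumer of the exports above might look like the following sketch. This is an illustrative example, not part of the package: it assumes OPENAI_API_KEY is set in the environment, that the package is imported under its published name, and it only uses calls and option names visible in the bundle above (createCompletion, createCompletionWithRetry, setLogger, createNoOpLogger; model, maxTokens). Message contents and the retry callback are hypothetical.

import { createCompletion, createCompletionWithRetry, createNoOpLogger, setLogger } from "@eldrforge/ai-service";

// Silence the package's logging; otherwise getLogger() falls back to winston
// when it is installed, or to a no-op logger.
setLogger(createNoOpLogger());

// createCompletion resolves to the response text, or to parsed JSON when
// options.responseFormat is supplied.
const summary = await createCompletion(
  [{ role: "user", content: "Summarize this diff in one sentence." }],
  { model: "gpt-4o-mini", maxTokens: 500 }
);

// createCompletionWithRetry makes up to 3 attempts (per the code above); the
// optional callback can rebuild smaller messages after a token-limit error.
const notes = await createCompletionWithRetry(
  [{ role: "user", content: "Draft release notes from this log." }],
  { model: "gpt-4o-mini" },
  async (attempt) => [{ role: "user", content: `Trimmed input for attempt ${attempt}` }]
);

console.log(summary, notes);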