@involvex/prompt-enhancer 0.0.3 → 0.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.js +83 -23
- package/dist/commands/direct-enhance.d.ts +7 -1
- package/dist/commands/direct-enhance.js +66 -4
- package/dist/commands/help.js +1 -2
- package/dist/lib/providers/copilot.js +38 -0
- package/dist/lib/providers/gemini.js +24 -2
- package/dist/lib/providers/kilo.js +30 -2
- package/dist/lib/providers/opencode.js +30 -2
- package/dist/lib/utils/output-writer.d.ts +13 -0
- package/dist/lib/utils/output-writer.js +70 -0
- package/dist/lib/utils/runtime-logging.d.ts +16 -0
- package/dist/lib/utils/runtime-logging.js +94 -0
- package/dist/prompt-enhancer.exe +0 -0
- package/package.json +1 -1
- package/readme.md +19 -3
package/dist/cli.js
CHANGED
|
@@ -4,50 +4,110 @@ import { render } from 'ink';
|
|
|
4
4
|
import meow from 'meow';
|
|
5
5
|
import App from './app.js';
|
|
6
6
|
import Help from './commands/help.js';
|
|
7
|
-
import hasFlag from 'has-flag';
|
|
8
7
|
import About from './commands/about.js';
|
|
9
8
|
import DirectEnhance from './commands/direct-enhance.js';
|
|
9
|
+
import DisplayVersion from './commands/version.js';
|
|
10
|
+
import { configureRuntimeLogging, normalizeVerboseLevel, writeRuntimeBanner, } from './lib/utils/runtime-logging.js';
|
|
10
11
|
// Handle Ctrl+C globally to exit from any screen
|
|
11
12
|
process.on('SIGINT', () => {
|
|
12
13
|
process.exit(0);
|
|
13
14
|
});
|
|
14
15
|
const cli = meow(`
|
|
15
|
-
|
|
16
|
-
$ prompt-enhancer
|
|
17
|
-
|
|
18
|
-
Options
|
|
19
|
-
--prompt,-p Your prompt to enhance
|
|
20
|
-
|
|
21
|
-
Examples
|
|
22
|
-
$ prompt-enhancer
|
|
23
|
-
$ prompt-enhancer --prompt="Your prompt here"
|
|
24
|
-
$ prompt-enhancer -p "Your prompt here"
|
|
16
|
+
Run "prompt-enhancer --help" for comprehensive usage.
|
|
25
17
|
`, {
|
|
26
18
|
importMeta: import.meta,
|
|
19
|
+
autoHelp: false,
|
|
20
|
+
autoVersion: false,
|
|
27
21
|
flags: {
|
|
28
22
|
prompt: {
|
|
29
23
|
type: 'string',
|
|
30
24
|
shortFlag: 'p',
|
|
31
25
|
},
|
|
26
|
+
help: {
|
|
27
|
+
type: 'boolean',
|
|
28
|
+
default: false,
|
|
29
|
+
shortFlag: 'h',
|
|
30
|
+
},
|
|
31
|
+
version: {
|
|
32
|
+
type: 'boolean',
|
|
33
|
+
default: false,
|
|
34
|
+
shortFlag: 'v',
|
|
35
|
+
},
|
|
36
|
+
about: {
|
|
37
|
+
type: 'boolean',
|
|
38
|
+
default: false,
|
|
39
|
+
},
|
|
40
|
+
debug: {
|
|
41
|
+
type: 'boolean',
|
|
42
|
+
default: false,
|
|
43
|
+
},
|
|
44
|
+
trace: {
|
|
45
|
+
type: 'boolean',
|
|
46
|
+
default: false,
|
|
47
|
+
},
|
|
48
|
+
verbose: {
|
|
49
|
+
type: 'number',
|
|
50
|
+
default: 1,
|
|
51
|
+
},
|
|
52
|
+
output: {
|
|
53
|
+
type: 'string',
|
|
54
|
+
shortFlag: 'o',
|
|
55
|
+
},
|
|
56
|
+
outputFormat: {
|
|
57
|
+
type: 'string',
|
|
58
|
+
default: 'auto',
|
|
59
|
+
},
|
|
32
60
|
},
|
|
33
61
|
});
|
|
34
|
-
if (cli.
|
|
62
|
+
if (!Number.isInteger(cli.flags.verbose) ||
|
|
63
|
+
cli.flags.verbose < 1 ||
|
|
64
|
+
cli.flags.verbose > 3) {
|
|
65
|
+
console.error(`Invalid --verbose value "${cli.flags.verbose}". Use integer values: 1, 2, or 3.`);
|
|
66
|
+
process.exit(1);
|
|
67
|
+
}
|
|
68
|
+
if (!['auto', 'txt', 'json', 'md'].includes(cli.flags.outputFormat.toLowerCase())) {
|
|
69
|
+
console.error(`Invalid --output-format "${cli.flags.outputFormat}". Supported formats: auto, txt, json, md.`);
|
|
70
|
+
process.exit(1);
|
|
71
|
+
}
|
|
72
|
+
const subcommand = cli.input[0]?.toLowerCase();
|
|
73
|
+
const normalizedVerbose = normalizeVerboseLevel(cli.flags.verbose);
|
|
74
|
+
const effectiveVerbose = cli.flags.trace
|
|
75
|
+
? 3
|
|
76
|
+
: cli.flags.debug
|
|
77
|
+
? Math.max(2, normalizedVerbose)
|
|
78
|
+
: normalizedVerbose;
|
|
79
|
+
configureRuntimeLogging({
|
|
80
|
+
debug: cli.flags.debug,
|
|
81
|
+
trace: cli.flags.trace,
|
|
82
|
+
verbose: effectiveVerbose,
|
|
83
|
+
});
|
|
84
|
+
if (cli.flags.debug || cli.flags.trace || effectiveVerbose > 1) {
|
|
85
|
+
writeRuntimeBanner('prompt-enhancer');
|
|
86
|
+
}
|
|
87
|
+
if (subcommand === 'help' || cli.flags.help) {
|
|
35
88
|
render(_jsx(Help, {}));
|
|
36
89
|
}
|
|
37
|
-
else if (cli.flags.
|
|
90
|
+
else if (subcommand === 'version' || cli.flags.version) {
|
|
91
|
+
render(_jsx(DisplayVersion, {}));
|
|
92
|
+
}
|
|
93
|
+
else if (subcommand === 'about' || cli.flags.about) {
|
|
94
|
+
render(_jsx(About, {}));
|
|
95
|
+
}
|
|
96
|
+
else if (cli.flags.prompt ||
|
|
97
|
+
subcommand === 'enhance' ||
|
|
98
|
+
(subcommand === 'headless' && cli.input.length > 1)) {
|
|
99
|
+
const promptFromSubcommand = subcommand === 'enhance' || subcommand === 'headless'
|
|
100
|
+
? cli.input.slice(1).join(' ').trim()
|
|
101
|
+
: '';
|
|
102
|
+
const prompt = cli.flags.prompt || promptFromSubcommand;
|
|
103
|
+
if (!prompt) {
|
|
104
|
+
console.error('Headless mode requires a prompt. Use --prompt "..." or `prompt-enhancer enhance "..."`.');
|
|
105
|
+
process.exit(1);
|
|
106
|
+
}
|
|
38
107
|
// Direct enhancement mode when --prompt/-p flag is provided
|
|
39
|
-
render(_jsx(DirectEnhance, { prompt: cli.flags.
|
|
108
|
+
render(_jsx(DirectEnhance, { prompt: prompt, debug: cli.flags.debug, trace: cli.flags.trace, verbose: effectiveVerbose, outputPath: cli.flags.output, outputFormat: cli.flags.outputFormat.toLowerCase() }));
|
|
40
109
|
}
|
|
41
110
|
else {
|
|
42
111
|
// Interactive TUI mode (default)
|
|
43
112
|
render(_jsx(App, { prompt: "" }));
|
|
44
113
|
}
|
|
45
|
-
if (hasFlag('--debug')) {
|
|
46
|
-
console.log('Debug flags:', cli.flags);
|
|
47
|
-
}
|
|
48
|
-
if (hasFlag('--version')) {
|
|
49
|
-
await import('./commands/version.js');
|
|
50
|
-
}
|
|
51
|
-
if (hasFlag('--about')) {
|
|
52
|
-
render(_jsx(About, {}));
|
|
53
|
-
}
|
|
@@ -1,5 +1,11 @@
|
|
|
1
|
+
import { type OutputFormat } from '../lib/utils/output-writer.js';
|
|
1
2
|
interface DirectEnhanceProps {
|
|
2
3
|
prompt: string;
|
|
4
|
+
debug?: boolean;
|
|
5
|
+
trace?: boolean;
|
|
6
|
+
verbose?: 1 | 2 | 3;
|
|
7
|
+
outputPath?: string;
|
|
8
|
+
outputFormat?: OutputFormat | 'auto';
|
|
3
9
|
}
|
|
4
|
-
export default function DirectEnhance({ prompt }: DirectEnhanceProps): import("react/jsx-runtime").JSX.Element;
|
|
10
|
+
export default function DirectEnhance({ prompt, debug, trace, verbose, outputPath, outputFormat, }: DirectEnhanceProps): import("react/jsx-runtime").JSX.Element;
|
|
5
11
|
export {};
|
|
@@ -4,41 +4,103 @@ import { Text, Box } from 'ink';
|
|
|
4
4
|
import { ConfigManager } from '../lib/config/manager.js';
|
|
5
5
|
import { HistoryManager } from '../lib/history/manager.js';
|
|
6
6
|
import { EnhancementEngine } from '../lib/enhancement/engine.js';
|
|
7
|
-
|
|
7
|
+
import { debugLog, formatErrorDetails, logWithLevel, traceLog, } from '../lib/utils/runtime-logging.js';
|
|
8
|
+
import { writeEnhancementOutputFile, } from '../lib/utils/output-writer.js';
|
|
9
|
+
export default function DirectEnhance({ prompt, debug = false, trace = false, verbose = 1, outputPath, outputFormat = 'auto', }) {
|
|
8
10
|
const [status, setStatus] = useState('initializing');
|
|
9
11
|
const [output, setOutput] = useState('');
|
|
10
12
|
const [error, setError] = useState('');
|
|
13
|
+
const [outputFileMessage, setOutputFileMessage] = useState('');
|
|
11
14
|
useEffect(() => {
|
|
12
15
|
(async () => {
|
|
16
|
+
const startedAt = Date.now();
|
|
17
|
+
let providerName = 'unknown';
|
|
18
|
+
let modelName = 'default';
|
|
13
19
|
try {
|
|
20
|
+
traceLog('Starting headless enhancement run', {
|
|
21
|
+
trace,
|
|
22
|
+
debug,
|
|
23
|
+
verbose,
|
|
24
|
+
outputPath: outputPath ?? null,
|
|
25
|
+
outputFormat,
|
|
26
|
+
});
|
|
14
27
|
// Initialize managers
|
|
28
|
+
traceLog('Initializing configuration manager');
|
|
15
29
|
const configManager = new ConfigManager();
|
|
16
30
|
await configManager.load(); // Load config from file
|
|
31
|
+
const config = configManager.getConfig();
|
|
32
|
+
providerName = config.defaultProvider;
|
|
33
|
+
modelName = config.defaultModel ?? 'default';
|
|
34
|
+
logWithLevel(2, 'Loaded configuration for headless mode', {
|
|
35
|
+
defaultProvider: providerName,
|
|
36
|
+
defaultModel: modelName,
|
|
37
|
+
saveHistory: config.saveHistory,
|
|
38
|
+
});
|
|
39
|
+
traceLog('Initializing history manager');
|
|
17
40
|
const historyManager = new HistoryManager();
|
|
18
41
|
await historyManager.load(); // Load history from file
|
|
42
|
+
traceLog('Initializing enhancement engine');
|
|
19
43
|
const engine = new EnhancementEngine(configManager, historyManager);
|
|
20
44
|
setStatus('enhancing');
|
|
45
|
+
logWithLevel(1, 'Enhancement request started.');
|
|
21
46
|
// Stream enhancement
|
|
22
47
|
let result = '';
|
|
48
|
+
let chunkCount = 0;
|
|
23
49
|
const generator = engine.enhanceStream({
|
|
24
50
|
prompt,
|
|
25
51
|
saveToHistory: true,
|
|
26
52
|
});
|
|
27
53
|
for await (const chunk of generator) {
|
|
54
|
+
chunkCount++;
|
|
28
55
|
result += chunk;
|
|
29
56
|
setOutput(result);
|
|
57
|
+
if (verbose >= 3) {
|
|
58
|
+
logWithLevel(3, `Received stream chunk #${chunkCount}`, {
|
|
59
|
+
chunkLength: chunk.length,
|
|
60
|
+
totalLength: result.length,
|
|
61
|
+
});
|
|
62
|
+
}
|
|
30
63
|
}
|
|
31
64
|
if (!result.trim()) {
|
|
32
65
|
throw new Error('Enhancement returned an empty result — the model may be rate-limited or temporarily unavailable.');
|
|
33
66
|
}
|
|
67
|
+
logWithLevel(1, 'Enhancement request completed.', {
|
|
68
|
+
durationMs: Date.now() - startedAt,
|
|
69
|
+
outputLength: result.length,
|
|
70
|
+
chunkCount,
|
|
71
|
+
});
|
|
72
|
+
if (outputPath) {
|
|
73
|
+
traceLog('Writing enhancement output to file', {
|
|
74
|
+
outputPath,
|
|
75
|
+
outputFormat,
|
|
76
|
+
});
|
|
77
|
+
const outputFile = await writeEnhancementOutputFile(outputPath, {
|
|
78
|
+
originalPrompt: prompt,
|
|
79
|
+
enhancedPrompt: result,
|
|
80
|
+
provider: providerName,
|
|
81
|
+
model: modelName,
|
|
82
|
+
timestamp: new Date().toISOString(),
|
|
83
|
+
durationMs: Date.now() - startedAt,
|
|
84
|
+
}, outputFormat);
|
|
85
|
+
setOutputFileMessage(`Output written to ${outputFile.path} (${outputFile.format})`);
|
|
86
|
+
logWithLevel(2, 'Output file written successfully.', outputFile);
|
|
87
|
+
}
|
|
34
88
|
setStatus('complete');
|
|
35
89
|
}
|
|
36
90
|
catch (err) {
|
|
37
91
|
setStatus('error');
|
|
38
|
-
|
|
92
|
+
const errorText = formatErrorDetails(err);
|
|
93
|
+
setError(errorText);
|
|
94
|
+
if (debug) {
|
|
95
|
+
debugLog('Headless enhancement failed with detailed error', {
|
|
96
|
+
error: errorText,
|
|
97
|
+
promptLength: prompt.length,
|
|
98
|
+
outputPath: outputPath ?? null,
|
|
99
|
+
});
|
|
100
|
+
}
|
|
39
101
|
}
|
|
40
102
|
})();
|
|
41
|
-
}, [prompt]);
|
|
103
|
+
}, [debug, outputFormat, outputPath, prompt, trace, verbose]);
|
|
42
104
|
if (status === 'error') {
|
|
43
105
|
return (_jsx(Box, { flexDirection: "column", marginY: 1, children: _jsxs(Text, { color: "red", children: ["Error: ", error] }) }));
|
|
44
106
|
}
|
|
@@ -48,5 +110,5 @@ export default function DirectEnhance({ prompt }) {
|
|
|
48
110
|
if (status === 'enhancing') {
|
|
49
111
|
return (_jsxs(Box, { flexDirection: "column", marginY: 1, children: [_jsx(Text, { bold: true, color: "cyan", children: "Enhancing prompt..." }), _jsx(Text, { children: output })] }));
|
|
50
112
|
}
|
|
51
|
-
return (_jsxs(Box, { flexDirection: "column", marginY: 1, children: [_jsx(Text, { bold: true, color: "green", children: "Enhanced prompt:" }), _jsx(Text, { children: output })] }));
|
|
113
|
+
return (_jsxs(Box, { flexDirection: "column", marginY: 1, children: [_jsx(Text, { bold: true, color: "green", children: "Enhanced prompt:" }), _jsx(Text, { children: output }), outputFileMessage && _jsx(Text, { color: "cyan", children: outputFileMessage })] }));
|
|
52
114
|
}
|
package/dist/commands/help.js
CHANGED
|
@@ -1,6 +1,5 @@
|
|
|
1
1
|
import { jsx as _jsx, jsxs as _jsxs } from "react/jsx-runtime";
|
|
2
|
-
// import React from 'react';
|
|
3
2
|
import { Text, Box } from 'ink';
|
|
4
3
|
export default function Help() {
|
|
5
|
-
return (_jsxs(Box, { flexDirection: "column", padding: 1, children: [_jsx(Text, { bold: true, children: "
|
|
4
|
+
return (_jsxs(Box, { flexDirection: "column", padding: 1, children: [_jsx(Text, { bold: true, color: "cyan", children: "Prompt Enhancer \u2014 Comprehensive Help" }), _jsx(Text, { children: "Enhance prompts in interactive TUI mode or headless CLI mode." }), _jsx(Text, {}), _jsx(Text, { bold: true, children: "Usage" }), _jsx(Text, { children: " prompt-enhancer" }), _jsx(Text, { children: " prompt-enhancer --prompt \"Your prompt text\"" }), _jsx(Text, { children: " prompt-enhancer enhance \"Your prompt text\"" }), _jsx(Text, {}), _jsx(Text, { bold: true, children: "Subcommands" }), _jsx(Text, { children: " help Show this help output" }), _jsx(Text, { children: " about Show package and repository information" }), _jsx(Text, { children: " version Show CLI name, description, and version" }), _jsx(Text, { children: " enhance Run headless enhancement from positional text" }), _jsx(Text, {}), _jsx(Text, { bold: true, children: "Options (headless and global)" }), _jsxs(Text, { children: [' ', "-p, --prompt <text> Prompt to enhance (headless mode) [default: undefined]"] }), _jsxs(Text, { children: [' ', "-o, --output <path> Write enhancement output to file [default: undefined]"] }), _jsxs(Text, { children: [' ', "--output-format <fmt> Output file format: auto|txt|json|md [default: auto]"] }), _jsxs(Text, { children: [' ', "--verbose <level> Output verbosity level 1-3 [default: 1]"] }), _jsxs(Text, { children: [' ', "--debug Enable debug diagnostics, API payload logs, and detailed error details [default: false]"] }), _jsx(Text, { children: " --trace Enable step-by-step execution trace [default: false]" }), _jsx(Text, { children: " --about Show package metadata [default: false]" }), _jsx(Text, { children: " -v, --version Show version info [default: false]" }), _jsx(Text, { children: " --help Show this help output [default: false]" }), _jsx(Text, {}), _jsx(Text, { bold: true, children: "Verbose Levels" }), _jsx(Text, { children: " 1 Minimal runtime information (default)" }), 
_jsx(Text, { children: " 2 Lifecycle progress and major checkpoints" }), _jsx(Text, { children: " 3 Chunk-level streaming diagnostics and deep detail" }), _jsx(Text, {}), _jsx(Text, { bold: true, children: "Debug and Trace Behavior" }), _jsxs(Text, { children: [' ', "--debug automatically enables debug diagnostics and raw provider request/response payload logging."] }), _jsxs(Text, { children: [' ', "--trace enables step-by-step execution logs and implies highest runtime detail during execution."] }), _jsx(Text, {}), _jsx(Text, { bold: true, children: "Common Workflows" }), _jsx(Text, { children: " Interactive mode (menu):" }), _jsx(Text, { children: " prompt-enhancer" }), _jsx(Text, { children: " Basic headless run:" }), _jsx(Text, { children: " prompt-enhancer --prompt \"Write a better API README\"" }), _jsx(Text, { children: " Save output as JSON:" }), _jsxs(Text, { children: [' ', "prompt-enhancer -p \"Improve this prompt\" -o .\\output\\result.json --output-format json"] }), _jsx(Text, { children: " Full diagnostics with tracing:" }), _jsxs(Text, { children: [' ', "prompt-enhancer enhance \"Create a test plan\" --debug --trace --verbose 3"] }), _jsx(Text, { children: " Markdown output (format inferred from extension):" }), _jsxs(Text, { children: [' ', "prompt-enhancer -p \"Draft a deployment checklist\" -o .\\out\\checklist.md"] }), _jsx(Text, {}), _jsx(Text, { bold: true, children: "Troubleshooting" }), _jsxs(Text, { children: [' ', "- Empty enhancement output: try a different model/provider or rerun with --debug."] }), _jsxs(Text, { children: [' ', "- API/auth failures: verify provider keys in Settings (interactive mode) or config file."] }), _jsxs(Text, { children: [' ', "- Output write failures: ensure target directory is writable and format is valid."] }), _jsxs(Text, { children: [' ', "- Verbose level errors: use only --verbose 1, --verbose 2, or --verbose 3."] }), _jsx(Text, {}), _jsx(Text, { color: "gray", children: "Tip: use interactive mode for 
iterative prompt refinement and provider configuration." })] }));
|
|
6
5
|
}
|
|
@@ -7,6 +7,7 @@
|
|
|
7
7
|
import { OpenAI } from 'openai';
|
|
8
8
|
import { Provider } from './base.js';
|
|
9
9
|
import { getCopilotToken } from '../utils/copilot-auth.js';
|
|
10
|
+
import { debugLog, logWithLevel } from '../utils/runtime-logging.js';
|
|
10
11
|
export class CopilotProvider extends Provider {
|
|
11
12
|
client;
|
|
12
13
|
initialized = false;
|
|
@@ -39,6 +40,21 @@ export class CopilotProvider extends Provider {
|
|
|
39
40
|
await this.ensureInitialized();
|
|
40
41
|
const systemPrompt = options?.systemPrompt ||
|
|
41
42
|
'You are an expert at enhancing and improving user prompts for LLMs. Analyze the given prompt and return an improved version that is clearer, more specific, and more likely to produce better results. Return ONLY the enhanced prompt, no explanations.';
|
|
43
|
+
debugLog('Copilot API request payload', {
|
|
44
|
+
model: options?.model || this.defaultModel,
|
|
45
|
+
temperature: options?.temperature || 0.7,
|
|
46
|
+
max_tokens: options?.maxTokens || 1000,
|
|
47
|
+
messages: [
|
|
48
|
+
{
|
|
49
|
+
role: 'system',
|
|
50
|
+
content: systemPrompt,
|
|
51
|
+
},
|
|
52
|
+
{
|
|
53
|
+
role: 'user',
|
|
54
|
+
content: `Original prompt:\n${prompt}`,
|
|
55
|
+
},
|
|
56
|
+
],
|
|
57
|
+
});
|
|
42
58
|
const response = await this.client.chat.completions.create({
|
|
43
59
|
messages: [
|
|
44
60
|
{
|
|
@@ -54,6 +70,7 @@ export class CopilotProvider extends Provider {
|
|
|
54
70
|
temperature: options?.temperature || 0.7,
|
|
55
71
|
max_tokens: options?.maxTokens || 1000,
|
|
56
72
|
});
|
|
73
|
+
debugLog('Copilot API response payload', response);
|
|
57
74
|
const content = response.choices[0]?.message?.content;
|
|
58
75
|
if (!content) {
|
|
59
76
|
throw new Error('No response received from Copilot');
|
|
@@ -64,6 +81,22 @@ export class CopilotProvider extends Provider {
|
|
|
64
81
|
await this.ensureInitialized();
|
|
65
82
|
const systemPrompt = options?.systemPrompt ||
|
|
66
83
|
'You are an expert at enhancing and improving user prompts for LLMs. Analyze the given prompt and return an improved version that is clearer, more specific, and more likely to produce better results. Return ONLY the enhanced prompt, no explanations.';
|
|
84
|
+
debugLog('Copilot stream API request payload', {
|
|
85
|
+
messages: [
|
|
86
|
+
{
|
|
87
|
+
role: 'system',
|
|
88
|
+
content: systemPrompt,
|
|
89
|
+
},
|
|
90
|
+
{
|
|
91
|
+
role: 'user',
|
|
92
|
+
content: `Original prompt:\n${prompt}`,
|
|
93
|
+
},
|
|
94
|
+
],
|
|
95
|
+
model: options?.model || this.defaultModel,
|
|
96
|
+
temperature: options?.temperature || 0.7,
|
|
97
|
+
max_tokens: options?.maxTokens || 1000,
|
|
98
|
+
stream: true,
|
|
99
|
+
});
|
|
67
100
|
const stream = await this.client.chat.completions.create({
|
|
68
101
|
messages: [
|
|
69
102
|
{
|
|
@@ -80,7 +113,12 @@ export class CopilotProvider extends Provider {
|
|
|
80
113
|
max_tokens: options?.maxTokens || 1000,
|
|
81
114
|
stream: true,
|
|
82
115
|
});
|
|
116
|
+
let chunkCount = 0;
|
|
83
117
|
for await (const event of stream) {
|
|
118
|
+
chunkCount++;
|
|
119
|
+
logWithLevel(3, `Copilot stream event #${chunkCount}`, {
|
|
120
|
+
hasChoices: Boolean(event.choices?.length),
|
|
121
|
+
});
|
|
84
122
|
const content = event.choices[0]?.delta?.content;
|
|
85
123
|
if (content) {
|
|
86
124
|
yield content;
|
|
@@ -3,6 +3,7 @@
|
|
|
3
3
|
*/
|
|
4
4
|
import { GoogleGenerativeAI } from '@google/generative-ai';
|
|
5
5
|
import { Provider } from './base.js';
|
|
6
|
+
import { debugLog, logWithLevel } from '../utils/runtime-logging.js';
|
|
6
7
|
export class GeminiProvider extends Provider {
|
|
7
8
|
client;
|
|
8
9
|
constructor(credentials) {
|
|
@@ -18,7 +19,7 @@ export class GeminiProvider extends Provider {
|
|
|
18
19
|
});
|
|
19
20
|
const systemPrompt = options?.systemPrompt ||
|
|
20
21
|
'You are an expert at enhancing and improving user prompts for LLMs. Analyze the given prompt and return an improved version that is clearer, more specific, and more likely to produce better results. Return ONLY the enhanced prompt, no explanations.';
|
|
21
|
-
const
|
|
22
|
+
const payload = {
|
|
22
23
|
contents: [
|
|
23
24
|
{
|
|
24
25
|
role: 'user',
|
|
@@ -29,8 +30,19 @@ export class GeminiProvider extends Provider {
|
|
|
29
30
|
],
|
|
30
31
|
},
|
|
31
32
|
],
|
|
33
|
+
};
|
|
34
|
+
debugLog('Gemini API request payload', {
|
|
35
|
+
model: options?.model || this.defaultModel,
|
|
36
|
+
temperature: options?.temperature,
|
|
37
|
+
maxTokens: options?.maxTokens,
|
|
38
|
+
payload,
|
|
32
39
|
});
|
|
40
|
+
const result = await model.generateContent(payload);
|
|
33
41
|
const response = result.response;
|
|
42
|
+
debugLog('Gemini API response metadata', {
|
|
43
|
+
candidates: response.candidates?.length ?? 0,
|
|
44
|
+
usageMetadata: response.usageMetadata ?? null,
|
|
45
|
+
});
|
|
34
46
|
const text = response.text();
|
|
35
47
|
if (!text) {
|
|
36
48
|
throw new Error('No response received from Gemini');
|
|
@@ -43,7 +55,7 @@ export class GeminiProvider extends Provider {
|
|
|
43
55
|
});
|
|
44
56
|
const systemPrompt = options?.systemPrompt ||
|
|
45
57
|
'You are an expert at enhancing and improving user prompts for LLMs. Analyze the given prompt and return an improved version that is clearer, more specific, and more likely to produce better results. Return ONLY the enhanced prompt, no explanations.';
|
|
46
|
-
const
|
|
58
|
+
const streamPayload = {
|
|
47
59
|
contents: [
|
|
48
60
|
{
|
|
49
61
|
role: 'user',
|
|
@@ -54,10 +66,20 @@ export class GeminiProvider extends Provider {
|
|
|
54
66
|
],
|
|
55
67
|
},
|
|
56
68
|
],
|
|
69
|
+
};
|
|
70
|
+
debugLog('Gemini stream request payload', {
|
|
71
|
+
model: options?.model || this.defaultModel,
|
|
72
|
+
streamPayload,
|
|
57
73
|
});
|
|
74
|
+
const result = await model.generateContentStream(streamPayload);
|
|
75
|
+
let chunkCount = 0;
|
|
58
76
|
for await (const chunk of result.stream) {
|
|
77
|
+
chunkCount++;
|
|
59
78
|
const text = chunk.text();
|
|
60
79
|
if (text) {
|
|
80
|
+
logWithLevel(3, `Gemini stream chunk #${chunkCount}`, {
|
|
81
|
+
length: text.length,
|
|
82
|
+
});
|
|
61
83
|
yield text;
|
|
62
84
|
}
|
|
63
85
|
}
|
|
@@ -4,6 +4,7 @@
|
|
|
4
4
|
* API: https://kilo.ai/ — free models still require an API key from https://app.kilo.ai
|
|
5
5
|
*/
|
|
6
6
|
import { Provider } from './base.js';
|
|
7
|
+
import { debugLog, logWithLevel, redactAuthorizationHeaders, } from '../utils/runtime-logging.js';
|
|
7
8
|
export const KILO_MODELS = [
|
|
8
9
|
{
|
|
9
10
|
id: 'openrouter/free',
|
|
@@ -75,11 +76,25 @@ export class KiloProvider extends Provider {
|
|
|
75
76
|
max_tokens: options?.maxTokens ?? 1000,
|
|
76
77
|
stream: false,
|
|
77
78
|
};
|
|
79
|
+
const headers = this.buildHeaders();
|
|
80
|
+
debugLog('Kilo API request payload', {
|
|
81
|
+
endpoint: this.endpoint,
|
|
82
|
+
headers: redactAuthorizationHeaders(headers),
|
|
83
|
+
body,
|
|
84
|
+
});
|
|
78
85
|
const response = await fetch(this.endpoint, {
|
|
79
86
|
method: 'POST',
|
|
80
|
-
headers
|
|
87
|
+
headers,
|
|
81
88
|
body: JSON.stringify(body),
|
|
82
89
|
});
|
|
90
|
+
debugLog('Kilo API response status', {
|
|
91
|
+
status: response.status,
|
|
92
|
+
statusText: response.statusText,
|
|
93
|
+
});
|
|
94
|
+
if (response.ok) {
|
|
95
|
+
const rawResponse = await response.clone().text();
|
|
96
|
+
debugLog('Kilo API raw response body', rawResponse);
|
|
97
|
+
}
|
|
83
98
|
if (!response.ok) {
|
|
84
99
|
await throwOnError(response);
|
|
85
100
|
}
|
|
@@ -100,11 +115,21 @@ export class KiloProvider extends Provider {
|
|
|
100
115
|
max_tokens: options?.maxTokens ?? 1000,
|
|
101
116
|
stream: true,
|
|
102
117
|
};
|
|
118
|
+
const headers = this.buildHeaders();
|
|
119
|
+
debugLog('Kilo stream request payload', {
|
|
120
|
+
endpoint: this.endpoint,
|
|
121
|
+
headers: redactAuthorizationHeaders(headers),
|
|
122
|
+
body,
|
|
123
|
+
});
|
|
103
124
|
const response = await fetch(this.endpoint, {
|
|
104
125
|
method: 'POST',
|
|
105
|
-
headers
|
|
126
|
+
headers,
|
|
106
127
|
body: JSON.stringify(body),
|
|
107
128
|
});
|
|
129
|
+
debugLog('Kilo stream response status', {
|
|
130
|
+
status: response.status,
|
|
131
|
+
statusText: response.statusText,
|
|
132
|
+
});
|
|
108
133
|
if (!response.ok) {
|
|
109
134
|
await throwOnError(response);
|
|
110
135
|
}
|
|
@@ -113,6 +138,7 @@ export class KiloProvider extends Provider {
|
|
|
113
138
|
const reader = response.body.getReader();
|
|
114
139
|
const decoder = new TextDecoder();
|
|
115
140
|
let buffer = '';
|
|
141
|
+
let eventCount = 0;
|
|
116
142
|
try {
|
|
117
143
|
while (true) {
|
|
118
144
|
const { done, value } = await reader.read();
|
|
@@ -127,6 +153,8 @@ export class KiloProvider extends Provider {
|
|
|
127
153
|
if (data === '[DONE]')
|
|
128
154
|
return;
|
|
129
155
|
try {
|
|
156
|
+
eventCount++;
|
|
157
|
+
logWithLevel(3, `Kilo stream event #${eventCount}`, data);
|
|
130
158
|
const parsed = JSON.parse(data);
|
|
131
159
|
if (parsed.error) {
|
|
132
160
|
const msg = parsed.error.message ?? JSON.stringify(parsed.error);
|
|
@@ -5,6 +5,7 @@
|
|
|
5
5
|
* Chat completions: https://opencode.ai/zen/v1/chat/completions
|
|
6
6
|
*/
|
|
7
7
|
import { Provider } from './base.js';
|
|
8
|
+
import { debugLog, logWithLevel, redactAuthorizationHeaders, } from '../utils/runtime-logging.js';
|
|
8
9
|
export const OPENCODE_BASE_URL = 'https://opencode.ai/zen/v1';
|
|
9
10
|
export const OPENCODE_MODELS_ENDPOINT = `${OPENCODE_BASE_URL}/models`;
|
|
10
11
|
export const OPENCODE_CHAT_ENDPOINT = `${OPENCODE_BASE_URL}/chat/completions`;
|
|
@@ -71,11 +72,25 @@ export class OpenCodeProvider extends Provider {
|
|
|
71
72
|
max_tokens: options?.maxTokens ?? 1000,
|
|
72
73
|
stream: false,
|
|
73
74
|
};
|
|
75
|
+
const headers = this.buildHeaders();
|
|
76
|
+
debugLog('OpenCode API request payload', {
|
|
77
|
+
endpoint: this.chatEndpoint,
|
|
78
|
+
headers: redactAuthorizationHeaders(headers),
|
|
79
|
+
body,
|
|
80
|
+
});
|
|
74
81
|
const response = await fetch(this.chatEndpoint, {
|
|
75
82
|
method: 'POST',
|
|
76
|
-
headers
|
|
83
|
+
headers,
|
|
77
84
|
body: JSON.stringify(body),
|
|
78
85
|
});
|
|
86
|
+
debugLog('OpenCode API response status', {
|
|
87
|
+
status: response.status,
|
|
88
|
+
statusText: response.statusText,
|
|
89
|
+
});
|
|
90
|
+
if (response.ok) {
|
|
91
|
+
const rawResponse = await response.clone().text();
|
|
92
|
+
debugLog('OpenCode raw response body', rawResponse);
|
|
93
|
+
}
|
|
79
94
|
if (!response.ok) {
|
|
80
95
|
await throwOnError(response);
|
|
81
96
|
}
|
|
@@ -96,11 +111,21 @@ export class OpenCodeProvider extends Provider {
|
|
|
96
111
|
max_tokens: options?.maxTokens ?? 1000,
|
|
97
112
|
stream: true,
|
|
98
113
|
};
|
|
114
|
+
const headers = this.buildHeaders();
|
|
115
|
+
debugLog('OpenCode stream request payload', {
|
|
116
|
+
endpoint: this.chatEndpoint,
|
|
117
|
+
headers: redactAuthorizationHeaders(headers),
|
|
118
|
+
body,
|
|
119
|
+
});
|
|
99
120
|
const response = await fetch(this.chatEndpoint, {
|
|
100
121
|
method: 'POST',
|
|
101
|
-
headers
|
|
122
|
+
headers,
|
|
102
123
|
body: JSON.stringify(body),
|
|
103
124
|
});
|
|
125
|
+
debugLog('OpenCode stream response status', {
|
|
126
|
+
status: response.status,
|
|
127
|
+
statusText: response.statusText,
|
|
128
|
+
});
|
|
104
129
|
if (!response.ok) {
|
|
105
130
|
await throwOnError(response);
|
|
106
131
|
}
|
|
@@ -109,6 +134,7 @@ export class OpenCodeProvider extends Provider {
|
|
|
109
134
|
const reader = response.body.getReader();
|
|
110
135
|
const decoder = new TextDecoder();
|
|
111
136
|
let buffer = '';
|
|
137
|
+
let eventCount = 0;
|
|
112
138
|
try {
|
|
113
139
|
while (true) {
|
|
114
140
|
const { done, value } = await reader.read();
|
|
@@ -123,6 +149,8 @@ export class OpenCodeProvider extends Provider {
|
|
|
123
149
|
if (data === '[DONE]')
|
|
124
150
|
return;
|
|
125
151
|
try {
|
|
152
|
+
eventCount++;
|
|
153
|
+
logWithLevel(3, `OpenCode stream event #${eventCount}`, data);
|
|
126
154
|
const parsed = JSON.parse(data);
|
|
127
155
|
if (parsed.error) {
|
|
128
156
|
const msg = parsed.error.message ?? JSON.stringify(parsed.error);
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
export type OutputFormat = 'txt' | 'json' | 'md';
|
|
2
|
+
export interface EnhancementOutputPayload {
|
|
3
|
+
originalPrompt: string;
|
|
4
|
+
enhancedPrompt: string;
|
|
5
|
+
provider: string;
|
|
6
|
+
model: string;
|
|
7
|
+
timestamp: string;
|
|
8
|
+
durationMs: number;
|
|
9
|
+
}
|
|
10
|
+
export declare function writeEnhancementOutputFile(path: string, payload: EnhancementOutputPayload, format?: string): Promise<{
|
|
11
|
+
path: string;
|
|
12
|
+
format: OutputFormat;
|
|
13
|
+
}>;
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
import { mkdir, writeFile } from 'node:fs/promises';
|
|
2
|
+
import { dirname, extname } from 'node:path';
|
|
3
|
+
function inferOutputFormat(path) {
|
|
4
|
+
const extension = extname(path).toLowerCase();
|
|
5
|
+
if (extension === '.json')
|
|
6
|
+
return 'json';
|
|
7
|
+
if (extension === '.md' || extension === '.markdown')
|
|
8
|
+
return 'md';
|
|
9
|
+
return 'txt';
|
|
10
|
+
}
|
|
11
|
+
function resolveOutputFormat(path, format) {
|
|
12
|
+
if (!format || format === 'auto') {
|
|
13
|
+
return inferOutputFormat(path);
|
|
14
|
+
}
|
|
15
|
+
if (format === 'txt' || format === 'json' || format === 'md') {
|
|
16
|
+
return format;
|
|
17
|
+
}
|
|
18
|
+
throw new Error(`Unsupported output format "${format}". Supported formats: txt, json, md, auto.`);
|
|
19
|
+
}
|
|
20
|
+
function toText(payload) {
|
|
21
|
+
return `Prompt Enhancer Output
|
|
22
|
+
Timestamp: ${payload.timestamp}
|
|
23
|
+
Provider: ${payload.provider}
|
|
24
|
+
Model: ${payload.model}
|
|
25
|
+
Duration: ${payload.durationMs} ms
|
|
26
|
+
|
|
27
|
+
Original Prompt:
|
|
28
|
+
${payload.originalPrompt}
|
|
29
|
+
|
|
30
|
+
Enhanced Prompt:
|
|
31
|
+
${payload.enhancedPrompt}
|
|
32
|
+
`;
|
|
33
|
+
}
|
|
34
|
+
function toMarkdown(payload) {
|
|
35
|
+
return `## Prompt Enhancer Output
|
|
36
|
+
|
|
37
|
+
- **Timestamp:** ${payload.timestamp}
|
|
38
|
+
- **Provider:** ${payload.provider}
|
|
39
|
+
- **Model:** ${payload.model}
|
|
40
|
+
- **Duration:** ${payload.durationMs} ms
|
|
41
|
+
|
|
42
|
+
### Original Prompt
|
|
43
|
+
|
|
44
|
+
\`\`\`text
|
|
45
|
+
${payload.originalPrompt}
|
|
46
|
+
\`\`\`
|
|
47
|
+
|
|
48
|
+
### Enhanced Prompt
|
|
49
|
+
|
|
50
|
+
\`\`\`text
|
|
51
|
+
${payload.enhancedPrompt}
|
|
52
|
+
\`\`\`
|
|
53
|
+
`;
|
|
54
|
+
}
|
|
55
|
+
export async function writeEnhancementOutputFile(path, payload, format) {
|
|
56
|
+
const outputFormat = resolveOutputFormat(path, format);
|
|
57
|
+
await mkdir(dirname(path), { recursive: true });
|
|
58
|
+
let content = '';
|
|
59
|
+
if (outputFormat === 'json') {
|
|
60
|
+
content = JSON.stringify(payload, null, 2);
|
|
61
|
+
}
|
|
62
|
+
else if (outputFormat === 'md') {
|
|
63
|
+
content = toMarkdown(payload);
|
|
64
|
+
}
|
|
65
|
+
else {
|
|
66
|
+
content = toText(payload);
|
|
67
|
+
}
|
|
68
|
+
await writeFile(path, content, 'utf8');
|
|
69
|
+
return { path, format: outputFormat };
|
|
70
|
+
}
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
/** Verbosity level accepted by the CLI; 1 is the quiet default, 3 the loudest. */
export type VerboseLevel = 1 | 2 | 3;
/** Snapshot of the process-wide runtime logging flags. */
export interface RuntimeLoggingOptions {
    /** When true, debugLog output and error stack traces are emitted. */
    debug: boolean;
    /** When true, traceLog step-by-step output is emitted. */
    trace: boolean;
    /** Minimum level at which logWithLevel messages are emitted. */
    verbose: VerboseLevel;
}
/** Coerce an arbitrary numeric flag value into a valid VerboseLevel (defaults to 1). */
export declare function normalizeVerboseLevel(value: number | undefined): VerboseLevel;
/** Apply the provided flags to the module-level logging state; returns the merged result. */
export declare function configureRuntimeLogging(options: Partial<RuntimeLoggingOptions>): RuntimeLoggingOptions;
/** Return a copy of the current module-level logging state. */
export declare function getRuntimeLoggingOptions(): RuntimeLoggingOptions;
/** Write `message` (and optional serialized `details`) to stderr if `level` <= configured verbosity. */
export declare function logWithLevel(level: number, message: string, details?: unknown): void;
/** Write a `[debug]` line to stderr; no-op unless debug mode is enabled. */
export declare function debugLog(message: string, details?: unknown): void;
/** Write a `[trace]` step line to stderr; no-op unless trace mode is enabled. */
export declare function traceLog(step: string, details?: unknown): void;
/** Format an unknown error value for display (stack trace only in debug mode). */
export declare function formatErrorDetails(error: unknown): string;
/** JSON-stringify a value, falling back to String() when serialization fails. */
export declare function safeSerialize(value: unknown): string;
/** Return a copy of `headers` with any Authorization value masked for logging. */
export declare function redactAuthorizationHeaders(headers: Record<string, string>): Record<string, string>;
/** Print a one-line `[runtime]` banner (command, flags, Node version) to stderr. */
export declare function writeRuntimeBanner(command: string): void;
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import process from 'node:process';
|
|
2
|
+
// Mutable module-level logging state, written by configureRuntimeLogging()
// and read by the log helpers below. Defaults: debug and trace off,
// verbosity at its quietest level (1).
const runtimeLoggingOptions = {
    debug: false,
    trace: false,
    verbose: 1,
};
|
|
7
|
+
// Clamp a numeric level into the supported 1..3 range; anything strictly
// between the bounds maps to 2. (Kept as comparisons rather than
// Math.min/max so non-finite inputs keep their original mapping.)
function clampVerboseLevel(value) {
    if (value <= 1) {
        return 1;
    }
    return value >= 3 ? 3 : 2;
}
|
|
14
|
+
// Coerce an arbitrary --verbose flag value to a valid level: non-numbers
// and NaN fall back to 1; fractional values are floored before clamping.
export function normalizeVerboseLevel(value) {
    const isUsable = typeof value === 'number' && !Number.isNaN(value);
    return isUsable ? clampVerboseLevel(Math.floor(value)) : 1;
}
|
|
20
|
+
// Merge only the explicitly-provided, correctly-typed fields into the
// module-level logging state; returns a snapshot of the merged result.
export function configureRuntimeLogging(options) {
    const { debug, trace, verbose } = options;
    if (typeof debug === 'boolean') {
        runtimeLoggingOptions.debug = debug;
    }
    if (typeof trace === 'boolean') {
        runtimeLoggingOptions.trace = trace;
    }
    if (typeof verbose === 'number') {
        runtimeLoggingOptions.verbose = normalizeVerboseLevel(verbose);
    }
    return { ...runtimeLoggingOptions };
}
|
|
32
|
+
// Return a shallow copy of the current logging state so callers cannot
// mutate the module-level configuration through the returned object.
export function getRuntimeLoggingOptions() {
    return Object.assign({}, runtimeLoggingOptions);
}
|
|
35
|
+
// Emit a verbosity-gated message (and optional serialized details) to stderr.
// Fix: the original duplicated the `console.error(message)` call in both the
// details-present and details-absent branches; emit it once and append the
// serialized details only when they were supplied.
export function logWithLevel(level, message, details) {
    if (runtimeLoggingOptions.verbose < level) {
        return;
    }
    console.error(`[verbose:${level}] ${message}`);
    if (details !== undefined) {
        console.error(safeSerialize(details));
    }
}
|
|
45
|
+
// Emit a `[debug]` message (and optional serialized details) to stderr;
// no-op unless debug mode is enabled.
// Fix: the original duplicated the `console.error(message)` call in both
// branches; emit it once and append details only when supplied.
export function debugLog(message, details) {
    if (!runtimeLoggingOptions.debug) {
        return;
    }
    console.error(`[debug] ${message}`);
    if (details !== undefined) {
        console.error(safeSerialize(details));
    }
}
|
|
55
|
+
// Emit a `[trace]` step line (and optional serialized details) to stderr;
// no-op unless trace mode is enabled.
// Fix: the original duplicated the `console.error(step)` call in both
// branches; emit it once and append details only when supplied.
export function traceLog(step, details) {
    if (!runtimeLoggingOptions.trace) {
        return;
    }
    console.error(`[trace] ${step}`);
    if (details !== undefined) {
        console.error(safeSerialize(details));
    }
}
|
|
65
|
+
// Produce a display string for an unknown caught value. Non-Error values
// are stringified as-is; Errors expose the full stack only in debug mode,
// otherwise just the message.
export function formatErrorDetails(error) {
    if (!(error instanceof Error)) {
        return String(error);
    }
    const showStack = runtimeLoggingOptions.debug && error.stack;
    return showStack ? error.stack : error.message;
}
|
|
74
|
+
// Serialize any value for log output, always returning a string.
// Fix: JSON.stringify returns `undefined` (not a string) for `undefined`,
// functions, and symbols, so the original could hand `undefined` to
// console.error despite the declared `string` return type; it also throws
// on circular structures and BigInt. Fall back to String() in both cases.
export function safeSerialize(value) {
    try {
        const json = JSON.stringify(value, null, 2);
        return json !== undefined ? json : String(value);
    }
    catch {
        return String(value);
    }
}
|
|
82
|
+
// Return a shallow copy of `headers` with every Authorization header
// (matched case-insensitively) replaced by a masked placeholder so
// credentials never reach the logs. All other entries pass through.
export function redactAuthorizationHeaders(headers) {
    const entries = Object.entries(headers).map(([name, value]) => [
        name,
        name.toLowerCase() === 'authorization' ? 'Bearer ***redacted***' : value,
    ]);
    return Object.fromEntries(entries);
}
|
|
91
|
+
// Print a single `[runtime]` banner line to stderr summarizing the command
// being run, the current logging flags, and the Node.js version.
export function writeRuntimeBanner(command) {
    const { debug, trace, verbose } = getRuntimeLoggingOptions();
    const flags = `debug=${debug} trace=${trace} verbose=${verbose}`;
    console.error(`[runtime] ${command} | ${flags} node=${process.version}`);
}
|
package/dist/prompt-enhancer.exe
CHANGED
|
Binary file
|
package/package.json
CHANGED
package/readme.md
CHANGED
|
@@ -180,15 +180,31 @@ $ prompt-enhancer --help
|
|
|
180
180
|
|
|
181
181
|
Usage
|
|
182
182
|
$ prompt-enhancer
|
|
183
|
+
$ prompt-enhancer --prompt "your prompt"
|
|
184
|
+
$ prompt-enhancer enhance "your prompt"
|
|
183
185
|
|
|
184
186
|
Options
|
|
185
|
-
--prompt, -p
|
|
186
|
-
--
|
|
187
|
-
--
|
|
187
|
+
--prompt, -p Prompt text for headless mode
|
|
188
|
+
--output, -o Write output to a file
|
|
189
|
+
--output-format Output format: auto | txt | json | md (default: auto)
|
|
190
|
+
--verbose Verbosity level: 1 | 2 | 3 (default: 1)
|
|
191
|
+
--debug Enable debug diagnostics + API request/response logs
|
|
192
|
+
--trace Enable step-by-step execution trace
|
|
193
|
+
--about Show package metadata
|
|
194
|
+
--version, -v Show version
|
|
195
|
+
--help, -h Show help
|
|
196
|
+
|
|
197
|
+
Subcommands
|
|
198
|
+
help Show comprehensive help
|
|
199
|
+
about Show package metadata
|
|
200
|
+
version Show version details
|
|
201
|
+
enhance Run headless enhancement using positional prompt text
|
|
188
202
|
|
|
189
203
|
Examples
|
|
190
204
|
$ prompt-enhancer
|
|
191
205
|
$ prompt-enhancer -p "build a cli to enhance prompts"
|
|
206
|
+
$ prompt-enhancer -p "summarize this architecture" -o .\out\result.json --output-format json
|
|
207
|
+
$ prompt-enhancer enhance "design API retries" --debug --trace --verbose 3
|
|
192
208
|
$ prompt-enhancer --help
|
|
193
209
|
```
|
|
194
210
|
|