commit-analyzer 1.0.2 → 1.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/package.json +1 -1
  2. package/src/cli.ts +1 -1
  3. package/src/llm.ts +18 -3
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "commit-analyzer",
-  "version": "1.0.2",
+  "version": "1.0.3",
   "description": "Analyze git commits and generate categories, summaries, and descriptions for each commit. Optionally generate a yearly breakdown report of your commit history.",
   "main": "dist/index.js",
   "bin": {
package/src/cli.ts CHANGED
@@ -27,7 +27,7 @@ export class CLIService {
       .description(
         "Analyze user authored git commits and generate rich commit descriptions and stakeholder reports from them.",
       )
-      .version("1.0.2")
+      .version("1.0.3")
       .option("-o, --output <file>", "Output CSV file (default: commits.csv)")
       .option(
         "--output-dir <dir>",
package/src/llm.ts CHANGED
@@ -60,6 +60,20 @@ export class LLMService {
     }
     return this.model
   }
+
+  /**
+   * Get the model command with appropriate flags.
+   */
+  static getModelCommand(): string {
+    const model = this.getModel()
+
+    // Append -q flag for codex model
+    if (model === 'codex') {
+      return 'codex -q'
+    }
+
+    return model
+  }
   private static readonly MAX_RETRIES = parseInt(
     process.env.LLM_MAX_RETRIES || "3",
     10,
@@ -87,6 +101,7 @@ export class LLMService {
 
   static async analyzeCommit(commit: CommitInfo): Promise<LLMAnalysis> {
     const currentModel = this.getModel()
+    const currentModelCommand = this.getModelCommand()
     const prompt = this.buildPrompt(commit.message, commit.diff, currentModel)
 
     // Log prompt length for debugging - only for Claude models
@@ -101,7 +116,7 @@ export class LLMService {
 
     for (let attempt = 1; attempt <= this.MAX_RETRIES; attempt++) {
       try {
-        const output = execSync(currentModel, {
+        const output = execSync(currentModelCommand, {
           input: prompt,
           encoding: "utf8",
           stdio: ["pipe", "pipe", "pipe"],
@@ -123,7 +138,7 @@ export class LLMService {
         // Show detailed error info only in verbose mode
         if (this.verbose) {
           console.log(` - Verbose error details for commit ${commit.hash.substring(0, 8)}:`)
-          console.log(` Command: ${currentModel}`)
+          console.log(` Command: ${currentModelCommand}`)
           console.log(` Error message: ${lastError.message}`)
           if (this.isClaudeModel(currentModel)) {
             console.log(` Prompt length: ${prompt.length} characters`)
@@ -150,7 +165,7 @@ export class LLMService {
         // For non-rate-limit errors, show detailed info based on verbose mode
         if (this.verbose) {
           console.log(` - Error details for commit ${commit.hash.substring(0, 8)}:`)
-          console.log(` Command: ${currentModel}`)
+          console.log(` Command: ${currentModelCommand}`)
           console.log(` Error message: ${lastError.message}`)
           if (this.isClaudeModel(currentModel)) {
             console.log(` Prompt length: ${prompt.length} characters`)
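
The functional change in 1.0.3 is confined to llm.ts: the prompt is now piped to the command string returned by getModelCommand() instead of the bare model name, so the codex CLI is invoked with its -q flag. A minimal standalone sketch of that behavior follows; the free-standing function, sample model name, and sample prompt below are illustrative only and not part of the package's API.

// Sketch of the 1.0.3 behavior, assuming getModel() resolves to a CLI name
// such as "claude" or "codex" (illustrative values, not from the diff).
import { execSync } from "child_process"

function getModelCommand(model: string): string {
  // 1.0.3 appends -q so the codex CLI runs in quiet mode;
  // every other model is still invoked with its bare command.
  return model === "codex" ? "codex -q" : model
}

// Before (1.0.2): execSync("codex", { input: prompt, ... })
// After  (1.0.3): execSync("codex -q", { input: prompt, ... })
const prompt = "Summarize this commit diff..." // placeholder prompt
const output = execSync(getModelCommand("codex"), {
  input: prompt,
  encoding: "utf8",
  stdio: ["pipe", "pipe", "pipe"],
})
console.log(output)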