@mate_tsaava/pr-review 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +21 -0
- package/README.md +109 -0
- package/dist/cli.d.ts +2 -0
- package/dist/cli.js +133 -0
- package/dist/commands/init.d.ts +1 -0
- package/dist/commands/init.js +44 -0
- package/dist/config.d.ts +84 -0
- package/dist/config.js +77 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +3 -0
- package/dist/llm/adapter.d.ts +13 -0
- package/dist/llm/adapter.js +1 -0
- package/dist/llm/azure-openai.d.ts +10 -0
- package/dist/llm/azure-openai.js +73 -0
- package/dist/llm/claude.d.ts +11 -0
- package/dist/llm/claude.js +187 -0
- package/dist/llm/index.d.ts +5 -0
- package/dist/llm/index.js +15 -0
- package/dist/llm/openai.d.ts +10 -0
- package/dist/llm/openai.js +73 -0
- package/dist/llm/types.d.ts +31 -0
- package/dist/llm/types.js +1 -0
- package/dist/reviewer.d.ts +29 -0
- package/dist/reviewer.js +142 -0
- package/dist/rules/clean-code-dotnet.md +3252 -0
- package/dist/rules/pr-review.md +232 -0
- package/dist/utils/batcher.d.ts +13 -0
- package/dist/utils/batcher.js +39 -0
- package/dist/utils/tokens.d.ts +8 -0
- package/dist/utils/tokens.js +11 -0
- package/package.json +54 -0
package/LICENSE.md
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) Microsoft Corporation.
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
# @mate_tsaava/pr-review
|
|
2
|
+
|
|
3
|
+
AI-powered PR review CLI for Azure DevOps.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install -g @mate_tsaava/pr-review
|
|
9
|
+
# or
|
|
10
|
+
npx @mate_tsaava/pr-review --help
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Setup
|
|
14
|
+
|
|
15
|
+
1. Set environment variables:
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
export AZURE_DEVOPS_PAT="your-personal-access-token"
|
|
19
|
+
export ANTHROPIC_API_KEY="sk-ant-..." # or OPENAI_API_KEY
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
2. (Optional) Create config file:
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
pr-review init
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
## Usage
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
# Basic usage
|
|
32
|
+
pr-review --org myorg --project MyProject --pr 123
|
|
33
|
+
|
|
34
|
+
# With specific LLM provider
|
|
35
|
+
pr-review --org myorg --project MyProject --pr 123 --provider openai --model gpt-4o
|
|
36
|
+
|
|
37
|
+
# Dry run (show review without posting)
|
|
38
|
+
pr-review --org myorg --project MyProject --pr 123 --dry-run
|
|
39
|
+
|
|
40
|
+
# Specify repository (if different from project name)
|
|
41
|
+
pr-review --org myorg --project MyProject --repo MyRepo --pr 123
|
|
42
|
+
|
|
43
|
+
# Verbose mode (show prompts, responses, API calls)
|
|
44
|
+
pr-review --org myorg --project MyProject --pr 123 --verbose
|
|
45
|
+
|
|
46
|
+
# Large PRs - adjust batching
|
|
47
|
+
pr-review --org myorg --project MyProject --pr 123 --max-tokens 100000 --max-files 5
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Configuration
|
|
51
|
+
|
|
52
|
+
Config file locations (checked in order):
|
|
53
|
+
|
|
54
|
+
1. Path specified via `--config` option
|
|
55
|
+
2. `.pr-review.json` (current directory)
|
|
56
|
+
3. `~/.pr-review/config.json` (home directory)
|
|
57
|
+
|
|
58
|
+
Example config:
|
|
59
|
+
|
|
60
|
+
```json
|
|
61
|
+
{
|
|
62
|
+
"azureDevOps": {
|
|
63
|
+
"defaultOrg": "myorg",
|
|
64
|
+
"defaultProject": "MyProject"
|
|
65
|
+
},
|
|
66
|
+
"llm": {
|
|
67
|
+
"provider": "claude",
|
|
68
|
+
"model": "claude-sonnet-4-20250514"
|
|
69
|
+
},
|
|
70
|
+
"rules": {
|
|
71
|
+
"path": "./rules/pr-review.md",
|
|
72
|
+
"cleanCodeGuide": "./rules/clean-code-dotnet.md"
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
## LLM Providers
|
|
78
|
+
|
|
79
|
+
| Provider | API Key Env Var | Default Model |
|
|
80
|
+
|----------|-----------------|---------------|
|
|
81
|
+
| claude | `ANTHROPIC_API_KEY` | claude-sonnet-4-20250514 |
|
|
82
|
+
| openai | `OPENAI_API_KEY` | gpt-4o |
|
|
83
|
+
| azure-openai | `AZURE_OPENAI_API_KEY` + `AZURE_OPENAI_ENDPOINT` | gpt-4o |
|
|
84
|
+
|
|
85
|
+
## Exit Codes
|
|
86
|
+
|
|
87
|
+
| Code | Meaning |
|
|
88
|
+
|------|---------|
|
|
89
|
+
| 0 | Review complete, no blocking issues |
|
|
90
|
+
| 1 | Review complete, blocking issues found |
|
|
91
|
+
| 2 | Error occurred |
|
|
92
|
+
|
|
93
|
+
## Custom Review Rules
|
|
94
|
+
|
|
95
|
+
You can provide custom review rules by creating a markdown file and referencing it in the config:
|
|
96
|
+
|
|
97
|
+
```json
|
|
98
|
+
{
|
|
99
|
+
"rules": {
|
|
100
|
+
"path": "./my-team-rules.md"
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
The rules file should contain guidelines for the AI reviewer to follow when analyzing code.
|
|
106
|
+
|
|
107
|
+
## License
|
|
108
|
+
|
|
109
|
+
MIT
|
package/dist/cli.d.ts
ADDED
package/dist/cli.js
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
#!/usr/bin/env node
// Entry point for the `pr-review` CLI.
// Commands:
//   pr-review init        — scaffold a config file (see commands/init.js)
//   pr-review (default)   — review an Azure DevOps pull request
// Command handlers lazy-load their implementation modules via dynamic import
// so that `--help` and argument errors stay fast.
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { config as loadEnv } from "dotenv";
import chalk from "chalk";
// Load environment variables
loadEnv();
yargs(hideBin(process.argv))
    .scriptName("pr-review")
    .usage("Usage: $0 <command> [options]")
    .command("init", "Initialize configuration file", (yargs) => {
    return yargs.option("local", {
        describe: "Create config in current directory instead of home",
        type: "boolean",
        default: false,
    });
}, async (argv) => {
    const { initConfig } = await import("./commands/init.js");
    initConfig(argv.local ? "local" : "global");
})
    .command("$0", "Review a pull request", (yargs) => {
    return yargs
        .option("org", {
        alias: "o",
        describe: "Azure DevOps organization name",
        type: "string",
        demandOption: true,
    })
        .option("project", {
        alias: "p",
        describe: "Azure DevOps project name",
        type: "string",
        demandOption: true,
    })
        .option("pr", {
        describe: "Pull request ID",
        type: "number",
        demandOption: true,
    })
        .option("repo", {
        alias: "r",
        describe: "Repository name or ID (defaults to project name)",
        type: "string",
    })
        .option("provider", {
        describe: "LLM provider to use",
        choices: ["claude", "azure-openai", "openai"],
        default: "claude",
    })
        .option("model", {
        alias: "m",
        describe: "Model to use (provider-specific)",
        type: "string",
    })
        .option("dry-run", {
        describe: "Show review without posting comments",
        type: "boolean",
        default: false,
    })
        .option("max-tokens", {
        describe: "Max tokens per batch (default: 150000)",
        type: "number",
        default: 150000,
    })
        .option("max-files", {
        describe: "Max files per batch (default: 10)",
        type: "number",
        default: 10,
    })
        .option("config", {
        alias: "c",
        describe: "Path to config file",
        type: "string",
    })
        .option("verbose", {
        alias: "v",
        describe: "Show detailed logs (prompts, responses, API calls)",
        type: "boolean",
        default: false,
    });
}, async (argv) => {
    const { loadConfig } = await import("./config.js");
    const { PRReviewer } = await import("./reviewer.js");
    console.log(chalk.bold("PR Review CLI"));
    console.log(chalk.dim("=============\n"));
    try {
        // Load configuration (CLI flags override file/env defaults; see config.js)
        const config = loadConfig({
            provider: argv.provider,
            model: argv.model,
            config: argv.config,
        });
        // Create reviewer
        const reviewer = new PRReviewer(config, argv.org, argv.project);
        // Run review. Note yargs exposes kebab-case flags (--dry-run, --max-tokens,
        // --max-files) as camelCase properties on argv.
        const { result, postedComments, summaryPosted } = await reviewer.review({
            organization: argv.org,
            project: argv.project,
            repositoryId: argv.repo || argv.project,
            pullRequestId: argv.pr,
            dryRun: argv.dryRun,
            maxTokensPerBatch: argv.maxTokens,
            maxFilesPerBatch: argv.maxFiles,
            verbose: argv.verbose,
        });
        // Print summary: counts per severity, colored by whether any were found
        console.log("\n" + chalk.bold("Result:"));
        const issues = result.issues || [];
        const blockCount = issues.filter((i) => i.severity === "BLOCK").length;
        const highCount = issues.filter((i) => i.severity === "HIGH").length;
        const mediumCount = issues.filter((i) => i.severity === "MEDIUM").length;
        const blockText = blockCount > 0 ? chalk.red(`${blockCount} BLOCK`) : chalk.green("0 BLOCK");
        const highText = highCount > 0 ? chalk.yellow(`${highCount} HIGH`) : chalk.green("0 HIGH");
        const mediumText = `${mediumCount} MEDIUM`;
        console.log(`${blockText} | ${highText} | ${mediumText}`);
        if (!argv.dryRun) {
            console.log(chalk.dim(`\nPosted ${postedComments} inline comments`));
            if (summaryPosted) {
                console.log(chalk.dim("Summary comment posted"));
            }
        }
        // Exit code contract (see README): 0 = clean, 1 = BLOCK issues, 2 = error
        process.exit(blockCount > 0 ? 1 : 0);
    }
    catch (error) {
        console.error(chalk.red("\nError:"), error instanceof Error ? error.message : String(error));
        process.exit(2);
    }
})
    .help()
    .version()
    .strict()
    .parse();
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/** Write a starter config file to the CWD ("local") or ~/.pr-review ("global"). */
export declare function initConfig(location?: "local" | "global"): void;
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import { writeFileSync, mkdirSync, existsSync } from "fs";
|
|
2
|
+
import { join } from "path";
|
|
3
|
+
import { homedir } from "os";
|
|
4
|
+
import chalk from "chalk";
|
|
5
|
+
// Template written verbatim by `pr-review init`. The "${AZURE_DEVOPS_PAT}"
// value is a literal placeholder — loadConfig (config.js) reads the PAT from
// the environment first and does NOT interpolate this string.
const DEFAULT_CONFIG = {
    azureDevOps: {
        pat: "${AZURE_DEVOPS_PAT}",
        defaultOrg: "",      // user fills in their organization
        defaultProject: "",  // user fills in their project
    },
    llm: {
        provider: "claude",
        model: "claude-sonnet-4-20250514",
    },
    rules: {
        path: "./rules/pr-review.md",
        cleanCodeGuide: "./rules/clean-code-dotnet.md",
    },
};
|
|
20
|
+
/**
 * Scaffold a default configuration file.
 *
 * @param location "local" writes `.pr-review.json` into the current working
 *                 directory; "global" (default) writes `config.json` under
 *                 `~/.pr-review`, creating the directory if needed.
 *                 An existing file is never overwritten.
 */
export function initConfig(location = "global") {
    const isLocal = location === "local";
    const targetDir = isLocal ? process.cwd() : join(homedir(), ".pr-review");
    const targetFile = join(targetDir, isLocal ? ".pr-review.json" : "config.json");
    // Never clobber a config the user may already have customized.
    if (existsSync(targetFile)) {
        console.log(chalk.yellow(`Config already exists at ${targetFile}`));
        return;
    }
    if (!existsSync(targetDir)) {
        mkdirSync(targetDir, { recursive: true });
    }
    writeFileSync(targetFile, JSON.stringify(DEFAULT_CONFIG, null, 2));
    console.log(chalk.green(`Created config at ${targetFile}`));
    console.log(chalk.dim("\nNext steps:"));
    console.log("1. Set AZURE_DEVOPS_PAT environment variable");
    console.log("2. Set your LLM API key (ANTHROPIC_API_KEY, OPENAI_API_KEY, etc.)");
    console.log("3. Update defaultOrg and defaultProject in config");
}
|
package/dist/config.d.ts
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
// Type declarations emitted by tsc for config.js. The verbose ZodObject
// generics below are machine-generated; the hand-relevant exports are
// `Config` and `loadConfig` at the bottom.
import { z } from "zod";
declare const LLMProviderSchema: z.ZodEnum<["claude", "azure-openai", "openai"]>;
type LLMProvider = z.infer<typeof LLMProviderSchema>;
declare const ConfigSchema: z.ZodObject<{
    azureDevOps: z.ZodObject<{
        pat: z.ZodString;
        defaultOrg: z.ZodOptional<z.ZodString>;
        defaultProject: z.ZodOptional<z.ZodString>;
    }, "strip", z.ZodTypeAny, {
        pat: string;
        defaultOrg?: string | undefined;
        defaultProject?: string | undefined;
    }, {
        pat: string;
        defaultOrg?: string | undefined;
        defaultProject?: string | undefined;
    }>;
    llm: z.ZodObject<{
        provider: z.ZodEnum<["claude", "azure-openai", "openai"]>;
        apiKey: z.ZodString;
        model: z.ZodOptional<z.ZodString>;
        endpoint: z.ZodOptional<z.ZodString>;
    }, "strip", z.ZodTypeAny, {
        provider: "claude" | "azure-openai" | "openai";
        apiKey: string;
        model?: string | undefined;
        endpoint?: string | undefined;
    }, {
        provider: "claude" | "azure-openai" | "openai";
        apiKey: string;
        model?: string | undefined;
        endpoint?: string | undefined;
    }>;
    rules: z.ZodOptional<z.ZodObject<{
        path: z.ZodOptional<z.ZodString>;
        cleanCodeGuide: z.ZodOptional<z.ZodString>;
    }, "strip", z.ZodTypeAny, {
        path?: string | undefined;
        cleanCodeGuide?: string | undefined;
    }, {
        path?: string | undefined;
        cleanCodeGuide?: string | undefined;
    }>>;
}, "strip", z.ZodTypeAny, {
    azureDevOps: {
        pat: string;
        defaultOrg?: string | undefined;
        defaultProject?: string | undefined;
    };
    llm: {
        provider: "claude" | "azure-openai" | "openai";
        apiKey: string;
        model?: string | undefined;
        endpoint?: string | undefined;
    };
    rules?: {
        path?: string | undefined;
        cleanCodeGuide?: string | undefined;
    } | undefined;
}, {
    azureDevOps: {
        pat: string;
        defaultOrg?: string | undefined;
        defaultProject?: string | undefined;
    };
    llm: {
        provider: "claude" | "azure-openai" | "openai";
        apiKey: string;
        model?: string | undefined;
        endpoint?: string | undefined;
    };
    rules?: {
        path?: string | undefined;
        cleanCodeGuide?: string | undefined;
    } | undefined;
}>;
/** Fully-merged, validated configuration consumed by the reviewer. */
export type Config = z.infer<typeof ConfigSchema>;
/** Subset of CLI flags that influence configuration resolution. */
interface CLIOverrides {
    provider?: LLMProvider;
    model?: string;
    config?: string;
}
/** Merge CLI overrides, environment variables, and config file, then validate. */
export declare function loadConfig(overrides: CLIOverrides): Config;
export {};
|
package/dist/config.js
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { readFileSync, existsSync } from "fs";
|
|
3
|
+
import { homedir } from "os";
|
|
4
|
+
import { join } from "path";
|
|
5
|
+
// Supported LLM backends; mirrored by the CLI's --provider choices in cli.js.
const LLMProviderSchema = z.enum(["claude", "azure-openai", "openai"]);
// Shape of the fully-merged runtime configuration (config file + env + CLI).
// The min(1) messages double as the user-facing error when a credential is
// missing — ConfigSchema.parse() throws them from loadConfig().
const ConfigSchema = z.object({
    azureDevOps: z.object({
        pat: z.string().min(1, "AZURE_DEVOPS_PAT is required"),
        defaultOrg: z.string().optional(),
        defaultProject: z.string().optional(),
    }),
    llm: z.object({
        provider: LLMProviderSchema,
        apiKey: z.string().min(1, "LLM API key is required"),
        model: z.string().optional(),
        endpoint: z.string().optional(),
    }),
    rules: z
        .object({
        path: z.string().optional(),
        cleanCodeGuide: z.string().optional(),
    })
        .optional(),
});
|
|
25
|
+
/**
 * Build the effective configuration.
 *
 * Precedence (highest first): CLI overrides, environment variables, the first
 * readable config file, built-in defaults. Config files are searched in order:
 * the explicit --config path, ./.pr-review.json, ~/.pr-review/config.json.
 *
 * @param overrides CLI flags ({ provider, model, config }).
 * @returns the validated config object.
 * @throws ZodError when required fields (PAT, LLM API key) end up empty.
 */
export function loadConfig(overrides) {
    let fileConfig = {};
    // filter(Boolean) drops the --config entry when the flag was not given.
    const configPaths = [overrides.config, ".pr-review.json", join(homedir(), ".pr-review", "config.json")].filter(Boolean);
    for (const configPath of configPaths) {
        if (existsSync(configPath)) {
            try {
                const content = readFileSync(configPath, "utf-8");
                fileConfig = JSON.parse(content);
                break; // first file that parses wins; a bad file falls through to the next
            }
            catch {
                // Ignore invalid config files
            }
        }
    }
    const provider = overrides.provider || fileConfig.llm?.provider || "claude";
    const apiKey = getApiKey(provider, fileConfig.llm?.apiKey);
    const config = {
        azureDevOps: {
            // Env var beats the file value; note init.js writes a literal
            // "${AZURE_DEVOPS_PAT}" placeholder that is NOT interpolated here.
            pat: process.env.AZURE_DEVOPS_PAT || fileConfig.azureDevOps?.pat || "",
            defaultOrg: fileConfig.azureDevOps?.defaultOrg,
            defaultProject: fileConfig.azureDevOps?.defaultProject,
        },
        llm: {
            provider,
            apiKey,
            model: overrides.model || fileConfig.llm?.model || getDefaultModel(provider),
            endpoint: process.env.AZURE_OPENAI_ENDPOINT || fileConfig.llm?.endpoint,
        },
        rules: fileConfig.rules,
    };
    // Validation surfaces the min(1) messages from ConfigSchema on failure.
    return ConfigSchema.parse(config);
}
|
|
58
|
+
/**
 * Resolve the API key for the given LLM provider.
 * The provider-specific environment variable takes precedence over the key
 * stored in the config file; an empty string is returned when neither is set
 * so that ConfigSchema.parse() reports "LLM API key is required".
 *
 * @param provider   one of "claude" | "openai" | "azure-openai".
 * @param fileApiKey key read from the config file, if any.
 * @returns the resolved key, or "" when none is available.
 */
function getApiKey(provider, fileApiKey) {
    switch (provider) {
        case "claude":
            return process.env.ANTHROPIC_API_KEY || fileApiKey || "";
        case "openai":
            return process.env.OPENAI_API_KEY || fileApiKey || "";
        case "azure-openai":
            return process.env.AZURE_OPENAI_API_KEY || fileApiKey || "";
        default:
            // Unrecognized provider (e.g. a hand-edited config file): fall back
            // to the file key instead of returning undefined, so validation
            // fails with a clear schema error rather than a type crash.
            return fileApiKey || "";
    }
}
|
|
68
|
+
/**
 * Default model identifier used when neither --model nor the config file
 * specifies one.
 *
 * @param provider one of "claude" | "openai" | "azure-openai".
 * @returns the provider's default model name.
 */
function getDefaultModel(provider) {
    const defaults = {
        "claude": "claude-sonnet-4-20250514",
        "openai": "gpt-4o",
        "azure-openai": "gpt-4o",
    };
    return defaults[provider];
}
|
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { ReviewPrompt, ReviewResult } from "./types.js";
/** Optional per-call settings passed to an adapter's review(). */
export interface ReviewContext {
    /** Enables detailed logging inside the adapter. */
    verbose?: boolean;
}
/** Contract implemented by every LLM backend (Claude, OpenAI, Azure OpenAI). */
export interface LLMAdapter {
    /** Run one review request and return the structured result. */
    review(prompt: ReviewPrompt, context?: ReviewContext): Promise<ReviewResult>;
}
/** Constructor options shared by all adapters. */
export interface LLMAdapterConfig {
    apiKey: string;
    model: string;
    /** Service endpoint; the Azure OpenAI adapter throws if this is missing. */
    endpoint?: string;
    verbose?: boolean;
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { LLMAdapter, LLMAdapterConfig } from "./adapter.js";
import type { ReviewPrompt, ReviewResult } from "./types.js";
/**
 * LLMAdapter backed by Azure OpenAI chat completions.
 * The constructor throws when `config.endpoint` is not provided.
 */
export declare class AzureOpenAIAdapter implements LLMAdapter {
    private client;
    private model;
    constructor(config: LLMAdapterConfig);
    review(prompt: ReviewPrompt): Promise<ReviewResult>;
    private buildUserPrompt;
    private parseResponse;
}
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
import { AzureOpenAI } from "openai";
|
|
2
|
+
const SYSTEM_PROMPT = `You are a Senior Principal Engineer conducting a code review. You are direct, blunt, and pragmatic.
|
|
3
|
+
|
|
4
|
+
Your task is to review the PR diff and identify issues. For each issue, provide:
|
|
5
|
+
- file: The file path
|
|
6
|
+
- line: The line number where the issue occurs
|
|
7
|
+
- severity: BLOCK (must fix before merge), HIGH (should fix), or MEDIUM (nice to fix)
|
|
8
|
+
- category: Type of issue (Security, Architecture, Naming, Performance, Clean Code, etc.)
|
|
9
|
+
- message: A witty, memorable comment about the issue
|
|
10
|
+
- fix: The specific action to fix the issue
|
|
11
|
+
|
|
12
|
+
IMPORTANT: Return your response as valid JSON in this exact format:
|
|
13
|
+
{
|
|
14
|
+
"issues": [...],
|
|
15
|
+
"summary": "Review complete. BLOCK: X | HIGH: X | MEDIUM: X"
|
|
16
|
+
}`;
|
|
17
|
+
/**
 * LLMAdapter implementation backed by Azure OpenAI chat completions.
 * Requires `config.endpoint` (populated from AZURE_OPENAI_ENDPOINT by
 * loadConfig); the constructor throws without it.
 */
export class AzureOpenAIAdapter {
    client; // AzureOpenAI SDK client
    model;  // deployment/model name sent with each completion request
    constructor(config) {
        if (!config.endpoint) {
            throw new Error("Azure OpenAI endpoint is required");
        }
        this.client = new AzureOpenAI({
            apiKey: config.apiKey,
            endpoint: config.endpoint,
            // NOTE(review): pinned preview API version — confirm it is still supported.
            apiVersion: "2024-02-15-preview",
        });
        this.model = config.model;
    }
    /**
     * Send one review request and return the parsed result.
     * The review rules are appended to the system prompt, and
     * `response_format: json_object` forces a JSON reply.
     */
    async review(prompt) {
        const response = await this.client.chat.completions.create({
            model: this.model,
            messages: [
                { role: "system", content: SYSTEM_PROMPT + "\n\n" + prompt.rules },
                { role: "user", content: this.buildUserPrompt(prompt) },
            ],
            response_format: { type: "json_object" },
        });
        const content = response.choices[0]?.message?.content;
        if (!content) {
            throw new Error("Empty response from Azure OpenAI");
        }
        return this.parseResponse(content);
    }
    // Render PR metadata plus each file's diff (fenced as ```diff) into the user message.
    buildUserPrompt(prompt) {
        let userPrompt = `## PR Metadata
- ID: ${prompt.prMetadata.id}
- Title: ${prompt.prMetadata.title}
- Author: ${prompt.prMetadata.author}
- Source Branch: ${prompt.prMetadata.sourceBranch}
- Target Branch: ${prompt.prMetadata.targetBranch}

## File Diffs
`;
        for (const diff of prompt.diffs) {
            userPrompt += `\n### ${diff.path}\n\`\`\`diff\n${diff.diff}\n\`\`\`\n`;
        }
        return userPrompt;
    }
    // Parse the model's JSON reply; missing fields default to [] / "Review complete.".
    parseResponse(text) {
        try {
            const parsed = JSON.parse(text);
            return {
                issues: parsed.issues || [],
                summary: parsed.summary || "Review complete.",
            };
        }
        catch (error) {
            throw new Error(`Failed to parse Azure OpenAI response: ${error}`);
        }
    }
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import type { LLMAdapter, LLMAdapterConfig } from "./adapter.js";
import type { ReviewPrompt, ReviewResult } from "./types.js";
/** LLMAdapter backed by Anthropic's Claude API. */
export declare class ClaudeAdapter implements LLMAdapter {
    private client;
    private model;
    private verbose;
    constructor(config: LLMAdapterConfig);
    review(prompt: ReviewPrompt): Promise<ReviewResult>;
    private buildUserPrompt;
    private parseResponse;
}
|