@klitchevo/code-council 0.0.2 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +17 -12
  2. package/dist/index.js +5 -5
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -51,9 +51,9 @@ Add to your `claude_desktop_config.json`:
51
51
  "args": ["-y", "@klitchevo/code-council"],
52
52
  "env": {
53
53
  "OPENROUTER_API_KEY": "your-api-key-here",
54
- "CODE_REVIEW_MODELS": ["anthropic/claude-3.5-sonnet", "openai/gpt-4-turbo"],
55
- "FRONTEND_REVIEW_MODELS": ["anthropic/claude-3.5-sonnet"],
56
- "BACKEND_REVIEW_MODELS": ["openai/gpt-4-turbo", "google/gemini-pro"]
54
+ "CODE_REVIEW_MODELS": ["anthropic/claude-sonnet-4.5", "openai/gpt-4o"],
55
+ "FRONTEND_REVIEW_MODELS": ["anthropic/claude-sonnet-4.5"],
56
+ "BACKEND_REVIEW_MODELS": ["openai/gpt-4o", "google/gemini-2.0-flash-exp"]
57
57
  }
58
58
  }
59
59
  }
@@ -243,11 +243,17 @@ Show which AI models are currently configured for each review type.
243
243
  You can customize which AI models are used for reviews by setting environment variables in your MCP client configuration. Each review type can use different models.
244
244
 
245
245
  **Available Environment Variables:**
246
+
247
+ **Model Configuration:**
246
248
  - `CODE_REVIEW_MODELS` - Models for general code reviews
247
249
  - `FRONTEND_REVIEW_MODELS` - Models for frontend reviews
248
250
  - `BACKEND_REVIEW_MODELS` - Models for backend reviews
249
251
  - `PLAN_REVIEW_MODELS` - Models for plan reviews
250
252
 
253
+ **LLM Parameters:**
254
+ - `TEMPERATURE` - Response temperature (0.0-2.0, default: 0.3)
255
+ - `MAX_TOKENS` - Maximum response tokens (default: 16384)
256
+
251
257
  **Format:** Array of strings (JSON array)
252
258
 
253
259
  **Example:**
@@ -259,9 +265,9 @@ You can customize which AI models are used for reviews by setting environment va
259
265
  "args": ["-y", "@klitchevo/code-council"],
260
266
  "env": {
261
267
  "OPENROUTER_API_KEY": "your-api-key",
262
- "CODE_REVIEW_MODELS": ["anthropic/claude-3.5-sonnet", "openai/gpt-4-turbo", "google/gemini-pro"],
263
- "FRONTEND_REVIEW_MODELS": ["anthropic/claude-3.5-sonnet"],
264
- "BACKEND_REVIEW_MODELS": ["openai/gpt-4-turbo", "anthropic/claude-3.5-sonnet"]
268
+ "CODE_REVIEW_MODELS": ["anthropic/claude-sonnet-4.5", "openai/gpt-4o", "google/gemini-2.0-flash-exp"],
269
+ "FRONTEND_REVIEW_MODELS": ["anthropic/claude-sonnet-4.5"],
270
+ "BACKEND_REVIEW_MODELS": ["openai/gpt-4o", "anthropic/claude-sonnet-4.5"]
265
271
  }
266
272
  }
267
273
  }
@@ -275,12 +281,11 @@ If you don't specify models, the server uses these defaults:
275
281
 
276
282
  **Finding Models:**
277
283
  Browse all available models at [OpenRouter Models](https://openrouter.ai/models). Popular choices include:
278
- - `anthropic/claude-3.5-sonnet` - Excellent for code review
279
- - `openai/gpt-4-turbo` - Strong general-purpose model
280
- - `google/gemini-pro` - Fast and cost-effective
281
- - `meta-llama/llama-3.1-70b-instruct` - Open source option
282
- - `x-ai/grok-code-fast-1` - Optimized for code
283
- - `minimax/minimax-m2.1` - Good balance of speed and quality
284
+ - `anthropic/claude-sonnet-4.5` - Latest Sonnet, excellent for code review
285
+ - `anthropic/claude-opus-4.5` - Frontier reasoning model for complex tasks
286
+ - `openai/gpt-4o` - Latest GPT-4 Omni model
287
+ - `google/gemini-2.0-flash-exp` - Fast and affordable
288
+ - `meta-llama/llama-3.3-70b-instruct` - Latest open source option
284
289
 
285
290
  ### Local Development
286
291
 
package/dist/index.js CHANGED
@@ -7,10 +7,10 @@ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"
7
7
 
8
8
  // src/constants.ts
9
9
  var LLM_CONFIG = {
10
- /** Default temperature for model responses */
11
- DEFAULT_TEMPERATURE: 0.3,
12
- /** Default max tokens for responses */
13
- DEFAULT_MAX_TOKENS: 4096
10
+ /** Default temperature for model responses (can override with TEMPERATURE env var) */
11
+ DEFAULT_TEMPERATURE: Number(process.env.TEMPERATURE) || 0.3,
12
+ /** Default max tokens for responses (can override with MAX_TOKENS env var) */
13
+ DEFAULT_MAX_TOKENS: Number(process.env.MAX_TOKENS) || 16384
14
14
  };
15
15
  var DEFAULT_MODELS = [
16
16
  "minimax/minimax-m2.1",
@@ -27,7 +27,7 @@ function parseModels(envVar, defaults) {
27
27
  return filtered.length > 0 ? filtered : defaults;
28
28
  }
29
29
  throw new Error(
30
- `Model configuration must be an array of strings, got: ${typeof envVar}. Example: ["anthropic/claude-3.5-sonnet", "openai/gpt-4-turbo"]`
30
+ `Model configuration must be an array of strings, got: ${typeof envVar}. Example: ["anthropic/claude-sonnet-4.5", "openai/gpt-4o"]`
31
31
  );
32
32
  }
33
33
  var CODE_REVIEW_MODELS = parseModels(
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@klitchevo/code-council",
3
- "version": "0.0.2",
3
+ "version": "0.0.3",
4
4
  "description": "Multi-model AI code review server using OpenRouter - get diverse perspectives from multiple LLMs in parallel",
5
5
  "main": "dist/index.js",
6
6
  "type": "module",