@klitchevo/code-council 0.0.2 → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +11 -12
  2. package/dist/index.js +17 -5
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -51,9 +51,9 @@ Add to your `claude_desktop_config.json`:
51
51
  "args": ["-y", "@klitchevo/code-council"],
52
52
  "env": {
53
53
  "OPENROUTER_API_KEY": "your-api-key-here",
54
- "CODE_REVIEW_MODELS": ["anthropic/claude-3.5-sonnet", "openai/gpt-4-turbo"],
55
- "FRONTEND_REVIEW_MODELS": ["anthropic/claude-3.5-sonnet"],
56
- "BACKEND_REVIEW_MODELS": ["openai/gpt-4-turbo", "google/gemini-pro"]
54
+ "CODE_REVIEW_MODELS": ["anthropic/claude-sonnet-4.5", "openai/gpt-4o"],
55
+ "FRONTEND_REVIEW_MODELS": ["anthropic/claude-sonnet-4.5"],
56
+ "BACKEND_REVIEW_MODELS": ["openai/gpt-4o", "google/gemini-2.0-flash-exp"]
57
57
  }
58
58
  }
59
59
  }
@@ -259,9 +259,9 @@ You can customize which AI models are used for reviews by setting environment va
259
259
  "args": ["-y", "@klitchevo/code-council"],
260
260
  "env": {
261
261
  "OPENROUTER_API_KEY": "your-api-key",
262
- "CODE_REVIEW_MODELS": ["anthropic/claude-3.5-sonnet", "openai/gpt-4-turbo", "google/gemini-pro"],
263
- "FRONTEND_REVIEW_MODELS": ["anthropic/claude-3.5-sonnet"],
264
- "BACKEND_REVIEW_MODELS": ["openai/gpt-4-turbo", "anthropic/claude-3.5-sonnet"]
262
+ "CODE_REVIEW_MODELS": ["anthropic/claude-sonnet-4.5", "openai/gpt-4o", "google/gemini-2.0-flash-exp"],
263
+ "FRONTEND_REVIEW_MODELS": ["anthropic/claude-sonnet-4.5"],
264
+ "BACKEND_REVIEW_MODELS": ["openai/gpt-4o", "anthropic/claude-sonnet-4.5"]
265
265
  }
266
266
  }
267
267
  }
@@ -275,12 +275,11 @@ If you don't specify models, the server uses these defaults:
275
275
 
276
276
  **Finding Models:**
277
277
  Browse all available models at [OpenRouter Models](https://openrouter.ai/models). Popular choices include:
278
- - `anthropic/claude-3.5-sonnet` - Excellent for code review
279
- - `openai/gpt-4-turbo` - Strong general-purpose model
280
- - `google/gemini-pro` - Fast and cost-effective
281
- - `meta-llama/llama-3.1-70b-instruct` - Open source option
282
- - `x-ai/grok-code-fast-1` - Optimized for code
283
- - `minimax/minimax-m2.1` - Good balance of speed and quality
278
+ - `anthropic/claude-sonnet-4.5` - Latest Sonnet, excellent for code review
279
+ - `anthropic/claude-opus-4.5` - Frontier reasoning model for complex tasks
280
+ - `openai/gpt-4o` - Latest GPT-4 Omni model
281
+ - `google/gemini-2.0-flash-exp` - Fast and affordable
282
+ - `meta-llama/llama-3.3-70b-instruct` - Latest open source option
284
283
 
285
284
  ### Local Development
286
285
 
package/dist/index.js CHANGED
@@ -7,10 +7,10 @@ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"
7
7
 
8
8
  // src/constants.ts
9
9
  var LLM_CONFIG = {
10
- /** Default temperature for model responses */
11
- DEFAULT_TEMPERATURE: 0.3,
12
- /** Default max tokens for responses */
13
- DEFAULT_MAX_TOKENS: 4096
10
+ /** Default temperature for model responses (can override with TEMPERATURE env var) */
11
+ DEFAULT_TEMPERATURE: Number(process.env.TEMPERATURE) || 0.3,
12
+ /** Default max tokens for responses (can override with MAX_TOKENS env var) */
13
+ DEFAULT_MAX_TOKENS: Number(process.env.MAX_TOKENS) || 16384
14
14
  };
15
15
  var DEFAULT_MODELS = [
16
16
  "minimax/minimax-m2.1",
@@ -26,8 +26,20 @@ function parseModels(envVar, defaults) {
26
26
  const filtered = envVar.filter((m) => m && m.trim().length > 0);
27
27
  return filtered.length > 0 ? filtered : defaults;
28
28
  }
29
+ if (typeof envVar === "string") {
30
+ try {
31
+ const parsed = JSON.parse(envVar);
32
+ if (Array.isArray(parsed)) {
33
+ const filtered = parsed.filter(
34
+ (m) => typeof m === "string" && m.trim().length > 0
35
+ );
36
+ return filtered.length > 0 ? filtered : defaults;
37
+ }
38
+ } catch {
39
+ }
40
+ }
29
41
  throw new Error(
30
- `Model configuration must be an array of strings, got: ${typeof envVar}. Example: ["anthropic/claude-3.5-sonnet", "openai/gpt-4-turbo"]`
42
+ `Model configuration must be an array of strings, got: ${typeof envVar}. Example: ["anthropic/claude-sonnet-4.5", "openai/gpt-4o"]`
31
43
  );
32
44
  }
33
45
  var CODE_REVIEW_MODELS = parseModels(
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@klitchevo/code-council",
3
- "version": "0.0.2",
3
+ "version": "0.0.4",
4
4
  "description": "Multi-model AI code review server using OpenRouter - get diverse perspectives from multiple LLMs in parallel",
5
5
  "main": "dist/index.js",
6
6
  "type": "module",