@klitchevo/code-council 0.1.0 → 0.1.2

This diff shows the changes between publicly available package versions as published to a supported registry. It is provided for informational purposes only.
package/README.md CHANGED
@@ -519,7 +519,7 @@ Config file settings take priority over environment variables:
  If you don't specify models, the server uses these defaults:
  - `minimax/minimax-m2.1` - Fast, cost-effective reasoning
  - `z-ai/glm-4.7` - Strong multilingual capabilities
- - `moonshotai/kimi-k2-thinking` - Advanced reasoning with thinking
+ - `moonshotai/kimi-k2.5` - Advanced reasoning with thinking
  - `deepseek/deepseek-v3.2` - State-of-the-art open model

  ### Finding Models
@@ -11,7 +11,7 @@ import { z } from 'zod';
  *
  * Find all models at: https://openrouter.ai/models
  */
- type KnownModel = "anthropic/claude-opus-4" | "anthropic/claude-opus-4.5" | "anthropic/claude-sonnet-4" | "anthropic/claude-sonnet-4.5" | "anthropic/claude-haiku-4" | "anthropic/claude-haiku-4.5" | "anthropic/claude-3.5-sonnet" | "anthropic/claude-3.5-haiku" | "anthropic/claude-3-opus" | "anthropic/claude-3-sonnet" | "anthropic/claude-3-haiku" | "openai/gpt-4o" | "openai/gpt-4o-mini" | "openai/gpt-4-turbo" | "openai/gpt-4" | "openai/gpt-3.5-turbo" | "openai/o1" | "openai/o1-mini" | "openai/o1-preview" | "openai/o3" | "openai/o3-mini" | "openai/gpt-5" | "openai/gpt-5.1" | "openai/gpt-5.2" | "google/gemini-2.5-pro" | "google/gemini-2.5-flash" | "google/gemini-2.0-pro" | "google/gemini-2.0-flash" | "google/gemini-2.0-flash-001" | "google/gemini-pro" | "google/gemini-pro-vision" | "google/gemini-3-pro-preview" | "google/gemini-3-flash-preview" | "meta-llama/llama-3.3-70b-instruct" | "meta-llama/llama-3.2-90b-vision-instruct" | "meta-llama/llama-3.2-11b-vision-instruct" | "meta-llama/llama-3.1-405b-instruct" | "meta-llama/llama-3.1-70b-instruct" | "meta-llama/llama-3.1-8b-instruct" | "meta-llama/llama-4-maverick" | "meta-llama/llama-4-scout" | "mistralai/mistral-large" | "mistralai/mistral-large-2512" | "mistralai/mistral-medium" | "mistralai/mistral-small" | "mistralai/mistral-small-creative" | "mistralai/mixtral-8x7b-instruct" | "mistralai/mixtral-8x22b-instruct" | "mistralai/codestral" | "mistralai/devstral-2512" | "deepseek/deepseek-chat" | "deepseek/deepseek-coder" | "deepseek/deepseek-r1" | "deepseek/deepseek-v3" | "deepseek/deepseek-v3.1" | "deepseek/deepseek-v3.2" | "qwen/qwen-2.5-72b-instruct" | "qwen/qwen-2.5-coder-32b-instruct" | "qwen/qwen-2-72b-instruct" | "qwen/qwq-32b" | "qwen/qwen3-vl-32b-instruct" | "x-ai/grok-2" | "x-ai/grok-2-vision" | "x-ai/grok-3" | "x-ai/grok-4" | "x-ai/grok-4.1-fast" | "amazon/nova-pro-v1" | "amazon/nova-lite-v1" | "amazon/nova-micro-v1" | "amazon/nova-premier-v1" | "amazon/nova-2-lite-v1" | "cohere/command-r-plus" | "cohere/command-r" | "cohere/command" | "minimax/minimax-m2" | "minimax/minimax-m2.1" | "z-ai/glm-4.7" | "moonshotai/kimi-k2-thinking" | "perplexity/sonar-pro" | "perplexity/sonar-pro-search" | "nvidia/nemotron-3-nano-30b-a3b";
+ type KnownModel = "anthropic/claude-opus-4" | "anthropic/claude-opus-4.5" | "anthropic/claude-sonnet-4" | "anthropic/claude-sonnet-4.5" | "anthropic/claude-haiku-4" | "anthropic/claude-haiku-4.5" | "anthropic/claude-3.5-sonnet" | "anthropic/claude-3.5-haiku" | "anthropic/claude-3-opus" | "anthropic/claude-3-sonnet" | "anthropic/claude-3-haiku" | "openai/gpt-4o" | "openai/gpt-4o-mini" | "openai/gpt-4-turbo" | "openai/gpt-4" | "openai/gpt-3.5-turbo" | "openai/o1" | "openai/o1-mini" | "openai/o1-preview" | "openai/o3" | "openai/o3-mini" | "openai/gpt-5" | "openai/gpt-5.1" | "openai/gpt-5.2" | "google/gemini-2.5-pro" | "google/gemini-2.5-flash" | "google/gemini-2.0-pro" | "google/gemini-2.0-flash" | "google/gemini-2.0-flash-001" | "google/gemini-pro" | "google/gemini-pro-vision" | "google/gemini-3-pro-preview" | "google/gemini-3-flash-preview" | "meta-llama/llama-3.3-70b-instruct" | "meta-llama/llama-3.2-90b-vision-instruct" | "meta-llama/llama-3.2-11b-vision-instruct" | "meta-llama/llama-3.1-405b-instruct" | "meta-llama/llama-3.1-70b-instruct" | "meta-llama/llama-3.1-8b-instruct" | "meta-llama/llama-4-maverick" | "meta-llama/llama-4-scout" | "mistralai/mistral-large" | "mistralai/mistral-large-2512" | "mistralai/mistral-medium" | "mistralai/mistral-small" | "mistralai/mistral-small-creative" | "mistralai/mixtral-8x7b-instruct" | "mistralai/mixtral-8x22b-instruct" | "mistralai/codestral" | "mistralai/devstral-2512" | "deepseek/deepseek-chat" | "deepseek/deepseek-coder" | "deepseek/deepseek-r1" | "deepseek/deepseek-v3" | "deepseek/deepseek-v3.1" | "deepseek/deepseek-v3.2" | "qwen/qwen-2.5-72b-instruct" | "qwen/qwen-2.5-coder-32b-instruct" | "qwen/qwen-2-72b-instruct" | "qwen/qwq-32b" | "qwen/qwen3-vl-32b-instruct" | "x-ai/grok-2" | "x-ai/grok-2-vision" | "x-ai/grok-3" | "x-ai/grok-4" | "x-ai/grok-4.1-fast" | "amazon/nova-pro-v1" | "amazon/nova-lite-v1" | "amazon/nova-micro-v1" | "amazon/nova-premier-v1" | "amazon/nova-2-lite-v1" | "cohere/command-r-plus" | "cohere/command-r" | "cohere/command" | "minimax/minimax-m2" | "minimax/minimax-m2.1" | "z-ai/glm-4.7" | "moonshotai/kimi-k2-thinking" | "moonshotai/kimi-k2.5" | "perplexity/sonar-pro" | "perplexity/sonar-pro-search" | "nvidia/nemotron-3-nano-30b-a3b";
  /**
  * Model identifier - accepts known models for autocomplete, but any string is valid
  */
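
The declaration that consumes this union is not shown in the hunk, but the comment beneath it ("accepts known models for autocomplete, but any string is valid") describes the common `(string & {})` widening trick. A minimal sketch of that pattern, with the union abbreviated and the `Model` name assumed:

```ts
// Abbreviated stand-in for the full KnownModel union above.
type KnownModel = "minimax/minimax-m2.1" | "z-ai/glm-4.7" | "moonshotai/kimi-k2.5";

// Unioning with `string & {}` keeps the literal members visible to the language
// server, so editors still suggest the known IDs while any string type-checks.
type Model = KnownModel | (string & {});

const known: Model = "moonshotai/kimi-k2.5";   // offered by autocomplete
const custom: Model = "some-vendor/new-model"; // still valid
```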
package/dist/index.js CHANGED
@@ -31,7 +31,7 @@ var LLM_CONFIG = {
  var DEFAULT_MODELS = [
  "minimax/minimax-m2.1",
  "z-ai/glm-4.7",
- "moonshotai/kimi-k2-thinking",
+ "moonshotai/kimi-k2.5",
  "deepseek/deepseek-v3.2"
  ];
  var SESSION_LIMITS = {
@@ -94,6 +94,38 @@ function parseModels(envVar, defaults) {
  `Model configuration must be an array of strings, got: ${typeof envVar}. Example: ["anthropic/claude-sonnet-4.5", "openai/gpt-4o"]`
  );
  }
+ function getModels(fileConfigKey, envVarName) {
+ const fromFile = fileConfig.models?.[fileConfigKey];
+ if (fromFile && fromFile.length > 0) {
+ return fromFile;
+ }
+ const defaultModels = fileConfig.models?.defaultModels;
+ if (defaultModels && defaultModels.length > 0) {
+ return defaultModels;
+ }
+ return parseModels(
+ process.env[envVarName],
+ DEFAULT_MODELS
+ );
+ }
+ function getCodeReviewModels() {
+ return getModels("codeReview", "CODE_REVIEW_MODELS");
+ }
+ function getFrontendReviewModels() {
+ return getModels("frontendReview", "FRONTEND_REVIEW_MODELS");
+ }
+ function getBackendReviewModels() {
+ return getModels("backendReview", "BACKEND_REVIEW_MODELS");
+ }
+ function getPlanReviewModels() {
+ return getModels("planReview", "PLAN_REVIEW_MODELS");
+ }
+ function getDiscussionModels() {
+ return getModels("discussion", "DISCUSSION_MODELS");
+ }
+ function getTpsAuditModels() {
+ return getModels("tpsAudit", "TPS_AUDIT_MODELS");
+ }
  var CODE_REVIEW_MODELS = parseModels(
  process.env.CODE_REVIEW_MODELS,
  DEFAULT_MODELS
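
The new `getModels` helper gives every tool the same four-step precedence: a tool-specific list in the config file, then the file-level `defaultModels`, then a JSON-array environment variable, then the built-in `DEFAULT_MODELS`. A self-contained sketch of that chain; the `FileConfig` shape is an assumption, and the real `parseModels` adds validation with the error message shown above:

```ts
// Hypothetical config-file shape; the real loader lives elsewhere in dist/index.js.
interface FileConfig {
  models?: { defaultModels?: string[] } & Record<string, string[] | undefined>;
}

const DEFAULT_MODELS = [
  "minimax/minimax-m2.1",
  "z-ai/glm-4.7",
  "moonshotai/kimi-k2.5",
  "deepseek/deepseek-v3.2",
];

// Precedence: file key > file defaultModels > env var (JSON array) > built-ins.
function resolveModels(fileConfig: FileConfig, key: string, envVarName: string): string[] {
  const fromFile = fileConfig.models?.[key];
  if (fromFile && fromFile.length > 0) return fromFile;

  const fileDefaults = fileConfig.models?.defaultModels;
  if (fileDefaults && fileDefaults.length > 0) return fileDefaults;

  const raw = process.env[envVarName];
  if (raw) {
    const parsed = JSON.parse(raw); // the shipped parseModels also validates and throws
    if (Array.isArray(parsed) && parsed.every((m) => typeof m === "string")) return parsed;
  }
  return DEFAULT_MODELS;
}

// e.g. with CODE_REVIEW_MODELS='["anthropic/claude-sonnet-4.5", "openai/gpt-4o"]'
// and an empty config file, resolveModels({}, "codeReview", "CODE_REVIEW_MODELS")
// returns the two models from the environment.
```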
@@ -137,6 +169,64 @@ function parseModelWeights(envVar) {
  }
  return {};
  }
+ function getEnableConsensus() {
+ if ((fileConfig.consensus?.enabled !== void 0 || process.env.ENABLE_CONSENSUS !== void 0) && !consensusDeprecationWarned) {
+ console.warn(
+ "[code-council] Warning: consensus.enabled is deprecated. All review tools now use host extraction mode by default. This setting will be removed in a future version."
+ );
+ consensusDeprecationWarned = true;
+ }
+ return true;
+ }
+ var consensusDeprecationWarned = false;
+ function getModelWeights() {
+ if (fileConfig.consensus?.modelWeights && Object.keys(fileConfig.consensus.modelWeights).length > 0) {
+ return fileConfig.consensus.modelWeights;
+ }
+ return parseModelWeights(process.env.MODEL_WEIGHTS);
+ }
+ function getHighConfidenceThreshold() {
+ if (fileConfig.consensus?.highConfidenceThreshold !== void 0) {
+ return fileConfig.consensus.highConfidenceThreshold;
+ }
+ return Math.min(
+ 1,
+ Math.max(
+ 0,
+ Number.parseFloat(process.env.HIGH_CONFIDENCE_THRESHOLD ?? "0.8")
+ )
+ );
+ }
+ function getModerateConfidenceThreshold() {
+ if (fileConfig.consensus?.moderateConfidenceThreshold !== void 0) {
+ return fileConfig.consensus.moderateConfidenceThreshold;
+ }
+ return Math.min(
+ 1,
+ Math.max(
+ 0,
+ Number.parseFloat(process.env.MODERATE_CONFIDENCE_THRESHOLD ?? "0.5")
+ )
+ );
+ }
+ function getConsensusExtractionModel() {
+ if (fileConfig.consensus?.extractionModel) {
+ return fileConfig.consensus.extractionModel;
+ }
+ return process.env.CONSENSUS_EXTRACTION_MODEL ?? "anthropic/claude-3-haiku";
+ }
+ function getConsensusFallbackOnError() {
+ if (fileConfig.consensus?.fallbackOnError !== void 0) {
+ return fileConfig.consensus.fallbackOnError;
+ }
+ return process.env.CONSENSUS_FALLBACK_ON_ERROR?.toLowerCase() !== "false";
+ }
+ function getHostExtraction() {
+ if (fileConfig.consensus?.hostExtraction !== void 0) {
+ return fileConfig.consensus.hostExtraction;
+ }
+ return process.env.CONSENSUS_HOST_EXTRACTION?.toLowerCase() !== "false";
+ }
  var MODEL_WEIGHTS = parseModelWeights(
  process.env.MODEL_WEIGHTS
  );
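
Two details in these getters are easy to miss: the confidence thresholds are clamped into [0, 1] after parsing the environment string, and the deprecation notice is gated by a module-level flag so it fires at most once per process. A condensed sketch (helper names are illustrative, not from the package):

```ts
// Clamp an env-provided threshold into [0, 1], falling back to a default string.
// Note: an unparseable value yields NaN, and NaN propagates through Math.min/Math.max,
// which mirrors the shipped code rather than silently correcting bad input.
function thresholdFromEnv(name: string, fallback: string): number {
  return Math.min(1, Math.max(0, Number.parseFloat(process.env[name] ?? fallback)));
}

// Warn-once gate for the deprecated consensus.enabled setting.
let warned = false;
function warnDeprecatedConsensus(): void {
  if (warned) return;
  warned = true;
  console.warn("[code-council] Warning: consensus.enabled is deprecated.");
}

thresholdFromEnv("HIGH_CONFIDENCE_THRESHOLD", "0.8");     // 0.8 when unset
thresholdFromEnv("MODERATE_CONFIDENCE_THRESHOLD", "0.5"); // 0.5 when unset
```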
@@ -156,18 +246,18 @@ var MODERATE_CONFIDENCE_THRESHOLD = Math.min(
  );
  var CONSENSUS_EXTRACTION_MODEL = process.env.CONSENSUS_EXTRACTION_MODEL ?? "anthropic/claude-3-haiku";
  var CONSENSUS_FALLBACK_ON_ERROR = process.env.CONSENSUS_FALLBACK_ON_ERROR?.toLowerCase() !== "false";
+ function getConsensusConfig() {
+ return {
+ enabled: getEnableConsensus(),
+ modelWeights: getModelWeights(),
+ highConfidenceThreshold: getHighConfidenceThreshold(),
+ moderateConfidenceThreshold: getModerateConfidenceThreshold(),
+ extractionModel: getConsensusExtractionModel(),
+ fallbackOnError: getConsensusFallbackOnError(),
+ hostExtraction: getHostExtraction()
+ };
+ }
  var HOST_EXTRACTION = process.env.CONSENSUS_HOST_EXTRACTION?.toLowerCase() !== "false";
- var CONSENSUS_CONFIG = {
- enabled: true,
- // Always enabled via host extraction
- modelWeights: MODEL_WEIGHTS,
- highConfidenceThreshold: HIGH_CONFIDENCE_THRESHOLD,
- moderateConfidenceThreshold: MODERATE_CONFIDENCE_THRESHOLD,
- extractionModel: CONSENSUS_EXTRACTION_MODEL,
- fallbackOnError: CONSENSUS_FALLBACK_ON_ERROR,
- hostExtraction: true
- // Always use host extraction
- };

  // src/errors.ts
  init_esm_shims();
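
Replacing the import-time `CONSENSUS_CONFIG` constant with `getConsensusConfig()` means callers re-resolve the settings on each call rather than freezing them when the module loads. Assuming an empty config file and no environment overrides, the getter resolves to roughly the following (values read off the defaults in the hunks above):

```ts
// Approximate resolved shape under pure defaults (assumption: empty fileConfig,
// no CONSENSUS_* or *_CONFIDENCE_THRESHOLD environment variables set).
const consensusConfig = {
  enabled: true, // always true now; consensus.enabled only triggers a deprecation warning
  modelWeights: {}, // equal weights for all models
  highConfidenceThreshold: 0.8,
  moderateConfidenceThreshold: 0.5,
  extractionModel: "anthropic/claude-3-haiku",
  fallbackOnError: true,
  hostExtraction: true,
};
```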
@@ -1117,7 +1207,7 @@ async function handleDiscussCouncil(client2, input, sessionStore2) {
  const session2 = sessionStore2.createSession({
  topic,
  discussionType: type,
- models: DISCUSSION_MODELS,
+ models: getDiscussionModels(),
  systemPrompt,
  initialUserMessage: initialMessage
  });
@@ -1125,7 +1215,7 @@ async function handleDiscussCouncil(client2, input, sessionStore2) {
  logger.info("Started new council discussion", {
  sessionId,
  discussionType: type,
- modelCount: DISCUSSION_MODELS.length
+ modelCount: getDiscussionModels().length
  });
  }
  const session = sessionStore2.getSession(sessionId);
@@ -1797,39 +1887,38 @@ function handleInitConfig(input) {
  // src/tools/list-config.ts
  init_esm_shims();
  async function handleListConfig() {
+ const consensusConfig = getConsensusConfig();
  const consensusSection = `
  ## Consensus Analysis

- **Status:** ${CONSENSUS_CONFIG.enabled ? "\u2705 Enabled" : "\u274C Disabled"}
- ${CONSENSUS_CONFIG.enabled ? `
- **Extraction Model:** \`${CONSENSUS_CONFIG.extractionModel}\`
- **High Confidence Threshold:** ${CONSENSUS_CONFIG.highConfidenceThreshold * 100}%
- **Moderate Confidence Threshold:** ${CONSENSUS_CONFIG.moderateConfidenceThreshold * 100}%
- **Fallback on Error:** ${CONSENSUS_CONFIG.fallbackOnError ? "Yes" : "No"}
- ${Object.keys(CONSENSUS_CONFIG.modelWeights).length > 0 ? `**Custom Model Weights:**
- ${Object.entries(
- CONSENSUS_CONFIG.modelWeights
- ).map(([m, w]) => `- \`${m}\`: ${w}`).join("\n")}` : "*Using equal weights for all models*"}` : `
+ **Status:** ${consensusConfig.enabled ? "\u2705 Enabled" : "\u274C Disabled"}
+ ${consensusConfig.enabled ? `
+ **Extraction Model:** \`${consensusConfig.extractionModel}\`
+ **High Confidence Threshold:** ${consensusConfig.highConfidenceThreshold * 100}%
+ **Moderate Confidence Threshold:** ${consensusConfig.moderateConfidenceThreshold * 100}%
+ **Fallback on Error:** ${consensusConfig.fallbackOnError ? "Yes" : "No"}
+ ${Object.keys(consensusConfig.modelWeights).length > 0 ? `**Custom Model Weights:**
+ ${Object.entries(consensusConfig.modelWeights).map(([m, w]) => `- \`${m}\`: ${w}`).join("\n")}` : "*Using equal weights for all models*"}` : `
  To enable consensus analysis, set \`ENABLE_CONSENSUS=true\``}`;
  const text = `## Current Configuration

  **Code Review Models:**
- ${CODE_REVIEW_MODELS.map((m) => `- \`${m}\``).join("\n")}
+ ${getCodeReviewModels().map((m) => `- \`${m}\``).join("\n")}

  **Frontend Review Models:**
- ${FRONTEND_REVIEW_MODELS.map((m) => `- \`${m}\``).join("\n")}
+ ${getFrontendReviewModels().map((m) => `- \`${m}\``).join("\n")}

  **Backend Review Models:**
- ${BACKEND_REVIEW_MODELS.map((m) => `- \`${m}\``).join("\n")}
+ ${getBackendReviewModels().map((m) => `- \`${m}\``).join("\n")}

  **Plan Review Models:**
- ${PLAN_REVIEW_MODELS.map((m) => `- \`${m}\``).join("\n")}
+ ${getPlanReviewModels().map((m) => `- \`${m}\``).join("\n")}

  **Discussion Models:**
- ${DISCUSSION_MODELS.map((m) => `- \`${m}\``).join("\n")}
+ ${getDiscussionModels().map((m) => `- \`${m}\``).join("\n")}

  **TPS Audit Models:**
- ${TPS_AUDIT_MODELS.map((m) => `- \`${m}\``).join("\n")}
+ ${getTpsAuditModels().map((m) => `- \`${m}\``).join("\n")}
  ${consensusSection}

  ## Environment Variables
@@ -1869,19 +1958,19 @@ var backendReviewSchema = {
  async function handleBackendReview(client2, input) {
  const { code, language, review_type, context } = input;
  logger.info("Running backend review", {
- modelCount: BACKEND_REVIEW_MODELS.length,
- models: BACKEND_REVIEW_MODELS,
+ modelCount: getBackendReviewModels().length,
+ models: getBackendReviewModels(),
  language,
  reviewType: review_type || "full"
  });
- const results = await client2.reviewBackend(code, BACKEND_REVIEW_MODELS, {
+ const results = await client2.reviewBackend(code, getBackendReviewModels(), {
  language,
  reviewType: review_type,
  context
  });
  return {
  results,
- models: BACKEND_REVIEW_MODELS,
+ models: getBackendReviewModels(),
  reviewType: review_type || "full"
  };
  }
@@ -1900,19 +1989,19 @@ async function handleCodeReview(client2, input) {
  const fullContext = language ? `Language: ${language}${context ? `
  ${context}` : ""}` : context;
  logger.info("Running code review", {
- modelCount: CODE_REVIEW_MODELS.length,
- models: CODE_REVIEW_MODELS,
+ modelCount: getCodeReviewModels().length,
+ models: getCodeReviewModels(),
  hasLanguage: !!language,
  hasContext: !!context
  });
  const results = await client2.reviewCode(
  code,
- CODE_REVIEW_MODELS,
+ getCodeReviewModels(),
  fullContext
  );
  return {
  results,
- models: CODE_REVIEW_MODELS
+ models: getCodeReviewModels()
  };
  }

@@ -1929,19 +2018,19 @@ var frontendReviewSchema = {
  async function handleFrontendReview(client2, input) {
  const { code, framework, review_type, context } = input;
  logger.info("Running frontend review", {
- modelCount: FRONTEND_REVIEW_MODELS.length,
- models: FRONTEND_REVIEW_MODELS,
+ modelCount: getFrontendReviewModels().length,
+ models: getFrontendReviewModels(),
  framework,
  reviewType: review_type || "full"
  });
- const results = await client2.reviewFrontend(code, FRONTEND_REVIEW_MODELS, {
+ const results = await client2.reviewFrontend(code, getFrontendReviewModels(), {
  framework,
  reviewType: review_type,
  context
  });
  return {
  results,
- models: FRONTEND_REVIEW_MODELS,
+ models: getFrontendReviewModels(),
  reviewType: review_type || "full"
  };
  }
@@ -2028,17 +2117,17 @@ var planReviewSchema = {
  async function handlePlanReview(client2, input) {
  const { plan, review_type, context } = input;
  logger.info("Running plan review", {
- modelCount: PLAN_REVIEW_MODELS.length,
- models: PLAN_REVIEW_MODELS,
+ modelCount: getPlanReviewModels().length,
+ models: getPlanReviewModels(),
  reviewType: review_type || "full"
  });
- const results = await client2.reviewPlan(plan, PLAN_REVIEW_MODELS, {
+ const results = await client2.reviewPlan(plan, getPlanReviewModels(), {
  reviewType: review_type,
  context
  });
  return {
  results,
- models: PLAN_REVIEW_MODELS,
+ models: getPlanReviewModels(),
  reviewType: review_type || "full"
  };
  }
@@ -3060,7 +3149,7 @@ createReviewTool(server, {
  name: "review_git_changes",
  description: "Review git changes (staged, unstaged, diff, or specific commit) using multiple AI models in parallel",
  inputSchema: gitReviewSchema,
- handler: (input) => handleGitReview(client, CODE_REVIEW_MODELS, input)
+ handler: (input) => handleGitReview(client, getCodeReviewModels(), input)
  });
  server.registerTool(
  "tps_audit",
@@ -3073,7 +3162,7 @@ server.registerTool(
  logger.debug("Starting tps_audit", {
  inputKeys: Object.keys(input)
  });
- const result = await handleTpsAudit(client, TPS_AUDIT_MODELS, input);
+ const result = await handleTpsAudit(client, getTpsAuditModels(), input);
  const formattedOutput = formatTpsAuditResults(result);
  logger.info("Completed tps_audit", {
  modelCount: result.models.length,
@@ -3175,12 +3264,12 @@ async function main() {
  await server.connect(transport);
  logger.info("Code Council MCP server started", {
  configFile: configFilePath,
- codeReviewModels: CODE_REVIEW_MODELS,
- frontendReviewModels: FRONTEND_REVIEW_MODELS,
- backendReviewModels: BACKEND_REVIEW_MODELS,
- planReviewModels: PLAN_REVIEW_MODELS,
- discussionModels: DISCUSSION_MODELS,
- tpsAuditModels: TPS_AUDIT_MODELS
+ codeReviewModels: getCodeReviewModels(),
+ frontendReviewModels: getFrontendReviewModels(),
+ backendReviewModels: getBackendReviewModels(),
+ planReviewModels: getPlanReviewModels(),
+ discussionModels: getDiscussionModels(),
+ tpsAuditModels: getTpsAuditModels()
  });
  }
  main().catch((error) => {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@klitchevo/code-council",
- "version": "0.1.0",
+ "version": "0.1.2",
  "description": "Multi-model AI code review server using OpenRouter - get diverse perspectives from multiple LLMs in parallel",
  "main": "dist/index.js",
  "type": "module",
@@ -40,8 +40,7 @@
  "check": "biome check src",
  "check:fix": "biome check --write src",
  "typecheck": "tsc --noEmit",
- "prepare": "lefthook install",
- "prepublishOnly": "bun run typecheck && bun test && bun run build"
+ "prepare": "lefthook install"
  },
  "keywords": [
  "mcp",