@klitchevo/code-council 0.2.4 → 0.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -838,6 +838,9 @@ jobs:
       with:
         fetch-depth: 0
 
+      - name: Fetch base branch
+        run: git fetch origin \${{ github.base_ref }}:\${{ github.base_ref }} || true
+
       - name: Setup Node.js
         uses: actions/setup-node@v4
         with:
@@ -849,63 +852,101 @@ jobs:
           OPENROUTER_API_KEY: \${{ secrets.OPENROUTER_API_KEY }}
         run: |
           # Run review with pr-comments format for inline comments
-          npx @klitchevo/code-council review git \\
-            --review-type diff \\
-            --format pr-comments \\
-            > review.json 2>&1 || {
-            # If pr-comments fails, fall back to markdown format
-            echo "pr-comments format failed, falling back to markdown"
-            npx @klitchevo/code-council review git \\
-              --review-type diff \\
-              --format markdown \\
-              > review.md
-            echo "format=markdown" >> $GITHUB_OUTPUT
-            if [ -s review.md ]; then
-              echo "has_review=true" >> $GITHUB_OUTPUT
-            else
-              echo "has_review=false" >> $GITHUB_OUTPUT
-            fi
-            exit 0
-          }
-
-          # Check if review JSON has content
-          if [ -s review.json ] && [ "$(cat review.json | head -c 10)" != "Error" ]; then
-            echo "has_review=true" >> $GITHUB_OUTPUT
+          if npx @klitchevo/code-council review git \\
+            --review-type diff \\
+            --format pr-comments > review.json; then
             echo "format=pr-comments" >> $GITHUB_OUTPUT
+            echo "has_review=true" >> $GITHUB_OUTPUT
           else
-            echo "has_review=false" >> $GITHUB_OUTPUT
+            echo "pr-comments format failed, falling back to markdown"
+            if npx @klitchevo/code-council review git \\
+              --review-type diff \\
+              --format markdown > review.md; then
+              echo "format=markdown" >> $GITHUB_OUTPUT
+              echo "has_review=true" >> $GITHUB_OUTPUT
+            else
+              echo "has_review=false" >> $GITHUB_OUTPUT
+            fi
           fi
 
-      - name: Post Inline Review
+      - name: Clean Previous Code Council Reviews
         if: steps.review.outputs.has_review == 'true' && steps.review.outputs.format == 'pr-comments'
         env:
           GH_TOKEN: \${{ secrets.GITHUB_TOKEN }}
         run: |
-          # Post PR review with inline comments
+          # Dismiss pending reviews to avoid "only one pending review" error
           gh api repos/\${{ github.repository }}/pulls/\${{ github.event.pull_request.number }}/reviews \\
-            --method POST \\
-            --input review.json
+            --jq '.[] | select(.state == "PENDING") | .id' | \\
+          while read REVIEW_ID; do
+            gh api repos/\${{ github.repository }}/pulls/\${{ github.event.pull_request.number }}/reviews/$REVIEW_ID \\
+              --method DELETE 2>/dev/null || true
+          done
+
+          # Delete previous Code Council comments
+          gh api repos/\${{ github.repository }}/pulls/\${{ github.event.pull_request.number }}/comments \\
+            --jq '.[] | select(.body | contains("[CRITICAL]") or contains("[HIGH]") or contains("[MEDIUM]")) | .id' | \\
+          while read COMMENT_ID; do
+            gh api repos/\${{ github.repository }}/pulls/\${{ github.event.pull_request.number }}/comments/$COMMENT_ID \\
+              --method DELETE 2>/dev/null || true
+          done
+
+      - name: Post Inline Review
+        if: steps.review.outputs.has_review == 'true' && steps.review.outputs.format == 'pr-comments'
+        env:
+          GH_TOKEN: \${{ secrets.GITHUB_TOKEN }}
+        run: |
+          # Validate JSON
+          if ! jq . review.json > /dev/null 2>&1; then
+            echo "Invalid JSON in review.json"
+            cat review.json
+            exit 1
+          fi
+
+          # Try batch review first
+          if gh api repos/\${{ github.repository }}/pulls/\${{ github.event.pull_request.number }}/reviews \\
+            --method POST \\
+            --input review.json 2>/dev/null; then
+            echo "Review posted successfully"
+          else
+            echo "Batch review failed, falling back to individual comments"
+
+            # Post summary as standalone review
+            SUMMARY_BODY=$(jq -r '.body' review.json)
+            gh api repos/\${{ github.repository }}/pulls/\${{ github.event.pull_request.number }}/reviews \\
+              --method POST \\
+              -f body="$SUMMARY_BODY" \\
+              -f event="COMMENT" || true
+
+            # Post individual inline comments
+            COMMENT_COUNT=$(jq '.comments | length' review.json)
+            for i in $(seq 0 $((COMMENT_COUNT - 1))); do
+              PATH_VAL=$(jq -r ".comments[$i].path" review.json)
+              LINE_VAL=$(jq -r ".comments[$i].line" review.json)
+              BODY_VAL=$(jq -r ".comments[$i].body" review.json)
+
+              gh api repos/\${{ github.repository }}/pulls/\${{ github.event.pull_request.number }}/comments \\
+                --method POST \\
+                -f path="$PATH_VAL" \\
+                -F line="$LINE_VAL" \\
+                -f body="$BODY_VAL" \\
+                -f commit_id="\${{ github.event.pull_request.head.sha }}" 2>/dev/null || true
+            done
+          fi
 
       - name: Post Markdown Review (Fallback)
         if: steps.review.outputs.has_review == 'true' && steps.review.outputs.format == 'markdown'
         env:
           GH_TOKEN: \${{ secrets.GITHUB_TOKEN }}
         run: |
-          # Read review content
           REVIEW_BODY=$(cat review.md)
-
-          # Add header and footer
-          FULL_REVIEW="## Code Council Multi-Model Review
+          gh api repos/\${{ github.repository }}/pulls/\${{ github.event.pull_request.number }}/reviews \\
+            --method POST \\
+            -f body="## Code Council Multi-Model Review
 
           $REVIEW_BODY
 
           ---
-          *Reviewed by [Code Council](https://github.com/klitchevo/code-council) using multiple AI models*"
-
-          # Post as a PR review (not just a comment)
-          gh api repos/\${{ github.repository }}/pulls/\${{ github.event.pull_request.number }}/reviews \\
-            --method POST \\
-            -f body="$FULL_REVIEW" \\
+          *Reviewed by [Code Council](https://github.com/klitchevo/code-council)*" \\
             -f event="COMMENT"
 `;
 var WORKFLOW_TEMPLATE_SIMPLE = `# Code Council PR Review
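For context on the restructured inline-review path above: `--format pr-comments` emits a single JSON document that the workflow passes straight to GitHub's create-review endpoint (`POST /repos/{owner}/{repo}/pulls/{number}/reviews`), and the per-comment fallback re-reads the same fields with jq. Below is a minimal TypeScript sketch of that payload shape, assuming only the fields the workflow actually touches (`body`, `comments[].path`, `comments[].line`, `comments[].body`); the interface names and the `event` values come from the GitHub Reviews API, not from this package.

```ts
// Sketch of the review.json shape the workflow above assumes (names are illustrative).
interface InlineComment {
  path: string; // file path relative to the repository root
  line: number; // diff line the comment attaches to
  body: string; // comment text, e.g. "[HIGH] Unhandled promise rejection"
}

interface ReviewPayload {
  body: string; // review summary shown at the top of the PR review
  event?: "COMMENT" | "REQUEST_CHANGES" | "APPROVE";
  comments: InlineComment[];
}

// A payload like this is what `gh api .../reviews --method POST --input review.json` accepts:
const example: ReviewPayload = {
  body: "## Code Council Multi-Model Review\n\n2 issues found across 1 file.",
  event: "COMMENT",
  comments: [
    { path: "src/index.ts", line: 42, body: "[MEDIUM] Consider handling the error case here." },
  ],
};
```

Note that the new cleanup step's jq filter only deletes comments whose bodies contain the `[CRITICAL]`, `[HIGH]`, or `[MEDIUM]` tags, so it relies on the generated comment bodies carrying those severity prefixes.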
@@ -936,6 +977,9 @@ jobs:
       with:
         fetch-depth: 0
 
+      - name: Fetch base branch
+        run: git fetch origin \${{ github.base_ref }}:\${{ github.base_ref }} || true
+
       - name: Setup Node.js
         uses: actions/setup-node@v4
         with:
@@ -963,7 +1007,6 @@ jobs:
           GH_TOKEN: \${{ secrets.GITHUB_TOKEN }}
         run: |
           REVIEW_BODY=$(cat review.md)
-
           gh api repos/\${{ github.repository }}/pulls/\${{ github.event.pull_request.number }}/reviews \\
             --method POST \\
            -f body="## Code Council Review
@@ -1182,4 +1225,4 @@ export {
   processResult,
   runCli
 };
-//# sourceMappingURL=cli-QCODHVTG.js.map
+//# sourceMappingURL=cli-ZOWF2WJK.js.map
@@ -10,8 +10,9 @@ import { z } from 'zod';
  * This is not exhaustive - any valid OpenRouter model ID string will work.
  *
  * Find all models at: https://openrouter.ai/models
+ * Auto-generated on 2026-02-06T15:23:32.120Z - run `bun run update-models` to regenerate.
  */
- type KnownModel = "anthropic/claude-opus-4" | "anthropic/claude-opus-4.5" | "anthropic/claude-sonnet-4" | "anthropic/claude-sonnet-4.5" | "anthropic/claude-haiku-4" | "anthropic/claude-haiku-4.5" | "anthropic/claude-3.5-sonnet" | "anthropic/claude-3.5-haiku" | "anthropic/claude-3-opus" | "anthropic/claude-3-sonnet" | "anthropic/claude-3-haiku" | "openai/gpt-4o" | "openai/gpt-4o-mini" | "openai/gpt-4-turbo" | "openai/gpt-4" | "openai/gpt-3.5-turbo" | "openai/o1" | "openai/o1-mini" | "openai/o1-preview" | "openai/o3" | "openai/o3-mini" | "openai/gpt-5" | "openai/gpt-5.1" | "openai/gpt-5.2" | "google/gemini-2.5-pro" | "google/gemini-2.5-flash" | "google/gemini-2.0-pro" | "google/gemini-2.0-flash" | "google/gemini-2.0-flash-001" | "google/gemini-pro" | "google/gemini-pro-vision" | "google/gemini-3-pro-preview" | "google/gemini-3-flash-preview" | "meta-llama/llama-3.3-70b-instruct" | "meta-llama/llama-3.2-90b-vision-instruct" | "meta-llama/llama-3.2-11b-vision-instruct" | "meta-llama/llama-3.1-405b-instruct" | "meta-llama/llama-3.1-70b-instruct" | "meta-llama/llama-3.1-8b-instruct" | "meta-llama/llama-4-maverick" | "meta-llama/llama-4-scout" | "mistralai/mistral-large" | "mistralai/mistral-large-2512" | "mistralai/mistral-medium" | "mistralai/mistral-small" | "mistralai/mistral-small-creative" | "mistralai/mixtral-8x7b-instruct" | "mistralai/mixtral-8x22b-instruct" | "mistralai/codestral" | "mistralai/devstral-2512" | "deepseek/deepseek-chat" | "deepseek/deepseek-coder" | "deepseek/deepseek-r1" | "deepseek/deepseek-v3" | "deepseek/deepseek-v3.1" | "deepseek/deepseek-v3.2" | "qwen/qwen-2.5-72b-instruct" | "qwen/qwen-2.5-coder-32b-instruct" | "qwen/qwen-2-72b-instruct" | "qwen/qwq-32b" | "qwen/qwen3-vl-32b-instruct" | "x-ai/grok-2" | "x-ai/grok-2-vision" | "x-ai/grok-3" | "x-ai/grok-4" | "x-ai/grok-4.1-fast" | "amazon/nova-pro-v1" | "amazon/nova-lite-v1" | "amazon/nova-micro-v1" | "amazon/nova-premier-v1" | "amazon/nova-2-lite-v1" | "cohere/command-r-plus" | "cohere/command-r" | "cohere/command" | "minimax/minimax-m2" | "minimax/minimax-m2.1" | "z-ai/glm-4.7" | "moonshotai/kimi-k2-thinking" | "moonshotai/kimi-k2.5" | "perplexity/sonar-pro" | "perplexity/sonar-pro-search" | "nvidia/nemotron-3-nano-30b-a3b";
+ type KnownModel = "ai21/jamba-large-1.7" | "ai21/jamba-mini-1.7" | "aion-labs/aion-1.0" | "aion-labs/aion-1.0-mini" | "aion-labs/aion-rp-llama-3.1-8b" | "alfredpros/codellama-7b-instruct-solidity" | "alibaba/tongyi-deepresearch-30b-a3b" | "allenai/molmo-2-8b" | "allenai/olmo-2-0325-32b-instruct" | "allenai/olmo-3-32b-think" | "allenai/olmo-3-7b-instruct" | "allenai/olmo-3-7b-think" | "allenai/olmo-3.1-32b-instruct" | "allenai/olmo-3.1-32b-think" | "alpindale/goliath-120b" | "amazon/nova-2-lite-v1" | "amazon/nova-lite-v1" | "amazon/nova-micro-v1" | "amazon/nova-premier-v1" | "amazon/nova-pro-v1" | "anthracite-org/magnum-v4-72b" | "anthropic/claude-3-haiku" | "anthropic/claude-3.5-haiku" | "anthropic/claude-3.5-sonnet" | "anthropic/claude-3.7-sonnet" | "anthropic/claude-3.7-sonnet:thinking" | "anthropic/claude-haiku-4.5" | "anthropic/claude-opus-4" | "anthropic/claude-opus-4.1" | "anthropic/claude-opus-4.5" | "anthropic/claude-opus-4.6" | "anthropic/claude-sonnet-4" | "anthropic/claude-sonnet-4.5" | "arcee-ai/coder-large" | "arcee-ai/maestro-reasoning" | "arcee-ai/spotlight" | "arcee-ai/trinity-mini" | "arcee-ai/virtuoso-large" | "baidu/ernie-4.5-21b-a3b" | "baidu/ernie-4.5-21b-a3b-thinking" | "baidu/ernie-4.5-300b-a47b" | "baidu/ernie-4.5-vl-28b-a3b" | "baidu/ernie-4.5-vl-424b-a47b" | "bytedance/ui-tars-1.5-7b" | "bytedance-seed/seed-1.6" | "bytedance-seed/seed-1.6-flash" | "cohere/command-a" | "cohere/command-r-08-2024" | "cohere/command-r-plus-08-2024" | "cohere/command-r7b-12-2024" | "deepcogito/cogito-v2.1-671b" | "deepseek/deepseek-chat" | "deepseek/deepseek-chat-v3-0324" | "deepseek/deepseek-chat-v3.1" | "deepseek/deepseek-r1" | "deepseek/deepseek-r1-0528" | "deepseek/deepseek-r1-distill-llama-70b" | "deepseek/deepseek-r1-distill-qwen-32b" | "deepseek/deepseek-v3.1-terminus" | "deepseek/deepseek-v3.1-terminus:exacto" | "deepseek/deepseek-v3.2" | "deepseek/deepseek-v3.2-exp" | "deepseek/deepseek-v3.2-speciale" | "eleutherai/llemma_7b" | "essentialai/rnj-1-instruct" | "google/gemini-2.0-flash-001" | "google/gemini-2.0-flash-lite-001" | "google/gemini-2.5-flash" | "google/gemini-2.5-flash-image" | "google/gemini-2.5-flash-lite" | "google/gemini-2.5-flash-lite-preview-09-2025" | "google/gemini-2.5-flash-preview-09-2025" | "google/gemini-2.5-pro" | "google/gemini-2.5-pro-preview" | "google/gemini-2.5-pro-preview-05-06" | "google/gemini-3-flash-preview" | "google/gemini-3-pro-image-preview" | "google/gemini-3-pro-preview" | "google/gemma-2-27b-it" | "google/gemma-2-9b-it" | "google/gemma-3-12b-it" | "google/gemma-3-27b-it" | "google/gemma-3-4b-it" | "google/gemma-3n-e4b-it" | "gryphe/mythomax-l2-13b" | "ibm-granite/granite-4.0-h-micro" | "inception/mercury" | "inception/mercury-coder" | "inflection/inflection-3-pi" | "inflection/inflection-3-productivity" | "kwaipilot/kat-coder-pro" | "liquid/lfm-2.2-6b" | "liquid/lfm2-8b-a1b" | "mancer/weaver" | "meituan/longcat-flash-chat" | "meta-llama/llama-3-70b-instruct" | "meta-llama/llama-3-8b-instruct" | "meta-llama/llama-3.1-405b" | "meta-llama/llama-3.1-405b-instruct" | "meta-llama/llama-3.1-70b-instruct" | "meta-llama/llama-3.1-8b-instruct" | "meta-llama/llama-3.2-11b-vision-instruct" | "meta-llama/llama-3.2-1b-instruct" | "meta-llama/llama-3.2-3b-instruct" | "meta-llama/llama-3.3-70b-instruct" | "meta-llama/llama-4-maverick" | "meta-llama/llama-4-scout" | "meta-llama/llama-guard-2-8b" | "meta-llama/llama-guard-3-8b" | "meta-llama/llama-guard-4-12b" | "microsoft/phi-4" | "microsoft/wizardlm-2-8x22b" | "minimax/minimax-01" | 
"minimax/minimax-m1" | "minimax/minimax-m2" | "minimax/minimax-m2-her" | "minimax/minimax-m2.1" | "mistralai/codestral-2508" | "mistralai/devstral-2512" | "mistralai/devstral-medium" | "mistralai/devstral-small" | "mistralai/ministral-14b-2512" | "mistralai/ministral-3b" | "mistralai/ministral-3b-2512" | "mistralai/ministral-8b" | "mistralai/ministral-8b-2512" | "mistralai/mistral-7b-instruct" | "mistralai/mistral-7b-instruct-v0.1" | "mistralai/mistral-7b-instruct-v0.2" | "mistralai/mistral-7b-instruct-v0.3" | "mistralai/mistral-large" | "mistralai/mistral-large-2407" | "mistralai/mistral-large-2411" | "mistralai/mistral-large-2512" | "mistralai/mistral-medium-3" | "mistralai/mistral-medium-3.1" | "mistralai/mistral-nemo" | "mistralai/mistral-saba" | "mistralai/mistral-small-24b-instruct-2501" | "mistralai/mistral-small-3.1-24b-instruct" | "mistralai/mistral-small-3.2-24b-instruct" | "mistralai/mistral-small-creative" | "mistralai/mistral-tiny" | "mistralai/mixtral-8x22b-instruct" | "mistralai/mixtral-8x7b-instruct" | "mistralai/pixtral-12b" | "mistralai/pixtral-large-2411" | "mistralai/voxtral-small-24b-2507" | "moonshotai/kimi-dev-72b" | "moonshotai/kimi-k2" | "moonshotai/kimi-k2-0905" | "moonshotai/kimi-k2-0905:exacto" | "moonshotai/kimi-k2-thinking" | "moonshotai/kimi-k2.5" | "morph/morph-v3-fast" | "morph/morph-v3-large" | "neversleep/llama-3.1-lumimaid-8b" | "neversleep/noromaid-20b" | "nex-agi/deepseek-v3.1-nex-n1" | "nousresearch/deephermes-3-mistral-24b-preview" | "nousresearch/hermes-2-pro-llama-3-8b" | "nousresearch/hermes-3-llama-3.1-405b" | "nousresearch/hermes-3-llama-3.1-70b" | "nousresearch/hermes-4-405b" | "nousresearch/hermes-4-70b" | "nvidia/llama-3.1-nemotron-70b-instruct" | "nvidia/llama-3.1-nemotron-ultra-253b-v1" | "nvidia/llama-3.3-nemotron-super-49b-v1.5" | "nvidia/nemotron-3-nano-30b-a3b" | "nvidia/nemotron-nano-12b-v2-vl" | "nvidia/nemotron-nano-9b-v2" | "openai/chatgpt-4o-latest" | "openai/gpt-3.5-turbo" | "openai/gpt-3.5-turbo-0613" | "openai/gpt-3.5-turbo-16k" | "openai/gpt-3.5-turbo-instruct" | "openai/gpt-4" | "openai/gpt-4-0314" | "openai/gpt-4-1106-preview" | "openai/gpt-4-turbo" | "openai/gpt-4-turbo-preview" | "openai/gpt-4.1" | "openai/gpt-4.1-mini" | "openai/gpt-4.1-nano" | "openai/gpt-4o" | "openai/gpt-4o-2024-05-13" | "openai/gpt-4o-2024-08-06" | "openai/gpt-4o-2024-11-20" | "openai/gpt-4o-audio-preview" | "openai/gpt-4o-mini" | "openai/gpt-4o-mini-2024-07-18" | "openai/gpt-4o-mini-search-preview" | "openai/gpt-4o-search-preview" | "openai/gpt-5" | "openai/gpt-5-chat" | "openai/gpt-5-codex" | "openai/gpt-5-image" | "openai/gpt-5-image-mini" | "openai/gpt-5-mini" | "openai/gpt-5-nano" | "openai/gpt-5-pro" | "openai/gpt-5.1" | "openai/gpt-5.1-chat" | "openai/gpt-5.1-codex" | "openai/gpt-5.1-codex-max" | "openai/gpt-5.1-codex-mini" | "openai/gpt-5.2" | "openai/gpt-5.2-chat" | "openai/gpt-5.2-codex" | "openai/gpt-5.2-pro" | "openai/gpt-audio" | "openai/gpt-audio-mini" | "openai/gpt-oss-120b" | "openai/gpt-oss-120b:exacto" | "openai/gpt-oss-20b" | "openai/gpt-oss-safeguard-20b" | "openai/o1" | "openai/o1-pro" | "openai/o3" | "openai/o3-deep-research" | "openai/o3-mini" | "openai/o3-mini-high" | "openai/o3-pro" | "openai/o4-mini" | "openai/o4-mini-deep-research" | "openai/o4-mini-high" | "opengvlab/internvl3-78b" | "openrouter/auto" | "openrouter/bodybuilder" | "openrouter/free" | "perplexity/sonar" | "perplexity/sonar-deep-research" | "perplexity/sonar-pro" | "perplexity/sonar-pro-search" | "perplexity/sonar-reasoning-pro" | "prime-intellect/intellect-3" 
| "qwen/qwen-2.5-72b-instruct" | "qwen/qwen-2.5-7b-instruct" | "qwen/qwen-2.5-coder-32b-instruct" | "qwen/qwen-2.5-vl-7b-instruct" | "qwen/qwen-max" | "qwen/qwen-plus" | "qwen/qwen-plus-2025-07-28" | "qwen/qwen-plus-2025-07-28:thinking" | "qwen/qwen-turbo" | "qwen/qwen-vl-max" | "qwen/qwen-vl-plus" | "qwen/qwen2.5-coder-7b-instruct" | "qwen/qwen2.5-vl-32b-instruct" | "qwen/qwen2.5-vl-72b-instruct" | "qwen/qwen3-14b" | "qwen/qwen3-235b-a22b" | "qwen/qwen3-235b-a22b-2507" | "qwen/qwen3-235b-a22b-thinking-2507" | "qwen/qwen3-30b-a3b" | "qwen/qwen3-30b-a3b-instruct-2507" | "qwen/qwen3-30b-a3b-thinking-2507" | "qwen/qwen3-32b" | "qwen/qwen3-8b" | "qwen/qwen3-coder" | "qwen/qwen3-coder-30b-a3b-instruct" | "qwen/qwen3-coder-flash" | "qwen/qwen3-coder-next" | "qwen/qwen3-coder-plus" | "qwen/qwen3-coder:exacto" | "qwen/qwen3-max" | "qwen/qwen3-next-80b-a3b-instruct" | "qwen/qwen3-next-80b-a3b-thinking" | "qwen/qwen3-vl-235b-a22b-instruct" | "qwen/qwen3-vl-235b-a22b-thinking" | "qwen/qwen3-vl-30b-a3b-instruct" | "qwen/qwen3-vl-30b-a3b-thinking" | "qwen/qwen3-vl-32b-instruct" | "qwen/qwen3-vl-8b-instruct" | "qwen/qwen3-vl-8b-thinking" | "qwen/qwq-32b" | "raifle/sorcererlm-8x22b" | "relace/relace-apply-3" | "relace/relace-search" | "sao10k/l3-euryale-70b" | "sao10k/l3-lunaris-8b" | "sao10k/l3.1-70b-hanami-x1" | "sao10k/l3.1-euryale-70b" | "sao10k/l3.3-euryale-70b" | "stepfun-ai/step3" | "switchpoint/router" | "tencent/hunyuan-a13b-instruct" | "thedrummer/cydonia-24b-v4.1" | "thedrummer/rocinante-12b" | "thedrummer/skyfall-36b-v2" | "thedrummer/unslopnemo-12b" | "tngtech/deepseek-r1t-chimera" | "tngtech/deepseek-r1t2-chimera" | "tngtech/tng-r1t-chimera" | "undi95/remm-slerp-l2-13b" | "writer/palmyra-x5" | "x-ai/grok-3" | "x-ai/grok-3-beta" | "x-ai/grok-3-mini" | "x-ai/grok-3-mini-beta" | "x-ai/grok-4" | "x-ai/grok-4-fast" | "x-ai/grok-4.1-fast" | "x-ai/grok-code-fast-1" | "xiaomi/mimo-v2-flash" | "z-ai/glm-4-32b" | "z-ai/glm-4.5" | "z-ai/glm-4.5-air" | "z-ai/glm-4.5v" | "z-ai/glm-4.6" | "z-ai/glm-4.6:exacto" | "z-ai/glm-4.6v" | "z-ai/glm-4.7" | "z-ai/glm-4.7-flash";
 /**
  * Model identifier - accepts known models for autocomplete, but any string is valid
  */
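The regenerated `KnownModel` union is now alphabetically sorted and much larger, and the surrounding context keeps the promise that any string remains valid. The usual TypeScript idiom for "known values for autocomplete, but any string accepted" is a `(string & {})` union; the package's actual alias is not shown in this hunk, so the `Model` type below is an assumption used only to illustrate the pattern.

```ts
// A few literals stand in for the full auto-generated union shown in the diff.
type KnownModel = "anthropic/claude-sonnet-4.5" | "openai/gpt-5.1" | "z-ai/glm-4.7";

// `string & {}` keeps the union from collapsing to plain `string`,
// so editors still offer the KnownModel literals as completions.
type Model = KnownModel | (string & {});

const fromList: Model = "openai/gpt-5.1";                // autocompleted literal
const anyOther: Model = "some-provider/brand-new-model"; // any other OpenRouter ID still type-checks
```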
package/dist/index.js CHANGED
@@ -1905,7 +1905,7 @@ ${r.review}
 var args = process.argv.slice(2);
 var command = args[0];
 if (command === "review" || command === "setup") {
-  import("./cli-QCODHVTG.js").then(async ({ processResult, runCli }) => {
+  import("./cli-ZOWF2WJK.js").then(async ({ processResult, runCli }) => {
     try {
       const result = await runCli(process.argv);
       if (result) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@klitchevo/code-council",
-  "version": "0.2.4",
+  "version": "0.2.6",
   "description": "Multi-model AI code review server using OpenRouter - get diverse perspectives from multiple LLMs in parallel",
   "main": "dist/index.js",
   "type": "module",
@@ -40,6 +40,7 @@
     "check": "biome check src",
     "check:fix": "biome check --write src",
     "typecheck": "tsc --noEmit",
+    "update-models": "bun scripts/update-models.ts",
     "prepare": "lefthook install"
   },
   "keywords": [