loki-mode 6.20.0 → 6.22.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/SKILL.md +2 -2
- package/VERSION +1 -1
- package/autonomy/loki +1499 -0
- package/completions/_loki +1 -0
- package/completions/loki.bash +17 -1
- package/dashboard/__init__.py +1 -1
- package/docs/INSTALLATION.md +1 -1
- package/mcp/__init__.py +1 -1
- package/package.json +1 -1
package/autonomy/loki
CHANGED
|
@@ -436,7 +436,9 @@ show_help() {
|
|
|
436
436
|
echo " remote [PRD] Start remote session (connect from phone/browser, Claude Pro/Max)"
|
|
437
437
|
echo " trigger Event-driven autonomous execution (schedules, webhooks)"
|
|
438
438
|
echo " failover [cmd] Cross-provider auto-failover (status|--enable|--test|--chain)"
|
|
439
|
+
echo " onboard [path] Analyze a repo and generate CLAUDE.md (structure, conventions, commands)"
|
|
439
440
|
echo " plan <PRD> Dry-run PRD analysis: complexity, cost, and execution plan"
|
|
441
|
+
echo " ci [opts] CI/CD quality gate integration (--pr, --report, --github-comment)"
|
|
440
442
|
echo " version Show version"
|
|
441
443
|
echo " help Show this help"
|
|
442
444
|
echo ""
|
|
@@ -9189,6 +9191,12 @@ main() {
|
|
|
9189
9191
|
failover)
|
|
9190
9192
|
cmd_failover "$@"
|
|
9191
9193
|
;;
|
|
9194
|
+
onboard)
|
|
9195
|
+
cmd_onboard "$@"
|
|
9196
|
+
;;
|
|
9197
|
+
ci)
|
|
9198
|
+
cmd_ci "$@"
|
|
9199
|
+
;;
|
|
9192
9200
|
version|--version|-v)
|
|
9193
9201
|
cmd_version
|
|
9194
9202
|
;;
|
|
@@ -13827,4 +13835,1495 @@ $diff"
|
|
|
13827
13835
|
esac
|
|
13828
13836
|
}
|
|
13829
13837
|
|
|
13838
|
+
# Project onboarding - analyze repo and generate CLAUDE.md (v6.21.0)
|
|
13839
|
+
cmd_onboard() {
|
|
13840
|
+
local target_path="."
|
|
13841
|
+
local depth=2
|
|
13842
|
+
local format="markdown"
|
|
13843
|
+
local output_path=""
|
|
13844
|
+
local use_stdout=false
|
|
13845
|
+
local update_mode=false
|
|
13846
|
+
|
|
13847
|
+
# Parse arguments
|
|
13848
|
+
while [[ $# -gt 0 ]]; do
|
|
13849
|
+
case "$1" in
|
|
13850
|
+
--depth)
|
|
13851
|
+
depth="${2:-2}"
|
|
13852
|
+
shift 2
|
|
13853
|
+
;;
|
|
13854
|
+
--format)
|
|
13855
|
+
format="${2:-markdown}"
|
|
13856
|
+
shift 2
|
|
13857
|
+
;;
|
|
13858
|
+
--output)
|
|
13859
|
+
output_path="${2:-}"
|
|
13860
|
+
shift 2
|
|
13861
|
+
;;
|
|
13862
|
+
--stdout)
|
|
13863
|
+
use_stdout=true
|
|
13864
|
+
shift
|
|
13865
|
+
;;
|
|
13866
|
+
--update)
|
|
13867
|
+
update_mode=true
|
|
13868
|
+
shift
|
|
13869
|
+
;;
|
|
13870
|
+
--help|-h)
|
|
13871
|
+
echo -e "${BOLD}loki onboard${NC} - Analyze a project and generate CLAUDE.md"
|
|
13872
|
+
echo ""
|
|
13873
|
+
echo "Usage: loki onboard [path] [options]"
|
|
13874
|
+
echo ""
|
|
13875
|
+
echo "Arguments:"
|
|
13876
|
+
echo " path Path to repository (default: current directory)"
|
|
13877
|
+
echo ""
|
|
13878
|
+
echo "Options:"
|
|
13879
|
+
echo " --depth N Analysis depth: 1=surface, 2=moderate, 3=deep (default: 2)"
|
|
13880
|
+
echo " --format FORMAT Output format: markdown, json, yaml (default: markdown)"
|
|
13881
|
+
echo " --output PATH Custom output file path"
|
|
13882
|
+
echo " --stdout Print to stdout instead of writing file"
|
|
13883
|
+
echo " --update Update existing CLAUDE.md with new findings"
|
|
13884
|
+
echo " --help Show this help"
|
|
13885
|
+
echo ""
|
|
13886
|
+
echo "Examples:"
|
|
13887
|
+
echo " loki onboard # Analyze current directory"
|
|
13888
|
+
echo " loki onboard ~/projects/myapp # Analyze specific repo"
|
|
13889
|
+
echo " loki onboard --depth 3 # Deep analysis with dependency mapping"
|
|
13890
|
+
echo " loki onboard --format json # JSON output"
|
|
13891
|
+
echo " loki onboard --stdout # Print to terminal"
|
|
13892
|
+
return 0
|
|
13893
|
+
;;
|
|
13894
|
+
-*)
|
|
13895
|
+
log_error "Unknown option: $1"
|
|
13896
|
+
echo "Run 'loki onboard --help' for usage."
|
|
13897
|
+
return 1
|
|
13898
|
+
;;
|
|
13899
|
+
*)
|
|
13900
|
+
target_path="$1"
|
|
13901
|
+
shift
|
|
13902
|
+
;;
|
|
13903
|
+
esac
|
|
13904
|
+
done
|
|
13905
|
+
|
|
13906
|
+
# Validate target path
|
|
13907
|
+
if [ ! -d "$target_path" ]; then
|
|
13908
|
+
log_error "Directory not found: $target_path"
|
|
13909
|
+
return 1
|
|
13910
|
+
fi
|
|
13911
|
+
|
|
13912
|
+
# Resolve to absolute path
|
|
13913
|
+
target_path="$(cd "$target_path" && pwd)"
|
|
13914
|
+
|
|
13915
|
+
# When --stdout, send log messages to stderr to keep stdout clean
|
|
13916
|
+
if [ "$use_stdout" = true ]; then
|
|
13917
|
+
log_info "Analyzing project at: $target_path" >&2
|
|
13918
|
+
log_info "Depth: $depth | Format: $format" >&2
|
|
13919
|
+
else
|
|
13920
|
+
log_info "Analyzing project at: $target_path"
|
|
13921
|
+
log_info "Depth: $depth | Format: $format"
|
|
13922
|
+
fi
|
|
13923
|
+
|
|
13924
|
+
# --- Detect project metadata ---
|
|
13925
|
+
local project_name
|
|
13926
|
+
project_name="$(basename "$target_path")"
|
|
13927
|
+
|
|
13928
|
+
local languages=""
|
|
13929
|
+
local frameworks=""
|
|
13930
|
+
local build_system=""
|
|
13931
|
+
local test_framework=""
|
|
13932
|
+
local entry_points=""
|
|
13933
|
+
local package_manager=""
|
|
13934
|
+
local project_description=""
|
|
13935
|
+
local project_version=""
|
|
13936
|
+
|
|
13937
|
+
# Detect languages and config files
|
|
13938
|
+
local config_files=""
|
|
13939
|
+
|
|
13940
|
+
if [ -f "$target_path/package.json" ]; then
|
|
13941
|
+
config_files="$config_files package.json"
|
|
13942
|
+
languages="$languages JavaScript/TypeScript"
|
|
13943
|
+
package_manager="npm"
|
|
13944
|
+
if [ -f "$target_path/yarn.lock" ]; then
|
|
13945
|
+
package_manager="yarn"
|
|
13946
|
+
elif [ -f "$target_path/pnpm-lock.yaml" ]; then
|
|
13947
|
+
package_manager="pnpm"
|
|
13948
|
+
elif [ -f "$target_path/bun.lockb" ]; then
|
|
13949
|
+
package_manager="bun"
|
|
13950
|
+
fi
|
|
13951
|
+
# Extract metadata from package.json
|
|
13952
|
+
if command -v python3 &>/dev/null; then
|
|
13953
|
+
local pkg_name
|
|
13954
|
+
pkg_name=$(python3 -c "
|
|
13955
|
+
import json, sys
|
|
13956
|
+
try:
|
|
13957
|
+
d = json.load(open('$target_path/package.json'))
|
|
13958
|
+
print(d.get('name', ''))
|
|
13959
|
+
except: pass
|
|
13960
|
+
" 2>/dev/null || true)
|
|
13961
|
+
if [ -n "$pkg_name" ]; then
|
|
13962
|
+
project_name="$pkg_name"
|
|
13963
|
+
fi
|
|
13964
|
+
project_description=$(python3 -c "
|
|
13965
|
+
import json, sys
|
|
13966
|
+
try:
|
|
13967
|
+
d = json.load(open('$target_path/package.json'))
|
|
13968
|
+
print(d.get('description', ''))
|
|
13969
|
+
except: pass
|
|
13970
|
+
" 2>/dev/null || true)
|
|
13971
|
+
project_version=$(python3 -c "
|
|
13972
|
+
import json, sys
|
|
13973
|
+
try:
|
|
13974
|
+
d = json.load(open('$target_path/package.json'))
|
|
13975
|
+
print(d.get('version', ''))
|
|
13976
|
+
except: pass
|
|
13977
|
+
" 2>/dev/null || true)
|
|
13978
|
+
entry_points=$(python3 -c "
|
|
13979
|
+
import json, sys
|
|
13980
|
+
try:
|
|
13981
|
+
d = json.load(open('$target_path/package.json'))
|
|
13982
|
+
main = d.get('main', '')
|
|
13983
|
+
if main: print(main)
|
|
13984
|
+
scripts = d.get('scripts', {})
|
|
13985
|
+
if 'start' in scripts: print('scripts.start: ' + scripts['start'])
|
|
13986
|
+
except: pass
|
|
13987
|
+
" 2>/dev/null || true)
|
|
13988
|
+
fi
|
|
13989
|
+
# Detect frameworks from dependencies
|
|
13990
|
+
if grep -q '"react"' "$target_path/package.json" 2>/dev/null; then
|
|
13991
|
+
frameworks="$frameworks React"
|
|
13992
|
+
fi
|
|
13993
|
+
if grep -q '"next"' "$target_path/package.json" 2>/dev/null; then
|
|
13994
|
+
frameworks="$frameworks Next.js"
|
|
13995
|
+
fi
|
|
13996
|
+
if grep -q '"vue"' "$target_path/package.json" 2>/dev/null; then
|
|
13997
|
+
frameworks="$frameworks Vue"
|
|
13998
|
+
fi
|
|
13999
|
+
if grep -q '"express"' "$target_path/package.json" 2>/dev/null; then
|
|
14000
|
+
frameworks="$frameworks Express"
|
|
14001
|
+
fi
|
|
14002
|
+
if grep -q '"fastify"' "$target_path/package.json" 2>/dev/null; then
|
|
14003
|
+
frameworks="$frameworks Fastify"
|
|
14004
|
+
fi
|
|
14005
|
+
if grep -q '"svelte"' "$target_path/package.json" 2>/dev/null; then
|
|
14006
|
+
frameworks="$frameworks Svelte"
|
|
14007
|
+
fi
|
|
14008
|
+
# Detect test framework
|
|
14009
|
+
if grep -q '"jest"' "$target_path/package.json" 2>/dev/null; then
|
|
14010
|
+
test_framework="$test_framework jest"
|
|
14011
|
+
fi
|
|
14012
|
+
if grep -q '"vitest"' "$target_path/package.json" 2>/dev/null; then
|
|
14013
|
+
test_framework="$test_framework vitest"
|
|
14014
|
+
fi
|
|
14015
|
+
if grep -q '"mocha"' "$target_path/package.json" 2>/dev/null; then
|
|
14016
|
+
test_framework="$test_framework mocha"
|
|
14017
|
+
fi
|
|
14018
|
+
if grep -q '"playwright"' "$target_path/package.json" 2>/dev/null; then
|
|
14019
|
+
test_framework="$test_framework playwright"
|
|
14020
|
+
fi
|
|
14021
|
+
fi
|
|
14022
|
+
|
|
14023
|
+
if [ -f "$target_path/pyproject.toml" ]; then
|
|
14024
|
+
config_files="$config_files pyproject.toml"
|
|
14025
|
+
languages="$languages Python"
|
|
14026
|
+
package_manager="pip/poetry"
|
|
14027
|
+
if grep -q "django" "$target_path/pyproject.toml" 2>/dev/null; then
|
|
14028
|
+
frameworks="$frameworks Django"
|
|
14029
|
+
fi
|
|
14030
|
+
if grep -q "flask" "$target_path/pyproject.toml" 2>/dev/null; then
|
|
14031
|
+
frameworks="$frameworks Flask"
|
|
14032
|
+
fi
|
|
14033
|
+
if grep -q "fastapi" "$target_path/pyproject.toml" 2>/dev/null; then
|
|
14034
|
+
frameworks="$frameworks FastAPI"
|
|
14035
|
+
fi
|
|
14036
|
+
if grep -q "pytest" "$target_path/pyproject.toml" 2>/dev/null; then
|
|
14037
|
+
test_framework="$test_framework pytest"
|
|
14038
|
+
fi
|
|
14039
|
+
fi
|
|
14040
|
+
|
|
14041
|
+
if [ -f "$target_path/setup.py" ] || [ -f "$target_path/setup.cfg" ]; then
|
|
14042
|
+
config_files="$config_files setup.py"
|
|
14043
|
+
languages="$languages Python"
|
|
14044
|
+
[ -z "$package_manager" ] && package_manager="pip"
|
|
14045
|
+
fi
|
|
14046
|
+
|
|
14047
|
+
if [ -f "$target_path/requirements.txt" ]; then
|
|
14048
|
+
config_files="$config_files requirements.txt"
|
|
14049
|
+
languages="$languages Python"
|
|
14050
|
+
[ -z "$package_manager" ] && package_manager="pip"
|
|
14051
|
+
fi
|
|
14052
|
+
|
|
14053
|
+
if [ -f "$target_path/Cargo.toml" ]; then
|
|
14054
|
+
config_files="$config_files Cargo.toml"
|
|
14055
|
+
languages="$languages Rust"
|
|
14056
|
+
package_manager="cargo"
|
|
14057
|
+
build_system="cargo"
|
|
14058
|
+
test_framework="$test_framework cargo-test"
|
|
14059
|
+
fi
|
|
14060
|
+
|
|
14061
|
+
if [ -f "$target_path/go.mod" ]; then
|
|
14062
|
+
config_files="$config_files go.mod"
|
|
14063
|
+
languages="$languages Go"
|
|
14064
|
+
package_manager="go-modules"
|
|
14065
|
+
build_system="go"
|
|
14066
|
+
test_framework="$test_framework go-test"
|
|
14067
|
+
fi
|
|
14068
|
+
|
|
14069
|
+
if [ -f "$target_path/Gemfile" ]; then
|
|
14070
|
+
config_files="$config_files Gemfile"
|
|
14071
|
+
languages="$languages Ruby"
|
|
14072
|
+
package_manager="bundler"
|
|
14073
|
+
if grep -q "rails" "$target_path/Gemfile" 2>/dev/null; then
|
|
14074
|
+
frameworks="$frameworks Rails"
|
|
14075
|
+
fi
|
|
14076
|
+
if grep -q "rspec" "$target_path/Gemfile" 2>/dev/null; then
|
|
14077
|
+
test_framework="$test_framework rspec"
|
|
14078
|
+
fi
|
|
14079
|
+
fi
|
|
14080
|
+
|
|
14081
|
+
if [ -f "$target_path/pom.xml" ]; then
|
|
14082
|
+
config_files="$config_files pom.xml"
|
|
14083
|
+
languages="$languages Java"
|
|
14084
|
+
build_system="maven"
|
|
14085
|
+
package_manager="maven"
|
|
14086
|
+
fi
|
|
14087
|
+
|
|
14088
|
+
if [ -f "$target_path/build.gradle" ] || [ -f "$target_path/build.gradle.kts" ]; then
|
|
14089
|
+
config_files="$config_files build.gradle"
|
|
14090
|
+
languages="$languages Java/Kotlin"
|
|
14091
|
+
build_system="gradle"
|
|
14092
|
+
package_manager="gradle"
|
|
14093
|
+
fi
|
|
14094
|
+
|
|
14095
|
+
if [ -f "$target_path/Makefile" ]; then
|
|
14096
|
+
config_files="$config_files Makefile"
|
|
14097
|
+
[ -z "$build_system" ] && build_system="make"
|
|
14098
|
+
fi
|
|
14099
|
+
|
|
14100
|
+
if [ -f "$target_path/CMakeLists.txt" ]; then
|
|
14101
|
+
config_files="$config_files CMakeLists.txt"
|
|
14102
|
+
languages="$languages C/C++"
|
|
14103
|
+
build_system="cmake"
|
|
14104
|
+
fi
|
|
14105
|
+
|
|
14106
|
+
# Detect shell scripts
|
|
14107
|
+
local shell_count=0
|
|
14108
|
+
shell_count=$(find "$target_path" -maxdepth 2 -name "*.sh" -type f 2>/dev/null | wc -l | tr -d ' ')
|
|
14109
|
+
if [ "$shell_count" -gt 0 ]; then
|
|
14110
|
+
languages="$languages Bash"
|
|
14111
|
+
fi
|
|
14112
|
+
|
|
14113
|
+
# Deduplicate languages
|
|
14114
|
+
languages=$(echo "$languages" | tr ' ' '\n' | sort -u | tr '\n' ' ' | sed 's/^ *//;s/ *$//')
|
|
14115
|
+
frameworks=$(echo "$frameworks" | tr ' ' '\n' | sort -u | tr '\n' ' ' | sed 's/^ *//;s/ *$//')
|
|
14116
|
+
test_framework=$(echo "$test_framework" | tr ' ' '\n' | sort -u | tr '\n' ' ' | sed 's/^ *//;s/ *$//')
|
|
14117
|
+
|
|
14118
|
+
# --- Detect CI/CD ---
|
|
14119
|
+
local ci_system=""
|
|
14120
|
+
if [ -d "$target_path/.github/workflows" ]; then
|
|
14121
|
+
ci_system="GitHub Actions"
|
|
14122
|
+
fi
|
|
14123
|
+
if [ -f "$target_path/.gitlab-ci.yml" ]; then
|
|
14124
|
+
ci_system="$ci_system GitLab CI"
|
|
14125
|
+
fi
|
|
14126
|
+
if [ -f "$target_path/Jenkinsfile" ]; then
|
|
14127
|
+
ci_system="$ci_system Jenkins"
|
|
14128
|
+
fi
|
|
14129
|
+
if [ -f "$target_path/.circleci/config.yml" ]; then
|
|
14130
|
+
ci_system="$ci_system CircleCI"
|
|
14131
|
+
fi
|
|
14132
|
+
if [ -f "$target_path/.travis.yml" ]; then
|
|
14133
|
+
ci_system="$ci_system Travis CI"
|
|
14134
|
+
fi
|
|
14135
|
+
|
|
14136
|
+
# --- Read README ---
|
|
14137
|
+
local readme_content=""
|
|
14138
|
+
local readme_file=""
|
|
14139
|
+
for f in README.md readme.md README.rst README README.txt; do
|
|
14140
|
+
if [ -f "$target_path/$f" ]; then
|
|
14141
|
+
readme_file="$f"
|
|
14142
|
+
readme_content=$(head -50 "$target_path/$f" 2>/dev/null || true)
|
|
14143
|
+
break
|
|
14144
|
+
fi
|
|
14145
|
+
done
|
|
14146
|
+
|
|
14147
|
+
# Extract first meaningful line from README as description fallback
|
|
14148
|
+
if [ -z "$project_description" ] && [ -n "$readme_content" ]; then
|
|
14149
|
+
project_description=$(echo "$readme_content" | grep -v '^#' | grep -v '^$' | grep -v '^\[' | grep -v '^!' | head -1 | sed 's/^ *//')
|
|
14150
|
+
fi
|
|
14151
|
+
|
|
14152
|
+
# --- Build directory tree ---
|
|
14153
|
+
local tree_output=""
|
|
14154
|
+
if command -v git &>/dev/null && [ -d "$target_path/.git" ]; then
|
|
14155
|
+
# Use git ls-files for accurate tree (respects .gitignore)
|
|
14156
|
+
tree_output=$(cd "$target_path" && git ls-files 2>/dev/null | head -200 || true)
|
|
14157
|
+
else
|
|
14158
|
+
# Fallback: find with common excludions
|
|
14159
|
+
tree_output=$(find "$target_path" -maxdepth 4 -type f \
|
|
14160
|
+
-not -path '*/node_modules/*' \
|
|
14161
|
+
-not -path '*/.git/*' \
|
|
14162
|
+
-not -path '*/vendor/*' \
|
|
14163
|
+
-not -path '*/__pycache__/*' \
|
|
14164
|
+
-not -path '*/dist/*' \
|
|
14165
|
+
-not -path '*/build/*' \
|
|
14166
|
+
-not -path '*/.next/*' \
|
|
14167
|
+
-not -path '*/target/*' \
|
|
14168
|
+
2>/dev/null | sed "s|$target_path/||" | sort | head -200)
|
|
14169
|
+
fi
|
|
14170
|
+
|
|
14171
|
+
# Categorize files
|
|
14172
|
+
local src_files=""
|
|
14173
|
+
local test_files=""
|
|
14174
|
+
local doc_files=""
|
|
14175
|
+
local config_file_list=""
|
|
14176
|
+
local ci_files=""
|
|
14177
|
+
local other_files=""
|
|
14178
|
+
|
|
14179
|
+
while IFS= read -r file; do
|
|
14180
|
+
[ -z "$file" ] && continue
|
|
14181
|
+
case "$file" in
|
|
14182
|
+
*.test.*|*.spec.*|*_test.*|*_spec.*|tests/*|test/*|__tests__/*|spec/*)
|
|
14183
|
+
test_files="$test_files $file"
|
|
14184
|
+
;;
|
|
14185
|
+
*.md|*.rst|*.txt|docs/*|doc/*|wiki/*)
|
|
14186
|
+
doc_files="$doc_files $file"
|
|
14187
|
+
;;
|
|
14188
|
+
.github/*|.gitlab-ci*|.circleci/*|Jenkinsfile|.travis*)
|
|
14189
|
+
ci_files="$ci_files $file"
|
|
14190
|
+
;;
|
|
14191
|
+
package.json|pyproject.toml|Cargo.toml|go.mod|Gemfile|pom.xml|build.gradle*|Makefile|CMakeLists.txt|*.toml|*.cfg|*.ini|*.yml|*.yaml|Dockerfile*|docker-compose*|.env*|.eslint*|.prettier*|tsconfig*|jest.config*|vitest.config*)
|
|
14192
|
+
config_file_list="$config_file_list $file"
|
|
14193
|
+
;;
|
|
14194
|
+
*.js|*.ts|*.tsx|*.jsx|*.py|*.rs|*.go|*.rb|*.java|*.kt|*.c|*.cpp|*.h|*.hpp|*.sh|*.swift|*.cs|*.php|*.lua|*.zig|*.el|*.clj)
|
|
14195
|
+
src_files="$src_files $file"
|
|
14196
|
+
;;
|
|
14197
|
+
*)
|
|
14198
|
+
other_files="$other_files $file"
|
|
14199
|
+
;;
|
|
14200
|
+
esac
|
|
14201
|
+
done <<< "$tree_output"
|
|
14202
|
+
|
|
14203
|
+
local src_count=$(echo "$src_files" | wc -w | tr -d ' ')
|
|
14204
|
+
local test_count=$(echo "$test_files" | wc -w | tr -d ' ')
|
|
14205
|
+
local doc_count=$(echo "$doc_files" | wc -w | tr -d ' ')
|
|
14206
|
+
|
|
14207
|
+
# --- Depth 2+: Analyze source files ---
|
|
14208
|
+
local key_exports=""
|
|
14209
|
+
local key_functions=""
|
|
14210
|
+
local key_classes=""
|
|
14211
|
+
|
|
14212
|
+
if [ "$depth" -ge 2 ]; then
|
|
14213
|
+
if [ "$use_stdout" = true ]; then
|
|
14214
|
+
log_info "Depth 2: Scanning source files for exports, functions, classes..." >&2
|
|
14215
|
+
else
|
|
14216
|
+
log_info "Depth 2: Scanning source files for exports, functions, classes..."
|
|
14217
|
+
fi
|
|
14218
|
+
|
|
14219
|
+
for src_file in $src_files; do
|
|
14220
|
+
[ ! -f "$target_path/$src_file" ] && continue
|
|
14221
|
+
local file_lines
|
|
14222
|
+
file_lines=$(wc -l < "$target_path/$src_file" 2>/dev/null | tr -d ' ')
|
|
14223
|
+
|
|
14224
|
+
# Only scan files under 2000 lines for performance
|
|
14225
|
+
[ "$file_lines" -gt 2000 ] && continue
|
|
14226
|
+
|
|
14227
|
+
local file_ext="${src_file##*.}"
|
|
14228
|
+
case "$file_ext" in
|
|
14229
|
+
js|ts|tsx|jsx)
|
|
14230
|
+
# Find exported functions/classes
|
|
14231
|
+
local exports
|
|
14232
|
+
exports=$(grep -n "^export " "$target_path/$src_file" 2>/dev/null | head -10 || true)
|
|
14233
|
+
if [ -n "$exports" ]; then
|
|
14234
|
+
key_exports="$key_exports
|
|
14235
|
+
$src_file:
|
|
14236
|
+
$exports"
|
|
14237
|
+
fi
|
|
14238
|
+
;;
|
|
14239
|
+
py)
|
|
14240
|
+
# Find class and function definitions
|
|
14241
|
+
local classes
|
|
14242
|
+
classes=$(grep -n "^class " "$target_path/$src_file" 2>/dev/null | head -5 || true)
|
|
14243
|
+
local funcs
|
|
14244
|
+
funcs=$(grep -n "^def \|^async def " "$target_path/$src_file" 2>/dev/null | head -10 || true)
|
|
14245
|
+
if [ -n "$classes" ]; then
|
|
14246
|
+
key_classes="$key_classes
|
|
14247
|
+
$src_file:
|
|
14248
|
+
$classes"
|
|
14249
|
+
fi
|
|
14250
|
+
if [ -n "$funcs" ]; then
|
|
14251
|
+
key_functions="$key_functions
|
|
14252
|
+
$src_file:
|
|
14253
|
+
$funcs"
|
|
14254
|
+
fi
|
|
14255
|
+
;;
|
|
14256
|
+
go)
|
|
14257
|
+
local funcs
|
|
14258
|
+
funcs=$(grep -n "^func " "$target_path/$src_file" 2>/dev/null | head -10 || true)
|
|
14259
|
+
if [ -n "$funcs" ]; then
|
|
14260
|
+
key_functions="$key_functions
|
|
14261
|
+
$src_file:
|
|
14262
|
+
$funcs"
|
|
14263
|
+
fi
|
|
14264
|
+
;;
|
|
14265
|
+
rs)
|
|
14266
|
+
local funcs
|
|
14267
|
+
funcs=$(grep -n "^pub fn \|^pub async fn " "$target_path/$src_file" 2>/dev/null | head -10 || true)
|
|
14268
|
+
if [ -n "$funcs" ]; then
|
|
14269
|
+
key_functions="$key_functions
|
|
14270
|
+
$src_file:
|
|
14271
|
+
$funcs"
|
|
14272
|
+
fi
|
|
14273
|
+
;;
|
|
14274
|
+
sh)
|
|
14275
|
+
local funcs
|
|
14276
|
+
funcs=$(grep -n "^[a-zA-Z_][a-zA-Z_0-9]*() {" "$target_path/$src_file" 2>/dev/null | head -10 || true)
|
|
14277
|
+
if [ -n "$funcs" ]; then
|
|
14278
|
+
key_functions="$key_functions
|
|
14279
|
+
$src_file:
|
|
14280
|
+
$funcs"
|
|
14281
|
+
fi
|
|
14282
|
+
;;
|
|
14283
|
+
rb)
|
|
14284
|
+
local classes
|
|
14285
|
+
classes=$(grep -n "^class " "$target_path/$src_file" 2>/dev/null | head -5 || true)
|
|
14286
|
+
local funcs
|
|
14287
|
+
funcs=$(grep -n "^ def " "$target_path/$src_file" 2>/dev/null | head -10 || true)
|
|
14288
|
+
if [ -n "$classes" ]; then
|
|
14289
|
+
key_classes="$key_classes
|
|
14290
|
+
$src_file:
|
|
14291
|
+
$classes"
|
|
14292
|
+
fi
|
|
14293
|
+
if [ -n "$funcs" ]; then
|
|
14294
|
+
key_functions="$key_functions
|
|
14295
|
+
$src_file:
|
|
14296
|
+
$funcs"
|
|
14297
|
+
fi
|
|
14298
|
+
;;
|
|
14299
|
+
java|kt)
|
|
14300
|
+
local classes
|
|
14301
|
+
classes=$(grep -n "^public class \|^class \|^data class " "$target_path/$src_file" 2>/dev/null | head -5 || true)
|
|
14302
|
+
if [ -n "$classes" ]; then
|
|
14303
|
+
key_classes="$key_classes
|
|
14304
|
+
$src_file:
|
|
14305
|
+
$classes"
|
|
14306
|
+
fi
|
|
14307
|
+
;;
|
|
14308
|
+
esac
|
|
14309
|
+
done
|
|
14310
|
+
fi
|
|
14311
|
+
|
|
14312
|
+
# --- Depth 3: Dependency analysis ---
|
|
14313
|
+
local dep_graph=""
|
|
14314
|
+
|
|
14315
|
+
if [ "$depth" -ge 3 ]; then
|
|
14316
|
+
if [ "$use_stdout" = true ]; then
|
|
14317
|
+
log_info "Depth 3: Analyzing imports and dependencies..." >&2
|
|
14318
|
+
else
|
|
14319
|
+
log_info "Depth 3: Analyzing imports and dependencies..."
|
|
14320
|
+
fi
|
|
14321
|
+
|
|
14322
|
+
for src_file in $src_files; do
|
|
14323
|
+
[ ! -f "$target_path/$src_file" ] && continue
|
|
14324
|
+
local file_lines
|
|
14325
|
+
file_lines=$(wc -l < "$target_path/$src_file" 2>/dev/null | tr -d ' ')
|
|
14326
|
+
[ "$file_lines" -gt 2000 ] && continue
|
|
14327
|
+
|
|
14328
|
+
local imports=""
|
|
14329
|
+
local file_ext="${src_file##*.}"
|
|
14330
|
+
case "$file_ext" in
|
|
14331
|
+
js|ts|tsx|jsx)
|
|
14332
|
+
imports=$(grep "^import " "$target_path/$src_file" 2>/dev/null | grep -v "node_modules" | head -15 || true)
|
|
14333
|
+
;;
|
|
14334
|
+
py)
|
|
14335
|
+
imports=$(grep "^import \|^from " "$target_path/$src_file" 2>/dev/null | head -15 || true)
|
|
14336
|
+
;;
|
|
14337
|
+
go)
|
|
14338
|
+
imports=$(sed -n '/^import (/,/^)/p' "$target_path/$src_file" 2>/dev/null | grep -v '^import\|^)' | head -15 || true)
|
|
14339
|
+
;;
|
|
14340
|
+
rs)
|
|
14341
|
+
imports=$(grep "^use " "$target_path/$src_file" 2>/dev/null | head -15 || true)
|
|
14342
|
+
;;
|
|
14343
|
+
esac
|
|
14344
|
+
|
|
14345
|
+
if [ -n "$imports" ]; then
|
|
14346
|
+
dep_graph="$dep_graph
|
|
14347
|
+
$src_file:
|
|
14348
|
+
$imports"
|
|
14349
|
+
fi
|
|
14350
|
+
done
|
|
14351
|
+
fi
|
|
14352
|
+
|
|
14353
|
+
# --- Detect build/run/test commands ---
|
|
14354
|
+
local build_cmd=""
|
|
14355
|
+
local run_cmd=""
|
|
14356
|
+
local test_cmd=""
|
|
14357
|
+
|
|
14358
|
+
if [ -f "$target_path/package.json" ]; then
|
|
14359
|
+
if command -v python3 &>/dev/null; then
|
|
14360
|
+
local scripts_json
|
|
14361
|
+
scripts_json=$(python3 -c "
|
|
14362
|
+
import json
|
|
14363
|
+
try:
|
|
14364
|
+
d = json.load(open('$target_path/package.json'))
|
|
14365
|
+
s = d.get('scripts', {})
|
|
14366
|
+
for k in ['build', 'dev', 'start', 'test', 'lint', 'format', 'check', 'typecheck']:
|
|
14367
|
+
if k in s:
|
|
14368
|
+
print(f'{k}: {s[k]}')
|
|
14369
|
+
except: pass
|
|
14370
|
+
" 2>/dev/null || true)
|
|
14371
|
+
if echo "$scripts_json" | grep -q "^build:"; then
|
|
14372
|
+
build_cmd="${package_manager:-npm} run build"
|
|
14373
|
+
fi
|
|
14374
|
+
if echo "$scripts_json" | grep -q "^dev:"; then
|
|
14375
|
+
run_cmd="${package_manager:-npm} run dev"
|
|
14376
|
+
elif echo "$scripts_json" | grep -q "^start:"; then
|
|
14377
|
+
run_cmd="${package_manager:-npm} start"
|
|
14378
|
+
fi
|
|
14379
|
+
if echo "$scripts_json" | grep -q "^test:"; then
|
|
14380
|
+
test_cmd="${package_manager:-npm} test"
|
|
14381
|
+
fi
|
|
14382
|
+
fi
|
|
14383
|
+
fi
|
|
14384
|
+
|
|
14385
|
+
if [ -f "$target_path/Makefile" ]; then
|
|
14386
|
+
[ -z "$build_cmd" ] && build_cmd="make"
|
|
14387
|
+
if grep -q "^test:" "$target_path/Makefile" 2>/dev/null; then
|
|
14388
|
+
[ -z "$test_cmd" ] && test_cmd="make test"
|
|
14389
|
+
fi
|
|
14390
|
+
if grep -q "^run:" "$target_path/Makefile" 2>/dev/null; then
|
|
14391
|
+
[ -z "$run_cmd" ] && run_cmd="make run"
|
|
14392
|
+
fi
|
|
14393
|
+
fi
|
|
14394
|
+
|
|
14395
|
+
if [ -f "$target_path/Cargo.toml" ]; then
|
|
14396
|
+
build_cmd="cargo build"
|
|
14397
|
+
run_cmd="cargo run"
|
|
14398
|
+
test_cmd="cargo test"
|
|
14399
|
+
fi
|
|
14400
|
+
|
|
14401
|
+
if [ -f "$target_path/go.mod" ]; then
|
|
14402
|
+
build_cmd="go build ./..."
|
|
14403
|
+
run_cmd="go run ."
|
|
14404
|
+
test_cmd="go test ./..."
|
|
14405
|
+
fi
|
|
14406
|
+
|
|
14407
|
+
if [ -f "$target_path/pyproject.toml" ]; then
|
|
14408
|
+
if grep -q '\[tool.pytest' "$target_path/pyproject.toml" 2>/dev/null; then
|
|
14409
|
+
test_cmd="pytest"
|
|
14410
|
+
fi
|
|
14411
|
+
if grep -q '\[tool.poetry' "$target_path/pyproject.toml" 2>/dev/null; then
|
|
14412
|
+
build_cmd="poetry build"
|
|
14413
|
+
run_cmd="poetry run python -m ${project_name}"
|
|
14414
|
+
fi
|
|
14415
|
+
fi
|
|
14416
|
+
|
|
14417
|
+
# --- Generate output ---
|
|
14418
|
+
local output=""
|
|
14419
|
+
|
|
14420
|
+
if [ "$format" = "json" ]; then
|
|
14421
|
+
# JSON output
|
|
14422
|
+
output=$(cat <<ENDJSON
|
|
14423
|
+
{
|
|
14424
|
+
"project": {
|
|
14425
|
+
"name": "$project_name",
|
|
14426
|
+
"description": $(python3 -c "import json; print(json.dumps('$project_description'))" 2>/dev/null || echo "\"$project_description\""),
|
|
14427
|
+
"version": "$project_version",
|
|
14428
|
+
"path": "$target_path"
|
|
14429
|
+
},
|
|
14430
|
+
"languages": "$(echo $languages | sed 's/ */ /g')",
|
|
14431
|
+
"frameworks": "$(echo $frameworks | sed 's/ */ /g')",
|
|
14432
|
+
"build_system": "$build_system",
|
|
14433
|
+
"package_manager": "$package_manager",
|
|
14434
|
+
"test_framework": "$(echo $test_framework | sed 's/ */ /g')",
|
|
14435
|
+
"ci": "$(echo $ci_system | sed 's/ */ /g')",
|
|
14436
|
+
"files": {
|
|
14437
|
+
"source": $src_count,
|
|
14438
|
+
"test": $test_count,
|
|
14439
|
+
"docs": $doc_count
|
|
14440
|
+
},
|
|
14441
|
+
"commands": {
|
|
14442
|
+
"build": "$build_cmd",
|
|
14443
|
+
"run": "$run_cmd",
|
|
14444
|
+
"test": "$test_cmd"
|
|
14445
|
+
},
|
|
14446
|
+
"depth": $depth
|
|
14447
|
+
}
|
|
14448
|
+
ENDJSON
|
|
14449
|
+
)
|
|
14450
|
+
elif [ "$format" = "yaml" ]; then
|
|
14451
|
+
# YAML output
|
|
14452
|
+
output=$(cat <<ENDYAML
|
|
14453
|
+
project:
|
|
14454
|
+
name: $project_name
|
|
14455
|
+
description: "$project_description"
|
|
14456
|
+
version: "$project_version"
|
|
14457
|
+
path: $target_path
|
|
14458
|
+
languages: $languages
|
|
14459
|
+
frameworks: $frameworks
|
|
14460
|
+
build_system: $build_system
|
|
14461
|
+
package_manager: $package_manager
|
|
14462
|
+
test_framework: $(echo $test_framework | sed 's/ */ /g')
|
|
14463
|
+
ci: $(echo $ci_system | sed 's/ */ /g')
|
|
14464
|
+
files:
|
|
14465
|
+
source: $src_count
|
|
14466
|
+
test: $test_count
|
|
14467
|
+
docs: $doc_count
|
|
14468
|
+
commands:
|
|
14469
|
+
build: "$build_cmd"
|
|
14470
|
+
run: "$run_cmd"
|
|
14471
|
+
test: "$test_cmd"
|
|
14472
|
+
depth: $depth
|
|
14473
|
+
ENDYAML
|
|
14474
|
+
)
|
|
14475
|
+
else
|
|
14476
|
+
# Markdown output (CLAUDE.md format)
|
|
14477
|
+
output="# $project_name"
|
|
14478
|
+
[ -n "$project_description" ] && output="$output
|
|
14479
|
+
|
|
14480
|
+
$project_description"
|
|
14481
|
+
[ -n "$project_version" ] && output="$output
|
|
14482
|
+
|
|
14483
|
+
Version: $project_version"
|
|
14484
|
+
|
|
14485
|
+
output="$output
|
|
14486
|
+
|
|
14487
|
+
## Overview
|
|
14488
|
+
|
|
14489
|
+
| Property | Value |
|
|
14490
|
+
|----------|-------|
|
|
14491
|
+
| Languages | ${languages:-N/A} |
|
|
14492
|
+
| Frameworks | ${frameworks:-N/A} |
|
|
14493
|
+
| Build System | ${build_system:-N/A} |
|
|
14494
|
+
| Package Manager | ${package_manager:-N/A} |
|
|
14495
|
+
| Test Framework | ${test_framework:-N/A} |
|
|
14496
|
+
| CI/CD | ${ci_system:-N/A} |"
|
|
14497
|
+
|
|
14498
|
+
# Commands section
|
|
14499
|
+
if [ -n "$build_cmd" ] || [ -n "$run_cmd" ] || [ -n "$test_cmd" ]; then
|
|
14500
|
+
output="$output
|
|
14501
|
+
|
|
14502
|
+
## Commands
|
|
14503
|
+
|
|
14504
|
+
\`\`\`bash"
|
|
14505
|
+
[ -n "$build_cmd" ] && output="$output
|
|
14506
|
+
# Build
|
|
14507
|
+
$build_cmd"
|
|
14508
|
+
[ -n "$run_cmd" ] && output="$output
|
|
14509
|
+
|
|
14510
|
+
# Run
|
|
14511
|
+
$run_cmd"
|
|
14512
|
+
[ -n "$test_cmd" ] && output="$output
|
|
14513
|
+
|
|
14514
|
+
# Test
|
|
14515
|
+
$test_cmd"
|
|
14516
|
+
output="$output
|
|
14517
|
+
\`\`\`"
|
|
14518
|
+
fi
|
|
14519
|
+
|
|
14520
|
+
# Project structure
|
|
14521
|
+
output="$output
|
|
14522
|
+
|
|
14523
|
+
## Project Structure
|
|
14524
|
+
|
|
14525
|
+
Files: $src_count source, $test_count test, $doc_count docs"
|
|
14526
|
+
|
|
14527
|
+
# Show directory structure (top-level)
|
|
14528
|
+
local top_dirs
|
|
14529
|
+
top_dirs=$(echo "$tree_output" | sed 's|/.*||' | sort -u | head -30)
|
|
14530
|
+
if [ -n "$top_dirs" ]; then
|
|
14531
|
+
output="$output
|
|
14532
|
+
|
|
14533
|
+
\`\`\`"
|
|
14534
|
+
while IFS= read -r dir; do
|
|
14535
|
+
[ -z "$dir" ] && continue
|
|
14536
|
+
if [ -d "$target_path/$dir" ]; then
|
|
14537
|
+
# Count files in directory
|
|
14538
|
+
local dir_count
|
|
14539
|
+
dir_count=$(echo "$tree_output" | grep "^${dir}/" | wc -l | tr -d ' ')
|
|
14540
|
+
output="$output
|
|
14541
|
+
$dir/ ($dir_count files)"
|
|
14542
|
+
else
|
|
14543
|
+
output="$output
|
|
14544
|
+
$dir"
|
|
14545
|
+
fi
|
|
14546
|
+
done <<< "$top_dirs"
|
|
14547
|
+
output="$output
|
|
14548
|
+
\`\`\`"
|
|
14549
|
+
fi
|
|
14550
|
+
|
|
14551
|
+
# Key files
|
|
14552
|
+
if [ -n "$config_file_list" ]; then
|
|
14553
|
+
output="$output
|
|
14554
|
+
|
|
14555
|
+
## Key Files
|
|
14556
|
+
"
|
|
14557
|
+
for cf in $config_file_list; do
|
|
14558
|
+
output="$output
|
|
14559
|
+
- \`$cf\`"
|
|
14560
|
+
done
|
|
14561
|
+
fi
|
|
14562
|
+
|
|
14563
|
+
if [ -n "$entry_points" ]; then
|
|
14564
|
+
output="$output
|
|
14565
|
+
|
|
14566
|
+
## Entry Points
|
|
14567
|
+
|
|
14568
|
+
\`\`\`
|
|
14569
|
+
$entry_points
|
|
14570
|
+
\`\`\`"
|
|
14571
|
+
fi
|
|
14572
|
+
|
|
14573
|
+
# Depth 2+: exports, functions, classes
|
|
14574
|
+
if [ "$depth" -ge 2 ]; then
|
|
14575
|
+
if [ -n "$key_classes" ]; then
|
|
14576
|
+
output="$output
|
|
14577
|
+
|
|
14578
|
+
## Key Classes
|
|
14579
|
+
\`\`\`
|
|
14580
|
+
$key_classes
|
|
14581
|
+
\`\`\`"
|
|
14582
|
+
fi
|
|
14583
|
+
|
|
14584
|
+
if [ -n "$key_functions" ]; then
|
|
14585
|
+
output="$output
|
|
14586
|
+
|
|
14587
|
+
## Key Functions
|
|
14588
|
+
\`\`\`
|
|
14589
|
+
$key_functions
|
|
14590
|
+
\`\`\`"
|
|
14591
|
+
fi
|
|
14592
|
+
|
|
14593
|
+
if [ -n "$key_exports" ]; then
|
|
14594
|
+
output="$output
|
|
14595
|
+
|
|
14596
|
+
## Exports
|
|
14597
|
+
\`\`\`
|
|
14598
|
+
$key_exports
|
|
14599
|
+
\`\`\`"
|
|
14600
|
+
fi
|
|
14601
|
+
fi
|
|
14602
|
+
|
|
14603
|
+
# Depth 3: dependency graph
|
|
14604
|
+
if [ "$depth" -ge 3 ] && [ -n "$dep_graph" ]; then
|
|
14605
|
+
output="$output
|
|
14606
|
+
|
|
14607
|
+
## Dependency Graph (Imports)
|
|
14608
|
+
\`\`\`
|
|
14609
|
+
$dep_graph
|
|
14610
|
+
\`\`\`"
|
|
14611
|
+
fi
|
|
14612
|
+
|
|
14613
|
+
# CI/CD section
|
|
14614
|
+
if [ -n "$ci_system" ] && [ -n "$ci_files" ]; then
|
|
14615
|
+
output="$output
|
|
14616
|
+
|
|
14617
|
+
## CI/CD ($ci_system)
|
|
14618
|
+
"
|
|
14619
|
+
for cf in $ci_files; do
|
|
14620
|
+
output="$output
|
|
14621
|
+
- \`$cf\`"
|
|
14622
|
+
done
|
|
14623
|
+
fi
|
|
14624
|
+
|
|
14625
|
+
# Architecture notes from README
|
|
14626
|
+
if [ -n "$readme_file" ]; then
|
|
14627
|
+
output="$output
|
|
14628
|
+
|
|
14629
|
+
## Documentation
|
|
14630
|
+
|
|
14631
|
+
See \`$readme_file\` for project documentation."
|
|
14632
|
+
fi
|
|
14633
|
+
|
|
14634
|
+
output="$output
|
|
14635
|
+
|
|
14636
|
+
---
|
|
14637
|
+
Generated by loki onboard (depth $depth) on $(date +%Y-%m-%d)"
|
|
14638
|
+
fi
|
|
14639
|
+
|
|
14640
|
+
# --- Output ---
|
|
14641
|
+
if [ "$use_stdout" = true ]; then
|
|
14642
|
+
echo "$output"
|
|
14643
|
+
return 0
|
|
14644
|
+
fi
|
|
14645
|
+
|
|
14646
|
+
# Determine output path
|
|
14647
|
+
if [ -z "$output_path" ]; then
|
|
14648
|
+
output_path="$target_path/.claude/CLAUDE.md"
|
|
14649
|
+
fi
|
|
14650
|
+
|
|
14651
|
+
# Handle update mode
|
|
14652
|
+
if [ "$update_mode" = true ] && [ -f "$output_path" ]; then
|
|
14653
|
+
local timestamp
|
|
14654
|
+
timestamp=$(date +%Y-%m-%d)
|
|
14655
|
+
local update_marker="## Updated: $timestamp"
|
|
14656
|
+
# Append new findings after a separator
|
|
14657
|
+
{
|
|
14658
|
+
cat "$output_path"
|
|
14659
|
+
echo ""
|
|
14660
|
+
echo "---"
|
|
14661
|
+
echo ""
|
|
14662
|
+
echo "$update_marker"
|
|
14663
|
+
echo ""
|
|
14664
|
+
echo "$output"
|
|
14665
|
+
} > "${output_path}.tmp"
|
|
14666
|
+
mv "${output_path}.tmp" "$output_path"
|
|
14667
|
+
log_info "Updated: $output_path"
|
|
14668
|
+
return 0
|
|
14669
|
+
fi
|
|
14670
|
+
|
|
14671
|
+
# Create directory and write
|
|
14672
|
+
local output_dir
|
|
14673
|
+
output_dir=$(dirname "$output_path")
|
|
14674
|
+
mkdir -p "$output_dir"
|
|
14675
|
+
|
|
14676
|
+
echo "$output" > "$output_path"
|
|
14677
|
+
log_info "Generated: $output_path"
|
|
14678
|
+
log_info "Project: $project_name | $src_count source files | $test_count tests | $doc_count docs"
|
|
14679
|
+
|
|
14680
|
+
if [ -n "$languages" ]; then
|
|
14681
|
+
log_info "Languages: $languages"
|
|
14682
|
+
fi
|
|
14683
|
+
if [ -n "$frameworks" ]; then
|
|
14684
|
+
log_info "Frameworks: $frameworks"
|
|
14685
|
+
fi
|
|
14686
|
+
}
|
|
14687
|
+
|
|
14688
|
+
# CI/CD quality gate integration (v6.22.0)
|
|
14689
|
+
cmd_ci() {
|
|
14690
|
+
local ci_pr=false
|
|
14691
|
+
local ci_test_suggest=false
|
|
14692
|
+
local ci_report=false
|
|
14693
|
+
local ci_github_comment=false
|
|
14694
|
+
local ci_fail_on=""
|
|
14695
|
+
local ci_format="markdown"
|
|
14696
|
+
|
|
14697
|
+
while [[ $# -gt 0 ]]; do
|
|
14698
|
+
case "$1" in
|
|
14699
|
+
--help|-h)
|
|
14700
|
+
echo -e "${BOLD}loki ci${NC} - CI/CD quality gate integration"
|
|
14701
|
+
echo ""
|
|
14702
|
+
echo "Usage: loki ci [options]"
|
|
14703
|
+
echo ""
|
|
14704
|
+
echo "Runs Loki Mode quality gates as a CI step. Works with GitHub Actions,"
|
|
14705
|
+
echo "GitLab CI, Jenkins, CircleCI, and other CI systems."
|
|
14706
|
+
echo ""
|
|
14707
|
+
echo "Modes:"
|
|
14708
|
+
echo " --pr Review the current PR diff with all quality gates"
|
|
14709
|
+
echo " --test-suggest Generate test suggestions for changed files"
|
|
14710
|
+
echo " --report Generate a quality report"
|
|
14711
|
+
echo ""
|
|
14712
|
+
echo "Options:"
|
|
14713
|
+
echo " --github-comment Post review results as PR comment (needs GITHUB_TOKEN)"
|
|
14714
|
+
echo " --fail-on <levels> Set exit code 1 on severity: critical,high,medium,low"
|
|
14715
|
+
echo " --format <fmt> Output format: json, markdown, github (default: markdown)"
|
|
14716
|
+
echo " --help, -h Show this help"
|
|
14717
|
+
echo ""
|
|
14718
|
+
echo "Exit codes:"
|
|
14719
|
+
echo " 0 All checks passed (or below --fail-on threshold)"
|
|
14720
|
+
echo " 1 Findings exceed --fail-on threshold"
|
|
14721
|
+
echo " 2 Error (missing tools, invalid arguments)"
|
|
14722
|
+
echo ""
|
|
14723
|
+
echo "Environment variables (auto-detected):"
|
|
14724
|
+
echo " GITHUB_ACTIONS Detected when running in GitHub Actions"
|
|
14725
|
+
echo " GITLAB_CI Detected when running in GitLab CI"
|
|
14726
|
+
echo " JENKINS_URL Detected when running in Jenkins"
|
|
14727
|
+
echo " CIRCLECI Detected when running in CircleCI"
|
|
14728
|
+
echo " GITHUB_TOKEN Required for --github-comment"
|
|
14729
|
+
echo " GITHUB_EVENT_PATH Auto-set in GitHub Actions for PR context"
|
|
14730
|
+
echo ""
|
|
14731
|
+
echo "Examples:"
|
|
14732
|
+
echo " loki ci --pr --format json # Review PR diff as JSON"
|
|
14733
|
+
echo " loki ci --pr --fail-on critical,high # Fail CI on critical/high findings"
|
|
14734
|
+
echo " loki ci --pr --github-comment # Post results as PR comment"
|
|
14735
|
+
echo " loki ci --report --format markdown # Generate quality report"
|
|
14736
|
+
echo " loki ci --test-suggest # Suggest tests for changed files"
|
|
14737
|
+
echo ""
|
|
14738
|
+
echo "GitHub Actions example:"
|
|
14739
|
+
echo " - uses: asklokesh/loki-mode-action@v1"
|
|
14740
|
+
echo " with:"
|
|
14741
|
+
echo " command: loki ci --pr --github-comment --fail-on critical"
|
|
14742
|
+
return 0
|
|
14743
|
+
;;
|
|
14744
|
+
--pr) ci_pr=true; shift ;;
|
|
14745
|
+
--test-suggest) ci_test_suggest=true; shift ;;
|
|
14746
|
+
--report) ci_report=true; shift ;;
|
|
14747
|
+
--github-comment) ci_github_comment=true; shift ;;
|
|
14748
|
+
--fail-on)
|
|
14749
|
+
shift
|
|
14750
|
+
ci_fail_on="${1:-}"
|
|
14751
|
+
if [ -z "$ci_fail_on" ]; then
|
|
14752
|
+
echo -e "${RED}Error: --fail-on requires severity levels (e.g., critical,high)${NC}"
|
|
14753
|
+
return 2
|
|
14754
|
+
fi
|
|
14755
|
+
ci_fail_on="$(echo "$ci_fail_on" | tr '[:upper:]' '[:lower:]')"
|
|
14756
|
+
shift
|
|
14757
|
+
;;
|
|
14758
|
+
--format)
|
|
14759
|
+
shift
|
|
14760
|
+
ci_format="${1:-markdown}"
|
|
14761
|
+
ci_format="$(echo "$ci_format" | tr '[:upper:]' '[:lower:]')"
|
|
14762
|
+
if [[ "$ci_format" != "json" && "$ci_format" != "markdown" && "$ci_format" != "github" ]]; then
|
|
14763
|
+
echo -e "${RED}Error: --format must be json, markdown, or github${NC}"
|
|
14764
|
+
return 2
|
|
14765
|
+
fi
|
|
14766
|
+
shift
|
|
14767
|
+
;;
|
|
14768
|
+
-*) echo -e "${RED}Unknown option: $1${NC}"; return 2 ;;
|
|
14769
|
+
*) echo -e "${RED}Unknown argument: $1${NC}"; return 2 ;;
|
|
14770
|
+
esac
|
|
14771
|
+
done
|
|
14772
|
+
|
|
14773
|
+
# If no mode specified, default to --pr --report
|
|
14774
|
+
if [ "$ci_pr" = false ] && [ "$ci_test_suggest" = false ] && [ "$ci_report" = false ]; then
|
|
14775
|
+
ci_pr=true
|
|
14776
|
+
ci_report=true
|
|
14777
|
+
fi
|
|
14778
|
+
|
|
14779
|
+
# --- Detect CI environment ---
|
|
14780
|
+
local ci_env="local"
|
|
14781
|
+
local ci_pr_number=""
|
|
14782
|
+
local ci_base_branch="main"
|
|
14783
|
+
local ci_repo=""
|
|
14784
|
+
|
|
14785
|
+
# Detect the hosting CI system and derive PR number / base branch / repo
# from each system's well-known environment variables. Falls through and
# leaves the defaults ("local", empty PR) when no CI env is detected.
if [ -n "${GITHUB_ACTIONS:-}" ]; then
    ci_env="github"
    ci_repo="${GITHUB_REPOSITORY:-}"
    ci_base_branch="${GITHUB_BASE_REF:-main}"
    # Extract PR number from the event payload. The path is passed as
    # argv rather than interpolated into the Python source, so a path
    # containing quotes or spaces cannot break (or inject into) the script.
    if [ -n "${GITHUB_EVENT_PATH:-}" ] && [ -f "${GITHUB_EVENT_PATH:-}" ]; then
        ci_pr_number=$(python3 -c "
import json, sys
try:
    with open(sys.argv[1]) as f:
        event = json.load(f)
    pr = event.get('pull_request', event.get('number', ''))
    if isinstance(pr, dict):
        print(pr.get('number', ''))
    else:
        print(pr)
except Exception:
    print('')
" "${GITHUB_EVENT_PATH}" 2>/dev/null || echo "")
    fi
    # Fallback: PR merge refs look like refs/pull/123/merge
    if [ -z "$ci_pr_number" ] && [[ "${GITHUB_REF:-}" == refs/pull/*/merge ]]; then
        ci_pr_number=$(echo "${GITHUB_REF}" | sed 's|refs/pull/\([0-9]*\)/merge|\1|')
    fi
elif [ -n "${GITLAB_CI:-}" ]; then
    ci_env="gitlab"
    ci_pr_number="${CI_MERGE_REQUEST_IID:-}"
    ci_base_branch="${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-main}"
    ci_repo="${CI_PROJECT_PATH:-}"
elif [ -n "${JENKINS_URL:-}" ]; then
    ci_env="jenkins"
    ci_pr_number="${CHANGE_ID:-}"
    ci_base_branch="${CHANGE_TARGET:-main}"
elif [ -n "${CIRCLECI:-}" ]; then
    ci_env="circleci"
    # Fix: CIRCLE_PULL_REQUEST was the only env var in this chain read
    # without a :- default; it is unset on non-PR CircleCI builds and
    # would abort the script under `set -u`.
    ci_pr_number="${CIRCLE_PULL_REQUEST:-}"
    ci_pr_number="${ci_pr_number##*/}"
    ci_repo="${CIRCLE_PROJECT_USERNAME:-}/${CIRCLE_PROJECT_REPONAME:-}"
fi
|
|
14823
|
+
|
|
14824
|
+
# --- Gather diff ---
|
|
14825
|
+
local diff_content=""
|
|
14826
|
+
local changed_files=""
|
|
14827
|
+
|
|
14828
|
+
if [ "$ci_pr" = true ] || [ "$ci_test_suggest" = true ] || [ "$ci_report" = true ]; then
|
|
14829
|
+
if [ -n "$ci_pr_number" ] && command -v gh &>/dev/null && [ "$ci_env" = "github" ]; then
|
|
14830
|
+
diff_content=$(gh pr diff "$ci_pr_number" 2>/dev/null || echo "")
|
|
14831
|
+
changed_files=$(gh pr diff "$ci_pr_number" --name-only 2>/dev/null || echo "")
|
|
14832
|
+
fi
|
|
14833
|
+
|
|
14834
|
+
# Fallback: git diff against base branch
|
|
14835
|
+
if [ -z "$diff_content" ]; then
|
|
14836
|
+
# Fetch base branch if in CI
|
|
14837
|
+
if [ "$ci_env" != "local" ]; then
|
|
14838
|
+
git fetch origin "$ci_base_branch" --depth=1 2>/dev/null || true
|
|
14839
|
+
fi
|
|
14840
|
+
diff_content=$(git diff "origin/${ci_base_branch}...HEAD" 2>/dev/null || git diff HEAD~1 2>/dev/null || git diff HEAD 2>/dev/null || echo "")
|
|
14841
|
+
changed_files=$(git diff --name-only "origin/${ci_base_branch}...HEAD" 2>/dev/null || git diff --name-only HEAD~1 2>/dev/null || git diff --name-only HEAD 2>/dev/null || echo "")
|
|
14842
|
+
fi
|
|
14843
|
+
|
|
14844
|
+
if [ -z "$diff_content" ]; then
|
|
14845
|
+
if [ "$ci_format" = "json" ]; then
|
|
14846
|
+
echo '{"status":"skip","message":"No changes to review","ci_environment":"'"$ci_env"'","pr_number":"'"$ci_pr_number"'","findings":[],"summary":{"critical":0,"high":0,"medium":0,"low":0,"info":0,"total":0},"exit_code":0}'
|
|
14847
|
+
else
|
|
14848
|
+
echo -e "${GREEN}No changes to review.${NC}"
|
|
14849
|
+
fi
|
|
14850
|
+
return 0
|
|
14851
|
+
fi
|
|
14852
|
+
fi
|
|
14853
|
+
|
|
14854
|
+
# --- Severity helpers ---
|
|
14855
|
+
# Map a severity label to a numeric rank (higher = more severe).
# Unknown labels rank 0 so they can never trip a --fail-on threshold.
_ci_sev_level() {
    local label="$1" rank=0
    case "$label" in
        CRITICAL) rank=5 ;;
        HIGH)     rank=4 ;;
        MEDIUM)   rank=3 ;;
        LOW)      rank=2 ;;
        INFO)     rank=1 ;;
    esac
    printf '%s\n' "$rank"
}
|
|
14865
|
+
|
|
14866
|
+
# Parse --fail-on into a minimum severity threshold
|
|
14867
|
+
local fail_threshold=99 # Default: never fail
|
|
14868
|
+
if [ -n "$ci_fail_on" ]; then
|
|
14869
|
+
# Take the lowest severity from the comma-separated list
|
|
14870
|
+
IFS=',' read -ra fail_levels <<< "$ci_fail_on"
|
|
14871
|
+
for level in "${fail_levels[@]}"; do
|
|
14872
|
+
level=$(echo "$level" | tr -d ' ' | tr '[:lower:]' '[:upper:]')
|
|
14873
|
+
local level_num
|
|
14874
|
+
level_num=$(_ci_sev_level "$level")
|
|
14875
|
+
if [ "$level_num" -gt 0 ] && [ "$level_num" -lt "$fail_threshold" ]; then
|
|
14876
|
+
fail_threshold=$level_num
|
|
14877
|
+
fi
|
|
14878
|
+
done
|
|
14879
|
+
fi
|
|
14880
|
+
|
|
14881
|
+
# --- Run quality gates (reuses cmd_review logic) ---
|
|
14882
|
+
local findings=()
|
|
14883
|
+
local has_critical=false
|
|
14884
|
+
local has_high=false
|
|
14885
|
+
|
|
14886
|
+
# Record one finding as a pipe-delimited record in the shared `findings`
# array, and flip the shared has_critical/has_high flags as appropriate.
# Args: $1 file, $2 line, $3 severity, $4 category, $5 finding, $6 suggestion.
_ci_add_finding() {
    local rec_file="$1" rec_line="$2" rec_sev="$3" rec_cat="$4" rec_text="$5" rec_fix="$6"
    findings+=("${rec_file}|${rec_line}|${rec_sev}|${rec_cat}|${rec_text}|${rec_fix}")
    if [ "$rec_sev" = "CRITICAL" ]; then
        has_critical=true
    fi
    if [ "$rec_sev" = "HIGH" ]; then
        has_high=true
    fi
}
|
|
14892
|
+
|
|
14893
|
+
# Gate 1: Static Analysis - shellcheck
|
|
14894
|
+
if command -v shellcheck &>/dev/null; then
|
|
14895
|
+
local shell_files_ci
|
|
14896
|
+
shell_files_ci=$(echo "$changed_files" | grep -E '\.(sh|bash)$' || true)
|
|
14897
|
+
if [ -n "$shell_files_ci" ]; then
|
|
14898
|
+
while IFS= read -r sf; do
|
|
14899
|
+
[ -z "$sf" ] && continue
|
|
14900
|
+
[ ! -f "$sf" ] && continue
|
|
14901
|
+
local sc_out
|
|
14902
|
+
sc_out=$(shellcheck -f gcc "$sf" 2>/dev/null || true)
|
|
14903
|
+
while IFS= read -r sc_line; do
|
|
14904
|
+
[ -z "$sc_line" ] && continue
|
|
14905
|
+
local sc_file sc_lineno sc_sev sc_msg
|
|
14906
|
+
sc_file=$(echo "$sc_line" | cut -d: -f1)
|
|
14907
|
+
sc_lineno=$(echo "$sc_line" | cut -d: -f2)
|
|
14908
|
+
sc_sev=$(echo "$sc_line" | sed -n 's/.*: \(warning\|error\|note\|info\):.*/\1/p')
|
|
14909
|
+
sc_msg=$(echo "$sc_line" | sed 's/.*: \(warning\|error\|note\|info\): //')
|
|
14910
|
+
local mapped_sev="LOW"
|
|
14911
|
+
case "$sc_sev" in
|
|
14912
|
+
error) mapped_sev="HIGH" ;;
|
|
14913
|
+
warning) mapped_sev="MEDIUM" ;;
|
|
14914
|
+
*) mapped_sev="LOW" ;;
|
|
14915
|
+
esac
|
|
14916
|
+
_ci_add_finding "$sc_file" "$sc_lineno" "$mapped_sev" "static-analysis" "$sc_msg" "Fix shellcheck finding"
|
|
14917
|
+
done <<< "$sc_out"
|
|
14918
|
+
done <<< "$shell_files_ci"
|
|
14919
|
+
fi
|
|
14920
|
+
fi
|
|
14921
|
+
|
|
14922
|
+
# Gate 1b: Static Analysis - eslint
|
|
14923
|
+
if command -v npx &>/dev/null; then
|
|
14924
|
+
local js_files_ci
|
|
14925
|
+
js_files_ci=$(echo "$changed_files" | grep -E '\.(js|ts|jsx|tsx)$' || true)
|
|
14926
|
+
if [ -n "$js_files_ci" ]; then
|
|
14927
|
+
while IFS= read -r jsf; do
|
|
14928
|
+
[ -z "$jsf" ] && continue
|
|
14929
|
+
[ ! -f "$jsf" ] && continue
|
|
14930
|
+
if [ -f ".eslintrc.js" ] || [ -f ".eslintrc.json" ] || [ -f "eslint.config.js" ] || [ -f "eslint.config.mjs" ]; then
|
|
14931
|
+
local eslint_out
|
|
14932
|
+
eslint_out=$(npx eslint --format compact "$jsf" 2>/dev/null || true)
|
|
14933
|
+
while IFS= read -r el; do
|
|
14934
|
+
[ -z "$el" ] && continue
|
|
14935
|
+
[[ "$el" == *"problem"* ]] && continue
|
|
14936
|
+
local el_file el_line el_msg
|
|
14937
|
+
el_file=$(echo "$el" | cut -d: -f1)
|
|
14938
|
+
el_line=$(echo "$el" | cut -d: -f2)
|
|
14939
|
+
el_msg=$(echo "$el" | sed 's/^[^)]*) //')
|
|
14940
|
+
local el_sev="LOW"
|
|
14941
|
+
[[ "$el" == *"Error"* ]] && el_sev="MEDIUM"
|
|
14942
|
+
_ci_add_finding "$el_file" "$el_line" "$el_sev" "static-analysis" "$el_msg" "Fix eslint finding"
|
|
14943
|
+
done <<< "$eslint_out"
|
|
14944
|
+
fi
|
|
14945
|
+
done <<< "$js_files_ci"
|
|
14946
|
+
fi
|
|
14947
|
+
fi
|
|
14948
|
+
|
|
14949
|
+
# Gate 2: Security Scan
|
|
14950
|
+
# Scan the PR diff for an extended regex and record one finding per
# matching line via _ci_add_finding. Reads the outer-scope $diff_content;
# reported line numbers refer to positions within the diff text itself.
# Args: $1 pattern, $2 severity, $3 category, $4 finding, $5 suggestion.
_ci_scan() {
    local pattern="$1" sev="$2" cat="$3" finding="$4" suggestion="$5"
    local match lineno
    while IFS= read -r match; do
        [ -z "$match" ] && continue
        # grep -n prefixes each match with "NN:"; strip from the first
        # colon on with parameter expansion instead of forking echo|cut
        # for every matched line.
        lineno="${match%%:*}"
        _ci_add_finding "diff" "$lineno" "$sev" "$cat" "$finding" "$suggestion"
    done < <(printf '%s\n' "$diff_content" | grep -nE "$pattern" 2>/dev/null || true)
}
|
|
14959
|
+
|
|
14960
|
+
# Hardcoded secrets
|
|
14961
|
+
local ci_secret_patterns=(
|
|
14962
|
+
'API_KEY\s*[=:]\s*["\x27][A-Za-z0-9+/=_-]{8,}'
|
|
14963
|
+
'SECRET_KEY\s*[=:]\s*["\x27][A-Za-z0-9+/=_-]{8,}'
|
|
14964
|
+
'PASSWORD\s*[=:]\s*["\x27][^\x27"]{4,}'
|
|
14965
|
+
'PRIVATE_KEY\s*[=:]\s*["\x27][A-Za-z0-9+/=_-]{8,}'
|
|
14966
|
+
'AWS_ACCESS_KEY_ID\s*[=:]\s*["\x27]AK[A-Z0-9]{18}'
|
|
14967
|
+
'ghp_[A-Za-z0-9]{36}'
|
|
14968
|
+
'sk-[A-Za-z0-9]{32,}'
|
|
14969
|
+
'Bearer\s+[A-Za-z0-9._-]{20,}'
|
|
14970
|
+
)
|
|
14971
|
+
for pattern in "${ci_secret_patterns[@]}"; do
|
|
14972
|
+
_ci_scan "$pattern" "CRITICAL" "security" \
|
|
14973
|
+
"Potential hardcoded secret detected" \
|
|
14974
|
+
"Use environment variables or a secrets manager"
|
|
14975
|
+
done
|
|
14976
|
+
|
|
14977
|
+
# SQL injection
|
|
14978
|
+
_ci_scan '(SELECT|INSERT|UPDATE|DELETE|DROP)\s.*\+\s*(req\.|request\.|params\.|user)' \
|
|
14979
|
+
"HIGH" "security" \
|
|
14980
|
+
"Potential SQL injection: string concatenation in query" \
|
|
14981
|
+
"Use parameterized queries or prepared statements"
|
|
14982
|
+
|
|
14983
|
+
# eval/exec
|
|
14984
|
+
_ci_scan '(^|\s)(eval|exec)\s*\(' \
|
|
14985
|
+
"HIGH" "security" \
|
|
14986
|
+
"Dangerous eval/exec usage detected" \
|
|
14987
|
+
"Avoid eval/exec with dynamic input"
|
|
14988
|
+
|
|
14989
|
+
# Unsafe deserialization
|
|
14990
|
+
_ci_scan '(pickle\.loads?|yaml\.load\s*\()' \
|
|
14991
|
+
"HIGH" "security" \
|
|
14992
|
+
"Unsafe deserialization detected" \
|
|
14993
|
+
"Use yaml.safe_load or avoid pickle with untrusted data"
|
|
14994
|
+
|
|
14995
|
+
# Disabled SSL
|
|
14996
|
+
_ci_scan '(verify\s*=\s*False|VERIFY_SSL\s*=\s*False|NODE_TLS_REJECT_UNAUTHORIZED.*0|rejectUnauthorized.*false)' \
|
|
14997
|
+
"HIGH" "security" \
|
|
14998
|
+
"SSL verification disabled" \
|
|
14999
|
+
"Enable SSL verification in production"
|
|
15000
|
+
|
|
15001
|
+
# Gate 3: Anti-patterns
|
|
15002
|
+
_ci_scan 'console\.(log|debug)\(' "LOW" "anti-pattern" \
|
|
15003
|
+
"console.log/debug statement found" \
|
|
15004
|
+
"Remove debug logging or use a proper logger"
|
|
15005
|
+
|
|
15006
|
+
_ci_scan 'except\s*:' "MEDIUM" "anti-pattern" \
|
|
15007
|
+
"Bare except clause" \
|
|
15008
|
+
"Catch specific exceptions"
|
|
15009
|
+
|
|
15010
|
+
# Gate 4: TODO/FIXME markers in new code
|
|
15011
|
+
_ci_scan '^\+.*\b(TODO|FIXME|HACK|XXX):' "INFO" "style" \
|
|
15012
|
+
"TODO/FIXME marker in new code" \
|
|
15013
|
+
"Track in issue tracker"
|
|
15014
|
+
|
|
15015
|
+
# --- Test suggestions ---
# Suggest conventional test-file locations for each changed source file.
local test_suggestions=""
if [ "$ci_test_suggest" = true ] && [ -n "$changed_files" ]; then
    # Fix: the heredoc below reads LOKI_CI_CHANGED_FILES from the
    # environment, but the variable was never exported, so the Python
    # helper always saw an empty list and suggestions were always [].
    export LOKI_CI_CHANGED_FILES="$changed_files"
    test_suggestions=$(python3 << 'TEST_SUGGEST_PY'
import sys, os

changed = os.environ.get("LOKI_CI_CHANGED_FILES", "").strip().split("\n")
suggestions = []

for f in changed:
    f = f.strip()
    if not f:
        continue

    base = os.path.basename(f)
    name, ext = os.path.splitext(base)
    dirpath = os.path.dirname(f)

    # Skip test files themselves
    if "test" in name.lower() or "spec" in name.lower():
        continue
    # Skip non-code files
    if ext not in (".py", ".js", ".ts", ".jsx", ".tsx", ".sh", ".bash", ".go", ".rs", ".rb"):
        continue

    # Suggest test file paths based on language conventions
    if ext == ".py":
        test_path = os.path.join(dirpath, f"test_{name}.py")
        alt_path = os.path.join("tests", f"test_{name}.py")
        suggestions.append({"file": f, "test_file": test_path, "alt_test_file": alt_path,
                            "framework": "pytest", "hint": f"Add unit tests for {name} module"})
    elif ext in (".js", ".ts", ".jsx", ".tsx"):
        test_ext = ext.replace(".ts", ".test.ts").replace(".js", ".test.js").replace(".tsx", ".test.tsx").replace(".jsx", ".test.jsx")
        if test_ext == ext:
            test_ext = f".test{ext}"
        test_path = os.path.join(dirpath, f"{name}{test_ext}")
        suggestions.append({"file": f, "test_file": test_path,
                            "framework": "jest/vitest", "hint": f"Add tests for {name} component/module"})
    elif ext in (".sh", ".bash"):
        test_path = os.path.join("tests", f"test-{name}.sh")
        suggestions.append({"file": f, "test_file": test_path,
                            "framework": "bash", "hint": f"Add shell tests for {name}"})
    elif ext == ".go":
        test_path = os.path.join(dirpath, f"{name}_test.go")
        suggestions.append({"file": f, "test_file": test_path,
                            "framework": "go test", "hint": f"Add Go tests for {name}"})
    elif ext == ".rs":
        suggestions.append({"file": f, "test_file": f"{f} (mod tests)",
                            "framework": "cargo test", "hint": f"Add #[cfg(test)] mod tests in {name}"})
    elif ext == ".rb":
        test_path = os.path.join("spec", f"{name}_spec.rb")
        suggestions.append({"file": f, "test_file": test_path,
                            "framework": "rspec", "hint": f"Add RSpec tests for {name}"})

import json
print(json.dumps(suggestions))
TEST_SUGGEST_PY
)
    unset LOKI_CI_CHANGED_FILES
fi
|
|
15074
|
+
|
|
15075
|
+
# --- Tally findings ---
|
|
15076
|
+
local count_critical=0 count_high=0 count_medium=0 count_low=0 count_info=0
|
|
15077
|
+
for f in "${findings[@]}"; do
|
|
15078
|
+
local sev
|
|
15079
|
+
sev=$(echo "$f" | cut -d'|' -f3)
|
|
15080
|
+
case "$sev" in
|
|
15081
|
+
CRITICAL) count_critical=$((count_critical + 1)) ;;
|
|
15082
|
+
HIGH) count_high=$((count_high + 1)) ;;
|
|
15083
|
+
MEDIUM) count_medium=$((count_medium + 1)) ;;
|
|
15084
|
+
LOW) count_low=$((count_low + 1)) ;;
|
|
15085
|
+
INFO) count_info=$((count_info + 1)) ;;
|
|
15086
|
+
esac
|
|
15087
|
+
done
|
|
15088
|
+
local count_total=${#findings[@]}
|
|
15089
|
+
|
|
15090
|
+
# Determine exit code based on --fail-on threshold
|
|
15091
|
+
local exit_code=0
|
|
15092
|
+
if [ "$fail_threshold" -lt 99 ]; then
|
|
15093
|
+
for f in "${findings[@]}"; do
|
|
15094
|
+
local sev sev_num
|
|
15095
|
+
sev=$(echo "$f" | cut -d'|' -f3)
|
|
15096
|
+
sev_num=$(_ci_sev_level "$sev")
|
|
15097
|
+
if [ "$sev_num" -ge "$fail_threshold" ]; then
|
|
15098
|
+
exit_code=1
|
|
15099
|
+
break
|
|
15100
|
+
fi
|
|
15101
|
+
done
|
|
15102
|
+
fi
|
|
15103
|
+
|
|
15104
|
+
# --- Build output ---
|
|
15105
|
+
local report_timestamp
|
|
15106
|
+
report_timestamp=$(date -u +%Y-%m-%dT%H:%M:%SZ)
|
|
15107
|
+
local file_count
|
|
15108
|
+
file_count=$(echo "$changed_files" | grep -c '.' 2>/dev/null || echo 0)
|
|
15109
|
+
|
|
15110
|
+
if [ "$ci_format" = "json" ]; then
|
|
15111
|
+
# JSON output via python for proper escaping
|
|
15112
|
+
export LOKI_CI_JSON_FINDINGS=""
|
|
15113
|
+
for f in "${findings[@]}"; do
|
|
15114
|
+
LOKI_CI_JSON_FINDINGS+="${f}"$'\n'
|
|
15115
|
+
done
|
|
15116
|
+
export LOKI_CI_JSON_META="ci_env=${ci_env}|pr=${ci_pr_number}|ts=${report_timestamp}|files=${file_count}|exit=${exit_code}"
|
|
15117
|
+
export LOKI_CI_JSON_COUNTS="critical=${count_critical}|high=${count_high}|medium=${count_medium}|low=${count_low}|info=${count_info}|total=${count_total}"
|
|
15118
|
+
export LOKI_CI_JSON_TESTS="${test_suggestions:-[]}"
|
|
15119
|
+
|
|
15120
|
+
python3 << 'CI_JSON_OUT'
|
|
15121
|
+
import json, os
|
|
15122
|
+
|
|
15123
|
+
findings_raw = os.environ.get("LOKI_CI_JSON_FINDINGS", "").strip().split("\n")
|
|
15124
|
+
meta_raw = os.environ.get("LOKI_CI_JSON_META", "")
|
|
15125
|
+
counts_raw = os.environ.get("LOKI_CI_JSON_COUNTS", "")
|
|
15126
|
+
tests_raw = os.environ.get("LOKI_CI_JSON_TESTS", "[]")
|
|
15127
|
+
|
|
15128
|
+
meta = dict(item.split("=", 1) for item in meta_raw.split("|") if "=" in item)
|
|
15129
|
+
counts = dict(item.split("=", 1) for item in counts_raw.split("|") if "=" in item)
|
|
15130
|
+
|
|
15131
|
+
findings = []
|
|
15132
|
+
for line in findings_raw:
|
|
15133
|
+
if not line or "|" not in line:
|
|
15134
|
+
continue
|
|
15135
|
+
parts = line.split("|", 5)
|
|
15136
|
+
if len(parts) >= 6:
|
|
15137
|
+
findings.append({
|
|
15138
|
+
"file": parts[0],
|
|
15139
|
+
"line": int(parts[1]) if parts[1].isdigit() else 0,
|
|
15140
|
+
"severity": parts[2],
|
|
15141
|
+
"category": parts[3],
|
|
15142
|
+
"finding": parts[4],
|
|
15143
|
+
"suggestion": parts[5]
|
|
15144
|
+
})
|
|
15145
|
+
|
|
15146
|
+
try:
|
|
15147
|
+
tests = json.loads(tests_raw)
|
|
15148
|
+
except Exception:
|
|
15149
|
+
tests = []
|
|
15150
|
+
|
|
15151
|
+
report = {
|
|
15152
|
+
"status": "fail" if int(meta.get("exit", "0")) > 0 else "pass",
|
|
15153
|
+
"ci_environment": meta.get("ci_env", "local"),
|
|
15154
|
+
"pr_number": meta.get("pr", ""),
|
|
15155
|
+
"timestamp": meta.get("ts", ""),
|
|
15156
|
+
"files_changed": int(meta.get("files", "0")),
|
|
15157
|
+
"findings": findings,
|
|
15158
|
+
"summary": {
|
|
15159
|
+
"critical": int(counts.get("critical", "0")),
|
|
15160
|
+
"high": int(counts.get("high", "0")),
|
|
15161
|
+
"medium": int(counts.get("medium", "0")),
|
|
15162
|
+
"low": int(counts.get("low", "0")),
|
|
15163
|
+
"info": int(counts.get("info", "0")),
|
|
15164
|
+
"total": int(counts.get("total", "0"))
|
|
15165
|
+
},
|
|
15166
|
+
"exit_code": int(meta.get("exit", "0"))
|
|
15167
|
+
}
|
|
15168
|
+
if tests:
|
|
15169
|
+
report["test_suggestions"] = tests
|
|
15170
|
+
|
|
15171
|
+
print(json.dumps(report, indent=2))
|
|
15172
|
+
CI_JSON_OUT
|
|
15173
|
+
unset LOKI_CI_JSON_FINDINGS LOKI_CI_JSON_META LOKI_CI_JSON_COUNTS LOKI_CI_JSON_TESTS
|
|
15174
|
+
|
|
15175
|
+
elif [ "$ci_format" = "github" ]; then
|
|
15176
|
+
# GitHub-flavored markdown for PR comments
|
|
15177
|
+
echo "## Loki CI Quality Report"
|
|
15178
|
+
echo ""
|
|
15179
|
+
echo "| Metric | Count |"
|
|
15180
|
+
echo "|--------|-------|"
|
|
15181
|
+
echo "| Files changed | $file_count |"
|
|
15182
|
+
echo "| Critical | $count_critical |"
|
|
15183
|
+
echo "| High | $count_high |"
|
|
15184
|
+
echo "| Medium | $count_medium |"
|
|
15185
|
+
echo "| Low | $count_low |"
|
|
15186
|
+
echo "| Info | $count_info |"
|
|
15187
|
+
echo "| **Total** | **$count_total** |"
|
|
15188
|
+
echo ""
|
|
15189
|
+
|
|
15190
|
+
if [ "$count_total" -gt 0 ]; then
|
|
15191
|
+
echo "### Findings"
|
|
15192
|
+
echo ""
|
|
15193
|
+
for sev_name in CRITICAL HIGH MEDIUM LOW INFO; do
|
|
15194
|
+
local has_sev=false
|
|
15195
|
+
for f in "${findings[@]}"; do
|
|
15196
|
+
local f_sev
|
|
15197
|
+
f_sev=$(echo "$f" | cut -d'|' -f3)
|
|
15198
|
+
[ "$f_sev" != "$sev_name" ] && continue
|
|
15199
|
+
if [ "$has_sev" = false ]; then
|
|
15200
|
+
echo "#### $sev_name"
|
|
15201
|
+
echo ""
|
|
15202
|
+
has_sev=true
|
|
15203
|
+
fi
|
|
15204
|
+
local f_file f_line f_cat f_finding f_suggestion
|
|
15205
|
+
f_file=$(echo "$f" | cut -d'|' -f1)
|
|
15206
|
+
f_line=$(echo "$f" | cut -d'|' -f2)
|
|
15207
|
+
f_cat=$(echo "$f" | cut -d'|' -f4)
|
|
15208
|
+
f_finding=$(echo "$f" | cut -d'|' -f5)
|
|
15209
|
+
f_suggestion=$(echo "$f" | cut -d'|' -f6)
|
|
15210
|
+
echo "- **\`$f_file:$f_line\`** [$f_cat] $f_finding"
|
|
15211
|
+
echo " - Suggestion: $f_suggestion"
|
|
15212
|
+
done
|
|
15213
|
+
[ "$has_sev" = true ] && echo ""
|
|
15214
|
+
done
|
|
15215
|
+
else
|
|
15216
|
+
echo "No findings. All quality gates passed."
|
|
15217
|
+
fi
|
|
15218
|
+
|
|
15219
|
+
# Render the test-suggestion list in the GitHub-flavored report.
if [ -n "${test_suggestions:-}" ] && [ "$test_suggestions" != "[]" ]; then
    echo "### Test Suggestions"
    echo ""
    # Fix: the inline Python reads LOKI_CI_TEST_SUGG from the environment,
    # but this branch never exported it (only the terminal-format branch
    # does), so this section always printed nothing.
    export LOKI_CI_TEST_SUGG="${test_suggestions}"
    python3 -c "
import json, os
tests = json.loads(os.environ.get('LOKI_CI_TEST_SUGG', '[]'))
for t in tests:
    print(f\"- **{t['file']}**: {t['hint']} ({t['framework']})\")
    print(f\"  - Suggested: \`{t['test_file']}\`\")
" 2>/dev/null || true
    unset LOKI_CI_TEST_SUGG
fi
|
|
15230
|
+
|
|
15231
|
+
echo ""
|
|
15232
|
+
if [ "$exit_code" -eq 0 ]; then
|
|
15233
|
+
echo "**Result: PASSED**"
|
|
15234
|
+
else
|
|
15235
|
+
echo "**Result: FAILED** (findings exceed threshold)"
|
|
15236
|
+
fi
|
|
15237
|
+
echo ""
|
|
15238
|
+
echo "_Generated by [Loki Mode](https://github.com/asklokesh/loki-mode) at $report_timestamp_"
|
|
15239
|
+
|
|
15240
|
+
else
|
|
15241
|
+
# Default: markdown (terminal-friendly)
|
|
15242
|
+
echo -e "${BOLD}Loki CI Quality Report${NC}"
|
|
15243
|
+
echo -e "Environment: ${CYAN}$ci_env${NC}"
|
|
15244
|
+
[ -n "$ci_pr_number" ] && echo -e "PR: ${CYAN}#$ci_pr_number${NC}"
|
|
15245
|
+
echo -e "Files changed: ${CYAN}$file_count${NC}"
|
|
15246
|
+
echo -e "Timestamp: ${DIM}$report_timestamp${NC}"
|
|
15247
|
+
echo "---"
|
|
15248
|
+
|
|
15249
|
+
if [ "$count_total" -eq 0 ]; then
|
|
15250
|
+
echo -e "${GREEN}All quality gates passed. No findings.${NC}"
|
|
15251
|
+
else
|
|
15252
|
+
for sev_name in CRITICAL HIGH MEDIUM LOW INFO; do
|
|
15253
|
+
local printed_header=false
|
|
15254
|
+
for f in "${findings[@]}"; do
|
|
15255
|
+
local f_sev
|
|
15256
|
+
f_sev=$(echo "$f" | cut -d'|' -f3)
|
|
15257
|
+
[ "$f_sev" != "$sev_name" ] && continue
|
|
15258
|
+
if [ "$printed_header" = false ]; then
|
|
15259
|
+
local sev_color="$NC"
|
|
15260
|
+
case "$sev_name" in
|
|
15261
|
+
CRITICAL) sev_color="$RED" ;;
|
|
15262
|
+
HIGH) sev_color="$RED" ;;
|
|
15263
|
+
MEDIUM) sev_color="$YELLOW" ;;
|
|
15264
|
+
LOW) sev_color="$CYAN" ;;
|
|
15265
|
+
INFO) sev_color="$DIM" ;;
|
|
15266
|
+
esac
|
|
15267
|
+
echo ""
|
|
15268
|
+
echo -e "${sev_color}${BOLD}[$sev_name]${NC}"
|
|
15269
|
+
printed_header=true
|
|
15270
|
+
fi
|
|
15271
|
+
local f_file f_line f_cat f_finding f_suggestion
|
|
15272
|
+
f_file=$(echo "$f" | cut -d'|' -f1)
|
|
15273
|
+
f_line=$(echo "$f" | cut -d'|' -f2)
|
|
15274
|
+
f_cat=$(echo "$f" | cut -d'|' -f4)
|
|
15275
|
+
f_finding=$(echo "$f" | cut -d'|' -f5)
|
|
15276
|
+
f_suggestion=$(echo "$f" | cut -d'|' -f6)
|
|
15277
|
+
echo -e " ${DIM}$f_file:$f_line${NC} [$f_cat] $f_finding"
|
|
15278
|
+
echo -e " -> $f_suggestion"
|
|
15279
|
+
done
|
|
15280
|
+
done
|
|
15281
|
+
fi
|
|
15282
|
+
|
|
15283
|
+
echo ""
|
|
15284
|
+
echo "---"
|
|
15285
|
+
echo -e "Summary: ${RED}$count_critical critical${NC}, ${RED}$count_high high${NC}, ${YELLOW}$count_medium medium${NC}, ${CYAN}$count_low low${NC}, ${DIM}$count_info info${NC} ($count_total total)"
|
|
15286
|
+
|
|
15287
|
+
if [ "$ci_test_suggest" = true ] && [ -n "${test_suggestions:-}" ] && [ "$test_suggestions" != "[]" ]; then
|
|
15288
|
+
echo ""
|
|
15289
|
+
echo -e "${BOLD}Test Suggestions${NC}"
|
|
15290
|
+
export LOKI_CI_TEST_SUGG="${test_suggestions}"
|
|
15291
|
+
python3 -c "
|
|
15292
|
+
import json, os
|
|
15293
|
+
tests = json.loads(os.environ.get('LOKI_CI_TEST_SUGG', '[]'))
|
|
15294
|
+
for t in tests:
|
|
15295
|
+
print(f\" {t['file']} -> {t['test_file']} ({t['framework']})\")
|
|
15296
|
+
print(f\" {t['hint']}\")
|
|
15297
|
+
" 2>/dev/null || true
|
|
15298
|
+
unset LOKI_CI_TEST_SUGG
|
|
15299
|
+
fi
|
|
15300
|
+
|
|
15301
|
+
if [ "$exit_code" -eq 0 ]; then
|
|
15302
|
+
echo -e "${GREEN}Result: PASSED${NC}"
|
|
15303
|
+
else
|
|
15304
|
+
echo -e "${RED}Result: FAILED (findings exceed threshold)${NC}"
|
|
15305
|
+
fi
|
|
15306
|
+
fi
|
|
15307
|
+
|
|
15308
|
+
# --- Post GitHub comment ---
|
|
15309
|
+
if [ "$ci_github_comment" = true ]; then
|
|
15310
|
+
if [ -z "${GITHUB_TOKEN:-}" ]; then
|
|
15311
|
+
echo -e "${YELLOW}Warning: --github-comment requires GITHUB_TOKEN. Skipping comment.${NC}" >&2
|
|
15312
|
+
elif [ -z "$ci_pr_number" ]; then
|
|
15313
|
+
echo -e "${YELLOW}Warning: No PR number detected. Skipping comment.${NC}" >&2
|
|
15314
|
+
elif command -v gh &>/dev/null; then
|
|
15315
|
+
# Generate GitHub-format report for comment
|
|
15316
|
+
local comment_body
|
|
15317
|
+
comment_body=$("$0" ci --pr --format github 2>/dev/null || echo "Loki CI report generation failed.")
|
|
15318
|
+
gh pr comment "$ci_pr_number" --body "$comment_body" 2>/dev/null && \
|
|
15319
|
+
echo -e "${GREEN}Posted review comment to PR #$ci_pr_number${NC}" >&2 || \
|
|
15320
|
+
echo -e "${YELLOW}Warning: Failed to post PR comment${NC}" >&2
|
|
15321
|
+
else
|
|
15322
|
+
echo -e "${YELLOW}Warning: gh CLI required for --github-comment. Install: https://cli.github.com${NC}" >&2
|
|
15323
|
+
fi
|
|
15324
|
+
fi
|
|
15325
|
+
|
|
15326
|
+
return "$exit_code"
|
|
15327
|
+
}
|
|
15328
|
+
|
|
13830
15329
|
main "$@"
|