claude-code-workflow 6.2.4 → 6.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/ccw/dist/core/lite-scanner-complete.d.ts.map +1 -1
  2. package/ccw/dist/core/lite-scanner-complete.js +4 -1
  3. package/ccw/dist/core/lite-scanner-complete.js.map +1 -1
  4. package/ccw/dist/core/lite-scanner.d.ts.map +1 -1
  5. package/ccw/dist/core/lite-scanner.js +4 -1
  6. package/ccw/dist/core/lite-scanner.js.map +1 -1
  7. package/ccw/dist/core/routes/claude-routes.d.ts.map +1 -1
  8. package/ccw/dist/core/routes/claude-routes.js +3 -5
  9. package/ccw/dist/core/routes/claude-routes.js.map +1 -1
  10. package/ccw/dist/core/routes/cli-routes.d.ts.map +1 -1
  11. package/ccw/dist/core/routes/cli-routes.js +2 -1
  12. package/ccw/dist/core/routes/cli-routes.js.map +1 -1
  13. package/ccw/dist/core/routes/codexlens-routes.d.ts.map +1 -1
  14. package/ccw/dist/core/routes/codexlens-routes.js +31 -6
  15. package/ccw/dist/core/routes/codexlens-routes.js.map +1 -1
  16. package/ccw/dist/core/routes/rules-routes.d.ts.map +1 -1
  17. package/ccw/dist/core/routes/rules-routes.js +4 -3
  18. package/ccw/dist/core/routes/rules-routes.js.map +1 -1
  19. package/ccw/dist/core/routes/skills-routes.d.ts.map +1 -1
  20. package/ccw/dist/core/routes/skills-routes.js +124 -6
  21. package/ccw/dist/core/routes/skills-routes.js.map +1 -1
  22. package/ccw/dist/tools/cli-executor.d.ts +4 -1
  23. package/ccw/dist/tools/cli-executor.d.ts.map +1 -1
  24. package/ccw/dist/tools/cli-executor.js +54 -2
  25. package/ccw/dist/tools/cli-executor.js.map +1 -1
  26. package/ccw/dist/tools/codex-lens.d.ts +20 -3
  27. package/ccw/dist/tools/codex-lens.d.ts.map +1 -1
  28. package/ccw/dist/tools/codex-lens.js +166 -37
  29. package/ccw/dist/tools/codex-lens.js.map +1 -1
  30. package/ccw/package.json +1 -1
  31. package/ccw/src/core/lite-scanner-complete.ts +5 -1
  32. package/ccw/src/core/lite-scanner.ts +5 -1
  33. package/ccw/src/core/routes/claude-routes.ts +3 -5
  34. package/ccw/src/core/routes/cli-routes.ts +2 -1
  35. package/ccw/src/core/routes/codexlens-routes.ts +34 -6
  36. package/ccw/src/core/routes/rules-routes.ts +4 -3
  37. package/ccw/src/core/routes/skills-routes.ts +144 -6
  38. package/ccw/src/templates/dashboard-js/components/mcp-manager.js +7 -12
  39. package/ccw/src/templates/dashboard-js/i18n.js +167 -5
  40. package/ccw/src/templates/dashboard-js/views/claude-manager.js +18 -4
  41. package/ccw/src/templates/dashboard-js/views/cli-manager.js +5 -3
  42. package/ccw/src/templates/dashboard-js/views/codexlens-manager.js +790 -25
  43. package/ccw/src/templates/dashboard-js/views/rules-manager.js +35 -6
  44. package/ccw/src/templates/dashboard-js/views/skills-manager.js +385 -21
  45. package/ccw/src/tools/cli-executor.ts +70 -2
  46. package/ccw/src/tools/codex-lens.ts +183 -35
  47. package/codex-lens/pyproject.toml +66 -48
  48. package/codex-lens/src/codexlens/__pycache__/config.cpython-313.pyc +0 -0
  49. package/codex-lens/src/codexlens/cli/__pycache__/embedding_manager.cpython-313.pyc +0 -0
  50. package/codex-lens/src/codexlens/cli/__pycache__/model_manager.cpython-313.pyc +0 -0
  51. package/codex-lens/src/codexlens/cli/embedding_manager.py +3 -3
  52. package/codex-lens/src/codexlens/cli/model_manager.py +24 -2
  53. package/codex-lens/src/codexlens/search/__pycache__/hybrid_search.cpython-313.pyc +0 -0
  54. package/codex-lens/src/codexlens/search/hybrid_search.py +313 -313
  55. package/codex-lens/src/codexlens/semantic/__init__.py +76 -39
  56. package/codex-lens/src/codexlens/semantic/__pycache__/__init__.cpython-313.pyc +0 -0
  57. package/codex-lens/src/codexlens/semantic/__pycache__/embedder.cpython-313.pyc +0 -0
  58. package/codex-lens/src/codexlens/semantic/__pycache__/gpu_support.cpython-313.pyc +0 -0
  59. package/codex-lens/src/codexlens/semantic/__pycache__/ollama_backend.cpython-313.pyc +0 -0
  60. package/codex-lens/src/codexlens/semantic/embedder.py +244 -185
  61. package/codex-lens/src/codexlens/semantic/gpu_support.py +192 -0
  62. package/package.json +1 -1
package/codex-lens/src/codexlens/semantic/gpu_support.py ADDED
@@ -0,0 +1,192 @@
+ """GPU acceleration support for semantic embeddings.
+
+ This module provides GPU detection, initialization, and fallback handling
+ for ONNX-based embedding generation.
+ """
+
+ from __future__ import annotations
+
+ import logging
+ from dataclasses import dataclass
+ from typing import List, Optional
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class GPUInfo:
+     """GPU availability and configuration info."""
+
+     gpu_available: bool = False
+     cuda_available: bool = False
+     gpu_count: int = 0
+     gpu_name: Optional[str] = None
+     onnx_providers: List[str] = None
+
+     def __post_init__(self):
+         if self.onnx_providers is None:
+             self.onnx_providers = ["CPUExecutionProvider"]
+
+
+ _gpu_info_cache: Optional[GPUInfo] = None
+
+
+ def detect_gpu(force_refresh: bool = False) -> GPUInfo:
+     """Detect available GPU resources for embedding acceleration.
+
+     Args:
+         force_refresh: If True, re-detect GPU even if cached.
+
+     Returns:
+         GPUInfo with detection results.
+     """
+     global _gpu_info_cache
+
+     if _gpu_info_cache is not None and not force_refresh:
+         return _gpu_info_cache
+
+     info = GPUInfo()
+
+     # Check PyTorch CUDA availability (most reliable detection)
+     try:
+         import torch
+         if torch.cuda.is_available():
+             info.cuda_available = True
+             info.gpu_available = True
+             info.gpu_count = torch.cuda.device_count()
+             if info.gpu_count > 0:
+                 info.gpu_name = torch.cuda.get_device_name(0)
+             logger.debug(f"PyTorch CUDA detected: {info.gpu_count} GPU(s)")
+     except ImportError:
+         logger.debug("PyTorch not available for GPU detection")
+
+     # Check ONNX Runtime providers with validation
+     try:
+         import onnxruntime as ort
+         available_providers = ort.get_available_providers()
+
+         # Build provider list with priority order
+         providers = []
+
+         # Test each provider to ensure it actually works
+         def test_provider(provider_name: str) -> bool:
+             """Test if a provider actually works by creating a dummy session."""
+             try:
+                 # Create a minimal ONNX model to test provider
+                 import numpy as np
+                 # Simple test: just check if provider can be instantiated
+                 sess_options = ort.SessionOptions()
+                 sess_options.log_severity_level = 4  # Suppress warnings
+                 return True
+             except Exception:
+                 return False
+
+         # CUDA provider (NVIDIA GPU) - check if CUDA runtime is available
+         if "CUDAExecutionProvider" in available_providers:
+             # Verify CUDA is actually usable by checking for cuBLAS
+             cuda_works = False
+             try:
+                 import ctypes
+                 # Try to load cuBLAS to verify CUDA installation
+                 try:
+                     ctypes.CDLL("cublas64_12.dll")
+                     cuda_works = True
+                 except OSError:
+                     try:
+                         ctypes.CDLL("cublas64_11.dll")
+                         cuda_works = True
+                     except OSError:
+                         pass
+             except Exception:
+                 pass
+
+             if cuda_works:
+                 providers.append("CUDAExecutionProvider")
+                 info.gpu_available = True
+                 logger.debug("ONNX CUDAExecutionProvider available and working")
+             else:
+                 logger.debug("ONNX CUDAExecutionProvider listed but CUDA runtime not found")
+
+         # TensorRT provider (optimized NVIDIA inference)
+         if "TensorrtExecutionProvider" in available_providers:
+             # TensorRT requires additional libraries, skip for now
+             logger.debug("ONNX TensorrtExecutionProvider available (requires TensorRT SDK)")
+
+         # DirectML provider (Windows GPU - AMD/Intel/NVIDIA)
+         if "DmlExecutionProvider" in available_providers:
+             providers.append("DmlExecutionProvider")
+             info.gpu_available = True
+             logger.debug("ONNX DmlExecutionProvider available (DirectML)")
+
+         # ROCm provider (AMD GPU on Linux)
+         if "ROCMExecutionProvider" in available_providers:
+             providers.append("ROCMExecutionProvider")
+             info.gpu_available = True
+             logger.debug("ONNX ROCMExecutionProvider available (AMD)")
+
+         # CoreML provider (Apple Silicon)
+         if "CoreMLExecutionProvider" in available_providers:
+             providers.append("CoreMLExecutionProvider")
+             info.gpu_available = True
+             logger.debug("ONNX CoreMLExecutionProvider available (Apple)")
+
+         # Always include CPU as fallback
+         providers.append("CPUExecutionProvider")
+
+         info.onnx_providers = providers
+
+     except ImportError:
+         logger.debug("ONNX Runtime not available")
+         info.onnx_providers = ["CPUExecutionProvider"]
+
+     _gpu_info_cache = info
+     return info
+
+
+ def get_optimal_providers(use_gpu: bool = True) -> List[str]:
+     """Get optimal ONNX execution providers based on availability.
+
+     Args:
+         use_gpu: If True, include GPU providers when available.
+             If False, force CPU-only execution.
+
+     Returns:
+         List of provider names in priority order.
+     """
+     if not use_gpu:
+         return ["CPUExecutionProvider"]
+
+     gpu_info = detect_gpu()
+     return gpu_info.onnx_providers
+
+
+ def is_gpu_available() -> bool:
+     """Check if any GPU acceleration is available."""
+     return detect_gpu().gpu_available
+
+
+ def get_gpu_summary() -> str:
+     """Get human-readable GPU status summary."""
+     info = detect_gpu()
+
+     if not info.gpu_available:
+         return "GPU: Not available (using CPU)"
+
+     parts = []
+     if info.gpu_name:
+         parts.append(f"GPU: {info.gpu_name}")
+     if info.gpu_count > 1:
+         parts.append(f"({info.gpu_count} devices)")
+
+     # Show active providers (excluding CPU fallback)
+     gpu_providers = [p for p in info.onnx_providers if p != "CPUExecutionProvider"]
+     if gpu_providers:
+         parts.append(f"Providers: {', '.join(gpu_providers)}")
+
+     return " | ".join(parts) if parts else "GPU: Available"
+
+
+ def clear_gpu_cache() -> None:
+     """Clear cached GPU detection info."""
+     global _gpu_info_cache
+     _gpu_info_cache = None
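For context on how a caller might consume this new module: below is a minimal, illustrative sketch of opening an ONNX embedding model with the detected provider list. The model path "model.onnx" is a hypothetical placeholder, not a file from this package; get_optimal_providers() and get_gpu_summary() are the functions added above, and onnxruntime.InferenceSession accepts a providers list.

    # Illustrative usage sketch; "model.onnx" is a hypothetical path.
    import onnxruntime as ort
    from codexlens.semantic.gpu_support import get_gpu_summary, get_optimal_providers

    print(get_gpu_summary())  # e.g. "GPU: Not available (using CPU)"

    # Pass the detected provider list (GPU providers first, CPU last).
    session = ort.InferenceSession(
        "model.onnx",
        providers=get_optimal_providers(use_gpu=True),
    )

Because detect_gpu() always appends CPUExecutionProvider last, ONNX Runtime can fall back to CPU execution when a GPU provider fails to initialize.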
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "claude-code-workflow",
-   "version": "6.2.4",
+   "version": "6.2.6",
    "description": "JSON-driven multi-agent development framework with intelligent CLI orchestration (Gemini/Qwen/Codex), context-first architecture, and automated workflow execution",
    "type": "module",
    "main": "ccw/src/index.js",