claude-code-workflow 6.2.4 → 6.2.6
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/ccw/dist/core/lite-scanner-complete.d.ts.map +1 -1
- package/ccw/dist/core/lite-scanner-complete.js +4 -1
- package/ccw/dist/core/lite-scanner-complete.js.map +1 -1
- package/ccw/dist/core/lite-scanner.d.ts.map +1 -1
- package/ccw/dist/core/lite-scanner.js +4 -1
- package/ccw/dist/core/lite-scanner.js.map +1 -1
- package/ccw/dist/core/routes/claude-routes.d.ts.map +1 -1
- package/ccw/dist/core/routes/claude-routes.js +3 -5
- package/ccw/dist/core/routes/claude-routes.js.map +1 -1
- package/ccw/dist/core/routes/cli-routes.d.ts.map +1 -1
- package/ccw/dist/core/routes/cli-routes.js +2 -1
- package/ccw/dist/core/routes/cli-routes.js.map +1 -1
- package/ccw/dist/core/routes/codexlens-routes.d.ts.map +1 -1
- package/ccw/dist/core/routes/codexlens-routes.js +31 -6
- package/ccw/dist/core/routes/codexlens-routes.js.map +1 -1
- package/ccw/dist/core/routes/rules-routes.d.ts.map +1 -1
- package/ccw/dist/core/routes/rules-routes.js +4 -3
- package/ccw/dist/core/routes/rules-routes.js.map +1 -1
- package/ccw/dist/core/routes/skills-routes.d.ts.map +1 -1
- package/ccw/dist/core/routes/skills-routes.js +124 -6
- package/ccw/dist/core/routes/skills-routes.js.map +1 -1
- package/ccw/dist/tools/cli-executor.d.ts +4 -1
- package/ccw/dist/tools/cli-executor.d.ts.map +1 -1
- package/ccw/dist/tools/cli-executor.js +54 -2
- package/ccw/dist/tools/cli-executor.js.map +1 -1
- package/ccw/dist/tools/codex-lens.d.ts +20 -3
- package/ccw/dist/tools/codex-lens.d.ts.map +1 -1
- package/ccw/dist/tools/codex-lens.js +166 -37
- package/ccw/dist/tools/codex-lens.js.map +1 -1
- package/ccw/package.json +1 -1
- package/ccw/src/core/lite-scanner-complete.ts +5 -1
- package/ccw/src/core/lite-scanner.ts +5 -1
- package/ccw/src/core/routes/claude-routes.ts +3 -5
- package/ccw/src/core/routes/cli-routes.ts +2 -1
- package/ccw/src/core/routes/codexlens-routes.ts +34 -6
- package/ccw/src/core/routes/rules-routes.ts +4 -3
- package/ccw/src/core/routes/skills-routes.ts +144 -6
- package/ccw/src/templates/dashboard-js/components/mcp-manager.js +7 -12
- package/ccw/src/templates/dashboard-js/i18n.js +167 -5
- package/ccw/src/templates/dashboard-js/views/claude-manager.js +18 -4
- package/ccw/src/templates/dashboard-js/views/cli-manager.js +5 -3
- package/ccw/src/templates/dashboard-js/views/codexlens-manager.js +790 -25
- package/ccw/src/templates/dashboard-js/views/rules-manager.js +35 -6
- package/ccw/src/templates/dashboard-js/views/skills-manager.js +385 -21
- package/ccw/src/tools/cli-executor.ts +70 -2
- package/ccw/src/tools/codex-lens.ts +183 -35
- package/codex-lens/pyproject.toml +66 -48
- package/codex-lens/src/codexlens/__pycache__/config.cpython-313.pyc +0 -0
- package/codex-lens/src/codexlens/cli/__pycache__/embedding_manager.cpython-313.pyc +0 -0
- package/codex-lens/src/codexlens/cli/__pycache__/model_manager.cpython-313.pyc +0 -0
- package/codex-lens/src/codexlens/cli/embedding_manager.py +3 -3
- package/codex-lens/src/codexlens/cli/model_manager.py +24 -2
- package/codex-lens/src/codexlens/search/__pycache__/hybrid_search.cpython-313.pyc +0 -0
- package/codex-lens/src/codexlens/search/hybrid_search.py +313 -313
- package/codex-lens/src/codexlens/semantic/__init__.py +76 -39
- package/codex-lens/src/codexlens/semantic/__pycache__/__init__.cpython-313.pyc +0 -0
- package/codex-lens/src/codexlens/semantic/__pycache__/embedder.cpython-313.pyc +0 -0
- package/codex-lens/src/codexlens/semantic/__pycache__/gpu_support.cpython-313.pyc +0 -0
- package/codex-lens/src/codexlens/semantic/__pycache__/ollama_backend.cpython-313.pyc +0 -0
- package/codex-lens/src/codexlens/semantic/embedder.py +244 -185
- package/codex-lens/src/codexlens/semantic/gpu_support.py +192 -0
- package/package.json +1 -1
package/codex-lens/src/codexlens/semantic/__init__.py
@@ -1,39 +1,76 @@
-"""Optional semantic search module for CodexLens.
-
-Install with: pip install codexlens[semantic]
-Uses fastembed (ONNX-based, lightweight ~200MB)
(removed lines 5-39 not captured in this diff view)
+"""Optional semantic search module for CodexLens.
+
+Install with: pip install codexlens[semantic]
+Uses fastembed (ONNX-based, lightweight ~200MB)
+
+GPU Acceleration:
+- Automatic GPU detection and usage when available
+- Supports CUDA (NVIDIA), TensorRT, DirectML (Windows), ROCm (AMD), CoreML (Apple)
+- Install GPU support: pip install onnxruntime-gpu (NVIDIA) or onnxruntime-directml (Windows)
+"""
+
+from __future__ import annotations
+
+SEMANTIC_AVAILABLE = False
+SEMANTIC_BACKEND: str | None = None
+GPU_AVAILABLE = False
+_import_error: str | None = None
+
+
+def _detect_backend() -> tuple[bool, str | None, bool, str | None]:
+    """Detect if fastembed and GPU are available."""
+    try:
+        import numpy as np
+    except ImportError as e:
+        return False, None, False, f"numpy not available: {e}"
+
+    try:
+        from fastembed import TextEmbedding
+    except ImportError:
+        return False, None, False, "fastembed not available. Install with: pip install codexlens[semantic]"
+
+    # Check GPU availability
+    gpu_available = False
+    try:
+        from .gpu_support import is_gpu_available
+        gpu_available = is_gpu_available()
+    except ImportError:
+        pass
+
+    return True, "fastembed", gpu_available, None
+
+
+# Initialize on module load
+SEMANTIC_AVAILABLE, SEMANTIC_BACKEND, GPU_AVAILABLE, _import_error = _detect_backend()
+
+
+def check_semantic_available() -> tuple[bool, str | None]:
+    """Check if semantic search dependencies are available."""
+    return SEMANTIC_AVAILABLE, _import_error
+
+
+def check_gpu_available() -> tuple[bool, str]:
+    """Check if GPU acceleration is available.
+
+    Returns:
+        Tuple of (is_available, status_message)
+    """
+    if not SEMANTIC_AVAILABLE:
+        return False, "Semantic search not available"
+
+    try:
+        from .gpu_support import is_gpu_available, get_gpu_summary
+        if is_gpu_available():
+            return True, get_gpu_summary()
+        return False, "No GPU detected (using CPU)"
+    except ImportError:
+        return False, "GPU support module not available"
+
+
+__all__ = [
+    "SEMANTIC_AVAILABLE",
+    "SEMANTIC_BACKEND",
+    "GPU_AVAILABLE",
+    "check_semantic_available",
+    "check_gpu_available",
+]
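The new __init__.py runs backend detection once at import time and exposes the result through module flags and two check functions. A minimal consumer-side sketch (the calling script is illustrative; only the imported names come from the diff above):

    from codexlens.semantic import check_semantic_available, check_gpu_available

    ok, err = check_semantic_available()
    if not ok:
        # err carries the install hint, e.g. "pip install codexlens[semantic]"
        print(f"Semantic search disabled: {err}")
    else:
        gpu_ok, status = check_gpu_available()
        print(f"GPU acceleration: {'on' if gpu_ok else 'off'} ({status})")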
3 binary files changed (contents not shown)
package/codex-lens/src/codexlens/semantic/embedder.py
@@ -1,185 +1,244 @@
-"""Embedder for semantic code search using fastembed.
(removed lines 2-185 not captured in this diff view)
+"""Embedder for semantic code search using fastembed.
+
+Supports GPU acceleration via ONNX execution providers (CUDA, TensorRT, DirectML, ROCm, CoreML).
+GPU acceleration is automatic when available, with transparent CPU fallback.
+"""
+
+from __future__ import annotations
+
+import gc
+import logging
+import threading
+from typing import Dict, Iterable, List, Optional
+
+import numpy as np
+
+from . import SEMANTIC_AVAILABLE
+from .gpu_support import get_optimal_providers, is_gpu_available, get_gpu_summary
+
+logger = logging.getLogger(__name__)
+
+# Global embedder cache for singleton pattern
+_embedder_cache: Dict[str, "Embedder"] = {}
+_cache_lock = threading.Lock()
+
+
+def get_embedder(profile: str = "code", use_gpu: bool = True) -> "Embedder":
+    """Get or create a cached Embedder instance (thread-safe singleton).
+
+    This function provides significant performance improvement by reusing
+    Embedder instances across multiple searches, avoiding repeated model
+    loading overhead (~0.8s per load).
+
+    Args:
+        profile: Model profile ("fast", "code", "multilingual", "balanced")
+        use_gpu: If True, use GPU acceleration when available (default: True)
+
+    Returns:
+        Cached Embedder instance for the given profile
+    """
+    global _embedder_cache
+
+    # Cache key includes GPU preference to support mixed configurations
+    cache_key = f"{profile}:{'gpu' if use_gpu else 'cpu'}"
+
+    # Fast path: check cache without lock
+    if cache_key in _embedder_cache:
+        return _embedder_cache[cache_key]
+
+    # Slow path: acquire lock for initialization
+    with _cache_lock:
+        # Double-check after acquiring lock
+        if cache_key in _embedder_cache:
+            return _embedder_cache[cache_key]
+
+        # Create new embedder and cache it
+        embedder = Embedder(profile=profile, use_gpu=use_gpu)
+        # Pre-load model to ensure it's ready
+        embedder._load_model()
+        _embedder_cache[cache_key] = embedder
+
+        # Log GPU status on first embedder creation
+        if use_gpu and is_gpu_available():
+            logger.info(f"Embedder initialized with GPU: {get_gpu_summary()}")
+        elif use_gpu:
+            logger.debug("GPU not available, using CPU for embeddings")
+
+        return embedder
+
+
+def clear_embedder_cache() -> None:
+    """Clear the embedder cache and release ONNX resources.
+
+    This method ensures proper cleanup of ONNX model resources to prevent
+    memory leaks when embedders are no longer needed.
+    """
+    global _embedder_cache
+    with _cache_lock:
+        # Release ONNX resources before clearing cache
+        for embedder in _embedder_cache.values():
+            if embedder._model is not None:
+                del embedder._model
+                embedder._model = None
+        _embedder_cache.clear()
+    gc.collect()
+
+
+class Embedder:
+    """Generate embeddings for code chunks using fastembed (ONNX-based).
+
+    Supported Model Profiles:
+    - fast: BAAI/bge-small-en-v1.5 (384 dim) - Fast, lightweight, English-optimized
+    - code: jinaai/jina-embeddings-v2-base-code (768 dim) - Code-optimized, best for programming languages
+    - multilingual: intfloat/multilingual-e5-large (1024 dim) - Multilingual + code support
+    - balanced: mixedbread-ai/mxbai-embed-large-v1 (1024 dim) - High accuracy, general purpose
+    """
+
+    # Model profiles for different use cases
+    MODELS = {
+        "fast": "BAAI/bge-small-en-v1.5",  # 384 dim - Fast, lightweight
+        "code": "jinaai/jina-embeddings-v2-base-code",  # 768 dim - Code-optimized
+        "multilingual": "intfloat/multilingual-e5-large",  # 1024 dim - Multilingual
+        "balanced": "mixedbread-ai/mxbai-embed-large-v1",  # 1024 dim - High accuracy
+    }
+
+    # Dimension mapping for each model
+    MODEL_DIMS = {
+        "BAAI/bge-small-en-v1.5": 384,
+        "jinaai/jina-embeddings-v2-base-code": 768,
+        "intfloat/multilingual-e5-large": 1024,
+        "mixedbread-ai/mxbai-embed-large-v1": 1024,
+    }
+
+    # Default model (fast profile)
+    DEFAULT_MODEL = "BAAI/bge-small-en-v1.5"
+    DEFAULT_PROFILE = "fast"
+
+    def __init__(
+        self,
+        model_name: str | None = None,
+        profile: str | None = None,
+        use_gpu: bool = True,
+        providers: List[str] | None = None,
+    ) -> None:
+        """Initialize embedder with model or profile.
+
+        Args:
+            model_name: Explicit model name (e.g., "jinaai/jina-embeddings-v2-base-code")
+            profile: Model profile shortcut ("fast", "code", "multilingual", "balanced")
+                If both provided, model_name takes precedence.
+            use_gpu: If True, use GPU acceleration when available (default: True)
+            providers: Explicit ONNX providers list (overrides use_gpu if provided)
+        """
+        if not SEMANTIC_AVAILABLE:
+            raise ImportError(
+                "Semantic search dependencies not available. "
+                "Install with: pip install codexlens[semantic]"
+            )
+
+        # Resolve model name from profile or use explicit name
+        if model_name:
+            self.model_name = model_name
+        elif profile and profile in self.MODELS:
+            self.model_name = self.MODELS[profile]
+        else:
+            self.model_name = self.DEFAULT_MODEL
+
+        # Configure ONNX execution providers
+        if providers is not None:
+            self._providers = providers
+        else:
+            self._providers = get_optimal_providers(use_gpu=use_gpu)
+
+        self._use_gpu = use_gpu
+        self._model = None
+
+    @property
+    def embedding_dim(self) -> int:
+        """Get embedding dimension for current model."""
+        return self.MODEL_DIMS.get(self.model_name, 768)  # Default to 768 if unknown
+
+    @property
+    def providers(self) -> List[str]:
+        """Get configured ONNX execution providers."""
+        return self._providers
+
+    @property
+    def is_gpu_enabled(self) -> bool:
+        """Check if GPU acceleration is enabled for this embedder."""
+        gpu_providers = {"CUDAExecutionProvider", "TensorrtExecutionProvider",
+                         "DmlExecutionProvider", "ROCMExecutionProvider", "CoreMLExecutionProvider"}
+        return any(p in gpu_providers for p in self._providers)
+
+    def _load_model(self) -> None:
+        """Lazy load the embedding model with configured providers."""
+        if self._model is not None:
+            return
+
+        from fastembed import TextEmbedding
+
+        # fastembed supports 'providers' parameter for ONNX execution providers
+        try:
+            self._model = TextEmbedding(
+                model_name=self.model_name,
+                providers=self._providers,
+            )
+            logger.debug(f"Model loaded with providers: {self._providers}")
+        except TypeError:
+            # Fallback for older fastembed versions without providers parameter
+            logger.warning(
+                "fastembed version doesn't support 'providers' parameter. "
+                "Upgrade fastembed for GPU acceleration: pip install --upgrade fastembed"
+            )
+            self._model = TextEmbedding(model_name=self.model_name)
+
+    def embed(self, texts: str | Iterable[str]) -> List[List[float]]:
+        """Generate embeddings for one or more texts.
+
+        Args:
+            texts: Single text or iterable of texts to embed.
+
+        Returns:
+            List of embedding vectors (each is a list of floats).
+
+        Note:
+            This method converts numpy arrays to Python lists for backward compatibility.
+            For memory-efficient processing, use embed_to_numpy() instead.
+        """
+        self._load_model()
+
+        if isinstance(texts, str):
+            texts = [texts]
+        else:
+            texts = list(texts)
+
+        embeddings = list(self._model.embed(texts))
+        return [emb.tolist() for emb in embeddings]
+
+    def embed_to_numpy(self, texts: str | Iterable[str]) -> np.ndarray:
+        """Generate embeddings for one or more texts (returns numpy arrays).
+
+        This method is more memory-efficient than embed() as it avoids converting
+        numpy arrays to Python lists, which can significantly reduce memory usage
+        during batch processing.
+
+        Args:
+            texts: Single text or iterable of texts to embed.
+
+        Returns:
+            numpy.ndarray of shape (n_texts, embedding_dim) containing embeddings.
+        """
+        self._load_model()
+
+        if isinstance(texts, str):
+            texts = [texts]
+        else:
+            texts = list(texts)
+
+        # Return embeddings as numpy array directly (no .tolist() conversion)
+        embeddings = list(self._model.embed(texts))
+        return np.array(embeddings)
+
+    def embed_single(self, text: str) -> List[float]:
+        """Generate embedding for a single text."""
+        return self.embed(text)[0]