claude-code-workflow 6.3.2 → 6.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/.claude/CLAUDE.md +9 -1
  2. package/.claude/commands/{clean.md → workflow/clean.md} +5 -5
  3. package/.claude/commands/workflow/docs/analyze.md +1467 -0
  4. package/.claude/commands/workflow/docs/copyright.md +1265 -0
  5. package/.claude/commands/workflow/lite-plan.md +1 -1
  6. package/.claude/commands/workflow/tools/conflict-resolution.md +76 -240
  7. package/.claude/commands/workflow/tools/task-generate-agent.md +81 -8
  8. package/.claude/skills/_shared/mermaid-utils.md +584 -0
  9. package/.claude/skills/copyright-docs/SKILL.md +132 -0
  10. package/.claude/skills/copyright-docs/phases/01-metadata-collection.md +78 -0
  11. package/.claude/skills/copyright-docs/phases/02-deep-analysis.md +454 -0
  12. package/.claude/skills/copyright-docs/phases/02.5-consolidation.md +192 -0
  13. package/.claude/skills/copyright-docs/phases/04-document-assembly.md +261 -0
  14. package/.claude/skills/copyright-docs/phases/05-compliance-refinement.md +192 -0
  15. package/.claude/skills/copyright-docs/specs/cpcc-requirements.md +121 -0
  16. package/.claude/skills/copyright-docs/templates/agent-base.md +200 -0
  17. package/.claude/skills/project-analyze/SKILL.md +162 -0
  18. package/.claude/skills/project-analyze/phases/01-requirements-discovery.md +79 -0
  19. package/.claude/skills/project-analyze/phases/02-project-exploration.md +75 -0
  20. package/.claude/skills/project-analyze/phases/03-deep-analysis.md +640 -0
  21. package/.claude/skills/project-analyze/phases/03.5-consolidation.md +208 -0
  22. package/.claude/skills/project-analyze/phases/04-report-generation.md +217 -0
  23. package/.claude/skills/project-analyze/phases/05-iterative-refinement.md +124 -0
  24. package/.claude/skills/project-analyze/specs/quality-standards.md +115 -0
  25. package/.claude/skills/project-analyze/specs/writing-style.md +152 -0
  26. package/.claude/workflows/cli-templates/schemas/conflict-resolution-schema.json +79 -65
  27. package/.claude/workflows/cli-tools-usage.md +515 -516
  28. package/README.md +11 -1
  29. package/ccw/dist/cli.d.ts.map +1 -1
  30. package/ccw/dist/cli.js +7 -1
  31. package/ccw/dist/cli.js.map +1 -1
  32. package/ccw/dist/commands/cli.d.ts +1 -1
  33. package/ccw/dist/commands/cli.d.ts.map +1 -1
  34. package/ccw/dist/commands/cli.js +116 -14
  35. package/ccw/dist/commands/cli.js.map +1 -1
  36. package/ccw/dist/core/routes/cli-routes.js +2 -2
  37. package/ccw/dist/core/routes/cli-routes.js.map +1 -1
  38. package/ccw/dist/tools/claude-cli-tools.d.ts +7 -3
  39. package/ccw/dist/tools/claude-cli-tools.d.ts.map +1 -1
  40. package/ccw/dist/tools/claude-cli-tools.js +31 -17
  41. package/ccw/dist/tools/claude-cli-tools.js.map +1 -1
  42. package/ccw/dist/tools/cli-executor.d.ts.map +1 -1
  43. package/ccw/dist/tools/cli-executor.js +19 -7
  44. package/ccw/dist/tools/cli-executor.js.map +1 -1
  45. package/ccw/dist/tools/cli-history-store.d.ts +33 -0
  46. package/ccw/dist/tools/cli-history-store.d.ts.map +1 -1
  47. package/ccw/dist/tools/cli-history-store.js +89 -5
  48. package/ccw/dist/tools/cli-history-store.js.map +1 -1
  49. package/ccw/dist/tools/smart-search.d.ts +25 -0
  50. package/ccw/dist/tools/smart-search.d.ts.map +1 -1
  51. package/ccw/dist/tools/smart-search.js +121 -17
  52. package/ccw/dist/tools/smart-search.js.map +1 -1
  53. package/ccw/src/cli.ts +264 -258
  54. package/ccw/src/commands/cli.ts +1009 -884
  55. package/ccw/src/core/routes/cli-routes.ts +3 -3
  56. package/ccw/src/templates/dashboard-js/components/cli-history.js +40 -13
  57. package/ccw/src/templates/dashboard-js/components/cli-status.js +26 -2
  58. package/ccw/src/templates/dashboard-js/views/cli-manager.js +5 -0
  59. package/ccw/src/templates/dashboard-js/views/history.js +19 -4
  60. package/ccw/src/tools/claude-cli-tools.ts +37 -20
  61. package/ccw/src/tools/cli-executor.ts +20 -7
  62. package/ccw/src/tools/cli-history-store.ts +125 -5
  63. package/ccw/src/tools/smart-search.ts +157 -16
  64. package/codex-lens/src/codexlens/__pycache__/config.cpython-313.pyc +0 -0
  65. package/codex-lens/src/codexlens/config.py +8 -0
  66. package/codex-lens/src/codexlens/search/__pycache__/chain_search.cpython-313.pyc +0 -0
  67. package/codex-lens/src/codexlens/search/__pycache__/hybrid_search.cpython-313.pyc +0 -0
  68. package/codex-lens/src/codexlens/search/__pycache__/ranking.cpython-313.pyc +0 -0
  69. package/codex-lens/src/codexlens/search/chain_search.py +71 -1
  70. package/codex-lens/src/codexlens/search/hybrid_search.py +144 -11
  71. package/codex-lens/src/codexlens/search/ranking.py +540 -274
  72. package/codex-lens/src/codexlens/semantic/__pycache__/chunker.cpython-313.pyc +0 -0
  73. package/codex-lens/src/codexlens/semantic/chunker.py +55 -10
  74. package/codex-lens/src/codexlens/storage/__pycache__/dir_index.cpython-313.pyc +0 -0
  75. package/codex-lens/src/codexlens/storage/__pycache__/global_index.cpython-313.pyc +0 -0
  76. package/codex-lens/src/codexlens/storage/__pycache__/index_tree.cpython-313.pyc +0 -0
  77. package/codex-lens/src/codexlens/storage/dir_index.py +1888 -1850
  78. package/codex-lens/src/codexlens/storage/global_index.py +365 -0
  79. package/codex-lens/src/codexlens/storage/index_tree.py +83 -10
  80. package/package.json +2 -2
@@ -7,12 +7,38 @@ results via Reciprocal Rank Fusion (RRF) algorithm.
7
7
  from __future__ import annotations
8
8
 
9
9
  import logging
10
+ import time
10
11
  from concurrent.futures import ThreadPoolExecutor, as_completed
12
+ from contextlib import contextmanager
11
13
  from pathlib import Path
12
- from typing import Dict, List, Optional
14
+ from typing import Any, Dict, List, Optional
13
15
 
16
+
17
+ @contextmanager
18
+ def timer(name: str, logger: logging.Logger, level: int = logging.DEBUG):
19
+ """Context manager for timing code blocks.
20
+
21
+ Args:
22
+ name: Name of the operation being timed
23
+ logger: Logger instance to use
24
+ level: Logging level (default DEBUG)
25
+ """
26
+ start = time.perf_counter()
27
+ try:
28
+ yield
29
+ finally:
30
+ elapsed_ms = (time.perf_counter() - start) * 1000
31
+ logger.log(level, "[TIMING] %s: %.2fms", name, elapsed_ms)
32
+
33
+ from codexlens.config import Config
14
34
  from codexlens.entities import SearchResult
15
- from codexlens.search.ranking import reciprocal_rank_fusion, tag_search_source
35
+ from codexlens.search.ranking import (
36
+ apply_symbol_boost,
37
+ get_rrf_weights,
38
+ reciprocal_rank_fusion,
39
+ rerank_results,
40
+ tag_search_source,
41
+ )
16
42
  from codexlens.storage.dir_index import DirIndexStore
17
43
 
18
44
 
@@ -34,14 +60,23 @@ class HybridSearchEngine:
34
60
  "vector": 0.6,
35
61
  }
36
62
 
37
- def __init__(self, weights: Optional[Dict[str, float]] = None):
63
+ def __init__(
64
+ self,
65
+ weights: Optional[Dict[str, float]] = None,
66
+ config: Optional[Config] = None,
67
+ embedder: Any = None,
68
+ ):
38
69
  """Initialize hybrid search engine.
39
70
 
40
71
  Args:
41
72
  weights: Optional custom RRF weights (default: DEFAULT_WEIGHTS)
73
+ config: Optional runtime config (enables optional reranking features)
74
+ embedder: Optional embedder instance for embedding-based reranking
42
75
  """
43
76
  self.logger = logging.getLogger(__name__)
44
77
  self.weights = weights or self.DEFAULT_WEIGHTS.copy()
78
+ self._config = config
79
+ self.embedder = embedder
45
80
 
46
81
  def search(
47
82
  self,
@@ -101,7 +136,8 @@ class HybridSearchEngine:
101
136
  backends["vector"] = True
102
137
 
103
138
  # Execute parallel searches
104
- results_map = self._search_parallel(index_path, query, backends, limit)
139
+ with timer("parallel_search_total", self.logger):
140
+ results_map = self._search_parallel(index_path, query, backends, limit)
105
141
 
106
142
  # Provide helpful message if pure-vector mode returns no results
107
143
  if pure_vector and enable_vector and len(results_map.get("vector", [])) == 0:
@@ -120,11 +156,72 @@ class HybridSearchEngine:
120
156
  if source in results_map
121
157
  }
122
158
 
123
- fused_results = reciprocal_rank_fusion(results_map, active_weights)
159
+ with timer("rrf_fusion", self.logger):
160
+ adaptive_weights = get_rrf_weights(query, active_weights)
161
+ fused_results = reciprocal_rank_fusion(results_map, adaptive_weights)
162
+
163
+ # Optional: boost results that include explicit symbol matches
164
+ boost_factor = (
165
+ self._config.symbol_boost_factor
166
+ if self._config is not None
167
+ else 1.5
168
+ )
169
+ with timer("symbol_boost", self.logger):
170
+ fused_results = apply_symbol_boost(
171
+ fused_results, boost_factor=boost_factor
172
+ )
173
+
174
+ # Optional: embedding-based reranking on top results
175
+ if self._config is not None and self._config.enable_reranking:
176
+ with timer("reranking", self.logger):
177
+ if self.embedder is None:
178
+ self.embedder = self._get_reranking_embedder()
179
+ fused_results = rerank_results(
180
+ query,
181
+ fused_results[:100],
182
+ self.embedder,
183
+ top_k=self._config.reranking_top_k,
184
+ )
124
185
 
125
186
  # Apply final limit
126
187
  return fused_results[:limit]
127
188
 
189
+ def _get_reranking_embedder(self) -> Any:
190
+ """Create an embedder for reranking based on Config embedding settings."""
191
+ if self._config is None:
192
+ return None
193
+
194
+ try:
195
+ from codexlens.semantic.factory import get_embedder
196
+ except Exception as exc:
197
+ self.logger.debug("Reranking embedder unavailable: %s", exc)
198
+ return None
199
+
200
+ try:
201
+ if self._config.embedding_backend == "fastembed":
202
+ return get_embedder(
203
+ backend="fastembed",
204
+ profile=self._config.embedding_model,
205
+ use_gpu=self._config.embedding_use_gpu,
206
+ )
207
+ if self._config.embedding_backend == "litellm":
208
+ return get_embedder(
209
+ backend="litellm",
210
+ model=self._config.embedding_model,
211
+ endpoints=self._config.embedding_endpoints,
212
+ strategy=self._config.embedding_strategy,
213
+ cooldown=self._config.embedding_cooldown,
214
+ )
215
+ except Exception as exc:
216
+ self.logger.debug("Failed to initialize reranking embedder: %s", exc)
217
+ return None
218
+
219
+ self.logger.debug(
220
+ "Unknown embedding backend for reranking: %s",
221
+ self._config.embedding_backend,
222
+ )
223
+ return None
224
+
128
225
  def _search_parallel(
129
226
  self,
130
227
  index_path: Path,
@@ -144,25 +241,30 @@ class HybridSearchEngine:
144
241
  Dictionary mapping source name to results list
145
242
  """
146
243
  results_map: Dict[str, List[SearchResult]] = {}
244
+ timing_data: Dict[str, float] = {}
147
245
 
148
246
  # Use ThreadPoolExecutor for parallel I/O-bound searches
149
247
  with ThreadPoolExecutor(max_workers=len(backends)) as executor:
150
- # Submit search tasks
248
+ # Submit search tasks with timing
151
249
  future_to_source = {}
250
+ submit_times = {}
152
251
 
153
252
  if backends.get("exact"):
253
+ submit_times["exact"] = time.perf_counter()
154
254
  future = executor.submit(
155
255
  self._search_exact, index_path, query, limit
156
256
  )
157
257
  future_to_source[future] = "exact"
158
258
 
159
259
  if backends.get("fuzzy"):
260
+ submit_times["fuzzy"] = time.perf_counter()
160
261
  future = executor.submit(
161
262
  self._search_fuzzy, index_path, query, limit
162
263
  )
163
264
  future_to_source[future] = "fuzzy"
164
265
 
165
266
  if backends.get("vector"):
267
+ submit_times["vector"] = time.perf_counter()
166
268
  future = executor.submit(
167
269
  self._search_vector, index_path, query, limit
168
270
  )
@@ -171,18 +273,26 @@ class HybridSearchEngine:
171
273
  # Collect results as they complete
172
274
  for future in as_completed(future_to_source):
173
275
  source = future_to_source[future]
276
+ elapsed_ms = (time.perf_counter() - submit_times[source]) * 1000
277
+ timing_data[source] = elapsed_ms
174
278
  try:
175
279
  results = future.result()
176
280
  # Tag results with source for debugging
177
281
  tagged_results = tag_search_source(results, source)
178
282
  results_map[source] = tagged_results
179
283
  self.logger.debug(
180
- "Got %d results from %s search", len(results), source
284
+ "[TIMING] %s_search: %.2fms (%d results)",
285
+ source, elapsed_ms, len(results)
181
286
  )
182
287
  except Exception as exc:
183
288
  self.logger.error("Search failed for %s: %s", source, exc)
184
289
  results_map[source] = []
185
290
 
291
+ # Log timing summary
292
+ if timing_data:
293
+ timing_str = ", ".join(f"{k}={v:.1f}ms" for k, v in timing_data.items())
294
+ self.logger.debug("[TIMING] search_backends: {%s}", timing_str)
295
+
186
296
  return results_map
187
297
 
188
298
  def _search_exact(
@@ -245,6 +355,8 @@ class HybridSearchEngine:
245
355
  try:
246
356
  # Check if semantic chunks table exists
247
357
  import sqlite3
358
+
359
+ start_check = time.perf_counter()
248
360
  try:
249
361
  with sqlite3.connect(index_path) as conn:
250
362
  cursor = conn.execute(
@@ -254,6 +366,10 @@ class HybridSearchEngine:
254
366
  except sqlite3.Error as e:
255
367
  self.logger.error("Database check failed in vector search: %s", e)
256
368
  return []
369
+ self.logger.debug(
370
+ "[TIMING] vector_table_check: %.2fms",
371
+ (time.perf_counter() - start_check) * 1000
372
+ )
257
373
 
258
374
  if not has_semantic_table:
259
375
  self.logger.info(
@@ -267,7 +383,12 @@ class HybridSearchEngine:
267
383
  from codexlens.semantic.factory import get_embedder
268
384
  from codexlens.semantic.vector_store import VectorStore
269
385
 
386
+ start_init = time.perf_counter()
270
387
  vector_store = VectorStore(index_path)
388
+ self.logger.debug(
389
+ "[TIMING] vector_store_init: %.2fms",
390
+ (time.perf_counter() - start_init) * 1000
391
+ )
271
392
 
272
393
  # Check if vector store has data
273
394
  if vector_store.count_chunks() == 0:
@@ -279,6 +400,7 @@ class HybridSearchEngine:
279
400
  return []
280
401
 
281
402
  # Get stored model configuration (preferred) or auto-detect from dimension
403
+ start_embedder = time.perf_counter()
282
404
  model_config = vector_store.get_model_config()
283
405
  if model_config:
284
406
  backend = model_config.get("backend", "fastembed")
@@ -288,7 +410,7 @@ class HybridSearchEngine:
288
410
  "Using stored model config: %s backend, %s (%s, %dd)",
289
411
  backend, model_profile, model_name, model_config["embedding_dim"]
290
412
  )
291
-
413
+
292
414
  # Get embedder based on backend
293
415
  if backend == "litellm":
294
416
  embedder = get_embedder(backend="litellm", model=model_name)
@@ -324,21 +446,32 @@ class HybridSearchEngine:
324
446
  detected_dim
325
447
  )
326
448
  embedder = get_embedder(backend="fastembed", profile="code")
327
-
328
-
449
+ self.logger.debug(
450
+ "[TIMING] embedder_init: %.2fms",
451
+ (time.perf_counter() - start_embedder) * 1000
452
+ )
329
453
 
330
454
  # Generate query embedding
455
+ start_embed = time.perf_counter()
331
456
  query_embedding = embedder.embed_single(query)
457
+ self.logger.debug(
458
+ "[TIMING] query_embedding: %.2fms",
459
+ (time.perf_counter() - start_embed) * 1000
460
+ )
332
461
 
333
462
  # Search for similar chunks
463
+ start_search = time.perf_counter()
334
464
  results = vector_store.search_similar(
335
465
  query_embedding=query_embedding,
336
466
  top_k=limit,
337
467
  min_score=0.0, # Return all results, let RRF handle filtering
338
468
  return_full_content=True,
339
469
  )
470
+ self.logger.debug(
471
+ "[TIMING] vector_similarity_search: %.2fms (%d results)",
472
+ (time.perf_counter() - start_search) * 1000, len(results)
473
+ )
340
474
 
341
- self.logger.debug("Vector search found %d results", len(results))
342
475
  return results
343
476
 
344
477
  except ImportError as exc: