claude-code-workflow 6.2.7 → 6.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (208) hide show
  1. package/.claude/CLAUDE.md +16 -1
  2. package/.claude/workflows/cli-templates/protocols/analysis-protocol.md +11 -4
  3. package/.claude/workflows/cli-templates/protocols/write-protocol.md +10 -75
  4. package/.claude/workflows/cli-tools-usage.md +14 -24
  5. package/.codex/AGENTS.md +51 -1
  6. package/.codex/prompts/compact.md +378 -0
  7. package/.gemini/GEMINI.md +57 -20
  8. package/ccw/dist/cli.d.ts.map +1 -1
  9. package/ccw/dist/cli.js +21 -8
  10. package/ccw/dist/cli.js.map +1 -1
  11. package/ccw/dist/commands/cli.d.ts +2 -0
  12. package/ccw/dist/commands/cli.d.ts.map +1 -1
  13. package/ccw/dist/commands/cli.js +129 -8
  14. package/ccw/dist/commands/cli.js.map +1 -1
  15. package/ccw/dist/commands/hook.d.ts.map +1 -1
  16. package/ccw/dist/commands/hook.js +3 -2
  17. package/ccw/dist/commands/hook.js.map +1 -1
  18. package/ccw/dist/config/litellm-api-config-manager.d.ts +180 -0
  19. package/ccw/dist/config/litellm-api-config-manager.d.ts.map +1 -0
  20. package/ccw/dist/config/litellm-api-config-manager.js +770 -0
  21. package/ccw/dist/config/litellm-api-config-manager.js.map +1 -0
  22. package/ccw/dist/config/provider-models.d.ts +73 -0
  23. package/ccw/dist/config/provider-models.d.ts.map +1 -0
  24. package/ccw/dist/config/provider-models.js +172 -0
  25. package/ccw/dist/config/provider-models.js.map +1 -0
  26. package/ccw/dist/core/cache-manager.d.ts.map +1 -1
  27. package/ccw/dist/core/cache-manager.js +3 -5
  28. package/ccw/dist/core/cache-manager.js.map +1 -1
  29. package/ccw/dist/core/dashboard-generator.d.ts.map +1 -1
  30. package/ccw/dist/core/dashboard-generator.js +3 -1
  31. package/ccw/dist/core/dashboard-generator.js.map +1 -1
  32. package/ccw/dist/core/routes/cli-routes.d.ts.map +1 -1
  33. package/ccw/dist/core/routes/cli-routes.js +169 -0
  34. package/ccw/dist/core/routes/cli-routes.js.map +1 -1
  35. package/ccw/dist/core/routes/codexlens-routes.d.ts.map +1 -1
  36. package/ccw/dist/core/routes/codexlens-routes.js +234 -18
  37. package/ccw/dist/core/routes/codexlens-routes.js.map +1 -1
  38. package/ccw/dist/core/routes/hooks-routes.d.ts.map +1 -1
  39. package/ccw/dist/core/routes/hooks-routes.js +30 -32
  40. package/ccw/dist/core/routes/hooks-routes.js.map +1 -1
  41. package/ccw/dist/core/routes/litellm-api-routes.d.ts +21 -0
  42. package/ccw/dist/core/routes/litellm-api-routes.d.ts.map +1 -0
  43. package/ccw/dist/core/routes/litellm-api-routes.js +780 -0
  44. package/ccw/dist/core/routes/litellm-api-routes.js.map +1 -0
  45. package/ccw/dist/core/routes/litellm-routes.d.ts +20 -0
  46. package/ccw/dist/core/routes/litellm-routes.d.ts.map +1 -0
  47. package/ccw/dist/core/routes/litellm-routes.js +85 -0
  48. package/ccw/dist/core/routes/litellm-routes.js.map +1 -0
  49. package/ccw/dist/core/routes/mcp-routes.js +2 -2
  50. package/ccw/dist/core/routes/mcp-routes.js.map +1 -1
  51. package/ccw/dist/core/routes/status-routes.d.ts.map +1 -1
  52. package/ccw/dist/core/routes/status-routes.js +39 -0
  53. package/ccw/dist/core/routes/status-routes.js.map +1 -1
  54. package/ccw/dist/core/routes/system-routes.js +1 -1
  55. package/ccw/dist/core/routes/system-routes.js.map +1 -1
  56. package/ccw/dist/core/server.d.ts.map +1 -1
  57. package/ccw/dist/core/server.js +15 -1
  58. package/ccw/dist/core/server.js.map +1 -1
  59. package/ccw/dist/mcp-server/index.js +1 -1
  60. package/ccw/dist/mcp-server/index.js.map +1 -1
  61. package/ccw/dist/tools/claude-cli-tools.d.ts +82 -0
  62. package/ccw/dist/tools/claude-cli-tools.d.ts.map +1 -0
  63. package/ccw/dist/tools/claude-cli-tools.js +216 -0
  64. package/ccw/dist/tools/claude-cli-tools.js.map +1 -0
  65. package/ccw/dist/tools/cli-executor.d.ts.map +1 -1
  66. package/ccw/dist/tools/cli-executor.js +76 -14
  67. package/ccw/dist/tools/cli-executor.js.map +1 -1
  68. package/ccw/dist/tools/codex-lens.d.ts +9 -2
  69. package/ccw/dist/tools/codex-lens.d.ts.map +1 -1
  70. package/ccw/dist/tools/codex-lens.js +114 -9
  71. package/ccw/dist/tools/codex-lens.js.map +1 -1
  72. package/ccw/dist/tools/context-cache-store.d.ts +136 -0
  73. package/ccw/dist/tools/context-cache-store.d.ts.map +1 -0
  74. package/ccw/dist/tools/context-cache-store.js +256 -0
  75. package/ccw/dist/tools/context-cache-store.js.map +1 -0
  76. package/ccw/dist/tools/context-cache.d.ts +56 -0
  77. package/ccw/dist/tools/context-cache.d.ts.map +1 -0
  78. package/ccw/dist/tools/context-cache.js +294 -0
  79. package/ccw/dist/tools/context-cache.js.map +1 -0
  80. package/ccw/dist/tools/core-memory.d.ts.map +1 -1
  81. package/ccw/dist/tools/core-memory.js +33 -19
  82. package/ccw/dist/tools/core-memory.js.map +1 -1
  83. package/ccw/dist/tools/index.d.ts.map +1 -1
  84. package/ccw/dist/tools/index.js +2 -0
  85. package/ccw/dist/tools/index.js.map +1 -1
  86. package/ccw/dist/tools/litellm-client.d.ts +85 -0
  87. package/ccw/dist/tools/litellm-client.d.ts.map +1 -0
  88. package/ccw/dist/tools/litellm-client.js +188 -0
  89. package/ccw/dist/tools/litellm-client.js.map +1 -0
  90. package/ccw/dist/tools/litellm-executor.d.ts +34 -0
  91. package/ccw/dist/tools/litellm-executor.d.ts.map +1 -0
  92. package/ccw/dist/tools/litellm-executor.js +192 -0
  93. package/ccw/dist/tools/litellm-executor.js.map +1 -0
  94. package/ccw/dist/tools/pattern-parser.d.ts +55 -0
  95. package/ccw/dist/tools/pattern-parser.d.ts.map +1 -0
  96. package/ccw/dist/tools/pattern-parser.js +237 -0
  97. package/ccw/dist/tools/pattern-parser.js.map +1 -0
  98. package/ccw/dist/tools/smart-search.d.ts +1 -0
  99. package/ccw/dist/tools/smart-search.d.ts.map +1 -1
  100. package/ccw/dist/tools/smart-search.js +117 -41
  101. package/ccw/dist/tools/smart-search.js.map +1 -1
  102. package/ccw/dist/types/litellm-api-config.d.ts +294 -0
  103. package/ccw/dist/types/litellm-api-config.d.ts.map +1 -0
  104. package/ccw/dist/types/litellm-api-config.js +8 -0
  105. package/ccw/dist/types/litellm-api-config.js.map +1 -0
  106. package/ccw/src/cli.ts +258 -244
  107. package/ccw/src/commands/cli.ts +153 -9
  108. package/ccw/src/commands/hook.ts +3 -2
  109. package/ccw/src/config/.litellm-api-config-manager.ts.2025-12-23T11-57-43-727Z.bak +441 -0
  110. package/ccw/src/config/litellm-api-config-manager.ts +1012 -0
  111. package/ccw/src/config/provider-models.ts +222 -0
  112. package/ccw/src/core/cache-manager.ts +292 -294
  113. package/ccw/src/core/dashboard-generator.ts +3 -1
  114. package/ccw/src/core/routes/cli-routes.ts +192 -0
  115. package/ccw/src/core/routes/codexlens-routes.ts +241 -19
  116. package/ccw/src/core/routes/hooks-routes.ts +399 -405
  117. package/ccw/src/core/routes/litellm-api-routes.ts +930 -0
  118. package/ccw/src/core/routes/litellm-routes.ts +107 -0
  119. package/ccw/src/core/routes/mcp-routes.ts +1271 -1271
  120. package/ccw/src/core/routes/status-routes.ts +51 -0
  121. package/ccw/src/core/routes/system-routes.ts +1 -1
  122. package/ccw/src/core/server.ts +15 -1
  123. package/ccw/src/mcp-server/index.ts +1 -1
  124. package/ccw/src/templates/dashboard-css/12-cli-legacy.css +44 -0
  125. package/ccw/src/templates/dashboard-css/31-api-settings.css +2265 -0
  126. package/ccw/src/templates/dashboard-js/components/cli-history.js +15 -8
  127. package/ccw/src/templates/dashboard-js/components/cli-status.js +323 -9
  128. package/ccw/src/templates/dashboard-js/components/navigation.js +329 -313
  129. package/ccw/src/templates/dashboard-js/i18n.js +583 -1
  130. package/ccw/src/templates/dashboard-js/views/api-settings.js +3362 -0
  131. package/ccw/src/templates/dashboard-js/views/cli-manager.js +199 -24
  132. package/ccw/src/templates/dashboard-js/views/codexlens-manager.js +1265 -27
  133. package/ccw/src/templates/dashboard.html +840 -831
  134. package/ccw/src/tools/claude-cli-tools.ts +300 -0
  135. package/ccw/src/tools/cli-executor.ts +83 -14
  136. package/ccw/src/tools/codex-lens.ts +146 -9
  137. package/ccw/src/tools/context-cache-store.ts +368 -0
  138. package/ccw/src/tools/context-cache.ts +393 -0
  139. package/ccw/src/tools/core-memory.ts +33 -19
  140. package/ccw/src/tools/index.ts +2 -0
  141. package/ccw/src/tools/litellm-client.ts +246 -0
  142. package/ccw/src/tools/litellm-executor.ts +241 -0
  143. package/ccw/src/tools/pattern-parser.ts +329 -0
  144. package/ccw/src/tools/smart-search.ts +142 -41
  145. package/ccw/src/types/litellm-api-config.ts +402 -0
  146. package/ccw-litellm/README.md +180 -0
  147. package/ccw-litellm/pyproject.toml +35 -0
  148. package/ccw-litellm/src/ccw_litellm/__init__.py +47 -0
  149. package/ccw-litellm/src/ccw_litellm/__pycache__/__init__.cpython-313.pyc +0 -0
  150. package/ccw-litellm/src/ccw_litellm/__pycache__/cli.cpython-313.pyc +0 -0
  151. package/ccw-litellm/src/ccw_litellm/cli.py +108 -0
  152. package/ccw-litellm/src/ccw_litellm/clients/__init__.py +12 -0
  153. package/ccw-litellm/src/ccw_litellm/clients/__pycache__/__init__.cpython-313.pyc +0 -0
  154. package/ccw-litellm/src/ccw_litellm/clients/__pycache__/litellm_embedder.cpython-313.pyc +0 -0
  155. package/ccw-litellm/src/ccw_litellm/clients/__pycache__/litellm_llm.cpython-313.pyc +0 -0
  156. package/ccw-litellm/src/ccw_litellm/clients/litellm_embedder.py +251 -0
  157. package/ccw-litellm/src/ccw_litellm/clients/litellm_llm.py +165 -0
  158. package/ccw-litellm/src/ccw_litellm/config/__init__.py +22 -0
  159. package/ccw-litellm/src/ccw_litellm/config/__pycache__/__init__.cpython-313.pyc +0 -0
  160. package/ccw-litellm/src/ccw_litellm/config/__pycache__/loader.cpython-313.pyc +0 -0
  161. package/ccw-litellm/src/ccw_litellm/config/__pycache__/models.cpython-313.pyc +0 -0
  162. package/ccw-litellm/src/ccw_litellm/config/loader.py +316 -0
  163. package/ccw-litellm/src/ccw_litellm/config/models.py +130 -0
  164. package/ccw-litellm/src/ccw_litellm/interfaces/__init__.py +14 -0
  165. package/ccw-litellm/src/ccw_litellm/interfaces/__pycache__/__init__.cpython-313.pyc +0 -0
  166. package/ccw-litellm/src/ccw_litellm/interfaces/__pycache__/embedder.cpython-313.pyc +0 -0
  167. package/ccw-litellm/src/ccw_litellm/interfaces/__pycache__/llm.cpython-313.pyc +0 -0
  168. package/ccw-litellm/src/ccw_litellm/interfaces/embedder.py +52 -0
  169. package/ccw-litellm/src/ccw_litellm/interfaces/llm.py +45 -0
  170. package/codex-lens/src/codexlens/__pycache__/config.cpython-313.pyc +0 -0
  171. package/codex-lens/src/codexlens/cli/__pycache__/commands.cpython-313.pyc +0 -0
  172. package/codex-lens/src/codexlens/cli/__pycache__/embedding_manager.cpython-313.pyc +0 -0
  173. package/codex-lens/src/codexlens/cli/__pycache__/model_manager.cpython-313.pyc +0 -0
  174. package/codex-lens/src/codexlens/cli/__pycache__/output.cpython-313.pyc +0 -0
  175. package/codex-lens/src/codexlens/cli/commands.py +378 -23
  176. package/codex-lens/src/codexlens/cli/embedding_manager.py +660 -56
  177. package/codex-lens/src/codexlens/cli/model_manager.py +31 -18
  178. package/codex-lens/src/codexlens/cli/output.py +12 -1
  179. package/codex-lens/src/codexlens/config.py +93 -0
  180. package/codex-lens/src/codexlens/search/__pycache__/chain_search.cpython-313.pyc +0 -0
  181. package/codex-lens/src/codexlens/search/__pycache__/hybrid_search.cpython-313.pyc +0 -0
  182. package/codex-lens/src/codexlens/search/__pycache__/ranking.cpython-313.pyc +0 -0
  183. package/codex-lens/src/codexlens/search/chain_search.py +6 -2
  184. package/codex-lens/src/codexlens/search/hybrid_search.py +44 -21
  185. package/codex-lens/src/codexlens/search/ranking.py +1 -1
  186. package/codex-lens/src/codexlens/semantic/__init__.py +42 -0
  187. package/codex-lens/src/codexlens/semantic/__pycache__/__init__.cpython-313.pyc +0 -0
  188. package/codex-lens/src/codexlens/semantic/__pycache__/base.cpython-313.pyc +0 -0
  189. package/codex-lens/src/codexlens/semantic/__pycache__/chunker.cpython-313.pyc +0 -0
  190. package/codex-lens/src/codexlens/semantic/__pycache__/embedder.cpython-313.pyc +0 -0
  191. package/codex-lens/src/codexlens/semantic/__pycache__/factory.cpython-313.pyc +0 -0
  192. package/codex-lens/src/codexlens/semantic/__pycache__/gpu_support.cpython-313.pyc +0 -0
  193. package/codex-lens/src/codexlens/semantic/__pycache__/litellm_embedder.cpython-313.pyc +0 -0
  194. package/codex-lens/src/codexlens/semantic/__pycache__/vector_store.cpython-313.pyc +0 -0
  195. package/codex-lens/src/codexlens/semantic/base.py +61 -0
  196. package/codex-lens/src/codexlens/semantic/chunker.py +43 -20
  197. package/codex-lens/src/codexlens/semantic/embedder.py +60 -13
  198. package/codex-lens/src/codexlens/semantic/factory.py +98 -0
  199. package/codex-lens/src/codexlens/semantic/gpu_support.py +225 -3
  200. package/codex-lens/src/codexlens/semantic/litellm_embedder.py +144 -0
  201. package/codex-lens/src/codexlens/semantic/rotational_embedder.py +434 -0
  202. package/codex-lens/src/codexlens/semantic/vector_store.py +33 -8
  203. package/codex-lens/src/codexlens/storage/__pycache__/path_mapper.cpython-313.pyc +0 -0
  204. package/codex-lens/src/codexlens/storage/migrations/__pycache__/migration_004_dual_fts.cpython-313.pyc +0 -0
  205. package/codex-lens/src/codexlens/storage/path_mapper.py +27 -1
  206. package/package.json +15 -5
  207. package/.codex/prompts.zip +0 -0
  208. package/ccw/package.json +0 -65
@@ -0,0 +1,316 @@
1
+ """Configuration loader with environment variable substitution."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import os
7
+ import re
8
+ from pathlib import Path
9
+ from typing import Any
10
+
11
+ import yaml
12
+
13
+ from .models import LiteLLMConfig
14
+
15
+ # Default configuration paths
16
+ # JSON format (UI config) takes priority over YAML format
17
+ DEFAULT_JSON_CONFIG_PATH = Path.home() / ".ccw" / "config" / "litellm-api-config.json"
18
+ DEFAULT_YAML_CONFIG_PATH = Path.home() / ".ccw" / "config" / "litellm-config.yaml"
19
+ # Keep backward compatibility
20
+ DEFAULT_CONFIG_PATH = DEFAULT_YAML_CONFIG_PATH
21
+
22
+ # Global configuration singleton
23
+ _config_instance: LiteLLMConfig | None = None
24
+
25
+
26
+ def _substitute_env_vars(value: Any) -> Any:
27
+ """Recursively substitute environment variables in configuration values.
28
+
29
+ Supports ${ENV_VAR} and ${ENV_VAR:-default} syntax.
30
+
31
+ Args:
32
+ value: Configuration value (str, dict, list, or primitive)
33
+
34
+ Returns:
35
+ Value with environment variables substituted
36
+ """
37
+ if isinstance(value, str):
38
+ # Pattern: ${VAR} or ${VAR:-default}
39
+ pattern = r"\$\{([^:}]+)(?::-(.*?))?\}"
40
+
41
+ def replace_var(match: re.Match) -> str:
42
+ var_name = match.group(1)
43
+ default_value = match.group(2) if match.group(2) is not None else ""
44
+ return os.environ.get(var_name, default_value)
45
+
46
+ return re.sub(pattern, replace_var, value)
47
+
48
+ if isinstance(value, dict):
49
+ return {k: _substitute_env_vars(v) for k, v in value.items()}
50
+
51
+ if isinstance(value, list):
52
+ return [_substitute_env_vars(item) for item in value]
53
+
54
+ return value
55
+
56
+
57
+ def _get_default_config() -> dict[str, Any]:
58
+ """Get default configuration when no config file exists.
59
+
60
+ Returns:
61
+ Default configuration dictionary
62
+ """
63
+ return {
64
+ "version": 1,
65
+ "default_provider": "openai",
66
+ "providers": {
67
+ "openai": {
68
+ "api_key": "${OPENAI_API_KEY}",
69
+ "api_base": "https://api.openai.com/v1",
70
+ },
71
+ },
72
+ "llm_models": {
73
+ "default": {
74
+ "provider": "openai",
75
+ "model": "gpt-4",
76
+ },
77
+ "fast": {
78
+ "provider": "openai",
79
+ "model": "gpt-3.5-turbo",
80
+ },
81
+ },
82
+ "embedding_models": {
83
+ "default": {
84
+ "provider": "openai",
85
+ "model": "text-embedding-3-small",
86
+ "dimensions": 1536,
87
+ },
88
+ },
89
+ }
90
+
91
+
92
+ def _convert_json_to_internal_format(json_config: dict[str, Any]) -> dict[str, Any]:
93
+ """Convert UI JSON config format to internal format.
94
+
95
+ The UI stores config in a different structure:
96
+ - providers: array of {id, name, type, apiKey, apiBase, llmModels[], embeddingModels[]}
97
+
98
+ Internal format uses:
99
+ - providers: dict of {provider_id: {api_key, api_base}}
100
+ - llm_models: dict of {model_id: {provider, model}}
101
+ - embedding_models: dict of {model_id: {provider, model, dimensions}}
102
+
103
+ Args:
104
+ json_config: Configuration in UI JSON format
105
+
106
+ Returns:
107
+ Configuration in internal format
108
+ """
109
+ providers: dict[str, Any] = {}
110
+ llm_models: dict[str, Any] = {}
111
+ embedding_models: dict[str, Any] = {}
112
+ default_provider: str | None = None
113
+
114
+ for provider in json_config.get("providers", []):
115
+ if not provider.get("enabled", True):
116
+ continue
117
+
118
+ provider_id = provider.get("id", "")
119
+ if not provider_id:
120
+ continue
121
+
122
+ # Set first enabled provider as default
123
+ if default_provider is None:
124
+ default_provider = provider_id
125
+
126
+ # Convert provider with advanced settings
127
+ provider_config: dict[str, Any] = {
128
+ "api_key": provider.get("apiKey", ""),
129
+ "api_base": provider.get("apiBase"),
130
+ }
131
+
132
+ # Map advanced settings
133
+ adv = provider.get("advancedSettings", {})
134
+ if adv.get("timeout"):
135
+ provider_config["timeout"] = adv["timeout"]
136
+ if adv.get("maxRetries"):
137
+ provider_config["max_retries"] = adv["maxRetries"]
138
+ if adv.get("organization"):
139
+ provider_config["organization"] = adv["organization"]
140
+ if adv.get("apiVersion"):
141
+ provider_config["api_version"] = adv["apiVersion"]
142
+ if adv.get("customHeaders"):
143
+ provider_config["custom_headers"] = adv["customHeaders"]
144
+
145
+ providers[provider_id] = provider_config
146
+
147
+ # Convert LLM models
148
+ for model in provider.get("llmModels", []):
149
+ if not model.get("enabled", True):
150
+ continue
151
+ model_id = model.get("id", "")
152
+ if not model_id:
153
+ continue
154
+
155
+ llm_model_config: dict[str, Any] = {
156
+ "provider": provider_id,
157
+ "model": model.get("name", ""),
158
+ }
159
+ # Add model-specific endpoint settings
160
+ endpoint = model.get("endpointSettings", {})
161
+ if endpoint.get("baseUrl"):
162
+ llm_model_config["api_base"] = endpoint["baseUrl"]
163
+ if endpoint.get("timeout"):
164
+ llm_model_config["timeout"] = endpoint["timeout"]
165
+ if endpoint.get("maxRetries"):
166
+ llm_model_config["max_retries"] = endpoint["maxRetries"]
167
+
168
+ # Add capabilities
169
+ caps = model.get("capabilities", {})
170
+ if caps.get("contextWindow"):
171
+ llm_model_config["context_window"] = caps["contextWindow"]
172
+ if caps.get("maxOutputTokens"):
173
+ llm_model_config["max_output_tokens"] = caps["maxOutputTokens"]
174
+
175
+ llm_models[model_id] = llm_model_config
176
+
177
+ # Convert embedding models
178
+ for model in provider.get("embeddingModels", []):
179
+ if not model.get("enabled", True):
180
+ continue
181
+ model_id = model.get("id", "")
182
+ if not model_id:
183
+ continue
184
+
185
+ embedding_model_config: dict[str, Any] = {
186
+ "provider": provider_id,
187
+ "model": model.get("name", ""),
188
+ "dimensions": model.get("capabilities", {}).get("embeddingDimension", 1536),
189
+ }
190
+ # Add model-specific endpoint settings
191
+ endpoint = model.get("endpointSettings", {})
192
+ if endpoint.get("baseUrl"):
193
+ embedding_model_config["api_base"] = endpoint["baseUrl"]
194
+ if endpoint.get("timeout"):
195
+ embedding_model_config["timeout"] = endpoint["timeout"]
196
+
197
+ embedding_models[model_id] = embedding_model_config
198
+
199
+ # Ensure we have defaults if no models found
200
+ if not llm_models:
201
+ llm_models["default"] = {
202
+ "provider": default_provider or "openai",
203
+ "model": "gpt-4",
204
+ }
205
+
206
+ if not embedding_models:
207
+ embedding_models["default"] = {
208
+ "provider": default_provider or "openai",
209
+ "model": "text-embedding-3-small",
210
+ "dimensions": 1536,
211
+ }
212
+
213
+ return {
214
+ "version": json_config.get("version", 1),
215
+ "default_provider": default_provider or "openai",
216
+ "providers": providers,
217
+ "llm_models": llm_models,
218
+ "embedding_models": embedding_models,
219
+ }
220
+
221
+
222
def load_config(config_path: Path | str | None = None) -> LiteLLMConfig:
    """Load LiteLLM configuration from a JSON or YAML file.

    Priority order:
        1. Explicit ``config_path`` if provided and the file exists
        2. JSON config (UI format): ~/.ccw/config/litellm-api-config.json
        3. YAML config: ~/.ccw/config/litellm-config.yaml
        4. Built-in default configuration

    An explicit ``config_path`` pointing at a missing file is silently
    skipped and the default search continues. JSON files (by suffix) are
    converted from the UI format to the internal format; everything else
    is parsed as YAML. Environment variables are substituted before
    validation.

    NOTE(review): despite earlier documentation, ``FileNotFoundError`` is
    never raised here — a built-in default always exists, so the function
    only raises ``ValueError``. Confirm callers do not rely on a missing
    explicit path being reported.

    Args:
        config_path: Path to configuration file (optional).

    Returns:
        Parsed and validated configuration.

    Raises:
        ValueError: If an explicit config file cannot be read/parsed, or
            if the final configuration fails Pydantic validation.
    """
    raw_config: dict[str, Any] | None = None
    # Tracks whether the loaded data is in the UI JSON shape and therefore
    # needs conversion to the internal shape before validation.
    is_json_format = False

    if config_path is not None:
        config_path = Path(config_path)
        if config_path.exists():
            try:
                with open(config_path, "r", encoding="utf-8") as f:
                    if config_path.suffix == ".json":
                        raw_config = json.load(f)
                        is_json_format = True
                    else:
                        raw_config = yaml.safe_load(f)
            except Exception as e:
                # Explicitly requested files fail loudly, unlike the
                # best-effort default locations below.
                raise ValueError(f"Failed to load configuration from {config_path}: {e}") from e

    # Check the JSON config first (UI format takes priority over YAML).
    if raw_config is None and DEFAULT_JSON_CONFIG_PATH.exists():
        try:
            with open(DEFAULT_JSON_CONFIG_PATH, "r", encoding="utf-8") as f:
                raw_config = json.load(f)
                is_json_format = True
        except Exception:
            pass  # Best-effort: fall through to YAML

    # Check the YAML config.
    if raw_config is None and DEFAULT_YAML_CONFIG_PATH.exists():
        try:
            with open(DEFAULT_YAML_CONFIG_PATH, "r", encoding="utf-8") as f:
                raw_config = yaml.safe_load(f)
        except Exception:
            pass  # Best-effort: fall through to defaults

    # Fall back to the built-in default configuration.
    if raw_config is None:
        raw_config = _get_default_config()

    # Convert UI JSON shape to internal shape if needed.
    if is_json_format:
        raw_config = _convert_json_to_internal_format(raw_config)

    # Expand ${VAR} / ${VAR:-default} references throughout the tree.
    config_data = _substitute_env_vars(raw_config)

    # Validate and parse with Pydantic.
    try:
        return LiteLLMConfig.model_validate(config_data)
    except Exception as e:
        raise ValueError(f"Invalid configuration: {e}") from e
290
+
291
+
292
def get_config(config_path: Path | str | None = None, reload: bool = False) -> LiteLLMConfig:
    """Return the process-wide configuration singleton, loading on first use.

    Args:
        config_path: Optional path to a configuration file; only consulted
            when the singleton is (re)loaded.
        reload: When True, discard the cached instance and re-read from disk.

    Returns:
        The shared configuration instance.
    """
    global _config_instance

    needs_load = reload or _config_instance is None
    if needs_load:
        _config_instance = load_config(config_path)

    return _config_instance
308
+
309
+
310
+ def reset_config() -> None:
311
+ """Reset global configuration singleton.
312
+
313
+ Useful for testing.
314
+ """
315
+ global _config_instance
316
+ _config_instance = None
@@ -0,0 +1,130 @@
1
+ """Pydantic configuration models for LiteLLM integration."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any
6
+
7
+ from pydantic import BaseModel, Field
8
+
9
+
10
class ProviderConfig(BaseModel):
    """Provider API configuration.

    Values support environment variable substitution in the format
    ``${ENV_VAR}`` (resolved by the loader before validation).
    """

    # API key for the provider; None when supplied via environment/defaults.
    api_key: str | None = None
    # Base URL of the provider's API endpoint, if non-standard.
    api_base: str | None = None

    # Extra keys (timeout, organization, custom headers, ...) are kept
    # rather than rejected, since the loader maps advanced UI settings here.
    model_config = {"extra": "allow"}
20
+
21
+
22
class LLMModelConfig(BaseModel):
    """LLM model configuration: which provider serves which model name."""

    # Key into LiteLLMConfig.providers.
    provider: str
    # Provider-specific model identifier (e.g. "gpt-4").
    model: str

    # Extra keys (api_base, timeout, context_window, ...) are preserved.
    model_config = {"extra": "allow"}
29
+
30
+
31
class EmbeddingModelConfig(BaseModel):
    """Embedding model configuration."""

    # Key into LiteLLMConfig.providers: "openai", "fastembed", "ollama", etc.
    provider: str
    # Provider-specific model identifier (e.g. "text-embedding-3-small").
    model: str
    # Output vector dimensionality; required so stores can pre-size indexes.
    dimensions: int

    # Extra keys (api_base, timeout, ...) are preserved.
    model_config = {"extra": "allow"}
39
+
40
+
41
class LiteLLMConfig(BaseModel):
    """Root configuration for LiteLLM integration.

    Example YAML:
        version: 1
        default_provider: openai
        providers:
          openai:
            api_key: ${OPENAI_API_KEY}
            api_base: https://api.openai.com/v1
          anthropic:
            api_key: ${ANTHROPIC_API_KEY}
        llm_models:
          default:
            provider: openai
            model: gpt-4
          fast:
            provider: openai
            model: gpt-3.5-turbo
        embedding_models:
          default:
            provider: openai
            model: text-embedding-3-small
            dimensions: 1536
    """

    # Config schema version (currently 1).
    version: int = 1
    # Provider used when no explicit provider is requested.
    default_provider: str = "openai"
    providers: dict[str, ProviderConfig] = Field(default_factory=dict)
    llm_models: dict[str, LLMModelConfig] = Field(default_factory=dict)
    embedding_models: dict[str, EmbeddingModelConfig] = Field(default_factory=dict)

    model_config = {"extra": "allow"}

    def _lookup(self, registry: dict[str, Any], name: str, label: str, plural: str) -> Any:
        """Return ``registry[name]`` or raise ValueError listing available keys.

        Shared by the three public getters so the error format stays
        consistent in one place.
        """
        if name not in registry:
            raise ValueError(
                f"{label} '{name}' not found in configuration. "
                f"Available {plural}: {list(registry.keys())}"
            )
        return registry[name]

    def get_llm_model(self, model: str = "default") -> LLMModelConfig:
        """Get LLM model configuration by name.

        Args:
            model: Model name or "default".

        Returns:
            LLM model configuration.

        Raises:
            ValueError: If model not found.
        """
        return self._lookup(self.llm_models, model, "LLM model", "models")

    def get_embedding_model(self, model: str = "default") -> EmbeddingModelConfig:
        """Get embedding model configuration by name.

        Args:
            model: Model name or "default".

        Returns:
            Embedding model configuration.

        Raises:
            ValueError: If model not found.
        """
        return self._lookup(self.embedding_models, model, "Embedding model", "models")

    def get_provider(self, provider: str) -> ProviderConfig:
        """Get provider configuration by name.

        Args:
            provider: Provider name.

        Returns:
            Provider configuration.

        Raises:
            ValueError: If provider not found.
        """
        return self._lookup(self.providers, provider, "Provider", "providers")
@@ -0,0 +1,14 @@
1
+ """Abstract interfaces for ccw-litellm."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from .embedder import AbstractEmbedder
6
+ from .llm import AbstractLLMClient, ChatMessage, LLMResponse
7
+
8
+ __all__ = [
9
+ "AbstractEmbedder",
10
+ "AbstractLLMClient",
11
+ "ChatMessage",
12
+ "LLMResponse",
13
+ ]
14
+
@@ -0,0 +1,52 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ from abc import ABC, abstractmethod
5
+ from typing import Any, Sequence
6
+
7
+ import numpy as np
8
+ from numpy.typing import NDArray
9
+
10
+
11
class AbstractEmbedder(ABC):
    """Fastembed-compatible embedding interface.

    Subclasses implement the synchronous `embed`; `aembed` is a ready-made
    async wrapper that runs `embed` on a worker thread.
    """

    @property
    @abstractmethod
    def dimensions(self) -> int:
        """Embedding vector size."""

    @abstractmethod
    def embed(
        self,
        texts: str | Sequence[str],
        *,
        batch_size: int | None = None,
        **kwargs: Any,
    ) -> NDArray[np.floating]:
        """Embed one or more texts.

        Returns:
            A numpy array of shape (n_texts, dimensions).
        """

    async def aembed(
        self,
        texts: str | Sequence[str],
        *,
        batch_size: int | None = None,
        **kwargs: Any,
    ) -> NDArray[np.floating]:
        """Run `embed` off the event loop via `asyncio.to_thread`."""
        result = await asyncio.to_thread(
            self.embed,
            texts,
            batch_size=batch_size,
            **kwargs,
        )
        return result
52
+
@@ -0,0 +1,45 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ from abc import ABC, abstractmethod
5
+ from dataclasses import dataclass
6
+ from typing import Any, Literal, Sequence
7
+
8
+
9
@dataclass(frozen=True, slots=True)
class ChatMessage:
    """One immutable chat-conversation message."""

    # Author of the message, restricted to the standard chat roles.
    role: Literal["system", "user", "assistant", "tool"]
    # Plain-text message body.
    content: str
13
+
14
+
15
@dataclass(frozen=True, slots=True)
class LLMResponse:
    """Immutable result of an LLM call."""

    # Generated text returned by the model.
    content: str
    # Provider-specific raw response object, when the client retains it.
    raw: Any | None = None
19
+
20
+
21
class AbstractLLMClient(ABC):
    """Minimal LiteLLM-style client interface.

    Subclasses supply the synchronous `chat` and `complete`; the async
    variants delegate to them on a worker thread via `asyncio.to_thread`.
    """

    @abstractmethod
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> LLMResponse:
        """Chat completion for a sequence of messages."""

    @abstractmethod
    def complete(self, prompt: str, **kwargs: Any) -> LLMResponse:
        """Text completion for a prompt."""

    async def achat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> LLMResponse:
        """Async counterpart of `chat` (worker-thread wrapper)."""
        result = await asyncio.to_thread(self.chat, messages, **kwargs)
        return result

    async def acomplete(self, prompt: str, **kwargs: Any) -> LLMResponse:
        """Async counterpart of `complete` (worker-thread wrapper)."""
        result = await asyncio.to_thread(self.complete, prompt, **kwargs)
        return result
45
+