@sylphx/flow 1.7.0 → 1.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/CHANGELOG.md +78 -0
  2. package/assets/agents/coder.md +72 -119
  3. package/assets/agents/orchestrator.md +26 -90
  4. package/assets/agents/reviewer.md +76 -47
  5. package/assets/agents/writer.md +82 -63
  6. package/assets/output-styles/silent.md +141 -8
  7. package/assets/rules/code-standards.md +9 -33
  8. package/assets/rules/core.md +67 -59
  9. package/package.json +2 -12
  10. package/src/commands/flow/execute.ts +470 -0
  11. package/src/commands/flow/index.ts +11 -0
  12. package/src/commands/flow/prompt.ts +35 -0
  13. package/src/commands/flow/setup.ts +312 -0
  14. package/src/commands/flow/targets.ts +18 -0
  15. package/src/commands/flow/types.ts +47 -0
  16. package/src/commands/flow-command.ts +18 -967
  17. package/src/commands/flow-orchestrator.ts +14 -5
  18. package/src/commands/hook-command.ts +1 -1
  19. package/src/commands/init-core.ts +12 -3
  20. package/src/commands/run-command.ts +1 -1
  21. package/src/config/rules.ts +1 -1
  22. package/src/core/error-handling.ts +1 -1
  23. package/src/core/loop-controller.ts +1 -1
  24. package/src/core/state-detector.ts +1 -1
  25. package/src/core/target-manager.ts +1 -1
  26. package/src/index.ts +1 -1
  27. package/src/shared/files/index.ts +1 -1
  28. package/src/shared/processing/index.ts +1 -1
  29. package/src/targets/claude-code.ts +3 -3
  30. package/src/targets/opencode.ts +3 -3
  31. package/src/utils/agent-enhancer.ts +2 -2
  32. package/src/utils/{mcp-config.ts → config/mcp-config.ts} +4 -4
  33. package/src/utils/{paths.ts → config/paths.ts} +1 -1
  34. package/src/utils/{settings.ts → config/settings.ts} +1 -1
  35. package/src/utils/{target-config.ts → config/target-config.ts} +5 -5
  36. package/src/utils/{target-utils.ts → config/target-utils.ts} +3 -3
  37. package/src/utils/display/banner.ts +25 -0
  38. package/src/utils/display/status.ts +55 -0
  39. package/src/utils/{file-operations.ts → files/file-operations.ts} +2 -2
  40. package/src/utils/files/jsonc.ts +36 -0
  41. package/src/utils/{sync-utils.ts → files/sync-utils.ts} +3 -3
  42. package/src/utils/index.ts +42 -61
  43. package/src/utils/version.ts +47 -0
  44. package/src/components/benchmark-monitor.tsx +0 -331
  45. package/src/components/reindex-progress.tsx +0 -261
  46. package/src/composables/functional/index.ts +0 -14
  47. package/src/composables/functional/useEnvironment.ts +0 -171
  48. package/src/composables/functional/useFileSystem.ts +0 -139
  49. package/src/composables/index.ts +0 -4
  50. package/src/composables/useEnv.ts +0 -13
  51. package/src/composables/useRuntimeConfig.ts +0 -27
  52. package/src/core/ai-sdk.ts +0 -603
  53. package/src/core/app-factory.ts +0 -381
  54. package/src/core/builtin-agents.ts +0 -9
  55. package/src/core/command-system.ts +0 -550
  56. package/src/core/config-system.ts +0 -550
  57. package/src/core/connection-pool.ts +0 -390
  58. package/src/core/di-container.ts +0 -155
  59. package/src/core/headless-display.ts +0 -96
  60. package/src/core/interfaces/index.ts +0 -22
  61. package/src/core/interfaces/repository.interface.ts +0 -91
  62. package/src/core/interfaces/service.interface.ts +0 -133
  63. package/src/core/interfaces.ts +0 -96
  64. package/src/core/result.ts +0 -351
  65. package/src/core/service-config.ts +0 -252
  66. package/src/core/session-service.ts +0 -121
  67. package/src/core/storage-factory.ts +0 -115
  68. package/src/core/stream-handler.ts +0 -288
  69. package/src/core/type-utils.ts +0 -427
  70. package/src/core/unified-storage.ts +0 -456
  71. package/src/core/validation/limit.ts +0 -46
  72. package/src/core/validation/query.ts +0 -20
  73. package/src/db/auto-migrate.ts +0 -322
  74. package/src/db/base-database-client.ts +0 -144
  75. package/src/db/cache-db.ts +0 -218
  76. package/src/db/cache-schema.ts +0 -75
  77. package/src/db/database.ts +0 -70
  78. package/src/db/index.ts +0 -252
  79. package/src/db/memory-db.ts +0 -153
  80. package/src/db/memory-schema.ts +0 -29
  81. package/src/db/schema.ts +0 -289
  82. package/src/db/session-repository.ts +0 -733
  83. package/src/domains/index.ts +0 -6
  84. package/src/domains/utilities/index.ts +0 -6
  85. package/src/domains/utilities/time/index.ts +0 -5
  86. package/src/domains/utilities/time/tools.ts +0 -291
  87. package/src/services/agent-service.ts +0 -273
  88. package/src/services/evaluation-service.ts +0 -271
  89. package/src/services/functional/evaluation-logic.ts +0 -296
  90. package/src/services/functional/file-processor.ts +0 -273
  91. package/src/services/functional/index.ts +0 -12
  92. package/src/services/memory.service.ts +0 -476
  93. package/src/types/api/batch.ts +0 -108
  94. package/src/types/api/errors.ts +0 -118
  95. package/src/types/api/index.ts +0 -55
  96. package/src/types/api/requests.ts +0 -76
  97. package/src/types/api/responses.ts +0 -180
  98. package/src/types/api/websockets.ts +0 -85
  99. package/src/types/benchmark.ts +0 -49
  100. package/src/types/database.types.ts +0 -510
  101. package/src/types/memory-types.ts +0 -63
  102. package/src/utils/advanced-tokenizer.ts +0 -191
  103. package/src/utils/ai-model-fetcher.ts +0 -19
  104. package/src/utils/async-file-operations.ts +0 -516
  105. package/src/utils/audio-player.ts +0 -345
  106. package/src/utils/codebase-helpers.ts +0 -211
  107. package/src/utils/console-ui.ts +0 -79
  108. package/src/utils/database-errors.ts +0 -140
  109. package/src/utils/debug-logger.ts +0 -49
  110. package/src/utils/file-scanner.ts +0 -259
  111. package/src/utils/help.ts +0 -20
  112. package/src/utils/immutable-cache.ts +0 -106
  113. package/src/utils/jsonc.ts +0 -158
  114. package/src/utils/memory-tui.ts +0 -414
  115. package/src/utils/models-dev.ts +0 -91
  116. package/src/utils/parallel-operations.ts +0 -487
  117. package/src/utils/process-manager.ts +0 -155
  118. package/src/utils/prompts.ts +0 -120
  119. package/src/utils/search-tool-builder.ts +0 -214
  120. package/src/utils/session-manager.ts +0 -168
  121. package/src/utils/session-title.ts +0 -87
  122. package/src/utils/simplified-errors.ts +0 -410
  123. package/src/utils/template-engine.ts +0 -94
  124. package/src/utils/test-audio.ts +0 -71
  125. package/src/utils/todo-context.ts +0 -46
  126. package/src/utils/token-counter.ts +0 -288
  127. package/src/utils/{cli-output.ts → display/cli-output.ts} +0 -0
  128. package/src/utils/{logger.ts → display/logger.ts} +0 -0
  129. package/src/utils/{notifications.ts → display/notifications.ts} +0 -0
  130. package/src/utils/{secret-utils.ts → security/secret-utils.ts} +0 -0
  131. package/src/utils/{security.ts → security/security.ts} +0 -0
@@ -1,288 +0,0 @@
1
- /**
2
- * Token Counter Utility
3
- * BPE-based token counting using Hugging Face AutoTokenizer
4
- *
5
- * Primary method: BPE tokenizer (auto-selected by Hugging Face)
6
- * Fallback: Fast estimation when tokenizer unavailable
7
- */
8
-
9
- import { AutoTokenizer } from '@huggingface/transformers';
10
-
11
- // Cache for multiple tokenizers (keyed by tokenizer name)
12
- // Limited to 3 tokenizers to prevent memory leak (each ~100-500MB)
13
- const tokenizerCache = new Map<string, any>();
14
- const tokenizerInitializing = new Set<string>();
15
- const tokenizerFailed = new Set<string>();
16
- const MAX_CACHED_TOKENIZERS = 3;
17
-
18
- /**
19
- * Map provider model names to tokenizer names
20
- * AutoTokenizer will automatically find the right tokenizer for each model
21
- */
22
- const MODEL_TO_TOKENIZER: Record<string, string> = {
23
- // OpenAI models
24
- 'gpt-4': 'Xenova/gpt-4',
25
- 'gpt-4-turbo': 'Xenova/gpt-4',
26
- 'gpt-4o': 'Xenova/gpt-4',
27
- 'gpt-3.5-turbo': 'Xenova/gpt-3.5-turbo',
28
- 'gpt-3.5': 'Xenova/gpt-3.5-turbo',
29
-
30
- // Anthropic Claude models
31
- 'claude-3-opus': 'Xenova/claude-tokenizer',
32
- 'claude-3-sonnet': 'Xenova/claude-tokenizer',
33
- 'claude-3-haiku': 'Xenova/claude-tokenizer',
34
- 'claude-3.5-sonnet': 'Xenova/claude-tokenizer',
35
- 'claude-3.5-haiku': 'Xenova/claude-tokenizer',
36
-
37
- // Code models
38
- 'starcoder': 'bigcode/starcoder',
39
- 'starcoder2': 'bigcode/starcoder2-3b',
40
- 'codellama': 'codellama/CodeLlama-7b-hf',
41
-
42
- // Google models
43
- 'gemini': 'Xenova/gpt-4', // Fallback to GPT-4 (no official Gemini tokenizer)
44
-
45
- // Fallback
46
- 'default': 'Xenova/gpt-4',
47
- };
48
-
49
- /**
50
- * Get tokenizer name for a model
51
- * AutoTokenizer will find the right tokenizer automatically
52
- */
53
- function getTokenizerForModel(modelName?: string): string {
54
- if (!modelName) return MODEL_TO_TOKENIZER['default']!;
55
-
56
- // Direct match
57
- if (MODEL_TO_TOKENIZER[modelName]) {
58
- return MODEL_TO_TOKENIZER[modelName]!;
59
- }
60
-
61
- // Fuzzy match (e.g., "gpt-4-turbo-preview" → "gpt-4")
62
- const modelLower = modelName.toLowerCase();
63
- for (const [key, tokenizer] of Object.entries(MODEL_TO_TOKENIZER)) {
64
- if (modelLower.includes(key)) {
65
- return tokenizer;
66
- }
67
- }
68
-
69
- // Default fallback
70
- return MODEL_TO_TOKENIZER['default']!;
71
- }
72
-
73
- /**
74
- * Fast fallback estimation (only when BPE tokenizer unavailable)
75
- * Based on ~3.5 chars per token for code
76
- */
77
- function estimateFallback(text: string): number {
78
- if (!text) return 0;
79
-
80
- const words = text.split(/\s+/).filter(Boolean).length;
81
- const chars = text.length;
82
-
83
- const charBasedEstimate = Math.ceil(chars / 3.5);
84
- const wordBasedEstimate = Math.ceil(words * 1.3);
85
-
86
- return Math.round((charBasedEstimate + wordBasedEstimate) / 2);
87
- }
88
-
89
- /**
90
- * Initialize BPE tokenizer (lazy, cached per tokenizer name)
91
- * Uses Hugging Face AutoTokenizer to automatically select best tokenizer
92
- */
93
- async function ensureTokenizer(modelName?: string): Promise<any | null> {
94
- // Get tokenizer name for this model
95
- const tokenizerName = getTokenizerForModel(modelName);
96
-
97
- // Check if already cached
98
- if (tokenizerCache.has(tokenizerName)) {
99
- return tokenizerCache.get(tokenizerName);
100
- }
101
-
102
- // Check if previous initialization failed
103
- if (tokenizerFailed.has(tokenizerName)) {
104
- return null;
105
- }
106
-
107
- // Wait if initialization in progress for this tokenizer
108
- while (tokenizerInitializing.has(tokenizerName)) {
109
- await new Promise(resolve => setTimeout(resolve, 100));
110
- }
111
-
112
- // Check again after waiting
113
- if (tokenizerCache.has(tokenizerName)) {
114
- return tokenizerCache.get(tokenizerName);
115
- }
116
- if (tokenizerFailed.has(tokenizerName)) {
117
- return null;
118
- }
119
-
120
- // Initialize with Hugging Face AutoTokenizer
121
- try {
122
- tokenizerInitializing.add(tokenizerName);
123
-
124
- // Let Hugging Face auto-select and load the best tokenizer
125
- const tokenizer = await AutoTokenizer.from_pretrained(tokenizerName, {
126
- // Cache models locally for faster subsequent loads
127
- cache_dir: './models/.cache',
128
- // Use local files if available, otherwise download
129
- local_files_only: false,
130
- });
131
-
132
- // Limit cache size - evict oldest tokenizer if limit reached
133
- if (tokenizerCache.size >= MAX_CACHED_TOKENIZERS) {
134
- const oldestKey = tokenizerCache.keys().next().value;
135
- if (oldestKey) {
136
- tokenizerCache.delete(oldestKey);
137
- }
138
- }
139
-
140
- tokenizerCache.set(tokenizerName, tokenizer);
141
- tokenizerInitializing.delete(tokenizerName);
142
- return tokenizer;
143
- } catch (error) {
144
- console.warn('[TokenCounter] BPE tokenizer initialization failed, using fallback estimation:', error);
145
- tokenizerFailed.add(tokenizerName);
146
- tokenizerInitializing.delete(tokenizerName);
147
- return null;
148
- }
149
- }
150
-
151
- /**
152
- * Count tokens using BPE tokenizer (Hugging Face AutoTokenizer)
153
- * Falls back to estimation if tokenizer unavailable
154
- *
155
- * @param text Text to count tokens for
156
- * @param modelName Optional model name to use specific tokenizer
157
- * @returns Token count
158
- */
159
- export async function countTokens(text: string, modelName?: string): Promise<number> {
160
- if (!text) return 0;
161
-
162
- const tokenizer = await ensureTokenizer(modelName);
163
-
164
- if (!tokenizer) {
165
- // Tokenizer unavailable, use fallback
166
- return estimateFallback(text);
167
- }
168
-
169
- try {
170
- // Use Hugging Face tokenizer API
171
- const encoded = await tokenizer(text);
172
-
173
- // Get token count from encoded result
174
- if (encoded.input_ids && encoded.input_ids.size) {
175
- return encoded.input_ids.size;
176
- }
177
-
178
- // Fallback: count array length
179
- if (Array.isArray(encoded.input_ids)) {
180
- return encoded.input_ids.length;
181
- }
182
-
183
- // Fallback: if it's a tensor, get its length
184
- if (encoded.input_ids.data) {
185
- return encoded.input_ids.data.length;
186
- }
187
-
188
- // Last resort fallback
189
- return estimateFallback(text);
190
- } catch (error) {
191
- console.warn('[TokenCounter] Token counting failed, using fallback:', error);
192
- return estimateFallback(text);
193
- }
194
- }
195
-
196
- /**
197
- * Synchronous token estimation (for cases where async is not possible)
198
- * Uses fallback estimation only
199
- */
200
- export function estimateTokens(text: string): number {
201
- return estimateFallback(text);
202
- }
203
-
204
- /**
205
- * Format token count for display
206
- * Examples: 150 -> "150", 1500 -> "1.5K", 1500000 -> "1.5M"
207
- */
208
- export function formatTokenCount(count: number): string {
209
- if (count < 1000) {
210
- return count.toString();
211
- }
212
-
213
- if (count < 1000000) {
214
- const k = count / 1000;
215
- return `${k.toFixed(1)}K`;
216
- }
217
-
218
- const m = count / 1000000;
219
- return `${m.toFixed(1)}M`;
220
- }
221
-
222
- /**
223
- * Count tokens for specific model
224
- * Uses the correct tokenizer for that model
225
- */
226
- export async function countTokensForModel(text: string, modelName: string): Promise<number> {
227
- return countTokens(text, modelName);
228
- }
229
-
230
- /**
231
- * Count tokens with display formatting
232
- * Uses BPE tokenizer (async)
233
- */
234
- export async function countAndFormat(text: string, modelName?: string): Promise<string> {
235
- const count = await countTokens(text, modelName);
236
- return `${formatTokenCount(count)} Tokens`;
237
- }
238
-
239
- /**
240
- * Count tokens with display formatting (sync, estimation only)
241
- * Use this only when async is not possible
242
- */
243
- export function countAndFormatSync(text: string): string {
244
- const count = estimateTokens(text);
245
- return `${formatTokenCount(count)} Tokens`;
246
- }
247
-
248
- /**
249
- * Batch count tokens for multiple texts
250
- * Uses BPE tokenizer
251
- */
252
- export async function countTokensBatch(texts: string[], modelName?: string): Promise<number[]> {
253
- return Promise.all(texts.map(text => countTokens(text, modelName)));
254
- }
255
-
256
- /**
257
- * Batch count tokens (sync estimation fallback)
258
- */
259
- export function estimateTokensBatch(texts: string[]): number[] {
260
- return texts.map(estimateTokens);
261
- }
262
-
263
- /**
264
- * Get tokenizer info (for debugging)
265
- */
266
- export async function getTokenizerInfo(modelName?: string): Promise<{
267
- modelName: string;
268
- tokenizerName: string;
269
- loaded: boolean;
270
- failed: boolean;
271
- } | null> {
272
- const tokenizerName = getTokenizerForModel(modelName);
273
- const tokenizer = await ensureTokenizer(modelName);
274
-
275
- return {
276
- modelName: modelName || 'default',
277
- tokenizerName,
278
- loaded: tokenizer !== null,
279
- failed: tokenizerFailed.has(tokenizerName),
280
- };
281
- }
282
-
283
- /**
284
- * Get supported models
285
- */
286
- export function getSupportedModels(): string[] {
287
- return Object.keys(MODEL_TO_TOKENIZER).filter(k => k !== 'default');
288
- }
File without changes
File without changes