@sylphx/flow 1.8.0 → 1.8.2

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (126)
  1. package/CHANGELOG.md +72 -0
  2. package/assets/output-styles/silent.md +145 -8
  3. package/assets/rules/core.md +19 -2
  4. package/package.json +2 -12
  5. package/src/commands/flow/execute.ts +470 -0
  6. package/src/commands/flow/index.ts +11 -0
  7. package/src/commands/flow/prompt.ts +35 -0
  8. package/src/commands/flow/setup.ts +312 -0
  9. package/src/commands/flow/targets.ts +18 -0
  10. package/src/commands/flow/types.ts +47 -0
  11. package/src/commands/flow-command.ts +18 -967
  12. package/src/commands/flow-orchestrator.ts +14 -5
  13. package/src/commands/hook-command.ts +1 -1
  14. package/src/commands/init-core.ts +12 -3
  15. package/src/commands/run-command.ts +1 -1
  16. package/src/config/rules.ts +1 -1
  17. package/src/core/error-handling.ts +1 -1
  18. package/src/core/loop-controller.ts +1 -1
  19. package/src/core/state-detector.ts +1 -1
  20. package/src/core/target-manager.ts +1 -1
  21. package/src/index.ts +1 -1
  22. package/src/shared/files/index.ts +1 -1
  23. package/src/shared/processing/index.ts +1 -1
  24. package/src/targets/claude-code.ts +3 -3
  25. package/src/targets/opencode.ts +3 -3
  26. package/src/utils/agent-enhancer.ts +2 -2
  27. package/src/utils/{mcp-config.ts → config/mcp-config.ts} +4 -4
  28. package/src/utils/{paths.ts → config/paths.ts} +1 -1
  29. package/src/utils/{settings.ts → config/settings.ts} +1 -1
  30. package/src/utils/{target-config.ts → config/target-config.ts} +5 -5
  31. package/src/utils/{target-utils.ts → config/target-utils.ts} +3 -3
  32. package/src/utils/display/banner.ts +25 -0
  33. package/src/utils/display/status.ts +55 -0
  34. package/src/utils/{file-operations.ts → files/file-operations.ts} +2 -2
  35. package/src/utils/files/jsonc.ts +36 -0
  36. package/src/utils/{sync-utils.ts → files/sync-utils.ts} +3 -3
  37. package/src/utils/index.ts +42 -61
  38. package/src/utils/version.ts +47 -0
  39. package/src/components/benchmark-monitor.tsx +0 -331
  40. package/src/components/reindex-progress.tsx +0 -261
  41. package/src/composables/functional/index.ts +0 -14
  42. package/src/composables/functional/useEnvironment.ts +0 -171
  43. package/src/composables/functional/useFileSystem.ts +0 -139
  44. package/src/composables/index.ts +0 -4
  45. package/src/composables/useEnv.ts +0 -13
  46. package/src/composables/useRuntimeConfig.ts +0 -27
  47. package/src/core/ai-sdk.ts +0 -603
  48. package/src/core/app-factory.ts +0 -381
  49. package/src/core/builtin-agents.ts +0 -9
  50. package/src/core/command-system.ts +0 -550
  51. package/src/core/config-system.ts +0 -550
  52. package/src/core/connection-pool.ts +0 -390
  53. package/src/core/di-container.ts +0 -155
  54. package/src/core/headless-display.ts +0 -96
  55. package/src/core/interfaces/index.ts +0 -22
  56. package/src/core/interfaces/repository.interface.ts +0 -91
  57. package/src/core/interfaces/service.interface.ts +0 -133
  58. package/src/core/interfaces.ts +0 -96
  59. package/src/core/result.ts +0 -351
  60. package/src/core/service-config.ts +0 -252
  61. package/src/core/session-service.ts +0 -121
  62. package/src/core/storage-factory.ts +0 -115
  63. package/src/core/stream-handler.ts +0 -288
  64. package/src/core/type-utils.ts +0 -427
  65. package/src/core/unified-storage.ts +0 -456
  66. package/src/core/validation/limit.ts +0 -46
  67. package/src/core/validation/query.ts +0 -20
  68. package/src/db/auto-migrate.ts +0 -322
  69. package/src/db/base-database-client.ts +0 -144
  70. package/src/db/cache-db.ts +0 -218
  71. package/src/db/cache-schema.ts +0 -75
  72. package/src/db/database.ts +0 -70
  73. package/src/db/index.ts +0 -252
  74. package/src/db/memory-db.ts +0 -153
  75. package/src/db/memory-schema.ts +0 -29
  76. package/src/db/schema.ts +0 -289
  77. package/src/db/session-repository.ts +0 -733
  78. package/src/domains/index.ts +0 -6
  79. package/src/domains/utilities/index.ts +0 -6
  80. package/src/domains/utilities/time/index.ts +0 -5
  81. package/src/domains/utilities/time/tools.ts +0 -291
  82. package/src/services/agent-service.ts +0 -273
  83. package/src/services/evaluation-service.ts +0 -271
  84. package/src/services/functional/evaluation-logic.ts +0 -296
  85. package/src/services/functional/file-processor.ts +0 -273
  86. package/src/services/functional/index.ts +0 -12
  87. package/src/services/memory.service.ts +0 -476
  88. package/src/types/api/batch.ts +0 -108
  89. package/src/types/api/errors.ts +0 -118
  90. package/src/types/api/index.ts +0 -55
  91. package/src/types/api/requests.ts +0 -76
  92. package/src/types/api/responses.ts +0 -180
  93. package/src/types/api/websockets.ts +0 -85
  94. package/src/types/benchmark.ts +0 -49
  95. package/src/types/database.types.ts +0 -510
  96. package/src/types/memory-types.ts +0 -63
  97. package/src/utils/advanced-tokenizer.ts +0 -191
  98. package/src/utils/ai-model-fetcher.ts +0 -19
  99. package/src/utils/async-file-operations.ts +0 -516
  100. package/src/utils/audio-player.ts +0 -345
  101. package/src/utils/codebase-helpers.ts +0 -211
  102. package/src/utils/console-ui.ts +0 -79
  103. package/src/utils/database-errors.ts +0 -140
  104. package/src/utils/debug-logger.ts +0 -49
  105. package/src/utils/file-scanner.ts +0 -259
  106. package/src/utils/help.ts +0 -20
  107. package/src/utils/immutable-cache.ts +0 -106
  108. package/src/utils/jsonc.ts +0 -158
  109. package/src/utils/memory-tui.ts +0 -414
  110. package/src/utils/models-dev.ts +0 -91
  111. package/src/utils/parallel-operations.ts +0 -487
  112. package/src/utils/process-manager.ts +0 -155
  113. package/src/utils/prompts.ts +0 -120
  114. package/src/utils/search-tool-builder.ts +0 -214
  115. package/src/utils/session-manager.ts +0 -168
  116. package/src/utils/session-title.ts +0 -87
  117. package/src/utils/simplified-errors.ts +0 -410
  118. package/src/utils/template-engine.ts +0 -94
  119. package/src/utils/test-audio.ts +0 -71
  120. package/src/utils/todo-context.ts +0 -46
  121. package/src/utils/token-counter.ts +0 -288
  122. package/src/utils/{cli-output.ts → display/cli-output.ts} +0 -0
  123. package/src/utils/{logger.ts → display/logger.ts} +0 -0
  124. package/src/utils/{notifications.ts → display/notifications.ts} +0 -0
  125. package/src/utils/{secret-utils.ts → security/secret-utils.ts} +0 -0
  126. package/src/utils/{security.ts → security/security.ts} +0 -0
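Most of the non-deletion churn above is a reorganization of package/src/utils into config/, display/, files/, and security/ subdirectories. A minimal sketch of what that means for import sites inside the package follows; the imported names are assumptions for illustration, only the paths come from the rename list above:

// Before 1.8.2 (flat utils directory) — hypothetical import names
// import { readMcpConfig } from './utils/mcp-config';
// import { syncFiles } from './utils/sync-utils';
// import { logger } from './utils/logger';

// After 1.8.2, following the renames listed above
import { readMcpConfig } from './utils/config/mcp-config';
import { syncFiles } from './utils/files/sync-utils';
import { logger } from './utils/display/logger';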
package/src/utils/token-counter.ts (deleted)
@@ -1,288 +0,0 @@
- /**
-  * Token Counter Utility
-  * BPE-based token counting using Hugging Face AutoTokenizer
-  *
-  * Primary method: BPE tokenizer (auto-selected by Hugging Face)
-  * Fallback: Fast estimation when tokenizer unavailable
-  */
-
- import { AutoTokenizer } from '@huggingface/transformers';
-
- // Cache for multiple tokenizers (keyed by tokenizer name)
- // Limited to 3 tokenizers to prevent memory leak (each ~100-500MB)
- const tokenizerCache = new Map<string, any>();
- const tokenizerInitializing = new Set<string>();
- const tokenizerFailed = new Set<string>();
- const MAX_CACHED_TOKENIZERS = 3;
-
- /**
-  * Map provider model names to tokenizer names
-  * AutoTokenizer will automatically find the right tokenizer for each model
-  */
- const MODEL_TO_TOKENIZER: Record<string, string> = {
-   // OpenAI models
-   'gpt-4': 'Xenova/gpt-4',
-   'gpt-4-turbo': 'Xenova/gpt-4',
-   'gpt-4o': 'Xenova/gpt-4',
-   'gpt-3.5-turbo': 'Xenova/gpt-3.5-turbo',
-   'gpt-3.5': 'Xenova/gpt-3.5-turbo',
-
-   // Anthropic Claude models
-   'claude-3-opus': 'Xenova/claude-tokenizer',
-   'claude-3-sonnet': 'Xenova/claude-tokenizer',
-   'claude-3-haiku': 'Xenova/claude-tokenizer',
-   'claude-3.5-sonnet': 'Xenova/claude-tokenizer',
-   'claude-3.5-haiku': 'Xenova/claude-tokenizer',
-
-   // Code models
-   'starcoder': 'bigcode/starcoder',
-   'starcoder2': 'bigcode/starcoder2-3b',
-   'codellama': 'codellama/CodeLlama-7b-hf',
-
-   // Google models
-   'gemini': 'Xenova/gpt-4', // Fallback to GPT-4 (no official Gemini tokenizer)
-
-   // Fallback
-   'default': 'Xenova/gpt-4',
- };
-
- /**
-  * Get tokenizer name for a model
-  * AutoTokenizer will find the right tokenizer automatically
-  */
- function getTokenizerForModel(modelName?: string): string {
-   if (!modelName) return MODEL_TO_TOKENIZER['default']!;
-
-   // Direct match
-   if (MODEL_TO_TOKENIZER[modelName]) {
-     return MODEL_TO_TOKENIZER[modelName]!;
-   }
-
-   // Fuzzy match (e.g., "gpt-4-turbo-preview" → "gpt-4")
-   const modelLower = modelName.toLowerCase();
-   for (const [key, tokenizer] of Object.entries(MODEL_TO_TOKENIZER)) {
-     if (modelLower.includes(key)) {
-       return tokenizer;
-     }
-   }
-
-   // Default fallback
-   return MODEL_TO_TOKENIZER['default']!;
- }
-
- /**
-  * Fast fallback estimation (only when BPE tokenizer unavailable)
-  * Based on ~3.5 chars per token for code
-  */
- function estimateFallback(text: string): number {
-   if (!text) return 0;
-
-   const words = text.split(/\s+/).filter(Boolean).length;
-   const chars = text.length;
-
-   const charBasedEstimate = Math.ceil(chars / 3.5);
-   const wordBasedEstimate = Math.ceil(words * 1.3);
-
-   return Math.round((charBasedEstimate + wordBasedEstimate) / 2);
- }
-
- /**
-  * Initialize BPE tokenizer (lazy, cached per tokenizer name)
-  * Uses Hugging Face AutoTokenizer to automatically select best tokenizer
-  */
- async function ensureTokenizer(modelName?: string): Promise<any | null> {
-   // Get tokenizer name for this model
-   const tokenizerName = getTokenizerForModel(modelName);
-
-   // Check if already cached
-   if (tokenizerCache.has(tokenizerName)) {
-     return tokenizerCache.get(tokenizerName);
-   }
-
-   // Check if previous initialization failed
-   if (tokenizerFailed.has(tokenizerName)) {
-     return null;
-   }
-
-   // Wait if initialization in progress for this tokenizer
-   while (tokenizerInitializing.has(tokenizerName)) {
-     await new Promise(resolve => setTimeout(resolve, 100));
-   }
-
-   // Check again after waiting
-   if (tokenizerCache.has(tokenizerName)) {
-     return tokenizerCache.get(tokenizerName);
-   }
-   if (tokenizerFailed.has(tokenizerName)) {
-     return null;
-   }
-
-   // Initialize with Hugging Face AutoTokenizer
-   try {
-     tokenizerInitializing.add(tokenizerName);
-
-     // Let Hugging Face auto-select and load the best tokenizer
-     const tokenizer = await AutoTokenizer.from_pretrained(tokenizerName, {
-       // Cache models locally for faster subsequent loads
-       cache_dir: './models/.cache',
-       // Use local files if available, otherwise download
-       local_files_only: false,
-     });
-
-     // Limit cache size - evict oldest tokenizer if limit reached
-     if (tokenizerCache.size >= MAX_CACHED_TOKENIZERS) {
-       const oldestKey = tokenizerCache.keys().next().value;
-       if (oldestKey) {
-         tokenizerCache.delete(oldestKey);
-       }
-     }
-
-     tokenizerCache.set(tokenizerName, tokenizer);
-     tokenizerInitializing.delete(tokenizerName);
-     return tokenizer;
-   } catch (error) {
-     console.warn('[TokenCounter] BPE tokenizer initialization failed, using fallback estimation:', error);
-     tokenizerFailed.add(tokenizerName);
-     tokenizerInitializing.delete(tokenizerName);
-     return null;
-   }
- }
-
- /**
-  * Count tokens using BPE tokenizer (Hugging Face AutoTokenizer)
-  * Falls back to estimation if tokenizer unavailable
-  *
-  * @param text Text to count tokens for
-  * @param modelName Optional model name to use specific tokenizer
-  * @returns Token count
-  */
- export async function countTokens(text: string, modelName?: string): Promise<number> {
-   if (!text) return 0;
-
-   const tokenizer = await ensureTokenizer(modelName);
-
-   if (!tokenizer) {
-     // Tokenizer unavailable, use fallback
-     return estimateFallback(text);
-   }
-
-   try {
-     // Use Hugging Face tokenizer API
-     const encoded = await tokenizer(text);
-
-     // Get token count from encoded result
-     if (encoded.input_ids && encoded.input_ids.size) {
-       return encoded.input_ids.size;
-     }
-
-     // Fallback: count array length
-     if (Array.isArray(encoded.input_ids)) {
-       return encoded.input_ids.length;
-     }
-
-     // Fallback: if it's a tensor, get its length
-     if (encoded.input_ids.data) {
-       return encoded.input_ids.data.length;
-     }
-
-     // Last resort fallback
-     return estimateFallback(text);
-   } catch (error) {
-     console.warn('[TokenCounter] Token counting failed, using fallback:', error);
-     return estimateFallback(text);
-   }
- }
-
- /**
-  * Synchronous token estimation (for cases where async is not possible)
-  * Uses fallback estimation only
-  */
- export function estimateTokens(text: string): number {
-   return estimateFallback(text);
- }
-
- /**
-  * Format token count for display
-  * Examples: 150 -> "150", 1500 -> "1.5K", 1500000 -> "1.5M"
-  */
- export function formatTokenCount(count: number): string {
-   if (count < 1000) {
-     return count.toString();
-   }
-
-   if (count < 1000000) {
-     const k = count / 1000;
-     return `${k.toFixed(1)}K`;
-   }
-
-   const m = count / 1000000;
-   return `${m.toFixed(1)}M`;
- }
-
- /**
-  * Count tokens for specific model
-  * Uses the correct tokenizer for that model
-  */
- export async function countTokensForModel(text: string, modelName: string): Promise<number> {
-   return countTokens(text, modelName);
- }
-
- /**
-  * Count tokens with display formatting
-  * Uses BPE tokenizer (async)
-  */
- export async function countAndFormat(text: string, modelName?: string): Promise<string> {
-   const count = await countTokens(text, modelName);
-   return `${formatTokenCount(count)} Tokens`;
- }
-
- /**
-  * Count tokens with display formatting (sync, estimation only)
-  * Use this only when async is not possible
-  */
- export function countAndFormatSync(text: string): string {
-   const count = estimateTokens(text);
-   return `${formatTokenCount(count)} Tokens`;
- }
-
- /**
-  * Batch count tokens for multiple texts
-  * Uses BPE tokenizer
-  */
- export async function countTokensBatch(texts: string[], modelName?: string): Promise<number[]> {
-   return Promise.all(texts.map(text => countTokens(text, modelName)));
- }
-
- /**
-  * Batch count tokens (sync estimation fallback)
-  */
- export function estimateTokensBatch(texts: string[]): number[] {
-   return texts.map(estimateTokens);
- }
-
- /**
-  * Get tokenizer info (for debugging)
-  */
- export async function getTokenizerInfo(modelName?: string): Promise<{
-   modelName: string;
-   tokenizerName: string;
-   loaded: boolean;
-   failed: boolean;
- } | null> {
-   const tokenizerName = getTokenizerForModel(modelName);
-   const tokenizer = await ensureTokenizer(modelName);
-
-   return {
-     modelName: modelName || 'default',
-     tokenizerName,
-     loaded: tokenizer !== null,
-     failed: tokenizerFailed.has(tokenizerName),
-   };
- }
-
- /**
-  * Get supported models
-  */
- export function getSupportedModels(): string[] {
-   return Object.keys(MODEL_TO_TOKENIZER).filter(k => k !== 'default');
- }
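The deleted module's public surface was countTokens, countTokensForModel, countAndFormat, the synchronous estimateTokens/countAndFormatSync helpers, the batch variants, formatTokenCount, getTokenizerInfo, and getSupportedModels. A minimal usage sketch of that pre-1.8.2 API follows; the sample string and the numbers in the comments are illustrative, not taken from the package:

// Sketch only: how the removed token-counter API was meant to be called (ESM, top-level await).
import { countTokens, estimateTokens, formatTokenCount } from './utils/token-counter';

const source = 'export const answer = 42;';

// BPE count via the mapped Hugging Face tokenizer, falling back to estimation on failure
const exact = await countTokens(source, 'gpt-4');

// Synchronous heuristic: average of ceil(chars / 3.5) and ceil(words * 1.3)
// 25 chars → 8, 5 words → 7, Math.round((8 + 7) / 2) → 8
const rough = estimateTokens(source);

// rough prints "8"; exact depends on the tokenizer actually loaded
console.log(formatTokenCount(exact), formatTokenCount(rough));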