fluxy-bot 0.10.18 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -88,13 +88,33 @@ export function registerUpdateCommand(program: Command) {
88
88
  fs.cpSync(wsSrc, path.join(DATA_DIR, 'workspace'), { recursive: true });
89
89
  }
90
90
 
91
- // Always update index.html contains splash screen, SW registration,
92
- // and meta tags that ship with each version. Safe to overwrite since
93
- // users don't edit this file (their code lives in src/ and backend/).
94
- const indexSrc = path.join(extracted, 'workspace', 'client', 'index.html');
95
- const indexDst = path.join(DATA_DIR, 'workspace', 'client', 'index.html');
96
- if (fs.existsSync(indexSrc)) {
97
- fs.cpSync(indexSrc, indexDst, { force: true });
91
+ // Always update framework files that ship with each version.
92
+ // These are not user-editable — user code lives in src/components/, src/pages/, backend/, etc.
93
+ const frameworkFiles = [
94
+ 'workspace/client/index.html', // splash screen, SW registration, meta tags
95
+ 'workspace/client/src/main.tsx', // React entry point, app-ready signal
96
+ ];
97
+ for (const rel of frameworkFiles) {
98
+ const src = path.join(extracted, rel);
99
+ const dst = path.join(DATA_DIR, rel);
100
+ if (fs.existsSync(src)) {
101
+ fs.cpSync(src, dst, { force: true });
102
+ }
103
+ }
104
+
105
+ // Always update public assets that ship with the framework (animation spritesheet, icons).
106
+ // Only copy specific files — never overwrite user-added public assets.
107
+ const frameworkAssets = [
108
+ 'spritesheet.webp',
109
+ ];
110
+ const publicSrc = path.join(extracted, 'workspace', 'client', 'public');
111
+ const publicDst = path.join(DATA_DIR, 'workspace', 'client', 'public');
112
+ for (const asset of frameworkAssets) {
113
+ const src = path.join(publicSrc, asset);
114
+ const dst = path.join(publicDst, asset);
115
+ if (fs.existsSync(src)) {
116
+ fs.cpSync(src, dst, { force: true });
117
+ }
98
118
  }
99
119
 
100
120
  for (const file of ['package.json', 'vite.config.ts', 'vite.fluxy.config.ts', 'tsconfig.json', 'postcss.config.js', 'components.json']) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "fluxy-bot",
3
- "version": "0.10.18",
3
+ "version": "0.11.0",
4
4
  "releaseNotes": [
5
5
  "Adding a way for users to claim their fluxies on the fluxy.bot dashboard",
6
6
  "2. ",
@@ -52,9 +52,6 @@
52
52
  "dependencies": {
53
53
  "@anthropic-ai/claude-agent-sdk": "^0.2.50",
54
54
  "@clack/prompts": "^1.1.0",
55
- "@react-three/drei": "^10.7.7",
56
- "@react-three/fiber": "^9.5.0",
57
- "@tailwindcss/postcss": "^4.2.0",
58
55
  "@tailwindcss/vite": "^4.2.0",
59
56
  "@vitejs/plugin-react": "^5.1.4",
60
57
  "better-sqlite3": "^12.6.2",
@@ -80,11 +77,9 @@
80
77
  "sonner": "^2.0.7",
81
78
  "tailwind-merge": "^3.5.0",
82
79
  "tailwindcss": "^4.2.0",
83
- "three": "^0.183.1",
84
80
  "tsx": "^4.21.0",
85
81
  "viem": "^2.47.6",
86
82
  "vite": "^7.3.1",
87
- "vite-plugin-pwa": "^1.2.0",
88
83
  "web-push": "^3.6.7",
89
84
  "ws": "^8.19.0",
90
85
  "zustand": "^5.0.11"
@@ -99,7 +94,6 @@
99
94
  "@types/react-syntax-highlighter": "^15.5.13",
100
95
  "@types/ws": "^8.18.1",
101
96
  "concurrently": "^9.2.1",
102
- "fumapress": "^0.1.1",
103
97
  "typescript": "^5.9.3"
104
98
  }
105
99
  }
@@ -206,19 +206,15 @@ function FluxyApp() {
206
206
 
207
207
  // Forward rebuild/HMR events to parent (dashboard) via postMessage
208
208
  const unsubRebuilding = client.on('app:rebuilding', () => {
209
- console.log('[fluxy] app:rebuilding received');
210
209
  window.parent?.postMessage({ type: 'fluxy:rebuilding' }, '*');
211
210
  });
212
211
  const unsubRebuilt = client.on('app:rebuilt', () => {
213
- console.log('[fluxy] app:rebuilt received');
214
212
  window.parent?.postMessage({ type: 'fluxy:rebuilt' }, '*');
215
213
  });
216
214
  const unsubBuildError = client.on('app:build-error', (data: { error: string }) => {
217
- console.log('[fluxy] app:build-error received:', data.error);
218
215
  window.parent?.postMessage({ type: 'fluxy:build-error', error: data.error }, '*');
219
216
  });
220
217
  const unsubHmr = client.on('app:hmr-update', () => {
221
- console.log('[fluxy] Vite HMR update — changes applied automatically');
222
218
  window.parent?.postMessage({ type: 'fluxy:hmr-update' }, '*');
223
219
  });
224
220
 
@@ -58,7 +58,6 @@ const DRAFT_KEY = 'fluxy_draft';
58
58
  export default function InputBar({ onSend, onStop, streaming, whisperEnabled, onTranscribe }: Props) {
59
59
  const { start: startSpeech, stop: stopSpeech, abort: abortSpeech, isSupported: webSpeechSupported } = useSpeechRecognition();
60
60
  const voiceEnabled = whisperEnabled || webSpeechSupported;
61
- console.log('[InputBar] render - whisperEnabled:', whisperEnabled, 'webSpeechSupported:', webSpeechSupported, 'voiceEnabled:', voiceEnabled);
62
61
  const [text, setText] = useState(() => {
63
62
  try { return localStorage.getItem(DRAFT_KEY) || ''; } catch { return ''; }
64
63
  });
@@ -109,7 +108,6 @@ export default function InputBar({ onSend, onStop, streaming, whisperEnabled, on
109
108
  }, [isRecording]);
110
109
 
111
110
  const stopRecording = useCallback(async (cancelled: boolean) => {
112
- console.log('[InputBar] stopRecording called, cancelled:', cancelled, 'recorder:', !!mediaRecorderRef.current, 'whisper:', whisperEnabled);
113
111
  if (intervalRef.current) clearInterval(intervalRef.current);
114
112
  if (holdTimerRef.current) { clearTimeout(holdTimerRef.current); holdTimerRef.current = null; }
115
113
  isHolding.current = false;
@@ -126,14 +124,12 @@ export default function InputBar({ onSend, onStop, streaming, whisperEnabled, on
126
124
  } else if (recorder && recorder.state !== 'inactive') {
127
125
  // Whisper path: stop MediaRecorder and use its audio
128
126
  recorder.onstop = async () => {
129
- console.log('[InputBar] recorder.onstop fired, chunks:', audioChunksRef.current.length);
130
127
  stream?.getTracks().forEach((t) => t.stop());
131
128
  const blob = new Blob(audioChunksRef.current, { type: 'audio/webm' });
132
129
  audioChunksRef.current = [];
133
130
  mediaRecorderRef.current = null;
134
131
  streamRef.current = null;
135
132
 
136
- console.log('[InputBar] blob size:', blob.size);
137
133
  if (blob.size < 1000) return;
138
134
 
139
135
  const fileReader = new FileReader();
@@ -142,7 +138,6 @@ export default function InputBar({ onSend, onStop, streaming, whisperEnabled, on
142
138
  const base64 = dataUrl.split(',')[1];
143
139
  if (!base64) return;
144
140
 
145
- console.log('[InputBar] Whisper path, base64 length:', base64.length);
146
141
  try {
147
142
  let data: { transcript?: string };
148
143
  if (onTranscribe) {
@@ -169,7 +164,6 @@ export default function InputBar({ onSend, onStop, streaming, whisperEnabled, on
169
164
  recorder.stop();
170
165
  } else {
171
166
  // Web Speech API path (no MediaRecorder): get transcript directly
172
- console.log('[InputBar] Web Speech path, stopping speech recognition...');
173
167
  stream?.getTracks().forEach((t) => t.stop());
174
168
  mediaRecorderRef.current = null;
175
169
  streamRef.current = null;
@@ -177,13 +171,10 @@ export default function InputBar({ onSend, onStop, streaming, whisperEnabled, on
177
171
 
178
172
  try {
179
173
  const transcript = await stopSpeech();
180
- console.log('[InputBar] Web Speech transcript:', JSON.stringify(transcript));
181
174
  if (transcript.trim()) {
182
175
  const pendingAtts = attachments.length > 0 ? attachments : undefined;
183
176
  onSend(transcript.trim(), pendingAtts);
184
177
  if (pendingAtts) setAttachments([]);
185
- } else {
186
- console.log('[InputBar] Web Speech transcript was empty');
187
178
  }
188
179
  } catch (err) {
189
180
  console.error('[InputBar] Web Speech stop error:', err);
@@ -263,24 +254,19 @@ export default function InputBar({ onSend, onStop, streaming, whisperEnabled, on
263
254
 
264
255
  // ── Mic pointer handlers ──
265
256
  const handleMicDown = useCallback((e: RPointerEvent) => {
266
- console.log('[InputBar] handleMicDown fired, voiceEnabled:', voiceEnabled, 'whisper:', whisperEnabled);
267
257
  e.preventDefault();
268
258
  startXRef.current = e.clientX;
269
259
  dragRef.current = 0;
270
260
  (e.currentTarget as HTMLElement).setPointerCapture(e.pointerId);
271
261
 
272
262
  holdTimerRef.current = setTimeout(async () => {
273
- console.log('[InputBar] hold timer fired, voiceEnabled:', voiceEnabled, 'whisper:', whisperEnabled);
274
263
  if (!voiceEnabled) {
275
- console.log('[InputBar] voiceEnabled is false, returning');
276
264
  return;
277
265
  }
278
266
  try {
279
267
  if (whisperEnabled) {
280
268
  // Whisper path: need getUserMedia + MediaRecorder for audio capture
281
- console.log('[InputBar] Whisper path: requesting getUserMedia...');
282
269
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
283
- console.log('[InputBar] getUserMedia succeeded, tracks:', stream.getTracks().length);
284
270
  streamRef.current = stream;
285
271
  const mimeType = MediaRecorder.isTypeSupported('audio/webm;codecs=opus') ? 'audio/webm;codecs=opus' : 'audio/webm';
286
272
  const recorder = new MediaRecorder(stream, { mimeType });
@@ -290,18 +276,14 @@ export default function InputBar({ onSend, onStop, streaming, whisperEnabled, on
290
276
  };
291
277
  mediaRecorderRef.current = recorder;
292
278
  recorder.start();
293
- console.log('[InputBar] MediaRecorder started');
294
279
  } else {
295
280
  // Web Speech path: only SpeechRecognition, no getUserMedia (avoids mic conflict on mobile)
296
- console.log('[InputBar] Web Speech path: starting SpeechRecognition only...');
297
281
  startSpeech();
298
- console.log('[InputBar] SpeechRecognition started');
299
282
  }
300
283
 
301
284
  isHolding.current = true;
302
285
  setIsRecording(true);
303
286
  setRecordingTime(0);
304
- console.log('[InputBar] recording started, isHolding=true');
305
287
  } catch (err) {
306
288
  console.error('[InputBar] recording setup failed:', err);
307
289
  }
@@ -324,17 +306,14 @@ export default function InputBar({ onSend, onStop, streaming, whisperEnabled, on
324
306
  }, [stopRecording]);
325
307
 
326
308
  const handleMicUp = useCallback(() => {
327
- console.log('[InputBar] handleMicUp, isHolding:', isHolding.current);
328
309
  if (holdTimerRef.current) { clearTimeout(holdTimerRef.current); holdTimerRef.current = null; }
329
310
  if (!isHolding.current) {
330
- console.log('[InputBar] handleMicUp - not holding, ignoring');
331
311
  return;
332
312
  }
333
313
  stopRecording(false);
334
314
  }, [stopRecording]);
335
315
 
336
316
  const handleMicCancel = useCallback(() => {
337
- console.log('[InputBar] handleMicCancel fired, isHolding:', isHolding.current, 'holdTimer:', !!holdTimerRef.current);
338
317
  if (holdTimerRef.current) { clearTimeout(holdTimerRef.current); holdTimerRef.current = null; }
339
318
  if (isHolding.current) stopRecording(true);
340
319
  }, [stopRecording]);
@@ -39,10 +39,8 @@ export function useSpeechRecognition() {
39
39
  const isSupported = useMemo(() => isWebSpeechSupported, []);
40
40
 
41
41
  const start = useCallback(() => {
42
- console.log('[SpeechRecognition] start()');
43
42
  const Ctor = getSpeechRecognitionCtor();
44
43
  if (!Ctor) {
45
- console.log('[SpeechRecognition] not available');
46
44
  return;
47
45
  }
48
46
 
@@ -63,12 +61,10 @@ export function useSpeechRecognition() {
63
61
  // Grab the latest result — it's always the most complete/accurate
64
62
  const last = event.results[event.results.length - 1];
65
63
  const text = last[0].transcript;
66
- console.log('[SpeechRecognition] onresult:', JSON.stringify(text), 'isFinal:', last.isFinal);
67
64
  transcriptRef.current = text;
68
65
  };
69
66
 
70
67
  recognition.onend = () => {
71
- console.log('[SpeechRecognition] onend, transcript:', JSON.stringify(transcriptRef.current));
72
68
  instanceRef.current = null;
73
69
  // If stop() is waiting for a result, resolve it now
74
70
  if (resolveRef.current) {
@@ -88,19 +84,16 @@ export function useSpeechRecognition() {
88
84
 
89
85
  try {
90
86
  recognition.start();
91
- console.log('[SpeechRecognition] started, lang:', recognition.lang);
92
87
  } catch (e) {
93
88
  console.error('[SpeechRecognition] start failed:', e);
94
89
  }
95
90
  }, []);
96
91
 
97
92
  const stop = useCallback((): Promise<string> => {
98
- console.log('[SpeechRecognition] stop(), transcript so far:', JSON.stringify(transcriptRef.current));
99
93
  return new Promise((resolve) => {
100
94
  const instance = instanceRef.current;
101
95
  if (!instance) {
102
96
  // Already ended (e.g. short utterance auto-stopped)
103
- console.log('[SpeechRecognition] no instance, resolving immediately');
104
97
  resolve(transcriptRef.current);
105
98
  return;
106
99
  }
@@ -119,7 +112,6 @@ export function useSpeechRecognition() {
119
112
  // Safety timeout in case onend never fires
120
113
  setTimeout(() => {
121
114
  if (resolveRef.current) {
122
- console.log('[SpeechRecognition] safety timeout, resolving with:', JSON.stringify(transcriptRef.current));
123
115
  resolveRef.current(transcriptRef.current);
124
116
  resolveRef.current = null;
125
117
  instanceRef.current = null;
@@ -129,7 +121,6 @@ export function useSpeechRecognition() {
129
121
  }, []);
130
122
 
131
123
  const abort = useCallback(() => {
132
- console.log('[SpeechRecognition] abort()');
133
124
  transcriptRef.current = '';
134
125
  const instance = instanceRef.current;
135
126
  if (instance) {
@@ -44,33 +44,23 @@
44
44
  <script type="module" src="/src/main.tsx"></script>
45
45
  <script>
46
46
  if('serviceWorker' in navigator){
47
- console.log('[sw-reg] current controller:', navigator.serviceWorker.controller ? 'yes' : 'none');
48
- // When a new SW takes control (update), show splash and reload
49
- // so the new caching strategy kicks in immediately.
50
47
  var swRefreshing=false;
51
48
  navigator.serviceWorker.addEventListener('controllerchange',function(){
52
- console.log('[sw-reg] controllerchange fired, refreshing:', swRefreshing);
53
49
  if(swRefreshing)return;
54
50
  swRefreshing=true;
55
51
  var s=document.getElementById('splash');
56
52
  if(s){s.style.transition='none';s.style.display='block';s.style.opacity='1'}
57
- console.log('[sw-reg] reloading after new SW took control');
58
53
  location.reload();
59
54
  });
60
55
  navigator.serviceWorker.register('/sw.js').then(function(r){
61
- console.log('[sw-reg] registered, active:', r.active?.state, 'waiting:', !!r.waiting, 'installing:', !!r.installing);
62
56
  r.update();
63
57
  if(r.waiting){
64
- console.log('[sw-reg] found waiting SW — sending SKIP_WAITING');
65
58
  r.waiting.postMessage({type:'SKIP_WAITING'});
66
59
  }
67
60
  r.addEventListener('updatefound',function(){
68
- console.log('[sw-reg] updatefound — new SW installing');
69
61
  var w=r.installing;
70
62
  if(w)w.addEventListener('statechange',function(){
71
- console.log('[sw-reg] installing SW state:', w.state);
72
63
  if(w.state==='installed'&&navigator.serviceWorker.controller){
73
- console.log('[sw-reg] new SW installed — sending SKIP_WAITING');
74
64
  w.postMessage({type:'SKIP_WAITING'});
75
65
  }
76
66
  });
@@ -134,7 +134,7 @@ export default function App() {
134
134
  // Vite HMR handles hot updates natively — no manual reload needed.
135
135
  // Manual location.reload() here was causing unnecessary full-page refreshes
136
136
  // that killed the chat iframe's WebSocket connection.
137
- console.log('[dashboard] File changed — Vite HMR will handle it');
137
+
138
138
  }
139
139
  };
140
140
  window.addEventListener('message', handler);