tlsd 2.19.0 → 2.20.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "tlsd",
3
- "version": "2.19.0",
3
+ "version": "2.20.1",
4
4
  "description": "A server for web app prototyping with HTTPS and Websockets",
5
5
  "main": "tlsd.js",
6
6
  "bin": {
@@ -45,6 +45,7 @@
45
45
  const uploadURL = '/';
46
46
  xhr.open( 'PUT', uploadURL, true );
47
47
  xhr.setRequestHeader( 'Content-Type', file.type || 'application/octet-stream' );
48
+ xhr.setRequestHeader( 'X-Filename', file.name );
48
49
  xhr.onload = () => {
49
50
  if( xhr.status === 200 ) {
50
51
  let result = JSON.parse( xhr.responseText );
package/tlsd.js CHANGED
@@ -20,6 +20,15 @@ const { D, V, I, W, E } = L;
20
20
  const UPLOAD_DIR = process.env.UPLOAD_DIR || "/tmp/tlsd-put-files/";
21
21
  const UPLOAD_MAX_BYTES = toInt( process.env.UPLOAD_MAX_BYTES ) || ( 200 * 1024 * 1024 );
22
22
 
23
+ function sanitize_upload_filename( raw ) {
24
+ if( ! raw || typeof raw !== "string" ) return "file";
25
+ const basename = path.basename( raw.trim() );
26
+ const sanitized = basename.replace( /[\\/:*?"<>|\x00-\x1f]/g, "_" ).replace( /\.{2,}/g, "." );
27
+ // Limit to the LAST 40 chars of the sanitized filename
28
+ const truncated = sanitized.slice( -40 );
29
+ return truncated || "file";
30
+ }
31
+
23
32
  // DOS protection configuration
24
33
  const MAX_RPC_MESSAGE_SIZE = toInt( process.env.MAX_RPC_MESSAGE_SIZE ) || ( 1024 * 1024 ); // 1MB default
25
34
  const MAX_WS_CONNECTIONS = toInt( process.env.MAX_WS_CONNECTIONS ) || 1000;
@@ -420,36 +429,47 @@ function csrf_protection( root ) {
420
429
  return;
421
430
  }
422
431
 
423
- // Validate CSRF token (double-submit cookie pattern)
424
- const cookieToken = cookies.csrf_token;
425
- const headerToken = req.headers[ "x-csrf-token" ];
426
-
427
- if( ! cookieToken || ! headerToken || cookieToken !== headerToken ) {
428
- W( "CSRF token validation failed for " + method + " " + url );
429
- res.writeHead( 403, {
430
- "Content-Type": "application/json",
431
- "Cache-Control": "no-store",
432
- } );
433
- res.write( o2j( { error: "CSRF token validation failed" } ) );
434
- res.end();
432
+ // In dev mode, skip CSRF validation for PUT (e.g. drag-drop file uploads)
433
+ if( dev_mode && is_put ) {
434
+ next();
435
435
  return;
436
436
  }
437
437
 
438
- // Additional Origin/Referer validation
438
+ const cookieToken = cookies.csrf_token;
439
+ const headerToken = req.headers[ "x-csrf-token" ];
440
+
441
+ // Origin/Referer validation
439
442
  const origin = req.headers[ "origin" ];
440
443
  const referer = req.headers[ "referer" ];
441
444
  let originValid = false;
442
445
 
443
446
  if( origin ) {
444
- // Check if origin matches host (with protocol)
445
447
  const expectedOrigin = ( dev_mode ? "http://" : "https://" ) + host;
446
448
  originValid = origin === expectedOrigin;
447
449
  } else if( referer ) {
448
- // Fall back to Referer if Origin is missing
449
450
  const expectedReferer = ( dev_mode ? "http://" : "https://" ) + host;
450
451
  originValid = referer.startsWith( expectedReferer );
451
452
  }
452
453
 
454
+ // For PUT (e.g. file uploads), cookie + Origin/Referer is sufficient.
455
+ // Same-origin XHR sends these automatically; no client-side token handling needed.
456
+ if( is_put && ! is_rpc && cookieToken && originValid ) {
457
+ next();
458
+ return;
459
+ }
460
+
461
+ // Validate CSRF token (double-submit cookie pattern)
462
+ if( ! cookieToken || ! headerToken || cookieToken !== headerToken ) {
463
+ W( "CSRF token validation failed for " + method + " " + url );
464
+ res.writeHead( 403, {
465
+ "Content-Type": "application/json",
466
+ "Cache-Control": "no-store",
467
+ } );
468
+ res.write( o2j( { error: "CSRF token validation failed" } ) );
469
+ res.end();
470
+ return;
471
+ }
472
+
453
473
  if( ! originValid ) {
454
474
  W( "CSRF Origin/Referer validation failed for " + method + " " + url + " origin: " + origin + " referer: " + referer );
455
475
  res.writeHead( 403, {
@@ -578,54 +598,117 @@ function put_handler( req, res, next ) {
578
598
  return;
579
599
  }
580
600
 
581
- let local_path = UPLOAD_DIR;
582
- fs.mkdirSync( local_path, { recursive: true } );
601
+ const upload_dir = UPLOAD_DIR;
602
+ fs.mkdirSync( upload_dir, { recursive: true } );
603
+
604
+ // Kick off the upload, given the initial available bytes on the dest filesystem.
605
+ // available_bytes is re-queried every 500ms throughout the transfer so that
606
+ // concurrent uploads are accounted for.
607
+ const do_put = function( initial_available_bytes ) {
608
+ let available_bytes = initial_available_bytes;
609
+
610
+ // Preflight: reject immediately if Content-Length already exceeds threshold
611
+ if( content_length && content_length > available_bytes * 0.8 ) {
612
+ W( "PUT: Content-Length " + content_length + " exceeds 80% of available space (" + available_bytes + " bytes)" );
613
+ res.writeHead( 507, { "Content-Type": "application/json" } );
614
+ res.write( o2j( { error: "Insufficient storage space" } ) );
615
+ res.end();
616
+ return;
617
+ }
583
618
 
584
- // generate random hash to store file under locally
585
- const hash = sha1( "" + ( Date.now() + Math.random() ) );
586
- local_path += "/" + hash;
619
+ // generate random hash to store file under locally
620
+ const hash = sha1( "" + ( Date.now() + Math.random() ) ).slice( 0, 16 ); // only use first 16 chars of the hash
621
+ let raw_filename = req.headers[ "x-filename" ];
622
+ if( ! raw_filename ) {
623
+ // take the filename from the last part of the URL
624
+ raw_filename = req.url.split( "/" ).pop();
625
+ // remove any query string
626
+ raw_filename = raw_filename.split( "?" ).shift();
627
+ }
628
+ const sanitized_filename = sanitize_upload_filename( raw_filename );
629
+ const final_filename = hash + "_" + sanitized_filename;
630
+ const local_path = upload_dir + "/" + final_filename;
631
+
632
+ D( "PUT: " + local_path );
633
+
634
+ const writeStream = fs.createWriteStream( local_path );
635
+
636
+ let responded = false;
637
+ let received_bytes = 0;
638
+
639
+ const abort_with = function( status, message, log_msg ) {
640
+ if( responded ) return;
641
+ responded = true;
642
+ clearInterval( disk_poll );
643
+ W( log_msg );
644
+ try { req.unpipe( writeStream ); } catch( _e ) {}
645
+ try { writeStream.destroy(); } catch( _e ) {}
646
+ try { req.destroy(); } catch( _e ) {}
647
+ try { fs.unlink( local_path, function( ){} ); } catch( _e ) {}
648
+ res.writeHead( status, { "Content-Type": "application/json" } );
649
+ res.write( o2j( { error: message } ) );
650
+ res.end();
651
+ };
587
652
 
588
- D( "PUT: " + local_path );
653
+ // Poll available disk space every 500ms so concurrent uploads are reflected
654
+ const disk_poll = setInterval( function( ) {
655
+ fs.statfs( upload_dir, function( err, stats ) {
656
+ if( ! err ) {
657
+ available_bytes = stats.bavail * stats.bsize;
658
+ D( "PUT: disk poll available=" + available_bytes );
659
+ }
660
+ } );
661
+ }, 500 );
662
+
663
+ // Streaming size guard - checks against the latest available_bytes each chunk
664
+ req.on( "data", function( chunk ) {
665
+ received_bytes += chunk.length;
666
+ if( received_bytes > UPLOAD_MAX_BYTES ) {
667
+ abort_with( 413, "Payload too large", "PUT: stream exceeded cap: " + received_bytes + " > " + UPLOAD_MAX_BYTES );
668
+ } else if( received_bytes > available_bytes * 0.8 ) {
669
+ abort_with( 507, "Insufficient storage space", "PUT: stream exceeded 80% of available space: " + received_bytes + " > " + ( available_bytes * 0.8 ) );
670
+ }
671
+ } );
589
672
 
590
- const writeStream = fs.createWriteStream( local_path );
673
+ req.pipe( writeStream );
591
674
 
592
- let responded = false;
593
- let received_bytes = 0;
594
- const abort_too_large = function( ) {
595
- if( responded ) return;
596
- responded = true;
597
- try { req.unpipe( writeStream ); } catch( _e ) {}
598
- try { writeStream.destroy(); } catch( _e ) {}
599
- try { req.destroy(); } catch( _e ) {}
600
- try { fs.unlink( local_path, function( ){} ); } catch( _e ) {}
601
- res.writeHead( 413, { "Content-Type": "application/json" } );
602
- res.write( o2j( { error: "Payload too large" } ) );
603
- res.end();
675
+ writeStream.on( "finish", ( ) => {
676
+ if( responded ) return;
677
+ clearInterval( disk_poll );
678
+ I( "PUT: " + local_path );
679
+ res.writeHead( 200, { "Content-Type": "application/json" } );
680
+ res.write( o2j( { hash, filename: final_filename } ) );
681
+ res.end();
682
+ } );
683
+
684
+ writeStream.on( "error", ( error ) => {
685
+ if( responded ) return;
686
+ clearInterval( disk_poll );
687
+ fail( "PUT: " + local_path + " failed during stream", error.stack );
688
+ } );
604
689
  };
605
690
 
606
- // Streaming size guard
607
- req.on( "data", function( chunk ) {
608
- received_bytes += chunk.length;
609
- if( received_bytes > UPLOAD_MAX_BYTES ) {
610
- W( "PUT: stream exceeded cap: " + received_bytes + " > " + UPLOAD_MAX_BYTES );
611
- abort_too_large( );
691
+ if( typeof fs.statfs !== "function" ) {
692
+ E( "PUT: fs.statfs unavailable, aborting transfer" );
693
+ res.writeHead( 503, { "Content-Type": "application/json" } );
694
+ res.write( o2j( { error: "Disk space check unavailable" } ) );
695
+ res.end();
696
+ return;
697
+ }
698
+
699
+ fs.statfs( upload_dir, function( err, stats ) {
700
+ if( err ) {
701
+ E( "PUT: statfs failed, aborting transfer: " + err.message );
702
+ res.writeHead( 503, { "Content-Type": "application/json" } );
703
+ res.write( o2j( { error: "Disk space check failed" } ) );
704
+ res.end();
705
+ return;
612
706
  }
707
+ const available_bytes = stats.bavail * stats.bsize;
708
+ D( "PUT: disk available=" + available_bytes );
709
+ do_put( available_bytes );
613
710
  } );
614
711
 
615
- req.pipe( writeStream );
616
-
617
- writeStream.on( "finish", ( ) => {
618
- if( responded ) return;
619
- I( "PUT: " + local_path );
620
- res.writeHead( 200, { "Content-Type": "application/json" } );
621
- res.write( o2j( { hash } ) );
622
- res.end();
623
- } );
624
-
625
- writeStream.on( "error", ( error ) => {
626
- if( responded ) return;
627
- fail( "PUT: " + local_path + " failed during stream", error.stack );
628
- } );
629
712
  } catch ( error ) {
630
713
  fail( "PUT: failed", error.stack );
631
714
  }