tlsd 2.20.0 → 2.20.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/tlsd.js +106 -52
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "tlsd",
3
- "version": "2.20.0",
3
+ "version": "2.20.1",
4
4
  "description": "A server for web app prototyping with HTTPS and Websockets",
5
5
  "main": "tlsd.js",
6
6
  "bin": {
package/tlsd.js CHANGED
@@ -598,63 +598,117 @@ function put_handler( req, res, next ) {
598
598
  return;
599
599
  }
600
600
 
601
- let local_path = UPLOAD_DIR;
602
- fs.mkdirSync( local_path, { recursive: true } );
603
-
604
- // generate random hash to store file under locally
605
- const hash = sha1( "" + ( Date.now() + Math.random() ) ).slice( 0, 16 ); // only use first 16 chars of the hash
606
- let raw_filename = req.headers[ "X-Filename" ];
607
- if( ! raw_filename ) {
608
- // take the filename from the last part of the URL
609
- raw_filename = req.url.split( "/" ).pop();
610
- // remove any query string
611
- raw_filename = raw_filename.split( "?" ).shift();
612
- }
613
- const sanitized_filename = sanitize_upload_filename( raw_filename );
614
- const final_filename = hash + "_" + sanitized_filename;
615
- local_path += "/" + final_filename;
616
-
617
- D( "PUT: " + local_path );
618
-
619
- const writeStream = fs.createWriteStream( local_path );
620
-
621
- let responded = false;
622
- let received_bytes = 0;
623
- const abort_too_large = function( ) {
624
- if( responded ) return;
625
- responded = true;
626
- try { req.unpipe( writeStream ); } catch( _e ) {}
627
- try { writeStream.destroy(); } catch( _e ) {}
628
- try { req.destroy(); } catch( _e ) {}
629
- try { fs.unlink( local_path, function( ){} ); } catch( _e ) {}
630
- res.writeHead( 413, { "Content-Type": "application/json" } );
631
- res.write( o2j( { error: "Payload too large" } ) );
632
- res.end();
633
- };
601
+ const upload_dir = UPLOAD_DIR;
602
+ fs.mkdirSync( upload_dir, { recursive: true } );
603
+
604
+ // Kick off the upload, given the initial available bytes on the dest filesystem.
605
+ // available_bytes is re-queried every 500ms throughout the transfer so that
606
+ // concurrent uploads are accounted for.
607
+ const do_put = function( initial_available_bytes ) {
608
+ let available_bytes = initial_available_bytes;
609
+
610
+ // Preflight: reject immediately if Content-Length already exceeds threshold
611
+ if( content_length && content_length > available_bytes * 0.8 ) {
612
+ W( "PUT: Content-Length " + content_length + " exceeds 80% of available space (" + available_bytes + " bytes)" );
613
+ res.writeHead( 507, { "Content-Type": "application/json" } );
614
+ res.write( o2j( { error: "Insufficient storage space" } ) );
615
+ res.end();
616
+ return;
617
+ }
634
618
 
635
- // Streaming size guard
636
- req.on( "data", function( chunk ) {
637
- received_bytes += chunk.length;
638
- if( received_bytes > UPLOAD_MAX_BYTES ) {
639
- W( "PUT: stream exceeded cap: " + received_bytes + " > " + UPLOAD_MAX_BYTES );
640
- abort_too_large( );
619
+ // generate random hash to store file under locally
620
+ const hash = sha1( "" + ( Date.now() + Math.random() ) ).slice( 0, 16 ); // only use first 16 chars of the hash
621
+ let raw_filename = req.headers[ "X-Filename" ];
622
+ if( ! raw_filename ) {
623
+ // take the filename from the last part of the URL
624
+ raw_filename = req.url.split( "/" ).pop();
625
+ // remove any query string
626
+ raw_filename = raw_filename.split( "?" ).shift();
641
627
  }
642
- } );
628
+ const sanitized_filename = sanitize_upload_filename( raw_filename );
629
+ const final_filename = hash + "_" + sanitized_filename;
630
+ const local_path = upload_dir + "/" + final_filename;
631
+
632
+ D( "PUT: " + local_path );
633
+
634
+ const writeStream = fs.createWriteStream( local_path );
635
+
636
+ let responded = false;
637
+ let received_bytes = 0;
638
+
639
+ const abort_with = function( status, message, log_msg ) {
640
+ if( responded ) return;
641
+ responded = true;
642
+ clearInterval( disk_poll );
643
+ W( log_msg );
644
+ try { req.unpipe( writeStream ); } catch( _e ) {}
645
+ try { writeStream.destroy(); } catch( _e ) {}
646
+ try { req.destroy(); } catch( _e ) {}
647
+ try { fs.unlink( local_path, function( ){} ); } catch( _e ) {}
648
+ res.writeHead( status, { "Content-Type": "application/json" } );
649
+ res.write( o2j( { error: message } ) );
650
+ res.end();
651
+ };
643
652
 
644
- req.pipe( writeStream );
653
+ // Poll available disk space every 500ms so concurrent uploads are reflected
654
+ const disk_poll = setInterval( function( ) {
655
+ fs.statfs( upload_dir, function( err, stats ) {
656
+ if( ! err ) {
657
+ available_bytes = stats.bavail * stats.bsize;
658
+ D( "PUT: disk poll available=" + available_bytes );
659
+ }
660
+ } );
661
+ }, 500 );
662
+
663
+ // Streaming size guard - checks against the latest available_bytes each chunk
664
+ req.on( "data", function( chunk ) {
665
+ received_bytes += chunk.length;
666
+ if( received_bytes > UPLOAD_MAX_BYTES ) {
667
+ abort_with( 413, "Payload too large", "PUT: stream exceeded cap: " + received_bytes + " > " + UPLOAD_MAX_BYTES );
668
+ } else if( received_bytes > available_bytes * 0.8 ) {
669
+ abort_with( 507, "Insufficient storage space", "PUT: stream exceeded 80% of available space: " + received_bytes + " > " + ( available_bytes * 0.8 ) );
670
+ }
671
+ } );
645
672
 
646
- writeStream.on( "finish", ( ) => {
647
- if( responded ) return;
648
- I( "PUT: " + local_path );
649
- res.writeHead( 200, { "Content-Type": "application/json" } );
650
- res.write( o2j( { hash, filename: final_filename } ) );
651
- res.end();
652
- } );
673
+ req.pipe( writeStream );
674
+
675
+ writeStream.on( "finish", ( ) => {
676
+ if( responded ) return;
677
+ clearInterval( disk_poll );
678
+ I( "PUT: " + local_path );
679
+ res.writeHead( 200, { "Content-Type": "application/json" } );
680
+ res.write( o2j( { hash, filename: final_filename } ) );
681
+ res.end();
682
+ } );
683
+
684
+ writeStream.on( "error", ( error ) => {
685
+ if( responded ) return;
686
+ clearInterval( disk_poll );
687
+ fail( "PUT: " + local_path + " failed during stream", error.stack );
688
+ } );
689
+ };
690
+
691
+ if( typeof fs.statfs !== "function" ) {
692
+ E( "PUT: fs.statfs unavailable, aborting transfer" );
693
+ res.writeHead( 503, { "Content-Type": "application/json" } );
694
+ res.write( o2j( { error: "Disk space check unavailable" } ) );
695
+ res.end();
696
+ return;
697
+ }
698
+
699
+ fs.statfs( upload_dir, function( err, stats ) {
700
+ if( err ) {
701
+ E( "PUT: statfs failed, aborting transfer: " + err.message );
702
+ res.writeHead( 503, { "Content-Type": "application/json" } );
703
+ res.write( o2j( { error: "Disk space check failed" } ) );
704
+ res.end();
705
+ return;
706
+ }
707
+ const available_bytes = stats.bavail * stats.bsize;
708
+ D( "PUT: disk available=" + available_bytes );
709
+ do_put( available_bytes );
710
+ } );
653
711
 
654
- writeStream.on( "error", ( error ) => {
655
- if( responded ) return;
656
- fail( "PUT: " + local_path + " failed during stream", error.stack );
657
- } );
658
712
  } catch ( error ) {
659
713
  fail( "PUT: failed", error.stack );
660
714
  }