tlsd 2.20.0 → 2.20.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/tlsd.js +135 -59
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "tlsd",
3
- "version": "2.20.0",
3
+ "version": "2.20.2",
4
4
  "description": "A server for web app prototyping with HTTPS and Websockets",
5
5
  "main": "tlsd.js",
6
6
  "bin": {
package/tlsd.js CHANGED
@@ -43,6 +43,30 @@ const RATE_LIMIT_MAX_REQUESTS = toInt( process.env.RATE_LIMIT_MAX_REQUESTS ) ||
43
43
 
44
44
  let dev_mode = false;
45
45
 
46
+ function public_origin_matches_host( origin, host ) {
47
+ if( ! origin ) {
48
+ return false;
49
+ }
50
+ const httpOrigin = "http://" + host;
51
+ const httpsOrigin = "https://" + host;
52
+ if( dev_mode ) {
53
+ return origin === httpOrigin || origin === httpsOrigin;
54
+ }
55
+ return origin === httpsOrigin;
56
+ }
57
+
58
+ function public_referer_matches_host( referer, host ) {
59
+ if( ! referer ) {
60
+ return false;
61
+ }
62
+ const httpRef = "http://" + host;
63
+ const httpsRef = "https://" + host;
64
+ if( dev_mode ) {
65
+ return referer.startsWith( httpRef ) || referer.startsWith( httpsRef );
66
+ }
67
+ return referer.startsWith( httpsRef );
68
+ }
69
+
46
70
  // Connection tracking for DOS protection
47
71
  const ipConnections = new Map();
48
72
  let totalConnections = 0;
@@ -444,11 +468,9 @@ function csrf_protection( root ) {
444
468
  let originValid = false;
445
469
 
446
470
  if( origin ) {
447
- const expectedOrigin = ( dev_mode ? "http://" : "https://" ) + host;
448
- originValid = origin === expectedOrigin;
471
+ originValid = public_origin_matches_host( origin, host );
449
472
  } else if( referer ) {
450
- const expectedReferer = ( dev_mode ? "http://" : "https://" ) + host;
451
- originValid = referer.startsWith( expectedReferer );
473
+ originValid = public_referer_matches_host( referer, host );
452
474
  }
453
475
 
454
476
  // For PUT (e.g. file uploads), cookie + Origin/Referer is sufficient.
@@ -598,63 +620,117 @@ function put_handler( req, res, next ) {
598
620
  return;
599
621
  }
600
622
 
601
- let local_path = UPLOAD_DIR;
602
- fs.mkdirSync( local_path, { recursive: true } );
603
-
604
- // generate random hash to store file under locally
605
- const hash = sha1( "" + ( Date.now() + Math.random() ) ).slice( 0, 16 ); // only use first 16 chars of the hash
606
- let raw_filename = req.headers[ "X-Filename" ];
607
- if( ! raw_filename ) {
608
- // take the filename from the last part of the URL
609
- raw_filename = req.url.split( "/" ).pop();
610
- // remove any query string
611
- raw_filename = raw_filename.split( "?" ).shift();
612
- }
613
- const sanitized_filename = sanitize_upload_filename( raw_filename );
614
- const final_filename = hash + "_" + sanitized_filename;
615
- local_path += "/" + final_filename;
616
-
617
- D( "PUT: " + local_path );
618
-
619
- const writeStream = fs.createWriteStream( local_path );
620
-
621
- let responded = false;
622
- let received_bytes = 0;
623
- const abort_too_large = function( ) {
624
- if( responded ) return;
625
- responded = true;
626
- try { req.unpipe( writeStream ); } catch( _e ) {}
627
- try { writeStream.destroy(); } catch( _e ) {}
628
- try { req.destroy(); } catch( _e ) {}
629
- try { fs.unlink( local_path, function( ){} ); } catch( _e ) {}
630
- res.writeHead( 413, { "Content-Type": "application/json" } );
631
- res.write( o2j( { error: "Payload too large" } ) );
632
- res.end();
633
- };
623
+ const upload_dir = UPLOAD_DIR;
624
+ fs.mkdirSync( upload_dir, { recursive: true } );
625
+
626
+ // Kick off the upload, given the initial available bytes on the dest filesystem.
627
+ // available_bytes is re-queried every 500ms throughout the transfer so that
628
+ // concurrent uploads are accounted for.
629
+ const do_put = function( initial_available_bytes ) {
630
+ let available_bytes = initial_available_bytes;
631
+
632
+ // Preflight: reject immediately if Content-Length already exceeds threshold
633
+ if( content_length && content_length > available_bytes * 0.8 ) {
634
+ W( "PUT: Content-Length " + content_length + " exceeds 80% of available space (" + available_bytes + " bytes)" );
635
+ res.writeHead( 507, { "Content-Type": "application/json" } );
636
+ res.write( o2j( { error: "Insufficient storage space" } ) );
637
+ res.end();
638
+ return;
639
+ }
634
640
 
635
- // Streaming size guard
636
- req.on( "data", function( chunk ) {
637
- received_bytes += chunk.length;
638
- if( received_bytes > UPLOAD_MAX_BYTES ) {
639
- W( "PUT: stream exceeded cap: " + received_bytes + " > " + UPLOAD_MAX_BYTES );
640
- abort_too_large( );
641
+ // generate random hash to store file under locally
642
+ const hash = sha1( "" + ( Date.now() + Math.random() ) ).slice( 0, 16 ); // only use first 16 chars of the hash
643
+ let raw_filename = req.headers[ "X-Filename" ];
644
+ if( ! raw_filename ) {
645
+ // take the filename from the last part of the URL
646
+ raw_filename = req.url.split( "/" ).pop();
647
+ // remove any query string
648
+ raw_filename = raw_filename.split( "?" ).shift();
641
649
  }
642
- } );
650
+ const sanitized_filename = sanitize_upload_filename( raw_filename );
651
+ const final_filename = hash + "_" + sanitized_filename;
652
+ const local_path = upload_dir + "/" + final_filename;
653
+
654
+ D( "PUT: " + local_path );
655
+
656
+ const writeStream = fs.createWriteStream( local_path );
657
+
658
+ let responded = false;
659
+ let received_bytes = 0;
660
+
661
+ const abort_with = function( status, message, log_msg ) {
662
+ if( responded ) return;
663
+ responded = true;
664
+ clearInterval( disk_poll );
665
+ W( log_msg );
666
+ try { req.unpipe( writeStream ); } catch( _e ) {}
667
+ try { writeStream.destroy(); } catch( _e ) {}
668
+ try { req.destroy(); } catch( _e ) {}
669
+ try { fs.unlink( local_path, function( ){} ); } catch( _e ) {}
670
+ res.writeHead( status, { "Content-Type": "application/json" } );
671
+ res.write( o2j( { error: message } ) );
672
+ res.end();
673
+ };
643
674
 
644
- req.pipe( writeStream );
675
+ // Poll available disk space every 500ms so concurrent uploads are reflected
676
+ const disk_poll = setInterval( function( ) {
677
+ fs.statfs( upload_dir, function( err, stats ) {
678
+ if( ! err ) {
679
+ available_bytes = stats.bavail * stats.bsize;
680
+ D( "PUT: disk poll available=" + available_bytes );
681
+ }
682
+ } );
683
+ }, 500 );
684
+
685
+ // Streaming size guard - checks against the latest available_bytes each chunk
686
+ req.on( "data", function( chunk ) {
687
+ received_bytes += chunk.length;
688
+ if( received_bytes > UPLOAD_MAX_BYTES ) {
689
+ abort_with( 413, "Payload too large", "PUT: stream exceeded cap: " + received_bytes + " > " + UPLOAD_MAX_BYTES );
690
+ } else if( received_bytes > available_bytes * 0.8 ) {
691
+ abort_with( 507, "Insufficient storage space", "PUT: stream exceeded 80% of available space: " + received_bytes + " > " + ( available_bytes * 0.8 ) );
692
+ }
693
+ } );
645
694
 
646
- writeStream.on( "finish", ( ) => {
647
- if( responded ) return;
648
- I( "PUT: " + local_path );
649
- res.writeHead( 200, { "Content-Type": "application/json" } );
650
- res.write( o2j( { hash, filename: final_filename } ) );
651
- res.end();
652
- } );
695
+ req.pipe( writeStream );
696
+
697
+ writeStream.on( "finish", ( ) => {
698
+ if( responded ) return;
699
+ clearInterval( disk_poll );
700
+ I( "PUT: " + local_path );
701
+ res.writeHead( 200, { "Content-Type": "application/json" } );
702
+ res.write( o2j( { hash, filename: final_filename } ) );
703
+ res.end();
704
+ } );
705
+
706
+ writeStream.on( "error", ( error ) => {
707
+ if( responded ) return;
708
+ clearInterval( disk_poll );
709
+ fail( "PUT: " + local_path + " failed during stream", error.stack );
710
+ } );
711
+ };
712
+
713
+ if( typeof fs.statfs !== "function" ) {
714
+ E( "PUT: fs.statfs unavailable, aborting transfer" );
715
+ res.writeHead( 503, { "Content-Type": "application/json" } );
716
+ res.write( o2j( { error: "Disk space check unavailable" } ) );
717
+ res.end();
718
+ return;
719
+ }
720
+
721
+ fs.statfs( upload_dir, function( err, stats ) {
722
+ if( err ) {
723
+ E( "PUT: statfs failed, aborting transfer: " + err.message );
724
+ res.writeHead( 503, { "Content-Type": "application/json" } );
725
+ res.write( o2j( { error: "Disk space check failed" } ) );
726
+ res.end();
727
+ return;
728
+ }
729
+ const available_bytes = stats.bavail * stats.bsize;
730
+ D( "PUT: disk available=" + available_bytes );
731
+ do_put( available_bytes );
732
+ } );
653
733
 
654
- writeStream.on( "error", ( error ) => {
655
- if( responded ) return;
656
- fail( "PUT: " + local_path + " failed during stream", error.stack );
657
- } );
658
734
  } catch ( error ) {
659
735
  fail( "PUT: failed", error.stack );
660
736
  }
@@ -799,9 +875,9 @@ function ws_attach( server, msg_handler ) {
799
875
 
800
876
  // Validate WebSocket origin matches host
801
877
  if( origin ) {
802
- const expectedOrigin = ( dev_mode ? "http://" : "https://" ) + host;
803
- if( origin !== expectedOrigin ) {
804
- W( "WS: Origin validation failed: " + origin + " expected: " + expectedOrigin );
878
+ if( ! public_origin_matches_host( origin, host ) ) {
879
+ const expectedHint = dev_mode ? "http(s)://" + host : "https://" + host;
880
+ W( "WS: Origin validation failed: " + origin + " expected: " + expectedHint );
805
881
  activeWSConnections--;
806
882
  totalConnections--;
807
883
  release_ip_connection( remote_ip );