s3_website 2.10.0 → 2.11.0
- checksums.yaml +4 -4
- data/README.md +25 -6
- data/build.sbt +1 -1
- data/changelog.md +4 -0
- data/lib/s3_website/version.rb +1 -1
- data/resources/configuration_file_template.yml +3 -0
- data/resources/s3_website.jar.md5 +1 -1
- data/src/main/scala/s3/website/CloudFront.scala +1 -1
- data/src/main/scala/s3/website/S3.scala +18 -11
- data/src/main/scala/s3/website/UploadHelper.scala +9 -6
- data/src/main/scala/s3/website/model/Config.scala +39 -21
- data/src/main/scala/s3/website/model/Site.scala +9 -8
- data/src/main/scala/s3/website/model/push.scala +28 -61
- data/src/main/scala/s3/website/package.scala +41 -1
- data/src/test/scala/s3/website/S3WebsiteSpec.scala +231 -7
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7df2add8f3589b718b7f8f1eca3cde4b7d03112b
+  data.tar.gz: eba43c04b20fc754a8cfe7c69b2a4cc596c03acd
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 26d815278b9d10e2238f52445436d8fd740d1415244c95c16dc3bf9563697ca68a1a82b943810ddf895389bd3d045cbdd27e72bc15503c970f564bd031b1bb14
+  data.tar.gz: 96f3b42495fc67196253d39d2614f9a3154b508962adf9b692141fed8152f03298cf9495482a0ea383d5ea382a2364a683e335191934c4cf0d1b0646b42513a0
data/README.md
CHANGED
@@ -91,8 +91,8 @@ incomprehensible or inconsistent.
 
 ### Cache Control
 
-You can use either the setting `max_age` or `cache_control`to enable more
-effective browser caching of your static assets.
+You can use either the setting `max_age` or `cache_control`to enable more
+effective browser caching of your static assets.
 
 #### max_age
 
@@ -117,10 +117,10 @@ Force-pushing allows you to update the S3 object metadata of existing files.
 
 #### cache_control
 
-The `cache_control` setting allows you to define an arbitrary string that s3_website
+The `cache_control` setting allows you to define an arbitrary string that s3_website
 will put on all the S3 objects of your website.
-
-Here's an example:
+
+Here's an example:
 
 ```yaml
 cache_control: public, no-transform, max-age=1200, s-maxage=1200
@@ -241,7 +241,7 @@ When you run the command `s3_website cfg apply`, it will ask you whether you
 want to deliver your website via CloudFront. If you answer yes, the command will
 create a CloudFront distribution for you.
 
-If you do not want to receive this prompt, or if you are running the command in a non-interactive session, you can use `s3_website cfg apply --headless
+If you do not want to receive this prompt, or if you are running the command in a non-interactive session, you can use `s3_website cfg apply --headless` (and optionally also use `--autocreate-cloudfront-dist` if desired).
 
 #### Using your existing CloudFront distribution
 
@@ -323,6 +323,14 @@ redirects:
   music-files/promo.mp4: http://www.youtube.com/watch?v=dQw4w9WgXcQ
 ```
 
+On terminology: the left value is the redirect source and the right value is the redirect
+target. For example above, *about.php* is the redirect source and */about.html* the target.
+
+If the `s3_key_prefix` setting is defined, it will be applied to the redirect
+target if and only if the redirect target points to a site-local resource and
+does not start with a slash. E.g., `about.php: about.html` will be translated
+into `about.php: VALUE-OF-S3_KEY_PREFIX/about.html`.
+
 #### Routing Rules
 
 You can configure more complex redirect rules by adding the following
@@ -396,6 +404,17 @@ operation would actually do if run without the dry switch.
 You can use the dry run mode if you are unsure what kind of effects the `push`
 operation would cause to your live website.
 
+### S3 website in a subdirectory of the bucket
+
+If your S3 website shares the same S3 bucket with other applications, you can
+push your website into a "subdirectory" on the bucket.
+
+Define the subdirectory like so:
+
+```yaml
+s3_key_prefix: your-subdirectory
+```
+
 ## Migrating from v1 to v2
 
 Please read the [release note](/changelog.md#200) on version 2. It contains
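The prefix rule described in the README addition above can be summarised in a short standalone sketch. This is illustrative only (the helper name and structure are not part of the gem); the gem's actual implementation is the `applyRedirectRules` change shown later in `push.scala`.

```scala
object RedirectPrefixSketch {
  // Mirrors the documented rule: s3_key_prefix is applied to a redirect target
  // only when the target is site-local and does not start with a slash.
  def resolveRedirectTarget(target: String, s3KeyPrefix: Option[String]): String = {
    val isExternal = target.matches("https?://.*")
    val startsAtSiteRoot = target.startsWith("/")
    if (isExternal || startsAtSiteRoot) target
    else s3KeyPrefix.map(prefix => s"/$prefix/$target").getOrElse(s"/$target")
  }

  def main(args: Array[String]): Unit = {
    println(resolveRedirectTarget("about.html", Some("production")))         // /production/about.html
    println(resolveRedirectTarget("/about.html", Some("production")))        // /about.html
    println(resolveRedirectTarget("http://example.com", Some("production"))) // http://example.com
  }
}
```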
data/build.sbt
CHANGED
data/changelog.md
CHANGED
data/lib/s3_website/version.rb
CHANGED
data/resources/s3_website.jar.md5
CHANGED
@@ -1 +1 @@
-
+cf1a6f74b8eaceb16173937afc198952
data/src/main/scala/s3/website/CloudFront.scala
CHANGED
@@ -81,7 +81,7 @@ object CloudFront {
       if (containsPotentialDefaultRootObject) Some("/") else None
     }
     val indexPath = config.cloudfront_invalidate_root collect {
-      case true if pushSuccessReports.nonEmpty => "/index.html"
+      case true if pushSuccessReports.nonEmpty => config.s3_key_prefix.map(prefix => s"/$prefix").getOrElse("") + "/index.html"
     }
 
     val invalidationPaths: Seq[String] = {
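For context, the effect of the one-line CloudFront change above can be sketched as a standalone function (hypothetical name, not the gem's API): the root invalidation path now carries the prefix.

```scala
object InvalidationPathSketch {
  // Rough equivalent of the expression in the hunk above.
  def rootIndexPath(s3KeyPrefix: Option[String]): String =
    s3KeyPrefix.map(prefix => s"/$prefix").getOrElse("") + "/index.html"

  def main(args: Array[String]): Unit = {
    println(rootIndexPath(None))               // /index.html
    println(rootIndexPath(Some("production"))) // /production/index.html
  }
}
```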
data/src/main/scala/s3/website/S3.scala
CHANGED
@@ -52,7 +52,7 @@ object S3 {
     (implicit config: Config, s3Settings: S3Setting, pushOptions: PushOptions, executor: ExecutionContextExecutor, logger: Logger):
   Future[Either[FailedDelete, SuccessfulDelete]] =
     Future {
-      if (!pushOptions.dryRun) s3Settings.s3Client(config) deleteObject(config.s3_bucket, s3Key)
+      if (!pushOptions.dryRun) s3Settings.s3Client(config) deleteObject(config.s3_bucket, s3Key.key)
       val report = SuccessfulDelete(s3Key)
       logger.info(report)
       Right(report)
@@ -86,13 +86,13 @@ object S3 {
           case (None, None) => None
         }
         cacheControl foreach { md.setCacheControl }
-        val req = new PutObjectRequest(config.s3_bucket, upload.s3Key, new FileInputStream(uploadFile), md)
+        val req = new PutObjectRequest(config.s3_bucket, upload.s3Key.key, new FileInputStream(uploadFile), md)
         config.s3_reduced_redundancy.filter(_ == true) foreach (_ => req setStorageClass ReducedRedundancy)
         req
       }
       ,
       redirect => {
-        val req = new PutObjectRequest(config.s3_bucket, redirect.s3Key, redirect.redirectTarget)
+        val req = new PutObjectRequest(config.s3_bucket, redirect.s3Key.key, redirect.redirectTarget)
         req.setMetadata({
           val md = new ObjectMetadata()
           md.setContentLength(0) // Otherwise the AWS SDK will log a warning
@@ -116,21 +116,28 @@ object S3 {
   def awsS3Client(config: Config) = new AmazonS3Client(awsCredentials(config))
 
   def resolveS3Files(nextMarker: Option[String] = None, alreadyResolved: Seq[S3File] = Nil, attempt: Attempt = 1)
-                    (implicit
+                    (implicit site: Site, s3Settings: S3Setting, ec: ExecutionContextExecutor, logger: Logger, pushOptions: PushOptions):
   Future[Either[ErrorReport, Seq[S3File]]] = Future {
     logger.debug(nextMarker.fold
       ("Querying S3 files")
       {m => s"Querying more S3 files (starting from $m)"}
     )
-    val objects: ObjectListing = s3Settings.s3Client(config).listObjects({
+    val objects: ObjectListing = s3Settings.s3Client(site.config).listObjects({
       val req = new ListObjectsRequest()
-      req.setBucketName(config.s3_bucket)
+      req.setBucketName(site.config.s3_bucket)
       nextMarker.foreach(req.setMarker)
       req
     })
     objects
   } flatMap { (objects: ObjectListing) =>
-
+
+    /**
+      * We could filter the keys by prefix already on S3, but unfortunately s3_website test infrastructure does not currently support testing of that.
+      * Hence fetch all the keys from S3 and then filter by s3_key_prefix.
+      */
+    def matchesPrefix(os: S3ObjectSummary) = site.config.s3_key_prefix.fold(true)(prefix => os.getKey.startsWith(prefix))
+
+    val s3Files = alreadyResolved ++ (objects.getObjectSummaries.filter(matchesPrefix).toIndexedSeq.toSeq map (S3File(_)))
     Option(objects.getNextMarker)
       .fold(Future(Right(s3Files)): Future[Either[ErrorReport, Seq[S3File]]]) // We've received all the S3 keys from the bucket
       { nextMarker => // There are more S3 keys on the bucket. Fetch them.
@@ -149,7 +156,7 @@ object S3 {
 
   sealed trait PushFailureReport extends ErrorReport
   sealed trait PushSuccessReport extends SuccessReport {
-    def s3Key:
+    def s3Key: S3Key
   }
 
   case class SuccessfulRedirectDetails(uploadType: UploadType, redirectTarget: String)
@@ -211,15 +218,15 @@ object S3 {
     }
   }
 
-  case class SuccessfulDelete(s3Key:
+  case class SuccessfulDelete(s3Key: S3Key)(implicit pushOptions: PushOptions) extends PushSuccessReport {
     def reportMessage = s"${Deleted.renderVerb} $s3Key"
   }
 
-  case class FailedUpload(s3Key:
+  case class FailedUpload(s3Key: S3Key, error: Throwable)(implicit logger: Logger) extends PushFailureReport {
     def reportMessage = errorMessage(s"Failed to upload $s3Key", error)
   }
 
-  case class FailedDelete(s3Key:
+  case class FailedDelete(s3Key: S3Key, error: Throwable)(implicit logger: Logger) extends PushFailureReport {
     def reportMessage = errorMessage(s"Failed to delete $s3Key", error)
   }
 
data/src/main/scala/s3/website/UploadHelper.scala
CHANGED
@@ -1,5 +1,6 @@
 package s3.website
 
+import s3.website.S3Key.isIgnoredBecauseOfPrefix
 import s3.website.model.Files.listSiteFiles
 import s3.website.model._
 import s3.website.Ruby.rubyRegexMatches
@@ -46,7 +47,9 @@ object UploadHelper {
 
   def resolveDeletes(s3Files: Future[Either[ErrorReport, Seq[S3File]]], redirects: Seq[Redirect])
                     (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Future[Either[ErrorReport, Seq[S3Key]]] =
-    if (site.config.ignore_on_server
+    if (site.config.ignore_on_server exists (
+      ignoreRegexes => ignoreRegexes.s3KeyRegexes exists( regex => regex matches S3Key.build(DELETE_NOTHING_MAGIC_WORD, site.config.s3_key_prefix))
+    )) {
       logger.debug(s"Ignoring all files on the bucket, since the setting $DELETE_NOTHING_MAGIC_WORD is on.")
       Future(Right(Nil))
     } else {
@@ -56,12 +59,12 @@ object UploadHelper {
       for {
         remoteS3Keys <- s3Files.right.map(_ map (_.s3Key)).right
       } yield {
-        val
+        val keysIgnoredBecauseOf_s3_key_prefix = remoteS3Keys.filterNot(isIgnoredBecauseOfPrefix)
+        val keysToRetain = (
+          localS3Keys ++ (redirects map { _.s3Key }) ++ keysIgnoredBecauseOf_s3_key_prefix
+        ).toSet
         remoteS3Keys filterNot { s3Key =>
-          val ignoreOnServer = site.config.ignore_on_server.exists(_
-            (ignoreRegex: String) => rubyRegexMatches(s3Key, ignoreRegex),
-            (ignoreRegexes: Seq[String]) => ignoreRegexes.exists(rubyRegexMatches(s3Key, _))
-          ))
+          val ignoreOnServer = site.config.ignore_on_server.exists(_ matches s3Key)
           if (ignoreOnServer) logger.debug(s"Ignoring $s3Key on server")
           (keysToRetain contains s3Key) || ignoreOnServer
         }
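A simplified, self-contained sketch of the delete resolution shown above (names are illustrative): remote keys outside the prefix are always retained, and only prefixed keys missing from the local site become delete candidates.

```scala
object DeleteResolutionSketch {
  // Simplified model of resolveDeletes with an s3_key_prefix in place.
  def keysToDelete(remoteKeys: Seq[String],
                   localKeys: Set[String],
                   s3KeyPrefix: Option[String]): Seq[String] = {
    val prefix = s3KeyPrefix.map(p => if (p.endsWith("/")) p else p + "/").getOrElse("")
    remoteKeys
      .filter(_.startsWith(prefix))   // keys outside the prefix are never deleted
      .filterNot(localKeys.contains)  // keys still present locally are kept
  }

  def main(args: Array[String]): Unit = {
    val remote = Seq("production/old.html", "production/index.html", "other-app/data.json")
    val local  = Set("production/index.html")
    println(keysToDelete(remote, local, Some("production"))) // List(production/old.html)
  }
}
```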
data/src/main/scala/s3/website/model/Config.scala
CHANGED
@@ -1,11 +1,13 @@
 package s3.website.model
 
 import java.io.File
+import java.util
 
+import scala.util.matching.Regex
 import scala.util.{Failure, Try}
 import scala.collection.JavaConversions._
 import s3.website.Ruby.rubyRuntime
-import s3.website.
+import s3.website._
 import com.amazonaws.auth.{AWSCredentialsProvider, BasicAWSCredentials, DefaultAWSCredentialsProviderChain}
 
 case class Config(
@@ -14,16 +16,17 @@ case class Config(
   s3_bucket: String,
   s3_endpoint: S3Endpoint,
   site: Option[String],
-  max_age: Option[Either[Int,
-  cache_control: Option[Either[String,
+  max_age: Option[Either[Int, S3KeyGlob[Int]]],
+  cache_control: Option[Either[String, S3KeyGlob[String]]],
   gzip: Option[Either[Boolean, Seq[String]]],
   gzip_zopfli: Option[Boolean],
-
-
+  s3_key_prefix: Option[String],
+  ignore_on_server: Option[S3KeyRegexes],
+  exclude_from_upload: Option[S3KeyRegexes],
   s3_reduced_redundancy: Option[Boolean],
   cloudfront_distribution_id: Option[String],
   cloudfront_invalidate_root: Option[Boolean],
-  redirects: Option[Map[
+  redirects: Option[Map[S3Key, String]],
   concurrency_level: Int,
   treat_zero_length_objects_as_redirects: Option[Boolean]
 )
@@ -56,41 +59,52 @@ object Config {
     yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a boolean or [string] value"))
   }
 
-  def
+  def loadOptionalS3KeyRegexes(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[S3KeyRegexes]] = {
     val yamlValue = for {
       valueOption <- loadOptionalValue(key)
     } yield {
+      def toS3KeyRegexes(xs: Seq[String]) = S3KeyRegexes(xs map (str => str.r) map S3KeyRegex)
      Right(valueOption.map {
-        case value if value.isInstanceOf[String] =>
-
+        case value if value.isInstanceOf[String] =>
+          toS3KeyRegexes(value.asInstanceOf[String] :: Nil)
+        case value if value.isInstanceOf[java.util.List[_]] =>
+          toS3KeyRegexes(value.asInstanceOf[java.util.List[String]].toIndexedSeq)
      })
    }
 
    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a string or [string] value"))
  }
 
-  def loadMaxAge(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[Int,
+  def loadMaxAge(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[Int, S3KeyGlob[Int]]]] = {
    val key = "max_age"
    val yamlValue = for {
      maxAgeOption <- loadOptionalValue(key)
    } yield {
-
-
-
-
-
+      // TODO below we are using an unsafe call to asInstance of – we should implement error handling
+      Right(maxAgeOption.map {
+        case maxAge if maxAge.isInstanceOf[Int] =>
+          Left(maxAge.asInstanceOf[Int])
+        case maxAge if maxAge.isInstanceOf[java.util.Map[_,_]] =>
+          val globs: Map[String, Int] = maxAge.asInstanceOf[util.Map[String, Int]].toMap
+          Right(S3KeyGlob(globs))
+      })
+    }
 
    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have an int or (string -> int) value"))
  }
 
-  def loadCacheControl(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[String,
+  def loadCacheControl(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[String, S3KeyGlob[String]]]] = {
    val key = "cache_control"
    val yamlValue = for {
      cacheControlOption <- loadOptionalValue(key)
    } yield {
+      // TODO below we are using an unsafe call to asInstance of – we should implement error handling
      Right(cacheControlOption.map {
-        case cacheControl if cacheControl.isInstanceOf[String] =>
-
+        case cacheControl if cacheControl.isInstanceOf[String] =>
+          Left(cacheControl.asInstanceOf[String])
+        case cacheControl if cacheControl.isInstanceOf[java.util.Map[_,_]] =>
+          val globs: Map[String, String] = cacheControl.asInstanceOf[util.Map[String, String]].toMap
+          Right(S3KeyGlob(globs))
      })
    }
 
@@ -106,12 +120,16 @@ object Config {
    }
  }
 
-  def loadRedirects(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Map[
+  def loadRedirects(s3_key_prefix: Option[String])(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Map[S3Key, String]]] = {
    val key = "redirects"
    val yamlValue = for {
      redirectsOption <- loadOptionalValue(key)
-
-    } yield Right(
+      redirectsOption <- Try(redirectsOption.map(_.asInstanceOf[java.util.Map[String,String]].toMap))
+    } yield Right(redirectsOption.map(
+      redirects => redirects.map(
+        ((key: String, value: String) => (S3Key.build(key, s3_key_prefix), value)).tupled
+      )
+    ))
 
    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a (string -> string) value"))
  }
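The loaders above now accept either a scalar value or a glob-to-value map for `max_age` and `cache_control`. A minimal sketch of that shape, using plain Scala types instead of the gem's `S3KeyGlob` (illustrative only):

```scala
object MaxAgeConfigSketch {
  // max_age can be a single number of seconds or a map of glob -> seconds.
  type MaxAgeSetting = Either[Int, Map[String, Int]]

  def describe(setting: MaxAgeSetting): String = setting match {
    case Left(seconds) => s"every object gets max-age=$seconds"
    case Right(globs)  => globs.map { case (glob, s) => s"$glob -> max-age=$s" }.mkString(", ")
  }

  def main(args: Array[String]): Unit = {
    println(describe(Left(120)))
    println(describe(Right(Map("*.html" -> 60, "assets/*" -> 300))))
  }
}
```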
data/src/main/scala/s3/website/model/Site.scala
CHANGED
@@ -15,11 +15,10 @@ import s3.website.model.Config.UnsafeYaml
 import scala.util.Success
 
 case class Site(rootDirectory: File, config: Config) {
-  def resolveS3Key(file: File) =
-
-
-
-  def resolveFile(s3Key: S3Key): File = new File(s"$rootDirectory/$s3Key")
+  def resolveS3Key(file: File) = S3Key.build(
+    file.getAbsolutePath.replace(rootDirectory.getAbsolutePath, "").replace(File.separator,"/").replaceFirst("^/", ""),
+    config.s3_key_prefix
+  )
 }
 
 object Site {
@@ -44,13 +43,14 @@ object Site {
       gzip <- loadOptionalBooleanOrStringSeq("gzip").right
       gzip_zopfli <- loadOptionalBoolean("gzip_zopfli").right
       extensionless_mime_type <- loadOptionalString("extensionless_mime_type").right
-
-
+      s3_key_prefix <- loadOptionalString("s3_key_prefix").right
+      ignore_on_server <- loadOptionalS3KeyRegexes("ignore_on_server").right
+      exclude_from_upload <- loadOptionalS3KeyRegexes("exclude_from_upload").right
       s3_reduced_redundancy <- loadOptionalBoolean("s3_reduced_redundancy").right
       cloudfront_distribution_id <- loadOptionalString("cloudfront_distribution_id").right
       cloudfront_invalidate_root <- loadOptionalBoolean("cloudfront_invalidate_root").right
       concurrency_level <- loadOptionalInt("concurrency_level").right
-      redirects <- loadRedirects.right
+      redirects <- loadRedirects(s3_key_prefix).right
       treat_zero_length_objects_as_redirects <- loadOptionalBoolean("treat_zero_length_objects_as_redirects").right
     } yield {
       gzip_zopfli.foreach(_ => logger.info(
@@ -70,6 +70,7 @@ object Site {
         cache_control,
         gzip,
         gzip_zopfli,
+        s3_key_prefix,
         ignore_on_server = ignore_on_server,
         exclude_from_upload = exclude_from_upload,
         s3_reduced_redundancy,
data/src/main/scala/s3/website/model/push.scala
CHANGED
@@ -21,11 +21,11 @@ object Encoding {
   case class Gzip()
   case class Zopfli()
 
-  def encodingOnS3(s3Key:
+  def encodingOnS3(s3Key: S3Key)(implicit config: Config): Option[Either[Gzip, Zopfli]] =
     config.gzip.flatMap { (gzipSetting: Either[Boolean, Seq[String]]) =>
       val shouldZipThisFile = gzipSetting.fold(
-        shouldGzip => defaultGzipExtensions exists s3Key.endsWith,
-        fileExtensions => fileExtensions exists s3Key.endsWith
+        shouldGzip => defaultGzipExtensions exists s3Key.key.endsWith,
+        fileExtensions => fileExtensions exists s3Key.key.endsWith
       )
       if (shouldZipThisFile && config.gzip_zopfli.isDefined)
         Some(Right(Zopfli()))
@@ -71,51 +71,21 @@ case class Upload(originalFile: File, uploadType: UploadType)(implicit site: Sit
     mimeType
   }
 
-  lazy val maxAge: Option[Int] =
-
-
-
-
-
-
-      .fold(
-        (seconds: Int) => Some(seconds),
-        (globs: GlobsSeq) => {
-          val matchingMaxAge = (glob: String, maxAge: Int) =>
-            rubyRuntime.evalScriptlet(
-              s"""|# encoding: utf-8
-                  |File.fnmatch('$glob', "$s3Key")""".stripMargin)
-              .toJava(classOf[Boolean])
-              .asInstanceOf[Boolean]
-          val fileGlobMatch = globs find Function.tupled(matchingMaxAge)
-          fileGlobMatch map (_._2)
-        }
-      )
-  }
-  }
+  lazy val maxAge: Option[Int] =
+    site.config.max_age.flatMap(
+      _ fold(
+        (maxAge: Int) => Some(maxAge),
+        (globs: S3KeyGlob[Int]) => globs.globMatch(s3Key)
+      )
+    )
 
-  lazy val cacheControl: Option[String] =
-
-
-
-
-
-
-      .fold(
-        (cacheCtrl: String) => Some(cacheCtrl),
-        (globs: GlobsSeq) => {
-          val matchingCacheControl = (glob: String, cacheControl: String) =>
-            rubyRuntime.evalScriptlet(
-              s"""|# encoding: utf-8
-                  |File.fnmatch('$glob', "$s3Key")""".stripMargin)
-              .toJava(classOf[Boolean])
-              .asInstanceOf[Boolean]
-          val fileGlobMatch = globs find Function.tupled(matchingCacheControl)
-          fileGlobMatch map (_._2)
-        }
-      )
-  }
-  }
+  lazy val cacheControl: Option[String] =
+    site.config.cache_control.flatMap(
+      _ fold(
+        (cacheCtrl: String) => Some(cacheCtrl),
+        (globs: S3KeyGlob[String]) => globs.globMatch(s3Key)
+      )
+    )
 
   /**
    * May throw an exception, so remember to call this in a Try or Future monad
@@ -158,15 +128,11 @@ object Files {
   }
 
   def listSiteFiles(implicit site: Site, logger: Logger) = {
-    def excludeFromUpload(s3Key:
+    def excludeFromUpload(s3Key: S3Key) = {
       val excludeByConfig = site.config.exclude_from_upload exists {
-        _.
-          // For backward compatibility, use Ruby regex matching
-          (exclusionRegex: String) => rubyRegexMatches(s3Key, exclusionRegex),
-          (exclusionRegexes: Seq[String]) => exclusionRegexes exists (rubyRegexMatches(s3Key, _))
-        )
+        _.s3KeyRegexes.exists(_ matches s3Key)
       }
-      val neverUpload = "s3_website.yml" :: ".env" :: Nil
+      val neverUpload = "s3_website.yml" :: ".env" :: Nil map (k => S3Key.build(k, site.config.s3_key_prefix))
       val doNotUpload = excludeByConfig || (neverUpload contains s3Key)
       if (doNotUpload) logger.debug(s"Excluded $s3Key from upload")
       doNotUpload
@@ -177,11 +143,11 @@ object Files {
     }
   }
 
-case class Redirect(s3Key:
+case class Redirect(s3Key: S3Key, redirectTarget: String, needsUpload: Boolean) {
   def uploadType = RedirectFile
 }
 
-private case class RedirectSetting(source:
+private case class RedirectSetting(source: S3Key, target: String)
 
 object Redirect {
   type Redirects = Future[Either[ErrorReport, Seq[Redirect]]]
@@ -191,7 +157,7 @@ object Redirect {
     val redirectSettings = config.redirects.fold(Nil: Seq[RedirectSetting]) { sourcesToTargets =>
       sourcesToTargets.foldLeft(Seq(): Seq[RedirectSetting]) {
         (redirects, sourceToTarget) =>
-          redirects :+ RedirectSetting(sourceToTarget._1,
+          redirects :+ RedirectSetting(sourceToTarget._1, applyRedirectRules(sourceToTarget._2))
       }
     }
     def redirectsWithExistsOnS3Info =
@@ -212,21 +178,22 @@ object Redirect {
       allConfiguredRedirects
   }
 
-  private def
+  private def applyRedirectRules(redirectTarget: String)(implicit config: Config) = {
     val isExternalRedirect = redirectTarget.matches("https?:\\/\\/.*")
     val isInSiteRedirect = redirectTarget.startsWith("/")
     if (isInSiteRedirect || isExternalRedirect)
       redirectTarget
     else
-      "
+      s"${config.s3_key_prefix.map(prefix => s"/$prefix").getOrElse("")}/$redirectTarget"
   }
 
   def apply(redirectSetting: RedirectSetting, needsUpload: Boolean): Redirect =
     Redirect(redirectSetting.source, redirectSetting.target, needsUpload)
 }
 
-case class S3File(s3Key:
+case class S3File(s3Key: S3Key, md5: MD5, size: Long)
 
 object S3File {
-  def apply(summary: S3ObjectSummary): S3File =
+  def apply(summary: S3ObjectSummary)(implicit site: Site): S3File =
+    S3File(S3Key.build(summary.getKey, None), summary.getETag, summary.getSize)
 }
data/src/main/scala/s3/website/package.scala
CHANGED
@@ -1,5 +1,7 @@
 package s3
 
+import s3.website.Ruby._
+
 import scala.concurrent.{ExecutionContextExecutor, Future}
 import scala.concurrent.duration.{TimeUnit, Duration}
 import s3.website.S3.{PushSuccessReport, PushFailureReport}
@@ -7,6 +9,8 @@ import com.amazonaws.AmazonServiceException
 import s3.website.model.{Config, Site}
 import java.io.File
 
+import scala.util.matching.Regex
+
 package object website {
   trait Report {
     def reportMessage: String
@@ -52,7 +56,43 @@ package object website {
     def force: Boolean
   }
 
-
+  case class S3KeyRegex(keyRegex: Regex) {
+    def matches(s3Key: S3Key) = rubyRegexMatches(s3Key.key, keyRegex.pattern.pattern())
+  }
+
+  trait S3Key {
+    val key: String
+    override def toString = key
+  }
+
+  object S3Key {
+    def prefix(s3_key_prefix: Option[String]) = s3_key_prefix.map(prefix => if (prefix.endsWith("/")) prefix else prefix + "/").getOrElse("")
+
+    def isIgnoredBecauseOfPrefix(s3Key: S3Key)(implicit site: Site) = s3Key.key.startsWith(prefix(site.config.s3_key_prefix))
+
+    case class S3KeyClass(key: String) extends S3Key
+    def build(key: String, s3_key_prefix: Option[String]): S3Key = S3KeyClass(prefix(s3_key_prefix) + key)
+  }
+
+  case class S3KeyGlob[T](globs: Map[String, T]) {
+    def globMatch(s3Key: S3Key): Option[T] = {
+      def respectMostSpecific(globs: Map[String, T]) = globs.toSeq.sortBy(_._1.length).reverse
+      val matcher = (glob: String, value: T) =>
+        rubyRuntime.evalScriptlet(
+          s"""|# encoding: utf-8
+              |File.fnmatch('$glob', "$s3Key")""".stripMargin)
+          .toJava(classOf[Boolean])
+          .asInstanceOf[Boolean]
+      val fileGlobMatch = respectMostSpecific(globs) find Function.tupled(matcher)
+      fileGlobMatch map (_._2)
+    }
+  }
+
+  case class S3KeyRegexes(s3KeyRegexes: Seq[S3KeyRegex]) {
+    def matches(s3Key: S3Key) = s3KeyRegexes exists (
+      (keyRegex: S3KeyRegex) => keyRegex matches s3Key
+    )
+  }
 
   type UploadDuration = Long
 
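To illustrate the new key abstraction added above, here is a self-contained approximation of `S3Key.build` and the most-specific-glob-wins matching of `S3KeyGlob`. The gem delegates glob matching to Ruby's `File.fnmatch` through JRuby; this sketch substitutes `java.nio`'s glob matcher purely to stay runnable on its own.

```scala
import java.nio.file.{FileSystems, Paths}

object S3KeySketch {
  // Approximates S3Key.prefix/build: normalise the prefix to end with "/" and prepend it.
  def buildKey(key: String, s3KeyPrefix: Option[String]): String = {
    val prefix = s3KeyPrefix.map(p => if (p.endsWith("/")) p else p + "/").getOrElse("")
    prefix + key
  }

  // Approximates S3KeyGlob.globMatch: the longest (most specific) matching glob wins.
  def globMatch[T](globs: Map[String, T], key: String): Option[T] =
    globs.toSeq.sortBy(_._1.length).reverse.collectFirst {
      case (glob, value)
        if FileSystems.getDefault.getPathMatcher(s"glob:$glob").matches(Paths.get(key)) => value
    }

  def main(args: Array[String]): Unit = {
    println(buildKey("index.html", Some("production")))   // production/index.html
    println(buildKey("index.html", Some("production/")))  // production/index.html
    println(globMatch(Map("*.js" -> 33, "assets/**/*.js" -> 90), "assets/lib/jquery.js")) // Some(90)
  }
}
```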
data/src/test/scala/s3/website/S3WebsiteSpec.scala
CHANGED
@@ -83,6 +83,14 @@ class S3WebsiteSpec extends Specification {
       noUploadsOccurred must beTrue
     }
 
+    "not upload a file if it has not changed and s3_key_prefix is defined" in new BasicSetup {
+      config = "s3_key_prefix: test"
+      setLocalFileWithContent(("index.html", "<div>hello</div>"))
+      setS3File("test/index.html", md5Hex("<div>hello</div>"))
+      push()
+      noUploadsOccurred must beTrue
+    }
+
     "detect a changed file even though another file has the same contents as the changed file" in new BasicSetup {
       setLocalFilesWithContent(("1.txt", "foo"), ("2.txt", "foo"))
       setS3File("1.txt", md5Hex("bar"))
@@ -110,6 +118,27 @@ class S3WebsiteSpec extends Specification {
       sentDelete must equalTo("old.html")
     }
 
+    "delete files that match the s3_key_prefix" in new BasicSetup {
+      config = "s3_key_prefix: production"
+      setS3File("production/old.html", md5Hex("<h1>old text</h1>"))
+      push()
+      sentDelete must equalTo("production/old.html")
+    }
+
+    "retain files that do not match the s3_key_prefix" in new BasicSetup {
+      config = "s3_key_prefix: production"
+      setS3File("old.html", md5Hex("<h1>old text</h1>"))
+      push()
+      noDeletesOccurred
+    }
+
+    "retain files that do not match the s3_key_prefix" in new BasicSetup {
+      config = "s3_key_prefix: test"
+      setS3File("test1.html")
+      push()
+      noDeletesOccurred
+    }
+
     "try again if the upload fails" in new BasicSetup {
       setLocalFile("index.html")
       uploadFailsAndThenSucceeds(howManyFailures = 5)
@@ -230,6 +259,17 @@ class S3WebsiteSpec extends Specification {
       push()
       sentInvalidationRequest.getInvalidationBatch.getPaths.getItems.toSeq.sorted must equalTo(("/" :: "/maybe-index.html" :: Nil).sorted)
     }
+
+    "work with s3_key_prefix" in new BasicSetup {
+      config = """
+        |cloudfront_distribution_id: EGM1J2JJX9Z
+        |s3_key_prefix: production
+      """.stripMargin
+      setLocalFile("index.html")
+      setOutdatedS3Keys("production/index.html")
+      push()
+      sentInvalidationRequest.getInvalidationBatch.getPaths.getItems.toSeq.sorted must equalTo(("/production/index.html" :: Nil).sorted)
+    }
   }
 
   "cloudfront_invalidate_root: true" should {
@@ -255,6 +295,20 @@ class S3WebsiteSpec extends Specification {
       sentInvalidationRequest.getInvalidationBatch.getPaths.getItems.toSeq must contain("/index.html")
     }
 
+    "treat the s3_key_prefix as the root path" in new BasicSetup {
+      config = """
+        |cloudfront_distribution_id: EGM1J2JJX9Z
+        |cloudfront_invalidate_root: true
+        |s3_key_prefix: test
+      """.stripMargin
+      setLocalFile("articles/index.html")
+      setOutdatedS3Keys("test/articles/index.html")
+      push()
+      sentInvalidationRequest.getInvalidationBatch.getPaths.getItems.toSeq.sorted must equalTo(
+        ("/test/index.html" :: "/test/articles/" :: Nil).sorted
+      )
+    }
+
     "not invalidate anything if there was nothing to push" in new BasicSetup {
       config = """
         |cloudfront_distribution_id: EGM1J2JJX9Z
@@ -333,12 +387,35 @@ class S3WebsiteSpec extends Specification {
     }
   }
 
+  "s3_key_prefix in config" should {
+    "apply the prefix into all the S3 keys" in new BasicSetup {
+      config = "s3_key_prefix: production"
+      setLocalFile("index.html")
+      push()
+      sentPutObjectRequest.getKey must equalTo("production/index.html")
+    }
+
+    "work with slash" in new BasicSetup {
+      config = "s3_key_prefix: production/"
+      setLocalFile("index.html")
+      push()
+      sentPutObjectRequest.getKey must equalTo("production/index.html")
+    }
+  }
+
   "s3_website.yml file" should {
     "never be uploaded" in new BasicSetup {
       setLocalFile("s3_website.yml")
       push()
       noUploadsOccurred must beTrue
     }
+
+    "never be uploaded even when s3_key_prefix is defined" in new BasicSetup {
+      config = "s3_key_prefix: production"
+      setLocalFile("s3_website.yml")
+      push()
+      noUploadsOccurred must beTrue
+    }
   }
 
   ".env file" should { // The .env file is the https://github.com/bkeepers/dotenv file
@@ -347,6 +424,13 @@ class S3WebsiteSpec extends Specification {
       push()
       noUploadsOccurred must beTrue
     }
+
+    "never be uploaded even when s3_key_prefix is defined" in new BasicSetup {
+      config = "s3_key_prefix: production"
+      setLocalFile(".env")
+      push()
+      noUploadsOccurred must beTrue
+    }
   }
 
   "exclude_from_upload: string" should {
@@ -356,6 +440,16 @@ class S3WebsiteSpec extends Specification {
       push()
       noUploadsOccurred must beTrue
     }
+
+    "work with s3_key_prefix" in new BasicSetup {
+      config = """
+        |s3_key_prefix: production
+        |exclude_from_upload: hello.txt
+      """.stripMargin
+      setLocalFile("hello.txt")
+      push()
+      noUploadsOccurred must beTrue
+    }
   }
 
   """
@@ -373,6 +467,17 @@ class S3WebsiteSpec extends Specification {
       push()
       noUploadsOccurred must beTrue
     }
+
+    "work with s3_key_prefix" in new BasicSetup {
+      config = """
+        |s3_key_prefix: production
+        |exclude_from_upload:
+        |- hello.txt
+      """.stripMargin
+      setLocalFile("hello.txt")
+      push()
+      noUploadsOccurred must beTrue
+    }
   }
 
   "ignore_on_server: value" should {
@@ -389,6 +494,16 @@ class S3WebsiteSpec extends Specification {
       push()
       noDeletesOccurred must beTrue
     }
+
+    "work with s3_key_prefix" in new BasicSetup {
+      config = """
+        |s3_key_prefix: production
+        |ignore_on_server: hello.txt
+      """.stripMargin
+      setS3File("hello.txt")
+      push()
+      noDeletesOccurred must beTrue
+    }
   }
 
   "ignore_on_server: _DELETE_NOTHING_ON_THE_S3_BUCKET_" should {
@@ -400,6 +515,16 @@ class S3WebsiteSpec extends Specification {
       push()
       noDeletesOccurred
     }
+
+    "work with s3_key_prefix" in new BasicSetup {
+      config = s"""
+        |s3_key_prefix: production
+        |ignore_on_server: $DELETE_NOTHING_MAGIC_WORD
+      """.stripMargin
+      setS3File("file.txt")
+      push()
+      noDeletesOccurred
+    }
   }
 
   """
@@ -426,6 +551,17 @@ class S3WebsiteSpec extends Specification {
       push()
       noDeletesOccurred must beTrue
     }
+
+    "work with s3_key_prefix" in new BasicSetup {
+      config = """
+        |s3_key_prefix: production
+        |ignore_on_server:
+        |- hello.*
+      """.stripMargin
+      setS3File("hello.txt")
+      push()
+      noDeletesOccurred must beTrue
+    }
   }
 
   "error message" should {
@@ -485,6 +621,17 @@ class S3WebsiteSpec extends Specification {
       sentPutObjectRequest.getMetadata.getCacheControl must equalTo("public, no-transform, max-age=1200, s-maxage=1200")
     }
 
+    "work with s3_key_prefix" in new BasicSetup {
+      config =
+        """
+          |cache_control: public, no-transform, max-age=1200, s-maxage=1200
+          |s3_key_prefix: foo
+        """.stripMargin
+      setLocalFile("index.html")
+      push()
+      sentPutObjectRequest.getMetadata.getCacheControl must equalTo("public, no-transform, max-age=1200, s-maxage=1200")
+    }
+
     "should take precedence over max_age" in new BasicSetup {
       config = """
        |max_age: 120
@@ -557,6 +704,29 @@ class S3WebsiteSpec extends Specification {
       setLocalFile("tags/笔记/index.html")
       push() must equalTo(0)
     }
+
+    "have overlapping definitions in the glob, and then the most specific glob will win" in new BasicSetup {
+      config = """
+        |cache_control:
+        |  "*.js": no-cache, no-store
+        |  "assets/**/*.js": public, must-revalidate, max-age=120
+      """.stripMargin
+      setLocalFile("assets/lib/jquery.js")
+      push()
+      sentPutObjectRequest.getMetadata.getCacheControl must equalTo("public, must-revalidate, max-age=120")
+    }
+
+    "work with s3_key_prefix" in new BasicSetup {
+      config =
+        """
+          |cache_control:
+          |  "*.html": public, no-transform, max-age=1200, s-maxage=1200
+          |s3_key_prefix: foo
+        """.stripMargin
+      setLocalFile("index.html")
+      push()
+      sentPutObjectRequest.getMetadata.getCacheControl must equalTo("public, no-transform, max-age=1200, s-maxage=1200")
+    }
   }
 
   "cache control" can {
@@ -575,6 +745,17 @@ class S3WebsiteSpec extends Specification {
       sentPutObjectRequest.getMetadata.getCacheControl must equalTo("max-age=60")
     }
 
+    "work with s3_key_prefix" in new BasicSetup {
+      config =
+        """
+          |max_age: 60
+          |s3_key_prefix: test
+        """.stripMargin
+      setLocalFile("index.html")
+      push()
+      sentPutObjectRequest.getMetadata.getCacheControl must equalTo("max-age=60")
+    }
+
     "supports all valid URI characters in the glob setting" in new BasicSetup {
       config = """
         |max_age:
@@ -631,20 +812,41 @@ class S3WebsiteSpec extends Specification {
       setLocalFile("tags/笔记/index.html")
       push() must equalTo(0)
     }
-  }
 
-
+    "have overlapping definitions in the glob, and then the most specific glob will win" in new BasicSetup {
+      config = """
+        |max_age:
+        |  "*.js": 33
+        |  "assets/**/*.js": 90
+      """.stripMargin
+      setLocalFile("assets/lib/jquery.js")
+      push()
+      sentPutObjectRequest.getMetadata.getCacheControl must equalTo("max-age=90")
+    }
+
     "respect the more specific glob" in new BasicSetup {
       config = """
-
-
-
-
+        |max_age:
+        |  "assets/*": 150
+        |  "assets/*.gif": 86400
+      """.stripMargin
       setLocalFiles("assets/jquery.js", "assets/picture.gif")
       push()
       sentPutObjectRequests.find(_.getKey == "assets/jquery.js").get.getMetadata.getCacheControl must equalTo("max-age=150")
       sentPutObjectRequests.find(_.getKey == "assets/picture.gif").get.getMetadata.getCacheControl must equalTo("max-age=86400")
     }
+
+    "work with s3_key_prefix" in new BasicSetup {
+      config =
+        """
+          |max_age:
+          |  "*.html": 60
+          |s3_key_prefix: test
+        """.stripMargin
+      setLocalFile("index.html")
+      push()
+      sentPutObjectRequest.getMetadata.getCacheControl must equalTo("max-age=60")
+    }
   }
 
   "s3_reduced_redundancy: true in config" should {
@@ -675,6 +877,28 @@ class S3WebsiteSpec extends Specification {
       sentPutObjectRequest.getRedirectLocation must equalTo("/index.html")
     }
 
+    "refer to site root when the s3_key_prefix is defined and the redirect target starts with a slash" in new BasicSetup {
+      config = """
+        |s3_key_prefix: production
+        |redirects:
+        |  index.php: /index.html
+      """.stripMargin
+      push()
+      sentPutObjectRequest.getKey must equalTo("production/index.php")
+      sentPutObjectRequest.getRedirectLocation must equalTo("/index.html")
+    }
+
+    "use s3_key_prefix as the root when the redirect target does not start with a slash" in new BasicSetup {
+      config = """
+        |s3_key_prefix: production
+        |redirects:
+        |  index.php: index.html
+      """.stripMargin
+      push()
+      sentPutObjectRequest.getKey must equalTo("production/index.php")
+      sentPutObjectRequest.getRedirectLocation must equalTo("/production/index.html")
+    }
+
     "add slash to the redirect target" in new BasicSetup {
       config = """
         |redirects:
@@ -987,7 +1211,7 @@ class S3WebsiteSpec extends Specification {
 
   def setS3Files(s3Files: S3File*) {
     s3Files.foreach { s3File =>
-      setS3File(s3File.s3Key, s3File.md5)
+      setS3File(s3File.s3Key.key, s3File.md5)
     }
   }
 
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: s3_website
 version: !ruby/object:Gem::Version
-  version: 2.10.0
+  version: 2.11.0
 platform: ruby
 authors:
 - Lauri Lehmijoki
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-
+date: 2015-07-27 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: thor