s3_website_revived 4.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +15 -0
- data/.travis.yml +5 -0
- data/Gemfile +3 -0
- data/LICENSE +42 -0
- data/README.md +591 -0
- data/Rakefile +2 -0
- data/additional-docs/debugging.md +21 -0
- data/additional-docs/development.md +29 -0
- data/additional-docs/example-configurations.md +113 -0
- data/additional-docs/running-from-ec2-with-dropbox.md +6 -0
- data/additional-docs/setting-up-aws-credentials.md +52 -0
- data/assembly.sbt +3 -0
- data/bin/s3_website +285 -0
- data/build.sbt +48 -0
- data/changelog.md +596 -0
- data/lib/s3_website/version.rb +3 -0
- data/lib/s3_website.rb +7 -0
- data/project/assembly.sbt +1 -0
- data/project/build.properties +1 -0
- data/project/plugins.sbt +1 -0
- data/release +41 -0
- data/resources/configuration_file_template.yml +67 -0
- data/resources/s3_website.jar.md5 +1 -0
- data/s3_website-4.0.0.jar +0 -0
- data/s3_website.gemspec +34 -0
- data/sbt +3 -0
- data/src/main/resources/log4j.properties +6 -0
- data/src/main/scala/s3/website/ByteHelper.scala +18 -0
- data/src/main/scala/s3/website/CloudFront.scala +144 -0
- data/src/main/scala/s3/website/Logger.scala +67 -0
- data/src/main/scala/s3/website/Push.scala +246 -0
- data/src/main/scala/s3/website/Ruby.scala +14 -0
- data/src/main/scala/s3/website/S3.scala +239 -0
- data/src/main/scala/s3/website/UploadHelper.scala +76 -0
- data/src/main/scala/s3/website/model/Config.scala +249 -0
- data/src/main/scala/s3/website/model/S3Endpoint.scala +35 -0
- data/src/main/scala/s3/website/model/Site.scala +159 -0
- data/src/main/scala/s3/website/model/push.scala +225 -0
- data/src/main/scala/s3/website/model/ssg.scala +30 -0
- data/src/main/scala/s3/website/package.scala +182 -0
- data/src/test/scala/s3/website/AwsSdkSpec.scala +15 -0
- data/src/test/scala/s3/website/ConfigSpec.scala +150 -0
- data/src/test/scala/s3/website/S3EndpointSpec.scala +15 -0
- data/src/test/scala/s3/website/S3WebsiteSpec.scala +1480 -0
- data/src/test/scala/s3/website/UnitTest.scala +11 -0
- data/vagrant/Vagrantfile +25 -0
- metadata +195 -0
@@ -0,0 +1,239 @@
|
|
1
|
+
package s3.website
|
2
|
+
|
3
|
+
import s3.website.ErrorReport.errorMessage
|
4
|
+
import s3.website.model._
|
5
|
+
import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client}
|
6
|
+
import com.amazonaws.services.s3.model._
|
7
|
+
import scala.collection.JavaConversions._
|
8
|
+
import scala.concurrent.{ExecutionContextExecutor, Future}
|
9
|
+
import com.amazonaws.services.s3.model.StorageClass.ReducedRedundancy
|
10
|
+
import s3.website.ByteHelper.humanReadableByteCount
|
11
|
+
import scala.concurrent.duration.TimeUnit
|
12
|
+
import java.util.concurrent.TimeUnit.SECONDS
|
13
|
+
import s3.website.S3.SuccessfulUpload.humanizeUploadSpeed
|
14
|
+
import java.io.FileInputStream
|
15
|
+
import s3.website.model.Config.awsCredentials
|
16
|
+
import scala.util.Try
|
17
|
+
|
18
|
+
object S3 {

  /** Pushes (or, on retry, re-pushes) a redirect object to the S3 bucket. */
  def uploadRedirect(redirect: Redirect, a: Attempt = 1)
                    (implicit config: Config, s3Settings: S3Setting, pushOptions: PushOptions, executor: ExecutionContextExecutor, logger: Logger) =
    uploadToS3(Right(redirect))

  /** Pushes a local site file to the S3 bucket. */
  def uploadFile(up: Upload, a: Attempt = 1)
                (implicit config: Config, s3Settings: S3Setting, pushOptions: PushOptions, executor: ExecutionContextExecutor, logger: Logger) =
    uploadToS3(Left(up))

  /**
   * Pushes either a file upload or a redirect to the bucket.
   *
   * On a dry run the PUT request is built but never sent, and no upload duration is
   * recorded. Failures are retried via `retry(a)` and surface as a `FailedUpload`.
   */
  def uploadToS3(source: Either[Upload, Redirect], a: Attempt = 1)
                (implicit config: Config, s3Settings: S3Setting, pushOptions: PushOptions, executor: ExecutionContextExecutor, logger: Logger):
  Future[Either[FailedUpload, SuccessfulUpload]] =
    Future {
      // .get is safe here in the sense that a failure is caught by the recoverWith below
      val putObjectRequest = toPutObjectRequest(source).get
      val uploadDuration =
        if (pushOptions.dryRun) None
        else Some(recordUploadDuration(putObjectRequest, s3Settings.s3Client(config) putObject putObjectRequest))
      val report = SuccessfulUpload(
        source.fold(_.s3Key, _.s3Key),
        source.fold(
          upload => Left(SuccessfulNewOrCreatedDetails(upload.uploadType, upload.uploadFile.get.length(), uploadDuration)),
          redirect => Right(SuccessfulRedirectDetails(redirect.uploadType, redirect.redirectTarget))
        ),
        putObjectRequest
      )
      logger.info(report)
      Right(report)
    } recoverWith retry(a)(
      createFailureReport = error => FailedUpload(source.fold(_.s3Key, _.s3Key), error),
      retryAction = newAttempt => this.uploadToS3(source, newAttempt)
    )

  /** Deletes the object at `s3Key` (skipped on dry runs), retrying on failure. */
  def delete(s3Key: S3Key, a: Attempt = 1)
            (implicit config: Config, s3Settings: S3Setting, pushOptions: PushOptions, executor: ExecutionContextExecutor, logger: Logger):
  Future[Either[FailedDelete, SuccessfulDelete]] =
    Future {
      if (!pushOptions.dryRun) s3Settings.s3Client(config) deleteObject(config.s3_bucket, s3Key.key)
      val report = SuccessfulDelete(s3Key)
      logger.info(report)
      Right(report)
    } recoverWith retry(a)(
      createFailureReport = error => FailedDelete(s3Key, error),
      retryAction = newAttempt => this.delete(s3Key, newAttempt)
    )

  /**
   * Builds the PutObjectRequest for an upload or a redirect.
   *
   * For uploads: resolves content length/type, optional gzip content encoding, the
   * Cache-Control header (an explicit cache_control wins over max_age) and the
   * Reduced Redundancy storage class. For redirects: a zero-length object whose
   * Cache-Control forces clients to re-check the redirect.
   */
  def toPutObjectRequest(source: Either[Upload, Redirect])(implicit config: Config, logger: Logger): Try[PutObjectRequest] =
    source.fold(
      upload =>
        for {
          uploadFile <- upload.uploadFile
          contentType <- upload.contentType
        } yield {
          val md = new ObjectMetadata()
          md setContentLength uploadFile.length
          md setContentType contentType
          upload.encodingOnS3.map(_ => "gzip") foreach md.setContentEncoding
          val cacheControl: Option[String] = (upload.maxAge, upload.cacheControl) match {
            case (maxAge: Some[Int], cacheCtrl: Some[String]) =>
              logger.warn("Overriding the max_age setting with the cache_control setting")
              cacheCtrl
            case (_, cacheCtrl: Some[String]) =>
              cacheCtrl
            case (maxAgeSeconds: Some[Int], None) => // fixed: was `Some[int]` — `int` is not a Scala type
              maxAgeSeconds.map({
                case seconds if seconds == 0 => s"no-cache, max-age=0"
                case seconds => s"max-age=$seconds"
              })
            case (None, None) => None
          }
          cacheControl foreach { md.setCacheControl }
          val req = new PutObjectRequest(config.s3_bucket, upload.s3Key.key, new FileInputStream(uploadFile), md)
          config.s3_reduced_redundancy.filter(_ == true) foreach (_ => req setStorageClass ReducedRedundancy)
          req
        }
      ,
      redirect => {
        val req = new PutObjectRequest(config.s3_bucket, redirect.s3Key.key, redirect.redirectTarget)
        req.setMetadata({
          val md = new ObjectMetadata()
          md.setContentLength(0) // Otherwise the AWS SDK will log a warning
          /*
           * Instruct HTTP clients to always re-check the redirect. The 301 status code may override this, though.
           * This is for the sake of simplicity.
           */
          md.setCacheControl("max-age=0, no-cache")
          md
        })
        Try(req)
      }
    )

  /** Runs `f` (the actual PUT) and returns the wall-clock duration in milliseconds. */
  def recordUploadDuration(putObjectRequest: PutObjectRequest, f: => Unit): UploadDuration = {
    val start = System.currentTimeMillis()
    f
    System.currentTimeMillis - start
  }

  /** Default S3 client factory, wired into `S3Setting.s3Client`. */
  def awsS3Client(config: Config) = new AmazonS3Client(awsCredentials(config))

  /**
   * Recursively lists every object on the bucket, following the listing's next marker,
   * and filters the result by the optional s3_key_prefix.
   */
  def resolveS3Files(nextMarker: Option[String] = None, alreadyResolved: Seq[S3File] = Nil, attempt: Attempt = 1)
                    (implicit site: Site, s3Settings: S3Setting, ec: ExecutionContextExecutor, logger: Logger, pushOptions: PushOptions):
  Future[Either[ErrorReport, Seq[S3File]]] = Future {
    logger.debug(nextMarker.fold
      ("Querying S3 files")
      {m => s"Querying more S3 files (starting from $m)"}
    )
    val objects: ObjectListing = s3Settings.s3Client(site.config).listObjects({
      val req = new ListObjectsRequest()
      req.setBucketName(site.config.s3_bucket)
      nextMarker.foreach(req.setMarker)
      req
    })
    objects
  } flatMap { (objects: ObjectListing) =>

    /**
     * We could filter the keys by prefix already on S3, but unfortunately s3_website test infrastructure does not currently support testing of that.
     * Hence fetch all the keys from S3 and then filter by s3_key_prefix.
     */
    def matchesPrefix(os: S3ObjectSummary) = site.config.s3_key_prefix.fold(true)(prefix => os.getKey.startsWith(prefix))

    val s3Files = alreadyResolved ++ (objects.getObjectSummaries.filter(matchesPrefix).toIndexedSeq.toSeq map (S3File(_)))
    Option(objects.getNextMarker)
      .fold(Future(Right(s3Files)): Future[Either[ErrorReport, Seq[S3File]]]) // We've received all the S3 keys from the bucket
      { nextMarker => // There are more S3 keys on the bucket. Fetch them.
        resolveS3Files(Some(nextMarker), s3Files, attempt = attempt)
      }
  } recoverWith retry(attempt)(
    createFailureReport = error => ErrorReport(s"Failed to fetch an object listing (${error.getMessage})"),
    retryAction = nextAttempt => resolveS3Files(nextMarker, alreadyResolved, nextAttempt)
  )

  type S3FilesAndUpdates = (ErrorOrS3Files, UpdateFutures)
  type S3FilesAndUpdatesFuture = Future[S3FilesAndUpdates]
  type ErrorOrS3FilesAndUpdates = Future[Either[ErrorReport, S3FilesAndUpdates]]
  type UpdateFutures = Seq[Either[ErrorReport, Future[PushErrorOrSuccess]]]
  type ErrorOrS3Files = Either[ErrorReport, Seq[S3File]]

  sealed trait PushFailureReport extends ErrorReport
  sealed trait PushSuccessReport extends SuccessReport {
    def s3Key: S3Key
  }

  case class SuccessfulRedirectDetails(uploadType: UploadType, redirectTarget: String)
  case class SuccessfulNewOrCreatedDetails(uploadType: UploadType, uploadSize: Long, uploadDuration: Option[Long])

  /** Report emitted after a successful PUT, rendered on the console by the logger. */
  case class SuccessfulUpload(s3Key: S3Key,
                              details: Either[SuccessfulNewOrCreatedDetails, SuccessfulRedirectDetails],
                              putObjectRequest: PutObjectRequest)
                             (implicit pushOptions: PushOptions, logger: Logger) extends PushSuccessReport {
    def reportMessage =
      details.fold(
        newOrCreatedDetails => s"${newOrCreatedDetails.uploadType.pushAction.renderVerb} $s3Key ($reportDetails)",
        redirectDetails => s"${redirectDetails.uploadType.pushAction.renderVerb} $s3Key to ${redirectDetails.redirectTarget}"
      )

    /** Pipe-separated summary of the request's metadata plus (in verbose mode) size and speed. */
    def reportDetails = {
      val md = putObjectRequest.getMetadata
      val detailFragments: Seq[Option[String]] =
        (
          md.getCacheControl ::
          md.getContentType ::
          md.getContentEncoding ::
          putObjectRequest.getStorageClass ::
          Nil map (Option(_)) // AWS SDK may return nulls
        ) :+ uploadSizeForHumans :+ uploadSpeedForHumans
      detailFragments.collect {
        case Some(detailFragment) => detailFragment
      }.mkString(" | ")
    }

    // Defined only for file uploads; redirects have no meaningful size.
    lazy val uploadSize = details.fold(
      newOrCreatedDetails => Some(newOrCreatedDetails.uploadSize),
      redirectDetails => None
    )

    lazy val uploadSizeForHumans: Option[String] = uploadSize filter (_ => logger.verboseOutput) map humanReadableByteCount

    lazy val uploadSpeedForHumans: Option[String] =
      (for {
        dataSize <- uploadSize
        duration <- details.left.map(_.uploadDuration).left.toOption.flatten
      } yield {
        humanizeUploadSpeed(dataSize, duration)
      }) flatMap identity filter (_ => logger.verboseOutput)
  }

  object SuccessfulUpload {
    /**
     * Renders an upload speed such as "1.2 MB/s".
     * Returns None when the summed duration is zero (nothing meaningful to report).
     */
    def humanizeUploadSpeed(uploadedBytes: Long, uploadDurations: UploadDuration*): Option[String] = {
      val totalDurationMillis = uploadDurations.foldLeft(0L){ (memo, duration) =>
        memo + duration
      }
      if (totalDurationMillis > 0) {
        val bytesPerMillisecond = uploadedBytes / totalDurationMillis // integer division: sub-millisecond remainder is discarded
        // NOTE(review): scaling by the number of durations assumes they were measured concurrently — verify
        val bytesPerSecond = bytesPerMillisecond * 1000 * uploadDurations.length
        Some(humanReadableByteCount(bytesPerSecond) + "/s")
      } else {
        None
      }
    }
  }

  case class SuccessfulDelete(s3Key: S3Key)(implicit pushOptions: PushOptions) extends PushSuccessReport {
    def reportMessage = s"${Deleted.renderVerb} $s3Key"
  }

  case class FailedUpload(s3Key: S3Key, error: Throwable)(implicit logger: Logger) extends PushFailureReport {
    def reportMessage = errorMessage(s"Failed to upload $s3Key", error)
  }

  case class FailedDelete(s3Key: S3Key, error: Throwable)(implicit logger: Logger) extends PushFailureReport {
    def reportMessage = errorMessage(s"Failed to delete $s3Key", error)
  }

  type S3ClientProvider = (Config) => AmazonS3

  /** Retry/S3-client settings; the client provider is swappable for testing. */
  case class S3Setting(
    s3Client: S3ClientProvider = S3.awsS3Client,
    retryTimeUnit: TimeUnit = SECONDS
  ) extends RetrySetting
}
|
@@ -0,0 +1,76 @@
|
|
1
|
+
package s3.website
|
2
|
+
|
3
|
+
import s3.website.S3Key.isIgnoredBecauseOfPrefix
|
4
|
+
import s3.website.model.Files.listSiteFiles
|
5
|
+
import s3.website.model._
|
6
|
+
import s3.website.Ruby.rubyRegexMatches
|
7
|
+
import scala.concurrent.{ExecutionContextExecutor, Future}
|
8
|
+
import scala.util.{Failure, Success, Try}
|
9
|
+
import java.io.File
|
10
|
+
|
11
|
+
object UploadHelper {

  type FutureUploads = Future[Either[ErrorReport, Seq[Upload]]]

  /** Works out which local site files need to be pushed to the bucket. */
  def resolveUploads(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
                    (implicit site: Site, pushOptions: PushOptions, logger: Logger, executor: ExecutionContextExecutor): FutureUploads =
    resolveUploadsAgainstGetBucketResponse(s3FilesFuture)

  /**
   * Diffs the local site files against the bucket listing: files missing from the
   * bucket become NewFile uploads, files already present become FileUpdate uploads
   * (filtered down to MD5 mismatches unless --force is on).
   */
  private def resolveUploadsAgainstGetBucketResponse(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
                                                    (implicit site: Site,
                                                     pushOptions: PushOptions,
                                                     logger: Logger,
                                                     executor: ExecutionContextExecutor): FutureUploads =
    s3FilesFuture.map { errorOrS3Files =>
      errorOrS3Files.right.flatMap { remoteFiles =>
        Try {
          val remoteKeys = remoteFiles.map(_.s3Key).toSet
          val remoteKeysWithMd5 = remoteFiles.map(remoteFile => (remoteFile.s3Key, remoteFile.md5)).toSet
          val localFiles = listSiteFiles
          val existsOnS3 = (f: File) => remoteKeys contains site.resolveS3Key(f)
          val isChangedOnS3 = (upload: Upload) => !(remoteKeysWithMd5 contains (upload.s3Key, upload.md5.get))
          val (filesAlreadyOnS3, brandNewFiles) = localFiles partition existsOnS3
          val newUploads = brandNewFiles map (Upload(_, NewFile))
          val updateFilter = if (pushOptions.force) selectAllFiles else isChangedOnS3
          val changedUploads = filesAlreadyOnS3 map (Upload(_, FileUpdate)) filter updateFilter
          newUploads ++ changedUploads
        } match {
          case Success(uploads) => Right(uploads)
          case Failure(err)     => Left(ErrorReport(err))
        }
      }
    }

  /** Predicate that keeps every upload (used when --force is on). */
  val selectAllFiles = (upload: Upload) => true

  /**
   * Works out which remote keys should be deleted from the bucket: everything that is
   * neither a local file, a configured redirect, outside the s3_key_prefix, nor matched
   * by ignore_on_server. Returns no deletes at all when the delete-nothing magic word
   * appears in ignore_on_server.
   */
  def resolveDeletes(s3Files: Future[Either[ErrorReport, Seq[S3File]]], redirects: Seq[Redirect])
                    (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Future[Either[ErrorReport, Seq[S3Key]]] = {
    val deleteNothing = site.config.ignore_on_server exists { ignoreRegexes =>
      ignoreRegexes.s3KeyRegexes exists (_ matches S3Key.build(DELETE_NOTHING_MAGIC_WORD, site.config.s3_key_prefix))
    }
    if (deleteNothing) {
      logger.debug(s"Ignoring all files on the bucket, since the setting $DELETE_NOTHING_MAGIC_WORD is on.")
      Future(Right(Nil))
    } else {
      val localS3Keys = listSiteFiles map (site resolveS3Key)

      s3Files map { errorOrRemoteFiles: Either[ErrorReport, Seq[S3File]] =>
        for {
          remoteS3Keys <- errorOrRemoteFiles.right.map(_ map (_.s3Key)).right
        } yield {
          val keysIgnoredBecauseOf_s3_key_prefix = remoteS3Keys.filterNot(isIgnoredBecauseOfPrefix)
          val keysToRetain =
            (localS3Keys ++ (redirects map (_.s3Key)) ++ keysIgnoredBecauseOf_s3_key_prefix).toSet
          remoteS3Keys filterNot { s3Key =>
            val ignoreOnServer = site.config.ignore_on_server.exists(_ matches s3Key)
            if (ignoreOnServer) logger.debug(s"Ignoring $s3Key on server")
            (keysToRetain contains s3Key) || ignoreOnServer
          }
        }
      }
    }
  }

  val DELETE_NOTHING_MAGIC_WORD = "_DELETE_NOTHING_ON_THE_S3_BUCKET_"
}
|
@@ -0,0 +1,249 @@
|
|
1
|
+
package s3.website.model
|
2
|
+
|
3
|
+
import java.io.File
|
4
|
+
import java.util
|
5
|
+
|
6
|
+
import scala.util.matching.Regex
|
7
|
+
import scala.util.{Failure, Try}
|
8
|
+
import scala.collection.JavaConversions._
|
9
|
+
import s3.website.Ruby.rubyRuntime
|
10
|
+
import s3.website._
|
11
|
+
import com.amazonaws.auth.{AWSCredentialsProvider, BasicAWSCredentials, BasicSessionCredentials, AWSStaticCredentialsProvider, DefaultAWSCredentialsProviderChain, STSAssumeRoleSessionCredentialsProvider}
|
12
|
+
import com.amazonaws.auth.profile.ProfileCredentialsProvider
|
13
|
+
|
14
|
+
/**
 * The parsed contents of s3_website.yml.
 *
 * Field names intentionally mirror the YAML keys (snake_case), so each field maps
 * one-to-one to a configuration-file entry.
 */
case class Config(
  // Credentials: when s3_id/s3_secret are undefined, IAM Roles are used instead
  // (http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-roles.html)
  s3_id: Option[String],
  s3_secret: Option[String],
  // Optional AWS STS session token (http://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html)
  session_token: Option[String],
  // Optional AWS credentials profile, and an optional role ARN to assume with it
  profile: Option[String],
  profile_assume_role_arn: Option[String],
  // Target bucket and its region-specific website endpoint
  s3_bucket: String,
  s3_endpoint: S3Endpoint,
  site: Option[String],
  // Cache headers: a plain value applies to everything, a glob map applies per key pattern
  max_age: Option[Either[Int, S3KeyGlob[Int]]],
  cache_control: Option[Either[String, S3KeyGlob[String]]],
  // Gzip: either on/off for the default extensions, or an explicit extension list
  gzip: Option[Either[Boolean, Seq[String]]],
  gzip_zopfli: Option[Boolean],
  s3_key_prefix: Option[String],
  // Key regexes to leave untouched on the bucket / to skip when uploading
  ignore_on_server: Option[S3KeyRegexes],
  exclude_from_upload: Option[S3KeyRegexes],
  s3_reduced_redundancy: Option[Boolean],
  // CloudFront invalidation settings
  cloudfront_distribution_id: Option[String],
  cloudfront_invalidate_root: Option[Boolean],
  content_type: Option[S3KeyGlob[String]],
  redirects: Option[Map[S3Key, String]],
  concurrency_level: Int,
  cloudfront_wildcard_invalidation: Option[Boolean],
  treat_zero_length_objects_as_redirects: Option[Boolean]
)
|
39
|
+
|
40
|
+
object Config {

  /**
   * Resolves the AWS credentials provider from the configuration.
   *
   * Precedence (first complete combination wins):
   *  1. s3_id + s3_secret + session_token -> static session credentials
   *  2. s3_id + s3_secret                 -> static credentials
   *  3. profile + profile_assume_role_arn -> STS assume-role backed by the named profile
   *  4. profile                           -> the named profile
   *  5. otherwise                         -> DefaultAWSCredentialsProviderChain (env vars, IAM role, ...)
   */
  def awsCredentials(config: Config): AWSCredentialsProvider = {
    val credentialsFromConfigFile: Option[AWSCredentialsProvider] =
      (
        for {
          s3_id <- config.s3_id
          s3_secret <- config.s3_secret
          session_token <- config.session_token
        } yield new AWSStaticCredentialsProvider(new BasicSessionCredentials(s3_id, s3_secret, session_token))
      ) orElse (
        for {
          s3_id <- config.s3_id
          s3_secret <- config.s3_secret
        } yield new AWSStaticCredentialsProvider(new BasicAWSCredentials(s3_id, s3_secret))
      ) orElse (
        for {
          profile <- config.profile
          profile_assume_role_arn <- config.profile_assume_role_arn
        } yield new STSAssumeRoleSessionCredentialsProvider.Builder(profile_assume_role_arn, "s3_website_assume_role_session")
          .withLongLivedCredentialsProvider(new ProfileCredentialsProvider(profile)).build()
      ) orElse (
        for {
          profile <- config.profile
        } yield new ProfileCredentialsProvider(profile)
      )
    credentialsFromConfigFile getOrElse new DefaultAWSCredentialsProviderChain
  }

  /**
   * Reads `key` as either a boolean or a list of strings (used by the gzip setting).
   * A value of any other shape surfaces as a Left(ErrorReport).
   */
  def loadOptionalBooleanOrStringSeq(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[Boolean, Seq[String]]]] = {
    val yamlValue = for {
      optionalValue <- loadOptionalValue(key)
    } yield {
      // A non-matching value throws MatchError inside the Try, which becomes the Left below.
      Right(optionalValue.map {
        case value: Boolean => Left(value)
        case value: java.util.List[_] => Right(value.asInstanceOf[java.util.List[String]].toIndexedSeq)
      })
    }

    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a boolean or [string] value"))
  }

  /** Reads `key` as one regex string or a list of regex strings and compiles them. */
  def loadOptionalS3KeyRegexes(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[S3KeyRegexes]] = {
    val yamlValue = for {
      valueOption <- loadOptionalValue(key)
    } yield {
      def toS3KeyRegexes(xs: Seq[String]) = S3KeyRegexes(xs map (str => str.r) map S3KeyRegex)
      Right(valueOption.map {
        case value: String => toS3KeyRegexes(value :: Nil)
        case value: java.util.List[_] => toS3KeyRegexes(value.asInstanceOf[java.util.List[String]].toIndexedSeq)
      })
    }

    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a string or [string] value"))
  }

  /** Reads max_age as either a single int or a (glob -> int) map. */
  def loadMaxAge(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[Int, S3KeyGlob[Int]]]] = {
    val key = "max_age"
    val yamlValue = for {
      maxAgeOption <- loadOptionalValue(key)
    } yield {
      // TODO the asInstanceOf cast below is unchecked – proper error handling is still missing
      Right(maxAgeOption.map {
        case maxAge: Int => Left(maxAge)
        case maxAge: java.util.Map[_, _] =>
          Right(S3KeyGlob(maxAge.asInstanceOf[util.Map[String, Int]].toMap))
      })
    }

    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have an int or (string -> int) value"))
  }

  /** Reads cache_control as either a single string or a (glob -> string) map. */
  def loadCacheControl(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[String, S3KeyGlob[String]]]] = {
    val key = "cache_control"
    val yamlValue = for {
      cacheControlOption <- loadOptionalValue(key)
    } yield {
      // TODO the asInstanceOf cast below is unchecked – proper error handling is still missing
      Right(cacheControlOption.map {
        case cacheControl: String => Left(cacheControl)
        case cacheControl: java.util.Map[_, _] =>
          Right(S3KeyGlob(cacheControl.asInstanceOf[util.Map[String, String]].toMap))
      })
    }

    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a string or (string -> string) value"))
  }

  /** Reads content_type as a (glob -> MIME type) map. */
  def loadContentType(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[S3KeyGlob[String]]] = {
    val key = "content_type"
    val yamlValue = for {
      contentTypeOption <- loadOptionalValue(key)
    } yield {
      // TODO the asInstanceOf cast below is unchecked – proper error handling is still missing
      Right(contentTypeOption.map { xs =>
        val globs: Map[String, String] = xs.asInstanceOf[util.Map[String, String]].toMap
        S3KeyGlob(globs)
      }
      )
    }

    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a string or (string -> string) value"))
  }

  /** Reads the optional s3_endpoint region string and resolves it to a website endpoint. */
  def loadEndpoint(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[S3Endpoint]] =
    loadOptionalString("s3_endpoint").right map { endpointString =>
      endpointString.map(S3Endpoint.fromString)
    }

  /** Reads the redirects map, prefixing each source key with the optional s3_key_prefix. */
  def loadRedirects(s3_key_prefix: Option[String])(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Map[S3Key, String]]] = {
    val key = "redirects"
    val yamlValue = for {
      redirectsOption <- loadOptionalValue(key)
      redirectsOption <- Try(redirectsOption.map(_.asInstanceOf[java.util.Map[String, String]].toMap))
    } yield Right(redirectsOption.map(
      redirects => redirects.map {
        case (redirectKey, redirectTarget) => (S3Key.build(redirectKey, s3_key_prefix), redirectTarget)
      }
    ))

    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a (string -> string) value"))
  }

  /** Reads `key` as a mandatory string; missing or non-string values become a Left. */
  def loadRequiredString(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, String] = {
    val yamlValue = for {
      valueOption <- loadOptionalValue(key)
      stringValue <- Try(valueOption.asInstanceOf[Option[String]].get) // .get: a missing key fails the Try, yielding the Left below
    } yield {
      Right(stringValue)
    }

    yamlValue getOrElse {
      Left(ErrorReport(s"The key $key has to have a string value"))
    }
  }

  /**
   * Shared implementation for the loadOptional{String,Boolean,Int} readers: reads `key`
   * and casts the value to T. `expectedType` ("a string" / "a boolean" / "an integer")
   * is interpolated into the error message on failure.
   */
  private def loadOptionalScalar[T](key: String, expectedType: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[T]] = {
    val yamlValueOption = for {
      valueOption <- loadOptionalValue(key)
      typedValueOption <- Try(valueOption.asInstanceOf[Option[T]])
    } yield {
      Right(typedValueOption)
    }

    yamlValueOption getOrElse {
      Left(ErrorReport(s"The key $key has to have $expectedType value"))
    }
  }

  def loadOptionalString(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[String]] =
    loadOptionalScalar[String](key, "a string")

  def loadOptionalBoolean(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Boolean]] =
    loadOptionalScalar[Boolean](key, "a boolean")

  def loadOptionalInt(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Int]] =
    loadOptionalScalar[Int](key, "an integer")

  /** Looks up `key` in the parsed YAML document; Failure when the document is not a map. */
  def loadOptionalValue(key: String)(implicit unsafeYaml: UnsafeYaml): Try[Option[_]] =
    Try {
      unsafeYaml.yamlObject.asInstanceOf[java.util.Map[String, _]].toMap get key
    }

  /** Evaluates ERB templating in the configuration file via the embedded Ruby runtime. */
  def erbEval(erbString: String, yamlConfig: S3_website_yml): Try[String] = Try {
    val erbStringWithoutComments = erbString.replaceAll("^\\s*#.*", "")
    rubyRuntime.evalScriptlet(
      s"""|# encoding: utf-8
          |require 'erb'
          |
          |str = <<-ERBSTR
          |$erbStringWithoutComments
          |ERBSTR
          |ERB.new(str).result
      """.stripMargin
    ).asJavaString()
  } match {
    case Failure(err) => Failure(new RuntimeException(s"Failed to parse ERB in $yamlConfig:\n${err.getMessage}"))
    case x => x
  }

  /** The raw SnakeYAML parse result; "unsafe" because its shape is unvalidated. */
  case class UnsafeYaml(yamlObject: AnyRef)

  /** Wrapper for the s3_website.yml file, printed as its path in error messages. */
  case class S3_website_yml(file: File) {
    override def toString = file.getPath
  }
}
|
@@ -0,0 +1,35 @@
|
|
1
|
+
package s3.website.model
|
2
|
+
|
3
|
+
/** The region-specific hostname serving the S3 static-website endpoint. */
case class S3Endpoint(
  s3WebsiteHostname: String
)

object S3Endpoint {
  /** us-east-1, the region S3 uses when none is configured. */
  def defaultEndpoint = S3Endpoint.fromString("us-east-1")

  // Regions created before ~2014 use the dash-separated website hostname format;
  // newer regions use the dot-separated format.
  val oldRegions = Seq(
    "us-east-1",
    "us-west-1",
    "us-west-2",
    "ap-southeast-1",
    "ap-southeast-2",
    "ap-northeast-1",
    "eu-west-1",
    "sa-east-1"
  )

  /**
   * Maps a region name to its static-website endpoint. The legacy alias "EU"
   * is treated as eu-west-1.
   */
  def fromString(region: String): S3Endpoint = region match {
    case "EU" => // legacy alias predating region-specific names
      fromString("eu-west-1")
    case _ =>
      val s3WebsiteHostname =
        if (oldRegions contains region)
          s"s3-website-$region.amazonaws.com"
        else
          s"s3-website.$region.amazonaws.com"
      S3Endpoint(s3WebsiteHostname = s3WebsiteHostname)
  }
}
|