s3_website_monadic 0.0.31 → 0.0.32
- checksums.yaml +4 -4
- data/changelog.md +2 -0
- data/lib/s3_website/version.rb +1 -1
- data/src/main/scala/s3/website/CloudFront.scala +8 -15
- data/src/main/scala/s3/website/Diff.scala +264 -25
- data/src/main/scala/s3/website/Logger.scala +61 -0
- data/src/main/scala/s3/website/Push.scala +69 -77
- data/src/main/scala/s3/website/S3.scala +81 -76
- data/src/main/scala/s3/website/model/Config.scala +8 -8
- data/src/main/scala/s3/website/model/S3Endpoint.scala +4 -2
- data/src/main/scala/s3/website/model/Site.scala +6 -3
- data/src/main/scala/s3/website/model/push.scala +72 -140
- data/src/main/scala/s3/website/model/ssg.scala +1 -1
- data/src/main/scala/s3/website/package.scala +23 -1
- data/src/test/scala/s3/website/S3WebsiteSpec.scala +64 -25
- metadata +3 -4
- data/src/main/scala/s3/website/Utils.scala +0 -108
- data/src/main/scala/s3/website/model/errors.scala +0 -9
data/src/main/scala/s3/website/Push.scala:

@@ -4,15 +4,13 @@ import s3.website.model.Site._
 import scala.concurrent.{ExecutionContextExecutor, Future, Await}
 import scala.concurrent.duration._
 import scala.language.postfixOps
-import s3.website.Diff.{…
+import s3.website.Diff.{resolveDeletes, resolveDiff}
 import s3.website.S3._
 import scala.concurrent.ExecutionContext.fromExecutor
 import java.util.concurrent.Executors.newFixedThreadPool
-import s3.website.model.LocalFile.resolveLocalFiles
-import scala.collection.parallel.ParSeq
 import java.util.concurrent.ExecutorService
 import s3.website.model._
-import s3.website.model.…
+import s3.website.model.FileUpdate
 import s3.website.model.NewFile
 import s3.website.S3.PushSuccessReport
 import scala.collection.mutable.ArrayBuffer
@@ -27,6 +25,7 @@ import scala.Int
 import java.io.File
 import com.lexicalscope.jewel.cli.CliFactory.parseArguments
 import s3.website.ByteHelper.humanReadableByteCount
+import s3.website.S3.SuccessfulUpload.humanizeUploadSpeed
 
 object Push {
 
@@ -83,27 +82,32 @@
     pushMode: PushMode
   ): ExitCode = {
     logger.info(s"${Deploy.renderVerb} ${site.rootDirectory}/* to ${site.config.s3_bucket}")
-    val utils = new Utils
-
     val redirects = Redirect.resolveRedirects
-    val …
+    val s3FilesFuture = resolveS3Files()
+    val redirectReports: PushReports = redirects.map(S3 uploadRedirect _) map (Right(_))
 
-    val errorsOrReports = for {
-      …
-      errorOrS3FilesAndUpdateFutures <- Await.result(resolveS3FilesAndUpdates(localFiles)(), 7 days).right
-      s3Files <- errorOrS3FilesAndUpdateFutures._1.right
+    val errorsOrReports: Either[ErrorReport, PushReports] = for {
+      diff <- resolveDiff(s3FilesFuture).right
     } yield {
-      val …
+      val newOrChangedReports: PushReports = diff.uploads.map { uploadBatch =>
+        uploadBatch.map(_.right.map(_.map(S3 uploadFile _)))
+      }.map (Await.result(_, 1 day)).foldLeft(Seq(): PushReports) { (memo: PushReports, res: Either[ErrorReport, Seq[Future[PushErrorOrSuccess]]]) =>
+        res.fold(
+          error => memo :+ Left(error),
+          (pushResults: Seq[Future[PushErrorOrSuccess]]) => memo ++ (pushResults map (Right(_)))
+        )
+      }
+      val deleteReports =
+        Await.result(resolveDeletes(diff, s3FilesFuture, redirects), 1 day).right.map { keysToDelete =>
+          keysToDelete map (S3 delete _)
+        }.fold(
+          error => Left(error) :: Nil,
+          (pushResults: Seq[Future[PushErrorOrSuccess]]) => pushResults map (Right(_))
+        )
+      val diffErrorReport: PushReports = Await.result(diff.persistenceError, 1 day).fold(Nil: PushReports)(Left(_) :: Nil)
+      newOrChangedReports ++ deleteReports ++ redirectReports ++ diffErrorReport
     }
+    val errorsOrFinishedPushOps = errorsOrReports.right map awaitForResults
     val invalidationSucceeded = invalidateCloudFrontItems(errorsOrFinishedPushOps)
 
     afterPushFinished(errorsOrFinishedPushOps, invalidationSucceeded)
@@ -112,40 +116,37 @@
   def invalidateCloudFrontItems
     (errorsOrFinishedPushOps: Either[ErrorReport, FinishedPushOperations])
     (implicit config: Config, cloudFrontSettings: CloudFrontSetting, ec: ExecutionContextExecutor, logger: Logger, pushMode: PushMode):
-  Option[InvalidationSucceeded] =
-    config.cloudfront_distribution_id.map {
-      …
-      finishedPushOps …
-      …
-          (
-      …
-          )
-        }
-      }
-    )
-    val invalidationResults: Seq[Either[FailedInvalidation, SuccessfulInvalidation]] =
-      toInvalidationBatches(pushSuccessReports) map { invalidationBatch =>
-        Await.result(
-          new CloudFront().invalidate(invalidationBatch, distributionId),
-          atMost = 1 day
-        )
+  Option[InvalidationSucceeded] =
+    config.cloudfront_distribution_id.map { distributionId =>
+      val pushSuccessReports = errorsOrFinishedPushOps.fold(
+        errors => Nil,
+        finishedPushOps =>
+          finishedPushOps.map {
+            ops =>
+              for {
+                failedOrSucceededPushes <- ops.right
+                successfulPush <- failedOrSucceededPushes.right
+              } yield successfulPush
+          }.foldLeft(Seq(): Seq[PushSuccessReport]) {
+            (reports, failOrSucc) =>
+              failOrSucc.fold(
+                _ => reports,
+                (pushSuccessReport: PushSuccessReport) => reports :+ pushSuccessReport
+              )
       }
-      …
+      )
+      val invalidationResults: Seq[Either[FailedInvalidation, SuccessfulInvalidation]] =
+        toInvalidationBatches(pushSuccessReports) map { invalidationBatch =>
+          Await.result(
+            CloudFront.invalidate(invalidationBatch, distributionId),
+            atMost = 1 day
+          )
+        }
+      if (invalidationResults.exists(_.isLeft))
+        false // If one of the invalidations failed, mark the whole process as failed
+      else
+        true
     }
-  }
 
   type InvalidationSucceeded = Boolean
 
@@ -177,7 +178,7 @@
     exitCode
   }
 
-  def …
+  def awaitForResults(uploadReports: PushReports)(implicit executor: ExecutionContextExecutor): FinishedPushOperations =
     uploadReports map (_.right.map {
      rep => Await.result(rep, 1 day)
    })
@@ -188,12 +189,13 @@
      (error: ErrorReport) => counts.copy(failures = counts.failures + 1),
      failureOrSuccess => failureOrSuccess.fold(
        (failureReport: PushFailureReport) => counts.copy(failures = counts.failures + 1),
-        (successReport: PushSuccessReport) =>
-          …
-          case …
-          …
+        (successReport: PushSuccessReport) =>
+          successReport match {
+            case succ: SuccessfulUpload => succ.source.fold(_.uploadType, _.uploadType) match {
+              case NewFile => counts.copy(newFiles = counts.newFiles + 1).addTransferStats(succ) // TODO nasty repetition here
+              case FileUpdate => counts.copy(updates = counts.updates + 1).addTransferStats(succ)
+              case RedirectFile => counts.copy(redirects = counts.redirects + 1).addTransferStats(succ)
+            }
            case SuccessfulDelete(_) => counts.copy(deletes = counts.deletes + 1)
          }
      )
@@ -205,7 +207,7 @@
      case PushCounts(updates, newFiles, failures, redirects, deletes, _, _)
        if updates == 0 && newFiles == 0 && failures == 0 && redirects == 0 && deletes == 0 =>
          PushNothing.renderVerb
-      case PushCounts(updates, newFiles, failures, redirects, deletes, uploadedBytes, …
+      case PushCounts(updates, newFiles, failures, redirects, deletes, uploadedBytes, uploadDurations) =>
        val reportClauses: scala.collection.mutable.ArrayBuffer[String] = ArrayBuffer()
        if (updates > 0) reportClauses += s"${Updated.renderVerb} ${updates ofType "file"}."
        if (newFiles > 0) reportClauses += s"${Created.renderVerb} ${newFiles ofType "file"}."
@@ -213,14 +215,7 @@
        if (redirects > 0) reportClauses += s"${Applied.renderVerb} ${redirects ofType "redirect"}."
        if (deletes > 0) reportClauses += s"${Deleted.renderVerb} ${deletes ofType "file"}."
        if (uploadedBytes > 0) {
-          val transferSuffix =
-            if (uploadDurationAndFrequency._1.getStandardSeconds > 0)
-              s", ${humanReadableByteCount(
-                (uploadedBytes / uploadDurationAndFrequency._1.getMillis * 1000) * uploadDurationAndFrequency._2
-              )}/s."
-            else
-              "."
-          …
+          val transferSuffix = humanizeUploadSpeed(uploadedBytes, uploadDurations: _*).fold(".")(speed => s", $speed.")
          reportClauses += s"${Transferred.renderVerb} ${humanReadableByteCount(uploadedBytes)}$transferSuffix"
        }
        reportClauses.mkString(" ")
@@ -233,22 +228,19 @@
    redirects: Int = 0,
    deletes: Int = 0,
    uploadedBytes: Long = 0,
-    …
+    uploadDurations: Seq[org.joda.time.Duration] = Nil
  ) {
    val thereWasSomethingToPush = updates + newFiles + redirects + deletes > 0
 
-    def addTransferStats(successfulUpload: SuccessfulUpload): PushCounts =
+    def addTransferStats(successfulUpload: SuccessfulUpload): PushCounts =
      copy(
        uploadedBytes = uploadedBytes + (successfulUpload.uploadSize getOrElse 0L),
-        …
-        dur => (uploadDurationAndFrequency._1.plus(dur), uploadDurationAndFrequency._2 + 1)
-      )
+        uploadDurations = uploadDurations ++ successfulUpload.uploadDuration
      )
-    }
  }
 
-  type FinishedPushOperations = …
-  type PushReports = …
+  type FinishedPushOperations = Seq[Either[ErrorReport, PushErrorOrSuccess]]
+  type PushReports = Seq[Either[ErrorReport, Future[PushErrorOrSuccess]]]
  case class PushResult(threadPool: ExecutorService, uploadReports: PushReports)
  type ExitCode = Int
 }
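For orientation, the reworked push flow above threads everything through `PushReports`, i.e. `Seq[Either[ErrorReport, Future[PushErrorOrSuccess]]]`: immediate failures stay as `Left`s, while in-flight S3 operations are `Right`-wrapped futures that `awaitForResults` later blocks on before the counts are tallied. A minimal, self-contained sketch of that shape, with plain `String` stand-ins for the gem's report types (an assumption made only for brevity):

```scala
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

object PushReportSketch extends App {
  // Stand-ins for ErrorReport and PushErrorOrSuccess
  type ErrorReport = String
  type PushReport  = Either[ErrorReport, Future[String]]

  val reports: Seq[PushReport] = Seq(
    Right(Future.successful("Created index.html")),
    Left("Failed to fetch an object listing"),
    Right(Future.successful("Updated css/site.css"))
  )

  // Mirrors awaitForResults: block on each in-flight operation, keep errors as-is
  val finished: Seq[Either[ErrorReport, String]] =
    reports map (_.right.map(Await.result(_, 1.minute)))

  val (failures, successes) = finished.partition(_.isLeft)
  println(s"${successes.size} pushed, ${failures.size} failed") // 2 pushed, 1 failed
}
```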
data/src/main/scala/s3/website/S3.scala:

@@ -19,26 +19,38 @@ import scala.concurrent.duration.TimeUnit
 import java.util.concurrent.TimeUnit
 import scala.concurrent.duration.TimeUnit
 import java.util.concurrent.TimeUnit.SECONDS
+import s3.website.S3.SuccessfulUpload.humanizeUploadSpeed
+import java.io.FileInputStream
 
-…
+object S3 {
+
+  def uploadRedirect(redirect: Redirect, a: Attempt = 1)
+                    (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger) =
+    upload(Right(redirect))
+
+  def uploadFile(localFile: LocalFileFromDisk, a: Attempt = 1)
+                (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger) =
+    upload(Left(localFile))
 
-  def upload(…
-            (implicit config: Config…
+  def upload(source: Either[LocalFileFromDisk, Redirect], a: Attempt = 1)
+            (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger):
+  Future[Either[FailedUpload, SuccessfulUpload]] =
    Future {
-      val putObjectRequest = toPutObjectRequest(…
+      val putObjectRequest = toPutObjectRequest(source)
      val uploadDuration =
        if (pushMode.dryRun) None
        else recordUploadDuration(putObjectRequest, s3Settings.s3Client(config) putObject putObjectRequest)
-      val report = SuccessfulUpload(…
+      val report = SuccessfulUpload(source, putObjectRequest, uploadDuration)
      logger.info(report)
      Right(report)
    } recoverWith retry(a)(
-      createFailureReport = error => FailedUpload(…
-      retryAction = newAttempt => this.upload(…
+      createFailureReport = error => FailedUpload(source.fold(_.s3Key, _.s3Key), error),
+      retryAction = newAttempt => this.upload(source, newAttempt)
    )
 
-  def delete(s3Key: …
-            (implicit config: Config…
+  def delete(s3Key: S3Key, a: Attempt = 1)
+            (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger):
+  Future[Either[FailedDelete, SuccessfulDelete]] =
    Future {
      if (!pushMode.dryRun) s3Settings.s3Client(config) deleteObject(config.s3_bucket, s3Key)
      val report = SuccessfulDelete(s3Key)
@@ -49,10 +61,27 @@ class S3(implicit s3Settings: S3Setting, pushMode: PushMode, executor: Execution
      retryAction = newAttempt => this.delete(s3Key, newAttempt)
    )
 
-  def toPutObjectRequest(…
-    …
+  def toPutObjectRequest(source: Either[LocalFileFromDisk, Redirect])(implicit config: Config) =
+    source.fold(
+      localFile => {
+        val md = new ObjectMetadata()
+        md setContentLength localFile.uploadFile.length
+        md setContentType localFile.contentType
+        localFile.encodingOnS3.map(_ => "gzip") foreach md.setContentEncoding
+        localFile.maxAge foreach { seconds =>
+          md.setCacheControl(
+            if (seconds == 0)
+              s"no-cache; max-age=$seconds"
+            else
+              s"max-age=$seconds"
+          )
+        }
+        val req = new PutObjectRequest(config.s3_bucket, localFile.s3Key, new FileInputStream(localFile.uploadFile), md)
+        config.s3_reduced_redundancy.filter(_ == true) foreach (_ => req setStorageClass ReducedRedundancy)
+        req
+      },
      redirect => {
-        val req = new PutObjectRequest(config.s3_bucket, …
+        val req = new PutObjectRequest(config.s3_bucket, redirect.s3Key, redirect.redirectTarget)
        req.setMetadata({
          val md = new ObjectMetadata()
          md.setContentLength(0) // Otherwise the AWS SDK will log a warning
@@ -64,23 +93,6 @@ class S3(implicit s3Settings: S3Setting, pushMode: PushMode, executor: Execution
          md
        })
        req
-      },
-      uploadBody => {
-        val md = new ObjectMetadata()
-        md setContentLength uploadBody.contentLength
-        md setContentType uploadBody.contentType
-        uploadBody.contentEncoding foreach md.setContentEncoding
-        uploadBody.maxAge foreach { seconds =>
-          md.setCacheControl(
-            if (seconds == 0)
-              s"no-cache; max-age=$seconds"
-            else
-              s"max-age=$seconds"
-          )
-        }
-        val req = new PutObjectRequest(config.s3_bucket, upload.s3Key, uploadBody.openInputStream(), md)
-        config.s3_reduced_redundancy.filter(_ == true) foreach (_ => req setStorageClass ReducedRedundancy)
-        req
      }
    )
 
@@ -92,15 +104,12 @@ class S3(implicit s3Settings: S3Setting, pushMode: PushMode, executor: Execution
    else
      None // We are not interested in tracking durations of PUT requests that don't contain data. Redirect is an example of such request.
  }
-}
 
-object S3 {
  def awsS3Client(config: Config) = new AmazonS3Client(new BasicAWSCredentials(config.s3_id, config.s3_secret))
 
-  def …
-    (nextMarker: Option[String] = None, alreadyResolved: Seq[S3File] = Nil, attempt: Attempt = 1, onFlightUpdateFutures: UpdateFutures = Nil)
+  def resolveS3Files(nextMarker: Option[String] = None, alreadyResolved: Seq[S3File] = Nil, attempt: Attempt = 1)
    (implicit config: Config, s3Settings: S3Setting, ec: ExecutionContextExecutor, logger: Logger, pushMode: PushMode):
-    …
+  Future[Either[ErrorReport, Seq[S3File]]] = Future {
    logger.debug(nextMarker.fold
      ("Querying S3 files")
      {m => s"Querying more S3 files (starting from $m)"}
@@ -111,35 +120,17 @@ object S3 {
      nextMarker.foreach(req.setMarker)
      req
    })
-    …
-    summaryIndex.exists((md5AndS3Key) =>
-      md5AndS3Key._1 != lf.md5 && md5AndS3Key._2 == lf.s3Key
-    )
-    val updateFutures: UpdateFutures = localFiles.collect {
-      case lf: LocalFile if shouldUpdate(lf) =>
-        val errorOrUpdate = LocalFile
-          .toUpload(lf)
-          .right
-          .map { (upload: Upload) =>
-            upload.withUploadType(Update)
-          }
-        errorOrUpdate.right.map(update => new S3 upload update)
-    }
-
-    (objects, onFlightUpdateFutures ++ updateFutures)
-  } flatMap { (objectsAndUpdateFutures) =>
-    val objects: ObjectListing = objectsAndUpdateFutures._1
-    val updateFutures: UpdateFutures = objectsAndUpdateFutures._2
+    objects
+  } flatMap { (objects: ObjectListing) =>
    val s3Files = alreadyResolved ++ (objects.getObjectSummaries.toIndexedSeq.toSeq map (S3File(_)))
    Option(objects.getNextMarker)
-      .fold(Future(Right(…
+      .fold(Future(Right(s3Files)): Future[Either[ErrorReport, Seq[S3File]]]) // We've received all the S3 keys from the bucket
      { nextMarker => // There are more S3 keys on the bucket. Fetch them.
-        …
+        resolveS3Files(Some(nextMarker), s3Files, attempt = attempt)
      }
    } recoverWith retry(attempt)(
-      createFailureReport = error => …
-      retryAction = nextAttempt => …
+      createFailureReport = error => ErrorReport(s"Failed to fetch an object listing (${error.getMessage})"),
+      retryAction = nextAttempt => resolveS3Files(nextMarker, alreadyResolved, nextAttempt)
    )
 
  type S3FilesAndUpdates = (ErrorOrS3Files, UpdateFutures)
@@ -153,13 +144,13 @@ object S3 {
    def s3Key: String
  }
 
-  case class SuccessfulUpload(…
+  case class SuccessfulUpload(source: Either[LocalFileFromDisk, Redirect], putObjectRequest: PutObjectRequest, uploadDuration: Option[Duration])
                             (implicit pushMode: PushMode, logger: Logger) extends PushSuccessReport {
    def reportMessage =
-      …
-      case NewFile …
-      case …
-      case Redirect => s"${Redirected.renderVerb} $…
+      source.fold(_.uploadType, (redirect: Redirect) => redirect) match {
+        case NewFile => s"${Created.renderVerb} $s3Key ($reportDetails)"
+        case FileUpdate => s"${Updated.renderVerb} $s3Key ($reportDetails)"
+        case Redirect(s3Key, redirectTarget) => s"${Redirected.renderVerb} $s3Key to $redirectTarget"
      }
 
    def reportDetails = {
@@ -177,24 +168,38 @@ object S3 {
    }.mkString(" | ")
  }
 
+    def s3Key = source.fold(_.s3Key, _.s3Key)
+
    lazy val uploadSize: Option[Long] =
-      …
-      (…
-      …
+      source.fold(
+        (localFile: LocalFileFromDisk) => Some(localFile.uploadFile.length()),
+        (redirect: Redirect) => None
+      )
 
    lazy val uploadSizeForHumans: Option[String] = uploadSize filter (_ => logger.verboseOutput) map humanReadableByteCount
 
-    lazy val …
-      …
+    lazy val uploadSpeedForHumans: Option[String] =
+      (for {
+        dataSize <- uploadSize
+        duration <- uploadDuration
+      } yield {
+        humanizeUploadSpeed(dataSize, duration)
+      }) flatMap identity filter (_ => logger.verboseOutput)
+  }
+
+  object SuccessfulUpload {
+    def humanizeUploadSpeed(uploadedBytes: Long, uploadDurations: Duration*): Option[String] = {
+      val totalDurationMillis = uploadDurations.foldLeft(new org.joda.time.Duration(0)){ (memo, duration) =>
+        memo.plus(duration)
+      }.getMillis // retain precision by using milliseconds
+      if (totalDurationMillis > 0) {
+        val bytesPerMillisecond = uploadedBytes / totalDurationMillis
+        val bytesPerSecond = bytesPerMillisecond * 1000 * uploadDurations.length
+        Some(humanReadableByteCount(bytesPerSecond) + "/s")
+      } else {
+        None
+      }
    }
-
-    def s3Key = upload.s3Key
  }
 
  case class SuccessfulDelete(s3Key: String)(implicit pushMode: PushMode) extends PushSuccessReport {
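The new `SuccessfulUpload.humanizeUploadSpeed` shown above sums the durations of the individual uploads (keeping millisecond precision), converts the total into bytes per second, and then multiplies by the number of durations to approximate aggregate throughput when uploads ran in parallel. A small sketch of the same arithmetic, returning a raw bytes-per-second figure instead of the humanized string (that simplification, and the sample numbers, are assumptions):

```scala
import org.joda.time.Duration

object UploadSpeedSketch extends App {
  // Mirrors the arithmetic of SuccessfulUpload.humanizeUploadSpeed, minus the humanized formatting
  def uploadSpeed(uploadedBytes: Long, uploadDurations: Duration*): Option[Long] = {
    val totalMillis = uploadDurations.foldLeft(new Duration(0))(_ plus _).getMillis
    if (totalMillis > 0)
      Some(uploadedBytes / totalMillis * 1000 * uploadDurations.length)
    else
      None
  }

  // Two parallel uploads of 500 ms each, 1 MB in total:
  // 1000000 B / 1000 ms * 1000 * 2 durations = 2000000 bytes per second
  println(uploadSpeed(1000000L, new Duration(500), new Duration(500))) // Some(2000000)
}
```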
data/src/main/scala/s3/website/model/Config.scala:

@@ -33,7 +33,7 @@ object Config {
      })
    }
 
-    yamlValue getOrElse Left(…
+    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a boolean or [string] value"))
  }
 
  def loadOptionalStringOrStringSeq(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[String, Seq[String]]]] = {
@@ -46,7 +46,7 @@ object Config {
      })
    }
 
-    yamlValue getOrElse Left(…
+    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a string or [string] value"))
  }
 
  def loadMaxAge(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[Int, Map[String, Int]]]] = {
@@ -60,7 +60,7 @@ object Config {
      })
    }
 
-    yamlValue getOrElse Left(…
+    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have an int or (string -> int) value"))
  }
 
  def loadEndpoint(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[S3Endpoint]] =
@@ -79,7 +79,7 @@ object Config {
      redirects <- Try(redirectsOption.map(_.asInstanceOf[java.util.Map[String,String]].toMap))
    } yield Right(redirects)
 
-    yamlValue getOrElse Left(…
+    yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a (string -> string) value"))
  }
 
  def loadRequiredString(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, String] = {
@@ -91,7 +91,7 @@ object Config {
    }
 
    yamlValue getOrElse {
-      Left(…
+      Left(ErrorReport(s"The key $key has to have a string value"))
    }
  }
 
@@ -104,7 +104,7 @@ object Config {
    }
 
    yamlValueOption getOrElse {
-      Left(…
+      Left(ErrorReport(s"The key $key has to have a string value"))
    }
  }
 
@@ -117,7 +117,7 @@ object Config {
    }
 
    yamlValueOption getOrElse {
-      Left(…
+      Left(ErrorReport(s"The key $key has to have a boolean value"))
    }
  }
 
@@ -130,7 +130,7 @@ object Config {
    }
 
    yamlValueOption getOrElse {
-      Left(…
+      Left(ErrorReport(s"The key $key has to have an integer value"))
    }
  }
 
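Each loader above follows the same shape: try to read and cast the value for the key, and fall back to a `Left(ErrorReport(...))` whose message names the expected type. The stand-in below is hypothetical and simplified (a plain `Map` instead of `UnsafeYaml`, a `String` instead of `ErrorReport`), but it mirrors that `yamlValue getOrElse Left(...)` pattern:

```scala
import scala.util.Try

object ConfigLoaderSketch extends App {
  // Hypothetical, simplified stand-in for the loaders in Config.scala
  def loadOptionalBoolean(key: String)(yaml: Map[String, Any]): Either[String, Option[Boolean]] = {
    val yamlValue: Option[Either[String, Option[Boolean]]] =
      Try(yaml.get(key).map(_.asInstanceOf[Boolean])).toOption.map(Right(_))

    // A missing key is fine; a present-but-mistyped value becomes a Left
    yamlValue getOrElse Left(s"The key $key has to have a boolean value")
  }

  println(loadOptionalBoolean("s3_reduced_redundancy")(Map("s3_reduced_redundancy" -> true)))
  // Right(Some(true))
  println(loadOptionalBoolean("s3_reduced_redundancy")(Map("s3_reduced_redundancy" -> "yes please")))
  // Left(The key s3_reduced_redundancy has to have a boolean value)
}
```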
data/src/main/scala/s3/website/model/S3Endpoint.scala:

@@ -1,5 +1,7 @@
 package s3.website.model
 
+import s3.website.ErrorReport
+
 case class S3Endpoint(
   s3WebsiteHostname: String,
   s3Hostname: String
@@ -8,7 +10,7 @@ case class S3Endpoint(
 object S3Endpoint {
   val defaultEndpoint = S3Endpoint("s3-website-us-east-1.amazonaws.com", "s3.amazonaws.com")
 
-  def forString(locationConstraint: String): Either[…
+  def forString(locationConstraint: String): Either[ErrorReport, S3Endpoint] = locationConstraint match {
     case "EU" | "eu-west-1" => Right(S3Endpoint("s3-website-eu-west-1.amazonaws.com", "s3-eu-west-1.amazonaws.com"))
     case "us-east-1" => Right(defaultEndpoint)
     case "us-west-1" => Right(S3Endpoint("s3-website-us-west-1.amazonaws.com", "s3-us-west-1.amazonaws.com"))
@@ -17,6 +19,6 @@ object S3Endpoint {
     case "ap-southeast-2" => Right(S3Endpoint("s3-website-ap-southeast-2.amazonaws.com", "s3-ap-southeast-2.amazonaws.com"))
     case "ap-northeast-1" => Right(S3Endpoint("s3-website-ap-northeast-1.amazonaws.com", "s3-ap-northeast-1.amazonaws.com"))
     case "sa-east-1" => Right(S3Endpoint("s3-website-sa-east-1.amazonaws.com", "s3-sa-east-1.amazonaws.com"))
-    case _ => Left(…
+    case _ => Left(ErrorReport(s"Unrecognised endpoint: $locationConstraint"))
   }
 }
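Because `forString` now returns `Either[ErrorReport, S3Endpoint]`, a caller can fold or pattern match on the result instead of handling a bare string. Roughly (this usage snippet assumes the gem's classes are on the classpath, and the rendering of `ErrorReport` in the comment is approximate):

```scala
import s3.website.model.S3Endpoint

object EndpointSketch extends App {
  println(S3Endpoint.forString("eu-west-1"))
  // Right(S3Endpoint(s3-website-eu-west-1.amazonaws.com,s3-eu-west-1.amazonaws.com))

  S3Endpoint.forString("mars-north-1").fold(
    error    => println(s"Configuration problem: $error"), // "Unrecognised endpoint: mars-north-1"
    endpoint => println(s"Pushing to ${endpoint.s3Hostname}")
  )
}
```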
data/src/main/scala/s3/website/model/Site.scala:

@@ -6,14 +6,17 @@ import org.yaml.snakeyaml.Yaml
 import s3.website.model.Config._
 import scala.io.Source.fromFile
 import scala.language.postfixOps
-import s3.website.Logger
+import s3.website.{S3Key, Logger, ErrorReport}
 import scala.util.Failure
 import s3.website.model.Config.UnsafeYaml
 import scala.util.Success
-import s3.website.ErrorReport
 
 case class Site(rootDirectory: String, config: Config) {
   def resolveS3Key(file: File) = file.getAbsolutePath.replace(rootDirectory, "").replaceFirst("^/", "")
+
+  def resolveFile(s3File: S3File): File = resolveFile(s3File.s3Key)
+
+  def resolveFile(s3Key: S3Key): File = new File(s"$rootDirectory/$s3Key")
 }
 
 object Site {
@@ -68,7 +71,7 @@ object Site {
 
       config.right.map(Site(siteRootDirectory, _))
     case Failure(error) =>
-      Left(…
+      Left(ErrorReport(error))
     }
   }
 }
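The new `resolveFile` overloads map an S3 key back to the file it came from under the site root, the inverse of the existing `resolveS3Key`. A standalone sketch of the two conversions (the `/tmp/my-site` root is an arbitrary example, not something the gem prescribes):

```scala
import java.io.File

object ResolveFileSketch extends App {
  val rootDirectory = "/tmp/my-site"

  // Mirrors Site.resolveFile and Site.resolveS3Key from the diff above
  def resolveFile(s3Key: String): File = new File(s"$rootDirectory/$s3Key")
  def resolveS3Key(file: File): String =
    file.getAbsolutePath.replace(rootDirectory, "").replaceFirst("^/", "")

  val file = resolveFile("css/styles.css")
  println(file)                // /tmp/my-site/css/styles.css
  println(resolveS3Key(file))  // css/styles.css
}
```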