s3_website_monadic 0.0.31 → 0.0.32

@@ -4,15 +4,13 @@ import s3.website.model.Site._
  import scala.concurrent.{ExecutionContextExecutor, Future, Await}
  import scala.concurrent.duration._
  import scala.language.postfixOps
- import s3.website.Diff.{resolveNewFiles, resolveDeletes}
+ import s3.website.Diff.{resolveDeletes, resolveDiff}
  import s3.website.S3._
  import scala.concurrent.ExecutionContext.fromExecutor
  import java.util.concurrent.Executors.newFixedThreadPool
- import s3.website.model.LocalFile.resolveLocalFiles
- import scala.collection.parallel.ParSeq
  import java.util.concurrent.ExecutorService
  import s3.website.model._
- import s3.website.model.Update
+ import s3.website.model.FileUpdate
  import s3.website.model.NewFile
  import s3.website.S3.PushSuccessReport
  import scala.collection.mutable.ArrayBuffer
@@ -27,6 +25,7 @@ import scala.Int
  import java.io.File
  import com.lexicalscope.jewel.cli.CliFactory.parseArguments
  import s3.website.ByteHelper.humanReadableByteCount
+ import s3.website.S3.SuccessfulUpload.humanizeUploadSpeed

  object Push {

@@ -83,27 +82,32 @@ object Push {
      pushMode: PushMode
    ): ExitCode = {
      logger.info(s"${Deploy.renderVerb} ${site.rootDirectory}/* to ${site.config.s3_bucket}")
-     val utils = new Utils
-
      val redirects = Redirect.resolveRedirects
-     val redirectResults = redirects.map(new S3() upload(_))
+     val s3FilesFuture = resolveS3Files()
+     val redirectReports: PushReports = redirects.map(S3 uploadRedirect _) map (Right(_))

-     val errorsOrReports = for {
-       localFiles <- resolveLocalFiles.right
-       errorOrS3FilesAndUpdateFutures <- Await.result(resolveS3FilesAndUpdates(localFiles)(), 7 days).right
-       s3Files <- errorOrS3FilesAndUpdateFutures._1.right
+     val errorsOrReports: Either[ErrorReport, PushReports] = for {
+       diff <- resolveDiff(s3FilesFuture).right
      } yield {
-       val updateReports: PushReports = errorOrS3FilesAndUpdateFutures._2.par
-       val deleteReports: PushReports = utils toParSeq resolveDeletes(localFiles, s3Files, redirects)
-         .map { s3File => new S3() delete s3File.s3Key }
-         .map { Right(_) } // To make delete reports type-compatible with upload reports
-       val uploadReports: PushReports = utils toParSeq resolveNewFiles(localFiles, s3Files)
-         .map { _.right.map(new S3() upload(_)) }
-       uploadReports ++ deleteReports ++ updateReports ++ redirectResults.map(Right(_))
-     }
-     val errorsOrFinishedPushOps: Either[ErrorReport, FinishedPushOperations] = errorsOrReports.right map {
-       uploadReports => awaitForUploads(uploadReports)
+       val newOrChangedReports: PushReports = diff.uploads.map { uploadBatch =>
+         uploadBatch.map(_.right.map(_.map(S3 uploadFile _)))
+       }.map (Await.result(_, 1 day)).foldLeft(Seq(): PushReports) { (memo: PushReports, res: Either[ErrorReport, Seq[Future[PushErrorOrSuccess]]]) =>
+         res.fold(
+           error => memo :+ Left(error),
+           (pushResults: Seq[Future[PushErrorOrSuccess]]) => memo ++ (pushResults map (Right(_)))
+         )
+       }
+       val deleteReports =
+         Await.result(resolveDeletes(diff, s3FilesFuture, redirects), 1 day).right.map { keysToDelete =>
+           keysToDelete map (S3 delete _)
+         }.fold(
+           error => Left(error) :: Nil,
+           (pushResults: Seq[Future[PushErrorOrSuccess]]) => pushResults map (Right(_))
+         )
+       val diffErrorReport: PushReports = Await.result(diff.persistenceError, 1 day).fold(Nil: PushReports)(Left(_) :: Nil)
+       newOrChangedReports ++ deleteReports ++ redirectReports ++ diffErrorReport
      }
+     val errorsOrFinishedPushOps = errorsOrReports.right map awaitForResults
      val invalidationSucceeded = invalidateCloudFrontItems(errorsOrFinishedPushOps)

      afterPushFinished(errorsOrFinishedPushOps, invalidationSucceeded)
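
Note on the hunk above: the push flow is now diff-driven. Each entry in diff.uploads resolves to an Either — a whole batch either fails with one ErrorReport or yields the files to upload — and the foldLeft flattens those batch results into the flat PushReports sequence, keeping a Left per failed batch and a Right per started upload. A minimal sketch of that flattening pattern, with simplified stand-in types (Error and Job below are hypothetical placeholders, not the gem's types):

    // Flatten Either-wrapped batches into one flat report list (stand-in types).
    object FlattenSketch extends App {
      type Error = String
      type Job = Int // stands in for Future[PushErrorOrSuccess]

      def flatten(batches: Seq[Either[Error, Seq[Job]]]): Seq[Either[Error, Job]] =
        batches.foldLeft(Seq.empty[Either[Error, Job]]) { (memo, batch) =>
          batch.fold(
            error => memo :+ Left(error),       // a failed batch becomes a single error report
            jobs => memo ++ jobs.map(Right(_))  // a resolved batch contributes one report per upload
          )
        }

      println(flatten(Seq(Right(Seq(1, 2)), Left("boom"), Right(Seq(3)))))
      // List(Right(1), Right(2), Left(boom), Right(3))
    }
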
@@ -112,40 +116,37 @@ object Push {
    def invalidateCloudFrontItems
      (errorsOrFinishedPushOps: Either[ErrorReport, FinishedPushOperations])
      (implicit config: Config, cloudFrontSettings: CloudFrontSetting, ec: ExecutionContextExecutor, logger: Logger, pushMode: PushMode):
-     Option[InvalidationSucceeded] = {
-     config.cloudfront_distribution_id.map {
-       distributionId =>
-         val pushSuccessReports = errorsOrFinishedPushOps.fold(
-           errors => Nil,
-           finishedPushOps => {
-             finishedPushOps.map {
-               ops =>
-                 for {
-                   failedOrSucceededPushes <- ops.right
-                   successfulPush <- failedOrSucceededPushes.right
-                 } yield successfulPush
-             }.foldLeft(Seq(): Seq[PushSuccessReport]) {
-               (reports, failOrSucc) =>
-                 failOrSucc.fold(
-                   _ => reports,
-                   (pushSuccessReport: PushSuccessReport) => reports :+ pushSuccessReport
-                 )
-             }
-           }
-         )
-         val invalidationResults: Seq[Either[FailedInvalidation, SuccessfulInvalidation]] =
-           toInvalidationBatches(pushSuccessReports) map { invalidationBatch =>
-             Await.result(
-               new CloudFront().invalidate(invalidationBatch, distributionId),
-               atMost = 1 day
-             )
+     Option[InvalidationSucceeded] =
+     config.cloudfront_distribution_id.map { distributionId =>
+       val pushSuccessReports = errorsOrFinishedPushOps.fold(
+         errors => Nil,
+         finishedPushOps =>
+           finishedPushOps.map {
+             ops =>
+               for {
+                 failedOrSucceededPushes <- ops.right
+                 successfulPush <- failedOrSucceededPushes.right
+               } yield successfulPush
+           }.foldLeft(Seq(): Seq[PushSuccessReport]) {
+             (reports, failOrSucc) =>
+               failOrSucc.fold(
+                 _ => reports,
+                 (pushSuccessReport: PushSuccessReport) => reports :+ pushSuccessReport
+               )
            }
-     if (invalidationResults.exists(_.isLeft))
-       false // If one of the invalidations failed, mark the whole process as failed
-     else
-       true
+       )
+       val invalidationResults: Seq[Either[FailedInvalidation, SuccessfulInvalidation]] =
+         toInvalidationBatches(pushSuccessReports) map { invalidationBatch =>
+           Await.result(
+             CloudFront.invalidate(invalidationBatch, distributionId),
+             atMost = 1 day
+           )
+         }
+       if (invalidationResults.exists(_.isLeft))
+         false // If one of the invalidations failed, mark the whole process as failed
+       else
+         true
      }
-   }

    type InvalidationSucceeded = Boolean

@@ -177,7 +178,7 @@ object Push {
        exitCode
    }

-   def awaitForUploads(uploadReports: PushReports)(implicit executor: ExecutionContextExecutor): FinishedPushOperations =
+   def awaitForResults(uploadReports: PushReports)(implicit executor: ExecutionContextExecutor): FinishedPushOperations =
      uploadReports map (_.right.map {
        rep => Await.result(rep, 1 day)
      })
@@ -188,12 +189,13 @@ object Push {
        (error: ErrorReport) => counts.copy(failures = counts.failures + 1),
        failureOrSuccess => failureOrSuccess.fold(
          (failureReport: PushFailureReport) => counts.copy(failures = counts.failures + 1),
-         (successReport: PushSuccessReport) => successReport match {
-           case succ: SuccessfulUpload => succ.upload.uploadType match {
-             case NewFile => counts.copy(newFiles = counts.newFiles + 1).addTransferStats(succ) // TODO nasty repetition here
-             case Update => counts.copy(updates = counts.updates + 1).addTransferStats(succ)
-             case Redirect => counts.copy(redirects = counts.redirects + 1).addTransferStats(succ)
-           }
+         (successReport: PushSuccessReport) =>
+           successReport match {
+             case succ: SuccessfulUpload => succ.source.fold(_.uploadType, _.uploadType) match {
+               case NewFile => counts.copy(newFiles = counts.newFiles + 1).addTransferStats(succ) // TODO nasty repetition here
+               case FileUpdate => counts.copy(updates = counts.updates + 1).addTransferStats(succ)
+               case RedirectFile => counts.copy(redirects = counts.redirects + 1).addTransferStats(succ)
+             }
            case SuccessfulDelete(_) => counts.copy(deletes = counts.deletes + 1)
          }
        )
@@ -205,7 +207,7 @@ object Push {
      case PushCounts(updates, newFiles, failures, redirects, deletes, _, _)
        if updates == 0 && newFiles == 0 && failures == 0 && redirects == 0 && deletes == 0 =>
          PushNothing.renderVerb
-     case PushCounts(updates, newFiles, failures, redirects, deletes, uploadedBytes, uploadDurationAndFrequency) =>
+     case PushCounts(updates, newFiles, failures, redirects, deletes, uploadedBytes, uploadDurations) =>
        val reportClauses: scala.collection.mutable.ArrayBuffer[String] = ArrayBuffer()
        if (updates > 0) reportClauses += s"${Updated.renderVerb} ${updates ofType "file"}."
        if (newFiles > 0) reportClauses += s"${Created.renderVerb} ${newFiles ofType "file"}."
@@ -213,14 +215,7 @@ object Push {
        if (redirects > 0) reportClauses += s"${Applied.renderVerb} ${redirects ofType "redirect"}."
        if (deletes > 0) reportClauses += s"${Deleted.renderVerb} ${deletes ofType "file"}."
        if (uploadedBytes > 0) {
-         val transferSuffix =
-           if (uploadDurationAndFrequency._1.getStandardSeconds > 0)
-             s", ${humanReadableByteCount(
-               (uploadedBytes / uploadDurationAndFrequency._1.getMillis * 1000) * uploadDurationAndFrequency._2
-             )}/s."
-           else
-             "."
-
+         val transferSuffix = humanizeUploadSpeed(uploadedBytes, uploadDurations: _*).fold(".")(speed => s", $speed.")
          reportClauses += s"${Transferred.renderVerb} ${humanReadableByteCount(uploadedBytes)}$transferSuffix"
        }
        reportClauses.mkString(" ")
@@ -233,22 +228,19 @@ object Push {
      redirects: Int = 0,
      deletes: Int = 0,
      uploadedBytes: Long = 0,
-     uploadDurationAndFrequency: (org.joda.time.Duration, Int) = (new org.joda.time.Duration(0), 0)
+     uploadDurations: Seq[org.joda.time.Duration] = Nil
    ) {
      val thereWasSomethingToPush = updates + newFiles + redirects + deletes > 0

-     def addTransferStats(successfulUpload: SuccessfulUpload): PushCounts = {
+     def addTransferStats(successfulUpload: SuccessfulUpload): PushCounts =
        copy(
          uploadedBytes = uploadedBytes + (successfulUpload.uploadSize getOrElse 0L),
-         uploadDurationAndFrequency = successfulUpload.uploadDuration.fold(uploadDurationAndFrequency)(
-           dur => (uploadDurationAndFrequency._1.plus(dur), uploadDurationAndFrequency._2 + 1)
-         )
+         uploadDurations = uploadDurations ++ successfulUpload.uploadDuration
        )
-     }
    }

-   type FinishedPushOperations = ParSeq[Either[ErrorReport, PushErrorOrSuccess]]
-   type PushReports = ParSeq[Either[ErrorReport, Future[PushErrorOrSuccess]]]
+   type FinishedPushOperations = Seq[Either[ErrorReport, PushErrorOrSuccess]]
+   type PushReports = Seq[Either[ErrorReport, Future[PushErrorOrSuccess]]]
    case class PushResult(threadPool: ExecutorService, uploadReports: PushReports)
    type ExitCode = Int
  }
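
Note on addTransferStats above: PushCounts no longer keeps a running (Duration, Int) pair; it just collects each upload's duration and leaves the arithmetic to humanizeUploadSpeed (further down in this diff). The append `uploadDurations ++ successfulUpload.uploadDuration` works because a Scala Option can be treated as a collection, so a missing duration contributes nothing. A small sketch of that behaviour, assuming joda-time on the classpath (the gem already depends on it):

    import org.joda.time.Duration

    // Appending an Option[Duration] to a Seq[Duration]: None adds nothing.
    object DurationAppendSketch extends App {
      val durations: Seq[Duration] = Seq(Duration.millis(200))

      println(durations ++ Some(Duration.millis(300))) // two durations
      println(durations ++ None)                       // still one duration
    }
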
@@ -19,26 +19,38 @@ import scala.concurrent.duration.TimeUnit
  import java.util.concurrent.TimeUnit
  import scala.concurrent.duration.TimeUnit
  import java.util.concurrent.TimeUnit.SECONDS
+ import s3.website.S3.SuccessfulUpload.humanizeUploadSpeed
+ import java.io.FileInputStream

- class S3(implicit s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger) {
+ object S3 {
+
+   def uploadRedirect(redirect: Redirect, a: Attempt = 1)
+                     (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger) =
+     upload(Right(redirect))
+
+   def uploadFile(localFile: LocalFileFromDisk, a: Attempt = 1)
+                 (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger) =
+     upload(Left(localFile))

-   def upload(upload: Upload with UploadTypeResolved, a: Attempt = 1)
-             (implicit config: Config): Future[Either[FailedUpload, SuccessfulUpload]] =
+   def upload(source: Either[LocalFileFromDisk, Redirect], a: Attempt = 1)
+             (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger):
+     Future[Either[FailedUpload, SuccessfulUpload]] =
      Future {
-       val putObjectRequest = toPutObjectRequest(upload)
+       val putObjectRequest = toPutObjectRequest(source)
        val uploadDuration =
          if (pushMode.dryRun) None
          else recordUploadDuration(putObjectRequest, s3Settings.s3Client(config) putObject putObjectRequest)
-       val report = SuccessfulUpload(upload, putObjectRequest, uploadDuration)
+       val report = SuccessfulUpload(source, putObjectRequest, uploadDuration)
        logger.info(report)
        Right(report)
      } recoverWith retry(a)(
-       createFailureReport = error => FailedUpload(upload.s3Key, error),
-       retryAction = newAttempt => this.upload(upload, newAttempt)
+       createFailureReport = error => FailedUpload(source.fold(_.s3Key, _.s3Key), error),
+       retryAction = newAttempt => this.upload(source, newAttempt)
      )

-   def delete(s3Key: String, a: Attempt = 1)
-             (implicit config: Config): Future[Either[FailedDelete, SuccessfulDelete]] =
+   def delete(s3Key: S3Key, a: Attempt = 1)
+             (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger):
+     Future[Either[FailedDelete, SuccessfulDelete]] =
      Future {
        if (!pushMode.dryRun) s3Settings.s3Client(config) deleteObject(config.s3_bucket, s3Key)
        val report = SuccessfulDelete(s3Key)
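
Note on the S3 hunk above: the stateful `class S3` is gone; `object S3` now takes everything it needs as implicits, and the old Upload-with-UploadTypeResolved hierarchy is replaced by an Either[LocalFileFromDisk, Redirect] that uploadFile/uploadRedirect construct for the shared upload method. A toy sketch of that dispatch shape, with simplified stand-in types (FileSource and RedirectSource are not gem types):

    // Either-based dispatch: one entry point, two thin wrappers picking the side.
    object DispatchSketch extends App {
      case class FileSource(s3Key: String)
      case class RedirectSource(s3Key: String, target: String)

      def upload(source: Either[FileSource, RedirectSource]): String =
        source.fold(
          file => s"PUT ${file.s3Key} from disk",
          redirect => s"PUT ${redirect.s3Key} -> ${redirect.target}"
        )

      def uploadFile(f: FileSource) = upload(Left(f))
      def uploadRedirect(r: RedirectSource) = upload(Right(r))

      println(uploadFile(FileSource("index.html")))
      println(uploadRedirect(RedirectSource("old.html", "/new.html")))
    }
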
@@ -49,10 +61,27 @@ class S3(implicit s3Settings: S3Setting, pushMode: PushMode, executor: Execution
        retryAction = newAttempt => this.delete(s3Key, newAttempt)
      )

-   def toPutObjectRequest(upload: Upload)(implicit config: Config) =
-     upload.essence.fold(
+   def toPutObjectRequest(source: Either[LocalFileFromDisk, Redirect])(implicit config: Config) =
+     source.fold(
+       localFile => {
+         val md = new ObjectMetadata()
+         md setContentLength localFile.uploadFile.length
+         md setContentType localFile.contentType
+         localFile.encodingOnS3.map(_ => "gzip") foreach md.setContentEncoding
+         localFile.maxAge foreach { seconds =>
+           md.setCacheControl(
+             if (seconds == 0)
+               s"no-cache; max-age=$seconds"
+             else
+               s"max-age=$seconds"
+           )
+         }
+         val req = new PutObjectRequest(config.s3_bucket, localFile.s3Key, new FileInputStream(localFile.uploadFile), md)
+         config.s3_reduced_redundancy.filter(_ == true) foreach (_ => req setStorageClass ReducedRedundancy)
+         req
+       },
        redirect => {
-         val req = new PutObjectRequest(config.s3_bucket, upload.s3Key, redirect.redirectTarget)
+         val req = new PutObjectRequest(config.s3_bucket, redirect.s3Key, redirect.redirectTarget)
          req.setMetadata({
            val md = new ObjectMetadata()
            md.setContentLength(0) // Otherwise the AWS SDK will log a warning
@@ -64,23 +93,6 @@ class S3(implicit s3Settings: S3Setting, pushMode: PushMode, executor: Execution
            md
          })
          req
-       },
-       uploadBody => {
-         val md = new ObjectMetadata()
-         md setContentLength uploadBody.contentLength
-         md setContentType uploadBody.contentType
-         uploadBody.contentEncoding foreach md.setContentEncoding
-         uploadBody.maxAge foreach { seconds =>
-           md.setCacheControl(
-             if (seconds == 0)
-               s"no-cache; max-age=$seconds"
-             else
-               s"max-age=$seconds"
-           )
-         }
-         val req = new PutObjectRequest(config.s3_bucket, upload.s3Key, uploadBody.openInputStream(), md)
-         config.s3_reduced_redundancy.filter(_ == true) foreach (_ => req setStorageClass ReducedRedundancy)
-         req
        }
      )

@@ -92,15 +104,12 @@ class S3(implicit s3Settings: S3Setting, pushMode: PushMode, executor: Execution
      else
        None // We are not interested in tracking durations of PUT requests that don't contain data. Redirect is an example of such request.
    }
- }

- object S3 {
    def awsS3Client(config: Config) = new AmazonS3Client(new BasicAWSCredentials(config.s3_id, config.s3_secret))

-   def resolveS3FilesAndUpdates(localFiles: Seq[LocalFile])
-                               (nextMarker: Option[String] = None, alreadyResolved: Seq[S3File] = Nil, attempt: Attempt = 1, onFlightUpdateFutures: UpdateFutures = Nil)
+   def resolveS3Files(nextMarker: Option[String] = None, alreadyResolved: Seq[S3File] = Nil, attempt: Attempt = 1)
                      (implicit config: Config, s3Settings: S3Setting, ec: ExecutionContextExecutor, logger: Logger, pushMode: PushMode):
-     ErrorOrS3FilesAndUpdates = Future {
+     Future[Either[ErrorReport, Seq[S3File]]] = Future {
      logger.debug(nextMarker.fold
        ("Querying S3 files")
        {m => s"Querying more S3 files (starting from $m)"}
@@ -111,35 +120,17 @@ object S3 {
        nextMarker.foreach(req.setMarker)
        req
      })
-     val summaryIndex = objects.getObjectSummaries.map { summary => (summary.getETag, summary.getKey) }.toSet // Index to avoid O(n^2) lookups
-     def shouldUpdate(lf: LocalFile) =
-       summaryIndex.exists((md5AndS3Key) =>
-         md5AndS3Key._1 != lf.md5 && md5AndS3Key._2 == lf.s3Key
-       )
-     val updateFutures: UpdateFutures = localFiles.collect {
-       case lf: LocalFile if shouldUpdate(lf) =>
-         val errorOrUpdate = LocalFile
-           .toUpload(lf)
-           .right
-           .map { (upload: Upload) =>
-             upload.withUploadType(Update)
-           }
-         errorOrUpdate.right.map(update => new S3 upload update)
-     }
-
-     (objects, onFlightUpdateFutures ++ updateFutures)
-   } flatMap { (objectsAndUpdateFutures) =>
-     val objects: ObjectListing = objectsAndUpdateFutures._1
-     val updateFutures: UpdateFutures = objectsAndUpdateFutures._2
+     objects
+   } flatMap { (objects: ObjectListing) =>
      val s3Files = alreadyResolved ++ (objects.getObjectSummaries.toIndexedSeq.toSeq map (S3File(_)))
      Option(objects.getNextMarker)
-       .fold(Future(Right((Right(s3Files), updateFutures))): ErrorOrS3FilesAndUpdates) // We've received all the S3 keys from the bucket
+       .fold(Future(Right(s3Files)): Future[Either[ErrorReport, Seq[S3File]]]) // We've received all the S3 keys from the bucket
        { nextMarker => // There are more S3 keys on the bucket. Fetch them.
-         resolveS3FilesAndUpdates(localFiles)(Some(nextMarker), s3Files, attempt = attempt, updateFutures)
+         resolveS3Files(Some(nextMarker), s3Files, attempt = attempt)
        }
    } recoverWith retry(attempt)(
-     createFailureReport = error => ClientError(s"Failed to fetch an object listing (${error.getMessage})"),
-     retryAction = nextAttempt => resolveS3FilesAndUpdates(localFiles)(nextMarker, alreadyResolved, nextAttempt, onFlightUpdateFutures)
+     createFailureReport = error => ErrorReport(s"Failed to fetch an object listing (${error.getMessage})"),
+     retryAction = nextAttempt => resolveS3Files(nextMarker, alreadyResolved, nextAttempt)
    )

    type S3FilesAndUpdates = (ErrorOrS3Files, UpdateFutures)
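
Note on resolveS3Files above: bucket listings are paginated, so the function fetches one ObjectListing and, if getNextMarker is non-null, recurses with that marker and the keys gathered so far; otherwise it completes the Future. A self-contained sketch of that recurse-on-marker pattern (Page and fetchPage are hypothetical stand-ins for the AWS listing calls):

    import scala.concurrent.{Await, ExecutionContext, Future}
    import scala.concurrent.duration._
    import ExecutionContext.Implicits.global

    object PaginationSketch extends App {
      case class Page(keys: Seq[String], nextMarker: Option[String])

      // Pretend listing: two pages, then no marker.
      def fetchPage(marker: Option[String]): Future[Page] = Future {
        marker match {
          case None => Page(Seq("a", "b"), Some("b"))
          case Some("b") => Page(Seq("c"), None)
          case Some(_) => Page(Nil, None)
        }
      }

      def listAll(marker: Option[String] = None, acc: Seq[String] = Nil): Future[Seq[String]] =
        fetchPage(marker) flatMap { page =>
          val keys = acc ++ page.keys
          page.nextMarker.fold(Future.successful(keys))(next => listAll(Some(next), keys))
        }

      println(Await.result(listAll(), 10.seconds)) // List(a, b, c)
    }
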
@@ -153,13 +144,13 @@ object S3 {
      def s3Key: String
    }

-   case class SuccessfulUpload(upload: Upload with UploadTypeResolved, putObjectRequest: PutObjectRequest, uploadDuration: Option[Duration])
+   case class SuccessfulUpload(source: Either[LocalFileFromDisk, Redirect], putObjectRequest: PutObjectRequest, uploadDuration: Option[Duration])
                               (implicit pushMode: PushMode, logger: Logger) extends PushSuccessReport {
      def reportMessage =
-       upload.uploadType match {
-         case NewFile => s"${Created.renderVerb} $s3Key ($reportDetails)"
-         case Update => s"${Updated.renderVerb} $s3Key ($reportDetails)"
-         case Redirect => s"${Redirected.renderVerb} ${upload.essence.left.get.key} to ${upload.essence.left.get.redirectTarget}"
+       source.fold(_.uploadType, (redirect: Redirect) => redirect) match {
+         case NewFile => s"${Created.renderVerb} $s3Key ($reportDetails)"
+         case FileUpdate => s"${Updated.renderVerb} $s3Key ($reportDetails)"
+         case Redirect(s3Key, redirectTarget) => s"${Redirected.renderVerb} $s3Key to $redirectTarget"
        }

      def reportDetails = {
@@ -177,24 +168,38 @@ object S3 {
        }.mkString(" | ")
      }

+     def s3Key = source.fold(_.s3Key, _.s3Key)
+
      lazy val uploadSize: Option[Long] =
-       upload.essence.fold(
-         (redirect: Redirect) => None,
-         uploadBody => Some(uploadBody.contentLength)
-       )
+       source.fold(
+         (localFile: LocalFileFromDisk) => Some(localFile.uploadFile.length()),
+         (redirect: Redirect) => None
+       )

      lazy val uploadSizeForHumans: Option[String] = uploadSize filter (_ => logger.verboseOutput) map humanReadableByteCount

-     lazy val uploadSpeed: Option[Long] = for {
-       dataSize <- uploadSize
-       duration <- uploadDuration
-     } yield (dataSize / (duration.getMillis max 1)) * 1000 // Precision tweaking and avoidance of divide-by-zero
-
-     lazy val uploadSpeedForHumans: Option[String] = uploadSpeed filter (_ => logger.verboseOutput) map {
-       bytesPerSecond => s"${humanReadableByteCount(bytesPerSecond)}/s"
+     lazy val uploadSpeedForHumans: Option[String] =
+       (for {
+         dataSize <- uploadSize
+         duration <- uploadDuration
+       } yield {
+         humanizeUploadSpeed(dataSize, duration)
+       }) flatMap identity filter (_ => logger.verboseOutput)
+   }
+
+   object SuccessfulUpload {
+     def humanizeUploadSpeed(uploadedBytes: Long, uploadDurations: Duration*): Option[String] = {
+       val totalDurationMillis = uploadDurations.foldLeft(new org.joda.time.Duration(0)){ (memo, duration) =>
+         memo.plus(duration)
+       }.getMillis // retain precision by using milliseconds
+       if (totalDurationMillis > 0) {
+         val bytesPerMillisecond = uploadedBytes / totalDurationMillis
+         val bytesPerSecond = bytesPerMillisecond * 1000 * uploadDurations.length
+         Some(humanReadableByteCount(bytesPerSecond) + "/s")
+       } else {
+         None
+       }
      }
-
-     def s3Key = upload.s3Key
    }

    case class SuccessfulDelete(s3Key: String)(implicit pushMode: PushMode) extends PushSuccessReport {
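
Note on humanizeUploadSpeed above: it sums the individual upload durations in milliseconds and then multiplies the resulting bytes-per-second figure by the number of durations, presumably to compensate for uploads running concurrently (summing their durations over-counts wall-clock time by roughly that factor). The same formula with concrete numbers, as plain arithmetic rather than gem code:

    // 10 MB pushed as two uploads of 5 s each.
    object SpeedFormulaSketch extends App {
      val uploadedBytes = 10L * 1000 * 1000    // 10 MB in total
      val durationsMillis = Seq(5000L, 5000L)  // two uploads, 5 s each

      val totalMillis = durationsMillis.sum                   // 10 000 ms
      val bytesPerMillisecond = uploadedBytes / totalMillis   // 1 000 B/ms
      val bytesPerSecond = bytesPerMillisecond * 1000 * durationsMillis.length
      println(bytesPerSecond)                                 // 2000000, i.e. ~2 MB/s reported
    }
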
@@ -33,7 +33,7 @@ object Config {
        })
      }

-     yamlValue getOrElse Left(ClientError(s"The key $key has to have a boolean or [string] value"))
+     yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a boolean or [string] value"))
    }

    def loadOptionalStringOrStringSeq(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[String, Seq[String]]]] = {
@@ -46,7 +46,7 @@ object Config {
        })
      }

-     yamlValue getOrElse Left(ClientError(s"The key $key has to have a string or [string] value"))
+     yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a string or [string] value"))
    }

    def loadMaxAge(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[Either[Int, Map[String, Int]]]] = {
@@ -60,7 +60,7 @@ object Config {
        })
      }

-     yamlValue getOrElse Left(ClientError(s"The key $key has to have an int or (string -> int) value"))
+     yamlValue getOrElse Left(ErrorReport(s"The key $key has to have an int or (string -> int) value"))
    }

    def loadEndpoint(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, Option[S3Endpoint]] =
@@ -79,7 +79,7 @@ object Config {
        redirects <- Try(redirectsOption.map(_.asInstanceOf[java.util.Map[String,String]].toMap))
      } yield Right(redirects)

-     yamlValue getOrElse Left(ClientError(s"The key $key has to have a (string -> string) value"))
+     yamlValue getOrElse Left(ErrorReport(s"The key $key has to have a (string -> string) value"))
    }

    def loadRequiredString(key: String)(implicit unsafeYaml: UnsafeYaml): Either[ErrorReport, String] = {
@@ -91,7 +91,7 @@ object Config {
      }

      yamlValue getOrElse {
-       Left(ClientError(s"The key $key has to have a string value"))
+       Left(ErrorReport(s"The key $key has to have a string value"))
      }
    }

@@ -104,7 +104,7 @@ object Config {
      }

      yamlValueOption getOrElse {
-       Left(ClientError(s"The key $key has to have a string value"))
+       Left(ErrorReport(s"The key $key has to have a string value"))
      }
    }

@@ -117,7 +117,7 @@ object Config {
      }

      yamlValueOption getOrElse {
-       Left(ClientError(s"The key $key has to have a boolean value"))
+       Left(ErrorReport(s"The key $key has to have a boolean value"))
      }
    }

@@ -130,7 +130,7 @@ object Config {
      }

      yamlValueOption getOrElse {
-       Left(ClientError(s"The key $key has to have an integer value"))
+       Left(ErrorReport(s"The key $key has to have an integer value"))
      }
    }

@@ -1,5 +1,7 @@
  package s3.website.model

+ import s3.website.ErrorReport
+
  case class S3Endpoint(
    s3WebsiteHostname: String,
    s3Hostname: String
@@ -8,7 +10,7 @@ case class S3Endpoint(
  object S3Endpoint {
    val defaultEndpoint = S3Endpoint("s3-website-us-east-1.amazonaws.com", "s3.amazonaws.com")

-   def forString(locationConstraint: String): Either[ClientError, S3Endpoint] = locationConstraint match {
+   def forString(locationConstraint: String): Either[ErrorReport, S3Endpoint] = locationConstraint match {
      case "EU" | "eu-west-1" => Right(S3Endpoint("s3-website-eu-west-1.amazonaws.com", "s3-eu-west-1.amazonaws.com"))
      case "us-east-1" => Right(defaultEndpoint)
      case "us-west-1" => Right(S3Endpoint("s3-website-us-west-1.amazonaws.com", "s3-us-west-1.amazonaws.com"))
@@ -17,6 +19,6 @@ object S3Endpoint {
      case "ap-southeast-2" => Right(S3Endpoint("s3-website-ap-southeast-2.amazonaws.com", "s3-ap-southeast-2.amazonaws.com"))
      case "ap-northeast-1" => Right(S3Endpoint("s3-website-ap-northeast-1.amazonaws.com", "s3-ap-northeast-1.amazonaws.com"))
      case "sa-east-1" => Right(S3Endpoint("s3-website-sa-east-1.amazonaws.com", "s3-sa-east-1.amazonaws.com"))
-     case _ => Left(ClientError(s"Unrecognised endpoint: $locationConstraint"))
+     case _ => Left(ErrorReport(s"Unrecognised endpoint: $locationConstraint"))
    }
  }
@@ -6,14 +6,17 @@ import org.yaml.snakeyaml.Yaml
  import s3.website.model.Config._
  import scala.io.Source.fromFile
  import scala.language.postfixOps
- import s3.website.Logger
+ import s3.website.{S3Key, Logger, ErrorReport}
  import scala.util.Failure
  import s3.website.model.Config.UnsafeYaml
  import scala.util.Success
- import s3.website.ErrorReport

  case class Site(rootDirectory: String, config: Config) {
    def resolveS3Key(file: File) = file.getAbsolutePath.replace(rootDirectory, "").replaceFirst("^/", "")
+
+   def resolveFile(s3File: S3File): File = resolveFile(s3File.s3Key)
+
+   def resolveFile(s3Key: S3Key): File = new File(s"$rootDirectory/$s3Key")
  }

  object Site {
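
Note on the new Site#resolveFile helpers above: they map an S3 key back to a file path under the site's root directory. Site needs a full Config to instantiate, so this sketch uses a plain function with the same body:

    import java.io.File

    object ResolveFileSketch extends App {
      def resolveFile(rootDirectory: String, s3Key: String): File =
        new File(s"$rootDirectory/$s3Key")

      println(resolveFile("/tmp/site", "css/main.css")) // /tmp/site/css/main.css
    }
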
@@ -68,7 +71,7 @@ object Site {

        config.right.map(Site(siteRootDirectory, _))
      case Failure(error) =>
-       Left(IOError(error))
+       Left(ErrorReport(error))
      }
    }
  }