s3_website 2.1.6 → 2.1.7

This diff shows the changes between publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 07675e2836114a62d64ba9c4f3cb1d468979def7
-  data.tar.gz: 79ec43e71d28fa9eb6852883f3845a91c71adba5
+  metadata.gz: 8a50d9894a8f340bff511731fe4934a413e324a2
+  data.tar.gz: 23008693b34025381965ff5721796c78d63a5b72
 SHA512:
-  metadata.gz: 5b8da0a144df3b8e6a9af48457927f54e9991afd9d81a7eb00ab215c754d72905727dc14298bbeade33190b2a88c14f27fca0e2b28cfbc11bdaaa13e8d60124c
-  data.tar.gz: 83b3cc06196da7997bcd5f0d051c70e08b7ccc0d224c04070adc14e675788fc15c5e90c2d503489d46f854057373c5757969a579315c37059380bff5d9d267f2
+  metadata.gz: 98f1cb95e395dc00cc47f71af25cd2ac40e75e08dd7299e89df85d403e0179abbfcec2ae1c0b85caf9b1235fcd9a4fee25116d7bc7a6d511209ff8bb3504c6ba
+  data.tar.gz: 396c0a1d1a4f9a45baa6365a0041955d169b391384c243267092faa9a57be3f1d65d0ed8a7d1fc555b63ad5f07d19416219f3dc4968c5102c52aa1c04dbb701f
data/changelog.md CHANGED
@@ -2,6 +2,12 @@
 
 This project uses [Semantic Versioning](http://semver.org).
 
+## 2.1.7
+
+* Remove local db
+
+  It turned out to be too complex to maintain
+
 ## 2.1.6
 
 * Automatically add slash to redirects if needed
@@ -40,9 +46,6 @@ This project uses [Semantic Versioning](http://semver.org).
 
 * Faster uploads for extra large sites
 
-  Use a local database for calculating diffs. This removes the need to read all
-  the files of the website, when you call the `s3_website push` command.
-
   Use proper multithreading with JVM threads.
 
 * Simulate deployments with `push --dry-run`
@@ -1,3 +1,3 @@
 module S3Website
-  VERSION = '2.1.6'
+  VERSION = '2.1.7'
 end
@@ -1 +1 @@
-103852dae1cdc7b6e85c0623ad526beb
+a0ae3ccc855ccb2b3488da03422f492a
@@ -5,30 +5,18 @@ import s3.website.Ruby.rubyRegexMatches
 import scala.concurrent.{ExecutionContextExecutor, Future}
 import scala.util.{Failure, Success, Try}
 import java.io.File
-import org.apache.commons.io.FileUtils._
-import org.apache.commons.codec.digest.DigestUtils._
-import scala.io.Source
-import s3.website.Diff.LocalFileDatabase.resolveDiffAgainstLocalDb
-import s3.website.Diff.UploadBatch
-
-case class Diff(
-  unchanged: Future[Either[ErrorReport, Seq[S3Key]]],
-  uploads: Seq[UploadBatch],
-  persistenceError: Future[Option[ErrorReport]]
-)
 
 object Diff {
 
-  type UploadBatch = Future[Either[ErrorReport, Seq[Upload]]]
+  type FutureUploads = Future[Either[ErrorReport, Seq[Upload]]]
 
   def resolveDiff(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
-                 (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Either[ErrorReport, Diff] =
-    if (LocalFileDatabase.hasRecords) resolveDiffAgainstLocalDb(s3FilesFuture)
-    else resolveDiffAgainstGetBucketResponse(s3FilesFuture)
+                 (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): FutureUploads =
+    resolveDiffAgainstGetBucketResponse(s3FilesFuture)
 
   private def resolveDiffAgainstGetBucketResponse(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
-                                                 (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Either[ErrorReport, Diff] = {
-    val diffAgainstS3 = s3FilesFuture.map { errorOrS3Files =>
+                                                 (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): FutureUploads =
+    s3FilesFuture.map { errorOrS3Files =>
       errorOrS3Files.right.flatMap { s3Files =>
         Try {
           val s3KeyIndex = s3Files.map(_.s3Key).toSet
@@ -37,62 +25,30 @@ object Diff {
           val existsOnS3 = (f: File) => s3KeyIndex contains site.resolveS3Key(f)
           val isChangedOnS3 = (upload: Upload) => !(s3Md5Index contains upload.md5.get)
           val newUploads = siteFiles collect {
-            case file if !existsOnS3(file) => Upload(file, NewFile, reasonForUpload = "the file is missing from S3")
+            case file if !existsOnS3(file) => Upload(file, NewFile)
           }
           val changedUploads = siteFiles collect {
-            case file if existsOnS3(file) => Upload(file, FileUpdate, reasonForUpload = "the S3 bucket has different contents for this file")
+            case file if existsOnS3(file) => Upload(file, FileUpdate)
           } filter isChangedOnS3
-          val unchangedFiles = {
-            val newOrChangedFiles = (changedUploads ++ newUploads).map(_.originalFile).toSet
-            siteFiles.filterNot(f => newOrChangedFiles contains f)
-          }
-          val recordsAndUploads: Seq[Either[DbRecord, Upload]] = unchangedFiles.map {
-            f => Left(DbRecord(f))
-          } ++ (changedUploads ++ newUploads).map {
-            Right(_)
-          }
-          LocalFileDatabase persist recordsAndUploads
-          recordsAndUploads
+          newUploads ++ changedUploads
         } match {
          case Success(ok) => Right(ok)
          case Failure(err) => Left(ErrorReport(err))
        }
      }
    }
-    def collectResult[B](pf: PartialFunction[Either[DbRecord, Upload],B]) =
-      diffAgainstS3.map { errorOrDiffSource =>
-        errorOrDiffSource.right map (_ collect pf)
-      }
-    val unchanged = collectResult {
-      case Left(dbRecord) => dbRecord.s3Key
-    }
-    val uploads: UploadBatch = collectResult {
-      case Right(upload) => upload
-    }
-    Right(Diff(unchanged, uploads :: Nil, persistenceError = Future(None)))
-  }
 
-  def resolveDeletes(diff: Diff, s3Files: Future[Either[ErrorReport, Seq[S3File]]], redirects: Seq[Redirect])
-                    (implicit config: Config, logger: Logger, executor: ExecutionContextExecutor): Future[Either[ErrorReport, Seq[S3Key]]] = {
-    val localKeys = for {
-      errorOrUnchanged <- diff.unchanged
-      errorsOrChanges <- Future.sequence(diff.uploads)
-    } yield
-      errorsOrChanges.foldLeft(errorOrUnchanged: Either[ErrorReport, Seq[S3Key]]) { (memo, errorOrChanges) =>
-        for {
-          mem <- memo.right
-          keysToDelete <- errorOrChanges.right
-        } yield mem ++ keysToDelete.map(_.s3Key)
-      }
+  def resolveDeletes(s3Files: Future[Either[ErrorReport, Seq[S3File]]], redirects: Seq[Redirect])
+                    (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Future[Either[ErrorReport, Seq[S3Key]]] = {
+    val localS3Keys = Files.listSiteFiles.map(site resolveS3Key)
 
-    s3Files zip localKeys map { (s3Files: Either[ErrorReport, Seq[S3File]], errorOrLocalKeys: Either[ErrorReport, Seq[S3Key]]) =>
+    s3Files map { s3Files: Either[ErrorReport, Seq[S3File]] =>
       for {
-        localS3Keys <- errorOrLocalKeys.right
        remoteS3Keys <- s3Files.right.map(_ map (_.s3Key)).right
      } yield {
        val keysToRetain = (localS3Keys ++ (redirects map { _.s3Key })).toSet
        remoteS3Keys filterNot { s3Key =>
-          val ignoreOnServer = config.ignore_on_server.exists(_.fold(
+          val ignoreOnServer = site.config.ignore_on_server.exists(_.fold(
            (ignoreRegex: String) => rubyRegexMatches(s3Key, ignoreRegex),
            (ignoreRegexes: Seq[String]) => ignoreRegexes.exists(rubyRegexMatches(s3Key, _))
          ))
@@ -100,203 +56,6 @@ object Diff {
          (keysToRetain contains s3Key) || ignoreOnServer
        }
      }
-    }.tupled
-  }
-
-  object LocalFileDatabase {
-    def hasRecords(implicit site: Site, logger: Logger) =
-      (for {
-        dbFile <- getOrCreateDbFile
-        databaseIndices <- loadDbFromFile(dbFile)
-      } yield databaseIndices.fullIndex.headOption.isDefined) getOrElse false
-
-    def resolveDiffAgainstLocalDb(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
-                                 (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Either[ErrorReport, Diff] = {
-      val localDiff: Either[ErrorReport, Seq[Either[DbRecord, Upload]]] =
-        (for {
-          dbFile <- getOrCreateDbFile
-          databaseIndices <- loadDbFromFile(dbFile)
-        } yield {
-          val siteFiles = Files.listSiteFiles
-          val recordsOrUploads = siteFiles.foldLeft(Seq(): Seq[Either[DbRecord, Upload]]) { (recordsOrUps, file) =>
-            val truncatedKey = TruncatedDbRecord(file)
-            val fileIsUnchanged = databaseIndices.truncatedIndex contains truncatedKey
-            if (fileIsUnchanged)
-              recordsOrUps :+ Left(databaseIndices.fullIndex find (_.truncated == truncatedKey) get)
-            else {
-              val isUpdate = databaseIndices.s3KeyIndex contains truncatedKey.s3Key
-
-              val uploadType =
-                if (isUpdate) FileUpdate
-                else NewFile
-              recordsOrUps :+ Right(Upload(file, uploadType, reasonForUpload(truncatedKey, databaseIndices, isUpdate)))
-            }
-          }
-          logger.debug(s"Discovered ${siteFiles.length} files on the local site, of which ${recordsOrUploads count (_.isRight)} are new or changed")
-          recordsOrUploads
-        }) match {
-          case Success(ok) => Right(ok)
-          case Failure(err) => Left(ErrorReport(err))
-        }
-
-      localDiff.right map { localDiffResult =>
-        val unchangedAccordingToLocalDiff = localDiffResult collect {
-          case Left(f) => f
-        }
-
-        val uploadsAccordingToLocalDiff = localDiffResult collect {
-          case Right(f) => f
-        }
-
-        val changesMissedByLocalDiff: Future[Either[ErrorReport, Seq[Upload]]] = s3FilesFuture.map { errorOrS3Files =>
-          for (s3Files <- errorOrS3Files.right) yield {
-            val remoteS3Keys = s3Files.map(_.s3Key).toSet
-            val localS3Keys = unchangedAccordingToLocalDiff.map(_.s3Key).toSet
-            val localMd5 = unchangedAccordingToLocalDiff.map(_.uploadFileMd5).toSet
-            def isChangedOnS3(s3File: S3File) = (localS3Keys contains s3File.s3Key) && !(localMd5 contains s3File.md5)
-            val changedOnS3 = s3Files collect {
-              case s3File if isChangedOnS3(s3File) =>
-                Upload(site resolveFile s3File, FileUpdate, reasonForUpload = "someone else has modified the file on the S3 bucket")
-            }
-            val missingFromS3 = localS3Keys collect {
-              case localS3Key if !(remoteS3Keys contains localS3Key) =>
-                Upload(site resolveFile localS3Key, NewFile, reasonForUpload = "someone else has removed the file from the S3 bucket")
-
-            }
-            changedOnS3 ++ missingFromS3
-          }
-        }
-
-        val errorOrDiffAgainstS3 =
-          changesMissedByLocalDiff map { errorOrUploads =>
-            errorOrUploads.right map { uploadsMissedByLocalDiff =>
-              val uploadsS3KeyIndex = uploadsMissedByLocalDiff.map(_.s3Key).toSet
-              val unchangedAccordingToLocalAndS3Diff = unchangedAccordingToLocalDiff.filterNot(uploadsS3KeyIndex contains _.s3Key)
-              (unchangedAccordingToLocalAndS3Diff, uploadsMissedByLocalDiff)
-            }
-          }
-
-        val unchangedFilesFinal = errorOrDiffAgainstS3 map {
-          _ fold (
-            (error: ErrorReport) => Left(error),
-            (syncResult: (Seq[DbRecord], Seq[Upload])) => Right(syncResult._1)
-          )
-        }
-
-        val uploadsAccordingToS3Diff = errorOrDiffAgainstS3.map {
-          _ fold (
-            (error: ErrorReport) => Left(error),
-            (syncResult: (Seq[DbRecord], Seq[Upload])) => Right(syncResult._2)
-          )
-        }
-        val persistenceError: Future[Either[ErrorReport, _]] = for {
-          unchanged <- unchangedFilesFinal
-          uploads <- uploadsAccordingToS3Diff
-        } yield
-          for {
-            records1 <- unchanged.right
-            records2 <- uploads.right
-          } yield
-            persist(records1.map(Left(_)) ++ records2.map(Right(_)) ++ uploadsAccordingToLocalDiff.map(Right(_))) match {
-              case Success(_) => Unit
-              case Failure(err) => ErrorReport(err)
-            }
-        Diff(
-          unchangedFilesFinal map (_.right.map(_ map (_.s3Key))),
-          uploads = Future(Right(uploadsAccordingToLocalDiff)) :: uploadsAccordingToS3Diff :: Nil,
-          persistenceError = persistenceError map (_.left.toOption)
-        )
-      }
    }
-
-    private def reasonForUpload(truncatedKey: TruncatedDbRecord, databaseIndices: DbIndices, isUpdate: Boolean) = {
-      if (isUpdate) {
-        val lengthChanged = !(databaseIndices.fileLenghtIndex contains truncatedKey.fileLength)
-        val mtimeChanged = !(databaseIndices.lastModifiedIndex contains truncatedKey.fileModified)
-        if (mtimeChanged && lengthChanged)
-          "file mtime and length have changed"
-        else if (lengthChanged)
-          "file length has changed"
-        else if (mtimeChanged)
-          "file mtime has changed"
-        else
-          "programmer error: faulty logic in inferring the reason for upload"
-      }
-      else "file is new"
-    }
-
-    private def getOrCreateDbFile(implicit site: Site, logger: Logger) =
-      Try {
-        val dbFile = new File(getTempDirectory, "s3_website_local_db_" + sha256Hex(site.rootDirectory))
-        if (!dbFile.exists()) logger.debug("Creating a new database in " + dbFile.getName)
-        dbFile.createNewFile()
-        dbFile
-      }
-
-    case class DbIndices(
-      s3KeyIndex: Set[S3Key],
-      fileLenghtIndex: Set[Long],
-      lastModifiedIndex: Set[Long],
-      truncatedIndex: Set[TruncatedDbRecord],
-      fullIndex: Set[DbRecord]
-    )
-
-    private def loadDbFromFile(databaseFile: File)(implicit site: Site, logger: Logger): Try[DbIndices] =
-      Try {
-        // record format: "s3Key(file.path)|length(file)|mtime(file)|md5Hex(file.encoded)"
-        val RecordRegex = "(.*?)\\|(\\d+)\\|(\\d+)\\|([a-f0-9]{32})".r
-        val fullIndex = Source
-          .fromFile(databaseFile, "utf-8")
-          .getLines()
-          .toStream
-          .map {
-            case RecordRegex(s3Key, fileLength, fileModified, md5) =>
-              DbRecord(s3Key, fileLength.toLong, fileModified.toLong, md5)
-          }
-          .toSet
-        DbIndices(
-          s3KeyIndex = fullIndex map (_.s3Key),
-          truncatedIndex = fullIndex map (TruncatedDbRecord(_)),
-          fileLenghtIndex = fullIndex map (_.fileLength),
-          lastModifiedIndex = fullIndex map (_.fileModified),
-          fullIndex = fullIndex
-        )
-      }
-
-    def persist(recordsOrUploads: Seq[Either[DbRecord, Upload]])(implicit site: Site, logger: Logger): Try[Seq[Either[DbRecord, Upload]]] =
-      getOrCreateDbFile flatMap { dbFile =>
-        Try {
-          val dbFileContents = recordsOrUploads.map { recordOrUpload =>
-            val record: DbRecord = recordOrUpload fold(
-              record => record,
-              upload => DbRecord(upload.s3Key, upload.originalFile.length, upload.originalFile.lastModified, upload.md5.get)
-            )
-            record.s3Key :: record.fileLength :: record.fileModified :: record.uploadFileMd5 :: Nil mkString "|"
-          } mkString "\n"
-
-          write(dbFile, dbFileContents)
-          recordsOrUploads
-        }
-      }
-  }
-
-  case class TruncatedDbRecord(s3Key: String, fileLength: Long, fileModified: Long)
-
-  object TruncatedDbRecord {
-    def apply(dbRecord: DbRecord): TruncatedDbRecord = TruncatedDbRecord(dbRecord.s3Key, dbRecord.fileLength, dbRecord.fileModified)
-
-    def apply(file: File)(implicit site: Site): TruncatedDbRecord = TruncatedDbRecord(site resolveS3Key file, file.length, file.lastModified)
-  }
-
-  /**
-   * @param uploadFileMd5 if the file is gzipped, this checksum should be calculated on the gzipped file, not the original file
-   */
-  case class DbRecord(s3Key: String, fileLength: Long, fileModified: Long, uploadFileMd5: MD5) {
-    lazy val truncated = TruncatedDbRecord(s3Key, fileLength, fileModified)
-  }
-
-  object DbRecord {
-    def apply(original: File)(implicit site: Site): DbRecord =
-      DbRecord(site resolveS3Key original, original.length, original.lastModified, Upload.md5(original).get)
  }
 }
@@ -86,55 +86,49 @@ object Push {
    val s3FilesFuture = resolveS3Files()
    val redirectReports: PushReports = redirects.map(S3 uploadRedirect _) map (Right(_))
 
-    val errorsOrReports: Either[ErrorReport, PushReports] = for {
-      diff <- resolveDiff(s3FilesFuture).right
+    val pushReports: Future[PushReports] = for {
+      errorOrUploads: Either[ErrorReport, Seq[Upload]] <- resolveDiff(s3FilesFuture)
    } yield {
-      val newOrChangedReports: PushReports = diff.uploads.map { uploadBatch =>
-        uploadBatch.map(_.right.map(_.map(S3 uploadFile _)))
-      }.map (Await.result(_, 1 day)).foldLeft(Seq(): PushReports) { (memo: PushReports, res: Either[ErrorReport, Seq[Future[PushErrorOrSuccess]]]) =>
-        res.fold(
-          error => memo :+ Left(error),
-          (pushResults: Seq[Future[PushErrorOrSuccess]]) => memo ++ (pushResults map (Right(_)))
-        )
-      }
+      val uploadReports: PushReports = errorOrUploads.fold(
+        error => Left(error) :: Nil,
+        uploads => {
+          uploads.map(S3 uploadFile _).map(Right(_))
+        }
+      )
      val deleteReports =
-        Await.result(resolveDeletes(diff, s3FilesFuture, redirects), 1 day).right.map { keysToDelete =>
+        Await.result(resolveDeletes(s3FilesFuture, redirects), 1 day).right.map { keysToDelete =>
          keysToDelete map (S3 delete _)
        }.fold(
          error => Left(error) :: Nil,
          (pushResults: Seq[Future[PushErrorOrSuccess]]) => pushResults map (Right(_))
        )
-      val diffErrorReport: PushReports = Await.result(diff.persistenceError, 1 day).fold(Nil: PushReports)(Left(_) :: Nil)
-      newOrChangedReports ++ deleteReports ++ redirectReports ++ diffErrorReport
+      uploadReports ++ deleteReports ++ redirectReports
    }
-    val errorsOrFinishedPushOps = errorsOrReports.right map awaitForResults
-    val invalidationSucceeded = invalidateCloudFrontItems(errorsOrFinishedPushOps)
+    val finishedPushOps = awaitForResults(Await.result(pushReports, 1 day))
+    val invalidationSucceeded = invalidateCloudFrontItems(finishedPushOps)
 
-    afterPushFinished(errorsOrFinishedPushOps, invalidationSucceeded)
+    afterPushFinished(finishedPushOps, invalidationSucceeded)
  }
 
  def invalidateCloudFrontItems
-    (errorsOrFinishedPushOps: Either[ErrorReport, FinishedPushOperations])
+    (finishedPushOperations: FinishedPushOperations)
    (implicit config: Config, cloudFrontSettings: CloudFrontSetting, ec: ExecutionContextExecutor, logger: Logger, pushMode: PushMode):
  Option[InvalidationSucceeded] =
    config.cloudfront_distribution_id.map { distributionId =>
-      val pushSuccessReports = errorsOrFinishedPushOps.fold(
-        errors => Nil,
-        finishedPushOps =>
-          finishedPushOps.map {
-            ops =>
-              for {
-                failedOrSucceededPushes <- ops.right
-                successfulPush <- failedOrSucceededPushes.right
-              } yield successfulPush
-          }.foldLeft(Seq(): Seq[PushSuccessReport]) {
-            (reports, failOrSucc) =>
-              failOrSucc.fold(
-                _ => reports,
-                (pushSuccessReport: PushSuccessReport) => reports :+ pushSuccessReport
-              )
-          }
-      )
+      val pushSuccessReports =
+        finishedPushOperations.map {
+          ops =>
+            for {
+              failedOrSucceededPushes <- ops.right
+              successfulPush <- failedOrSucceededPushes.right
+            } yield successfulPush
+        }.foldLeft(Seq(): Seq[PushSuccessReport]) {
+          (reports, failOrSucc) =>
+            failOrSucc.fold(
+              _ => reports,
+              (pushSuccessReport: PushSuccessReport) => reports :+ pushSuccessReport
+            )
+        }
      val invalidationResults: Seq[Either[FailedInvalidation, SuccessfulInvalidation]] =
        toInvalidationBatches(pushSuccessReports) map { invalidationBatch =>
          Await.result(
@@ -150,26 +144,25 @@ object Push {
 
  type InvalidationSucceeded = Boolean
 
-  def afterPushFinished(errorsOrFinishedUploads: Either[ErrorReport, FinishedPushOperations], invalidationSucceeded: Option[Boolean])
+  def afterPushFinished(finishedPushOps: FinishedPushOperations, invalidationSucceeded: Option[Boolean])
                        (implicit config: Config, logger: Logger, pushMode: PushMode): ExitCode = {
-    val pushCountsOption = errorsOrFinishedUploads.right.map(resolvePushCounts(_)).right.toOption
-    pushCountsOption.map(pushCountsToString).foreach(pushCounts => logger.info(s"Summary: $pushCounts"))
-    errorsOrFinishedUploads.left foreach (err => logger.fail(s"Encountered an error: ${err.reportMessage}"))
-    val exitCode = errorsOrFinishedUploads.fold(
-      _ => 1,
-      finishedUploads => finishedUploads.foldLeft(0) { (memo, finishedUpload) =>
-        memo + finishedUpload.fold(
-          (error: ErrorReport) => 1,
-          (failedOrSucceededUpload: Either[PushFailureReport, PushSuccessReport]) =>
-            if (failedOrSucceededUpload.isLeft) 1 else 0
-        )
-      } min 1
-    ) max invalidationSucceeded.fold(0)(allInvalidationsSucceeded =>
+    val pushCounts = resolvePushCounts(finishedPushOps)
+    logger.info(s"Summary: ${pushCountsToString(pushCounts)}")
+    val pushOpExitCode = finishedPushOps.foldLeft(0) { (memo, finishedUpload) =>
+      memo + finishedUpload.fold(
+        (error: ErrorReport) => 1,
+        (failedOrSucceededUpload: Either[PushFailureReport, PushSuccessReport]) =>
+          if (failedOrSucceededUpload.isLeft) 1 else 0
+      )
+    } min 1
+    val cloudFrontInvalidationExitCode = invalidationSucceeded.fold(0)(allInvalidationsSucceeded =>
      if (allInvalidationsSucceeded) 0 else 1
    )
 
+    val exitCode = (pushOpExitCode + cloudFrontInvalidationExitCode) min 1
+
    exitCode match {
-      case 0 if !pushMode.dryRun && pushCountsOption.exists(_.thereWasSomethingToPush) =>
+      case 0 if !pushMode.dryRun && pushCounts.thereWasSomethingToPush =>
        logger.info(s"Successfully pushed the website to http://${config.s3_bucket}.${config.s3_endpoint.s3WebsiteHostname}")
      case 1 =>
        logger.fail(s"Failed to push the website to http://${config.s3_bucket}.${config.s3_endpoint.s3WebsiteHostname}")
@@ -35,7 +35,7 @@ object S3 {
      val report = SuccessfulUpload(
        source.fold(_.s3Key, _.s3Key),
        source.fold(
-          upload => Left(SuccessfulNewOrCreatedDetails(upload.uploadType, upload.uploadFile.get.length(), uploadDuration, upload.reasonForUpload)),
+          upload => Left(SuccessfulNewOrCreatedDetails(upload.uploadType, upload.uploadFile.get.length(), uploadDuration)),
          redirect => Right(SuccessfulRedirectDetails(redirect.uploadType, redirect.redirectTarget))
        ),
        putObjectRequest
@@ -146,7 +146,7 @@ object S3 {
  }
 
  case class SuccessfulRedirectDetails(uploadType: UploadType, redirectTarget: String)
-  case class SuccessfulNewOrCreatedDetails(uploadType: UploadType, uploadSize: Long, uploadDuration: Option[Long], reasonForUpload: String)
+  case class SuccessfulNewOrCreatedDetails(uploadType: UploadType, uploadSize: Long, uploadDuration: Option[Long])
 
  case class SuccessfulUpload(s3Key: S3Key,
                              details: Either[SuccessfulNewOrCreatedDetails, SuccessfulRedirectDetails],
@@ -167,20 +167,12 @@ object S3 {
        md.getContentEncoding ::
        putObjectRequest.getStorageClass ::
        Nil map (Option(_)) // AWS SDK may return nulls
-      ) :+ uploadSizeForHumans :+ uploadSpeedForHumans :+ uploadReason
+      ) :+ uploadSizeForHumans :+ uploadSpeedForHumans
      detailFragments.collect {
        case Some(detailFragment) => detailFragment
      }.mkString(" | ")
    }
 
-    lazy val uploadReason =
-      details
-        .fold(uploadDetails => Some(uploadDetails.reasonForUpload), _ => None)
-        .collect {
-          case reasonForUpload if logger.verboseOutput =>
-            s"upload reason: $reasonForUpload"
-        }
-
    lazy val uploadSize = details.fold(
      newOrCreatedDetails => Some(newOrCreatedDetails.uploadSize),
      redirectDetails => None
@@ -50,7 +50,7 @@ case object RedirectFile extends UploadType {
  val pushAction = Redirected
 }
 
-case class Upload(originalFile: File, uploadType: UploadType, reasonForUpload: String)(implicit site: Site) {
+case class Upload(originalFile: File, uploadType: UploadType)(implicit site: Site) {
  lazy val s3Key = site.resolveS3Key(originalFile)
 
  lazy val encodingOnS3 = Encoding.encodingOnS3(s3Key)
@@ -578,65 +578,6 @@ class S3WebsiteSpec extends Specification {
    }
  }
 
-  // Because of the local database, the first and second run are implemented differently.
-  "pushing files for the second time" should {
-    "delete the S3 objects that no longer exist on the local site" in new AllInSameDirectory with EmptySite with MockAWS with DefaultRunMode {
-      push
-      setS3Files(S3File("obsolete.txt", ""))
-      push
-      sentDelete must equalTo("obsolete.txt")
-    }
-
-    "delete the local db record for the file if the user deletes the file" in new AllInSameDirectory with EmptySite with MockAWS with DefaultRunMode {
-      setLocalFileWithContent(("file.txt", "first run"))
-      push
-      setS3Files(S3File("file.txt", md5Hex("first run")))
-      FileUtils.deleteQuietly(new File(siteDirectory, "file.txt"))
-      push
-      FileUtils.readLines(localDatabase) must beEmpty
-    }
-
-    "push new files to the bucket" in new AllInSameDirectory with EmptySite with MockAWS with DefaultRunMode {
-      push
-      setLocalFile("newfile.txt")
-      push
-      sentPutObjectRequest.getKey must equalTo("newfile.txt")
-    }
-
-    "push locally changed files" in new AllInSameDirectory with EmptySite with MockAWS with DefaultRunMode {
-      setLocalFileWithContent(("file.txt", "first run"))
-      push
-      setLocalFileWithContent(("file.txt", "second run"))
-      push
-      sentPutObjectRequests.length must equalTo(2)
-    }
-
-    "push locally changed files only once" in new AllInSameDirectory with EmptySite with MockAWS with DefaultRunMode {
-      setLocalFileWithContent(("file.txt", "first run"))
-      push
-      setS3Files(S3File("file.txt", md5Hex("first run")))
-      setLocalFileWithContent(("file.txt", "second run"))
-      push
-      sentPutObjectRequests.length must equalTo(2)
-    }
-
-    "detect files that someone else has changed on the S3 bucket" in new AllInSameDirectory with EmptySite with MockAWS with DefaultRunMode {
-      setLocalFileWithContent(("file.txt", "first run"))
-      push
-      setOutdatedS3Keys("file.txt")
-      push
-      sentPutObjectRequests.length must equalTo(2)
-    }
-
-    "push locally unchanged files that are missing from S3" in new AllInSameDirectory with EmptySite with MockAWS with DefaultRunMode {
-      setLocalFileWithContent(("file.txt", "first run"))
-      push
-      removeAllFilesFromS3()
-      push
-      sentPutObjectRequests.length must equalTo(2) // Even though we use the local db, we should notice that someone else has deleted file.txt
-    }
-  }
-
  "Jekyll site" should {
    "be detected automatically" in new JekyllSite with EmptySite with MockAWS with DefaultRunMode {
      setLocalFile("index.html")
@@ -806,14 +747,13 @@ class S3WebsiteSpec extends Specification {
    implicit final val workingDirectory: File = new File(FileUtils.getTempDirectory, "s3_website_dir" + Random.nextLong())
    val siteDirectory: File // Represents the --site=X option
    val configDirectory: File = workingDirectory // Represents the --config-dir=X option
-    lazy val localDatabase: File = new File(FileUtils.getTempDirectory, "s3_website_local_db_" + sha256Hex(siteDirectory.getPath))
 
    def before {
      workingDirectory :: siteDirectory :: configDirectory :: Nil foreach forceMkdir
    }
 
    def after {
-      (workingDirectory :: siteDirectory :: configDirectory :: localDatabase :: Nil) foreach { dir =>
+      (workingDirectory :: siteDirectory :: configDirectory :: Nil) foreach { dir =>
        if (dir.exists) forceDelete(dir)
      }
    }
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: s3_website
 version: !ruby/object:Gem::Version
-  version: 2.1.6
+  version: 2.1.7
 platform: ruby
 authors:
 - Lauri Lehmijoki