s3_website_monadic 0.0.31 → 0.0.32
- checksums.yaml +4 -4
- data/changelog.md +2 -0
- data/lib/s3_website/version.rb +1 -1
- data/src/main/scala/s3/website/CloudFront.scala +8 -15
- data/src/main/scala/s3/website/Diff.scala +264 -25
- data/src/main/scala/s3/website/Logger.scala +61 -0
- data/src/main/scala/s3/website/Push.scala +69 -77
- data/src/main/scala/s3/website/S3.scala +81 -76
- data/src/main/scala/s3/website/model/Config.scala +8 -8
- data/src/main/scala/s3/website/model/S3Endpoint.scala +4 -2
- data/src/main/scala/s3/website/model/Site.scala +6 -3
- data/src/main/scala/s3/website/model/push.scala +72 -140
- data/src/main/scala/s3/website/model/ssg.scala +1 -1
- data/src/main/scala/s3/website/package.scala +23 -1
- data/src/test/scala/s3/website/S3WebsiteSpec.scala +64 -25
- metadata +3 -4
- data/src/main/scala/s3/website/Utils.scala +0 -108
- data/src/main/scala/s3/website/model/errors.scala +0 -9
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 87c870c1dae2ea36e452f7df0b0f7565c5cec616
+  data.tar.gz: 4e8324a2207a37a3f991490d187c27d35139c04b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5fdcff5e2fad65a9a895bc7669db2690f70c9dd4e7ab54be0962a99a279dda4f88542373eb476e7a6750c2580554888f329aa240ff9d346f26648128adc2a38c
+  data.tar.gz: 0221384872bd4914d9585f5f63074f0b9286600491abdce62f411c5e545d42bd41a484bdf04c686be79350fcb5d71e28d865004f371ef0e32f8caafc04660800
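For reference, a minimal verification sketch (not part of the gem; it assumes Apache commons-codec on the classpath, which the gem's Scala sources already use, and that the extracted data.tar.gz sits in the working directory) showing how the new data.tar.gz digest recorded above could be recomputed and compared:

// Hypothetical verification sketch, not gem code.
import java.io.FileInputStream
import org.apache.commons.codec.digest.DigestUtils

object VerifyChecksum extends App {
  // Expected SHA-512 of data.tar.gz, taken from the checksums.yaml diff above.
  val expected = "0221384872bd4914d9585f5f63074f0b9286600491abdce62f411c5e545d42bd41a484bdf04c686be79350fcb5d71e28d865004f371ef0e32f8caafc04660800"
  val in = new FileInputStream("data.tar.gz") // assumed location of the archive
  try {
    val actual = DigestUtils.sha512Hex(in)
    println(if (actual == expected) "checksum OK" else s"checksum mismatch: $actual")
  } finally in.close()
}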
data/changelog.md
CHANGED
data/lib/s3_website/version.rb
CHANGED
data/src/main/scala/s3/website/CloudFront.scala
CHANGED
@@ -1,22 +1,18 @@
 package s3.website

-import s3.website.model.{
+import s3.website.model.{FileUpdate, Config}
 import com.amazonaws.services.cloudfront.{AmazonCloudFrontClient, AmazonCloudFront}
-import s3.website.CloudFront.{CloudFrontSetting, SuccessfulInvalidation, FailedInvalidation}
 import com.amazonaws.services.cloudfront.model.{TooManyInvalidationsInProgressException, Paths, InvalidationBatch, CreateInvalidationRequest}
 import scala.collection.JavaConversions._
 import scala.concurrent.duration._
 import s3.website.S3.{SuccessfulDelete, PushSuccessReport, SuccessfulUpload}
 import com.amazonaws.auth.BasicAWSCredentials
 import java.net.URI
-import Utils._
 import scala.concurrent.{ExecutionContextExecutor, Future}

-
-  val cloudFront = cloudFrontSettings.cfClient(config)
-
+object CloudFront {
   def invalidate(invalidationBatch: InvalidationBatch, distributionId: String, attempt: Attempt = 1)
-                (implicit ec: ExecutionContextExecutor): InvalidationResult =
+                (implicit ec: ExecutionContextExecutor, cloudFrontSettings: CloudFrontSetting, config: Config, logger: Logger, pushMode: PushMode): InvalidationResult =
     Future {
       if (!pushMode.dryRun) cloudFront createInvalidation new CreateInvalidationRequest(distributionId, invalidationBatch)
       val result = SuccessfulInvalidation(invalidationBatch.getPaths.getItems.size())
@@ -29,7 +25,8 @@ class CloudFront(implicit cloudFrontSettings: CloudFrontSetting, config: Config,
     ))

   def tooManyInvalidationsRetry(invalidationBatch: InvalidationBatch, distributionId: String, attempt: Attempt)
-
+                               (implicit ec: ExecutionContextExecutor, logger: Logger, cloudFrontSettings: CloudFrontSetting, config: Config, pushMode: PushMode):
+    PartialFunction[Throwable, InvalidationResult] = {
     case e: TooManyInvalidationsInProgressException =>
       val duration: Duration = Duration(
         (fibs drop attempt).head min 15, /* CloudFront invalidations complete within 15 minutes */
@@ -52,10 +49,9 @@ class CloudFront(implicit cloudFrontSettings: CloudFrontSetting, config: Config,
     basicInfo
   }

-
-}
+  def cloudFront(implicit config: Config, cloudFrontSettings: CloudFrontSetting) = cloudFrontSettings.cfClient(config)

-
+  type InvalidationResult = Future[Either[FailedInvalidation, SuccessfulInvalidation]]

   type CloudFrontClientProvider = (Config) => AmazonCloudFront

@@ -121,10 +117,7 @@ object CloudFront {


   def needsInvalidation: PartialFunction[PushSuccessReport, Boolean] = {
-    case SuccessfulUpload(
-      case Update => true
-      case _ => false
-    }
+    case SuccessfulUpload(localFile, _, _) => localFile.left.exists(_.uploadType == FileUpdate)
     case SuccessfulDelete(_) => true
     case _ => false
   }
data/src/main/scala/s3/website/Diff.scala
CHANGED
@@ -2,38 +2,277 @@ package s3.website

 import s3.website.model._
 import s3.website.Ruby.rubyRegexMatches
-import
+import scala.concurrent.{ExecutionContextExecutor, Future}
+import scala.util.{Failure, Success, Try}
+import java.io.File
+import org.apache.commons.io.FileUtils._
+import org.apache.commons.codec.digest.DigestUtils._
+import scala.io.Source
+import s3.website.Diff.LocalFileDatabase.resolveDiffAgainstLocalDb
+import s3.website.Diff.UploadBatch
+
+case class Diff(
+  unchanged: Future[Either[ErrorReport, Seq[S3Key]]],
+  uploads: Seq[UploadBatch],
+  persistenceError: Future[Option[ErrorReport]]
+)

 object Diff {

-
-
-
-
-
-
-
-
-
-
+  type UploadBatch = Future[Either[ErrorReport, Seq[LocalFileFromDisk]]]
+
+  def resolveDiff(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
+                 (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Either[ErrorReport, Diff] =
+    if (LocalFileDatabase.hasRecords) resolveDiffAgainstLocalDb(s3FilesFuture)
+    else resolveDiffAgainstGetBucketResponse(s3FilesFuture)
+
+  private def resolveDiffAgainstGetBucketResponse(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
+                                                 (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Either[ErrorReport, Diff] = {
+    val diffSrc = s3FilesFuture.map { errorOrS3Files =>
+      errorOrS3Files.right.flatMap { s3Files =>
+        Try {
+          val s3KeyIndex = s3Files.map(_.s3Key).toSet
+          val s3Md5Index = s3Files.map(_.md5).toSet
+          val siteFiles = Files.listSiteFiles
+          val nameExistsOnS3 = (f: File) => s3KeyIndex contains site.resolveS3Key(f)
+          val newFiles = siteFiles
+            .filterNot(nameExistsOnS3)
+            .map { f => LocalFileFromDisk(f, uploadType = NewFile)}
+          val changedFiles =
+            siteFiles
+              .filter(nameExistsOnS3)
+              .map(f => LocalFileFromDisk(f, uploadType = FileUpdate))
+              .filterNot(localFile => s3Md5Index contains localFile.md5)
+          val unchangedFiles = {
+            val newOrChangedFiles = (changedFiles ++ newFiles).map(_.originalFile).toSet
+            siteFiles.filterNot(f => newOrChangedFiles contains f)
+          }
+          val allFiles: Seq[Either[DbRecord, LocalFileFromDisk]] = unchangedFiles.map {
+            f => Left(DbRecord(f))
+          } ++ (changedFiles ++ newFiles).map {
+            Right(_)
+          }
+          LocalFileDatabase persist allFiles
+          allFiles
+        } match {
+          case Success(ok) => Right(ok)
+          case Failure(err) => Left(ErrorReport(err))
+        }
+      }
     }
+    def collectResult[B](pf: PartialFunction[Either[DbRecord, LocalFileFromDisk],B]) =
+      diffSrc.map { errorOrDiffSource =>
+        errorOrDiffSource.right map (_ collect pf)
+      }
+    val unchanged = collectResult {
+      case Left(dbRecord) => dbRecord.s3Key
+    }
+    val uploads: UploadBatch = collectResult {
+      case Right(localFile) => localFile
+    }
+    Right(Diff(unchanged, uploads :: Nil, persistenceError = Future(None)))
   }

-  def
-
-  val
-
-    .
-
-    .
-
-
+  def resolveDeletes(diff: Diff, s3Files: Future[Either[ErrorReport, Seq[S3File]]], redirects: Seq[Redirect])
+                    (implicit config: Config, logger: Logger, executor: ExecutionContextExecutor): Future[Either[ErrorReport, Seq[S3Key]]] = {
+    val localKeys = for {
+      errorOrUnchanged <- diff.unchanged
+      errorsOrChanges <- Future.sequence(diff.uploads)
+    } yield {
+      errorsOrChanges.foldLeft(errorOrUnchanged: Either[ErrorReport, Seq[S3Key]]) { (memo, errorOrChanges) =>
+        for {
+          mem <- memo.right
+          keysToDelete <- errorOrChanges.right
+        } yield {
+          mem ++ keysToDelete.map(_.s3Key)
+        }
+      }
     }
+    s3Files zip localKeys map { (s3Files: Either[ErrorReport, Seq[S3File]], errorOrLocalKeys: Either[ErrorReport, Seq[S3Key]]) =>
+      for {
+        localS3Keys <- errorOrLocalKeys.right
+        remoteS3Keys <- s3Files.right.map(_ map (_.s3Key)).right
+      } yield {
+        val keysToRetain = (localS3Keys ++ (redirects map { _.s3Key })).toSet
+        remoteS3Keys filterNot { s3Key =>
+          val ignoreOnServer = config.ignore_on_server.exists(_.fold(
+            (ignoreRegex: String) => rubyRegexMatches(s3Key, ignoreRegex),
+            (ignoreRegexes: Seq[String]) => ignoreRegexes.exists(rubyRegexMatches(s3Key, _))
+          ))
+          if (ignoreOnServer) logger.debug(s"Ignoring $s3Key on server")
+          (keysToRetain contains s3Key) || ignoreOnServer
+        }
+      }
+    }.tupled
   }

-
+  object LocalFileDatabase {
+    def hasRecords(implicit site: Site, logger: Logger) =
+      (for {
+        dbFile <- getOrCreateDbFile
+        databaseIndices <- loadDbFromFile(dbFile)
+      } yield databaseIndices.fullIndex.headOption.isDefined) getOrElse false
+
+    def resolveDiffAgainstLocalDb(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
+                                 (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Either[ErrorReport, Diff] = {
+      val localDiff: Either[ErrorReport, Seq[Either[DbRecord, LocalFileFromDisk]]] =
+        (for {
+          dbFile <- getOrCreateDbFile
+          databaseIndices <- loadDbFromFile(dbFile)
+        } yield {
+          val siteFiles = Files.listSiteFiles
+          val recordsOrChangedFiles = siteFiles.foldLeft(Seq(): Seq[Either[DbRecord, LocalFileFromDisk]]) { (localFiles, file) =>
+            val truncatedKey = TruncatedDbRecord(file)
+            val fileIsUnchanged = databaseIndices.truncatedIndex contains truncatedKey
+            if (fileIsUnchanged)
+              localFiles :+ Left(databaseIndices.fullIndex find (_.truncated == truncatedKey) get)
+            else {
+              val uploadType =
+                if (databaseIndices.s3KeyIndex contains truncatedKey.s3Key) FileUpdate
+                else NewFile
+              localFiles :+ Right(LocalFileFromDisk(file, uploadType))
+            }
+          }
+          logger.debug(s"Discovered ${siteFiles.length} files on the local site, of which ${recordsOrChangedFiles count (_.isRight)} are new or changed")
+          recordsOrChangedFiles
+        }) match {
+          case Success(ok) => Right(ok)
+          case Failure(err) => Left(ErrorReport(err))
+        }

-
-
-
-    }
+      localDiff.right map { localDiffResult =>
+        val unchangedAccordingToLocalDiff = localDiffResult collect {
+          case Left(f) => f
+        }
+
+        val uploadsResolvedByLocalDiff = localDiffResult collect {
+          case Right(f) => f
+        }
+
+        val changesMissedByLocalDiff: Future[Either[ErrorReport, Seq[LocalFileFromDisk]]] = s3FilesFuture.map { errorOrS3Files =>
+          for (s3Files <- errorOrS3Files.right) yield {
+            val localS3Keys = unchangedAccordingToLocalDiff.map(_.s3Key).toSet
+            val localMd5 = unchangedAccordingToLocalDiff.map(_.uploadFileMd5).toSet
+            val changedOnS3 = s3Files.filter { s3File =>
+              (localS3Keys contains s3File.s3Key) && !(localMd5 contains s3File.md5)
+            }
+            logger.debug(s"Detected ${changedOnS3.length} object(s) that have changed on S3 but not on the local site")
+            changedOnS3 map { s3File =>
+              LocalFileFromDisk(site resolveFile s3File, FileUpdate)
+            }
+          }
+        }
+
+        val errorOrDiffAgainstS3 =
+          changesMissedByLocalDiff map { errorOrUploads =>
+            errorOrUploads.right map { uploadsMissedByLocalDiff =>
+              val uploadsS3KeyIndex = uploadsMissedByLocalDiff.map(_.s3Key).toSet
+              val unchangedAccordingToLocalAndS3Diff = unchangedAccordingToLocalDiff.filterNot(uploadsS3KeyIndex contains _.s3Key)
+              (unchangedAccordingToLocalAndS3Diff, uploadsMissedByLocalDiff)
+            }
+          }
+
+        val unchangedFilesFinal = errorOrDiffAgainstS3 map {
+          _ fold (
+            (error: ErrorReport) => Left(error),
+            (syncResult: (Seq[DbRecord], Seq[LocalFileFromDisk])) => Right(syncResult._1)
+          )
+        }
+
+        val changedAccordingToS3Diff = errorOrDiffAgainstS3.map {
+          _ fold (
+            (error: ErrorReport) => Left(error),
+            (syncResult: (Seq[DbRecord], Seq[LocalFileFromDisk])) => Right(syncResult._2)
+          )
+        }
+        val persistenceError: Future[Either[ErrorReport, _]] = for {
+          unchanged <- unchangedFilesFinal
+          changedAccordingToS3 <- changedAccordingToS3Diff
+        } yield
+          for {
+            records1 <- unchanged.right
+            records2 <- changedAccordingToS3.right
+          } yield
+            persist(records1.map(Left(_)) ++ records2.map(Right(_)) ++ uploadsResolvedByLocalDiff.map(Right(_))) match {
+              case Success(_) => Unit
+              case Failure(err) => ErrorReport(err)
+            }
+        Diff(
+          unchangedFilesFinal map (_.right.map(_ map (_.s3Key))),
+          uploads = Future(Right(uploadsResolvedByLocalDiff)) :: changedAccordingToS3Diff :: Nil,
+          persistenceError = persistenceError map (_.left.toOption)
+        )
+      }
+    }
+
+    private def getOrCreateDbFile(implicit site: Site, logger: Logger) =
+      Try {
+        val dbFile = new File(getTempDirectory, "s3_website_local_db_" + sha256Hex(site.rootDirectory))
+        if (!dbFile.exists()) logger.debug("Creating a new database in " + dbFile.getName)
+        dbFile.createNewFile()
+        dbFile
+      }
+
+    case class DbIndices(
+      s3KeyIndex: Set[S3Key],
+      truncatedIndex: Set[TruncatedDbRecord],
+      fullIndex: Set[DbRecord]
+    )
+
+    private def loadDbFromFile(databaseFile: File)(implicit site: Site, logger: Logger): Try[DbIndices] =
+      Try {
+        // record format: "s3Key(file.path)|length(file)|mtime(file)|md5Hex(file.encoded)"
+        val RecordRegex = "(.*?)\\|(\\d+)\\|(\\d+)\\|([a-f0-9]{32})".r
+        val fullIndex = Source
+          .fromFile(databaseFile, "utf-8")
+          .getLines()
+          .toStream
+          .map {
+            case RecordRegex(s3Key, fileLength, fileModified, md5) =>
+              DbRecord(s3Key, fileLength.toLong, fileModified.toLong, md5)
+          }
+          .toSet
+        DbIndices(
+          s3KeyIndex = fullIndex map (_.s3Key),
+          truncatedIndex = fullIndex map (TruncatedDbRecord(_)),
+          fullIndex
+        )
+      }
+
+    def persist(recordsOrChangedFiles: Seq[Either[DbRecord, LocalFileFromDisk]])(implicit site: Site, logger: Logger): Try[Seq[Either[DbRecord, LocalFileFromDisk]]] =
+      getOrCreateDbFile flatMap { dbFile =>
+        Try {
+          val dbFileContents = recordsOrChangedFiles.map { recordOrChangedFile =>
+            val record: DbRecord = recordOrChangedFile fold(
+              record => record,
+              changedFile => DbRecord(changedFile.s3Key, changedFile.originalFile.length, changedFile.originalFile.lastModified, changedFile.md5)
+            )
+            record.s3Key :: record.fileLength :: record.fileModified :: record.uploadFileMd5 :: Nil mkString "|"
+          } mkString "\n"
+
+          write(dbFile, dbFileContents)
+          recordsOrChangedFiles
+        }
+      }
+  }
+
+  case class TruncatedDbRecord(s3Key: String, fileLength: Long, fileModified: Long)
+
+  object TruncatedDbRecord {
+    def apply(dbRecord: DbRecord): TruncatedDbRecord = TruncatedDbRecord(dbRecord.s3Key, dbRecord.fileLength, dbRecord.fileModified)
+
+    def apply(file: File)(implicit site: Site): TruncatedDbRecord = TruncatedDbRecord(site resolveS3Key file, file.length, file.lastModified)
+  }
+
+  /**
+   * @param uploadFileMd5 if the file is gzipped, this checksum should be calculated on the gzipped file, not the original file
+   */
+  case class DbRecord(s3Key: String, fileLength: Long, fileModified: Long, uploadFileMd5: MD5) {
+    lazy val truncated = TruncatedDbRecord(s3Key, fileLength, fileModified)
+  }
+
+  object DbRecord {
+    def apply(original: File)(implicit site: Site): DbRecord =
+      DbRecord(site resolveS3Key original, original.length, original.lastModified, LocalFileFromDisk.md5(original))
+  }
+}
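The persist and loadDbFromFile methods above agree on a one-record-per-line, pipe-separated format for the local file database. A standalone sketch (the Record class and sample values are hypothetical, not gem code) of how one such line round-trips through the same regex:

// Standalone sketch: round-trips one line of the "s3Key|fileLength|fileModified|md5" format.
object DbRecordFormatDemo extends App {
  case class Record(s3Key: String, fileLength: Long, fileModified: Long, md5: String)

  // Same pattern as the RecordRegex in Diff.scala above.
  val RecordRegex = "(.*?)\\|(\\d+)\\|(\\d+)\\|([a-f0-9]{32})".r

  // Hypothetical sample record; the MD5 is just an example 32-char hex digest.
  val record = Record("css/site.css", 1024L, 1398000000000L, "9e107d9d372bb6826bd81d3542a419d6")
  val line = record.s3Key :: record.fileLength :: record.fileModified :: record.md5 :: Nil mkString "|"

  line match {
    case RecordRegex(s3Key, len, mtime, md5) =>
      println(Record(s3Key, len.toLong, mtime.toLong, md5) == record) // prints "true"
    case other =>
      println(s"unparseable record: $other")
  }
}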
data/src/main/scala/s3/website/Logger.scala
ADDED
@@ -0,0 +1,61 @@
+package s3.website
+
+class Logger(val verboseOutput: Boolean, logMessage: (String) => Unit = println) {
+  def debug(msg: String) = if (verboseOutput) log(Debug, msg)
+  def info(msg: String) = log(Info, msg)
+  def fail(msg: String) = log(Failure, msg)
+
+  def info(report: SuccessReport) = log(Success, report.reportMessage)
+  def info(report: FailureReport) = fail(report.reportMessage)
+
+  def pending(msg: String) = log(Wait, msg)
+
+  private def log(logType: LogType, msgRaw: String) {
+    val msg = msgRaw.replaceAll("\\n", "\n ") // Indent new lines, so that they arrange nicely with other log lines
+    logMessage(s"[$logType] $msg")
+  }
+
+  sealed trait LogType {
+    val prefix: String
+    override def toString = prefix
+  }
+  case object Debug extends LogType {
+    val prefix = "debg".cyan
+  }
+  case object Info extends LogType {
+    val prefix = "info".blue
+  }
+  case object Success extends LogType {
+    val prefix = "succ".green
+  }
+  case object Failure extends LogType {
+    val prefix = "fail".red
+  }
+  case object Wait extends LogType {
+    val prefix = "wait".yellow
+  }
+
+  /**
+   * Idea copied from https://github.com/ktoso/scala-rainbow.
+   */
+  implicit class RainbowString(val s: String) {
+    import Console._
+
+    /** Colorize the given string foreground to ANSI black */
+    def black = BLACK + s + RESET
+    /** Colorize the given string foreground to ANSI red */
+    def red = RED + s + RESET
+    /** Colorize the given string foreground to ANSI red */
+    def green = GREEN + s + RESET
+    /** Colorize the given string foreground to ANSI red */
+    def yellow = YELLOW + s + RESET
+    /** Colorize the given string foreground to ANSI red */
+    def blue = BLUE + s + RESET
+    /** Colorize the given string foreground to ANSI red */
+    def magenta = MAGENTA + s + RESET
+    /** Colorize the given string foreground to ANSI red */
+    def cyan = CYAN + s + RESET
+    /** Colorize the given string foreground to ANSI red */
+    def white = WHITE + s + RESET
+  }
+}
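A small usage sketch for the new Logger (the LoggerDemo object and its messages are invented for illustration): with verboseOutput = false the debug call is suppressed, and every other message is handed to logMessage with a colored four-letter prefix. The logMessage parameter defaults to println; here it is overridden to collect output for inspection.

// Hypothetical usage sketch, not gem code.
object LoggerDemo extends App {
  val lines = collection.mutable.Buffer.empty[String]
  val logger = new Logger(verboseOutput = false, logMessage = msg => lines += msg)

  logger.debug("not shown, because verboseOutput is false")
  logger.info("push starting")
  logger.fail("upload of css/site.css failed")
  logger.pending("waiting for CloudFront invalidation")

  lines foreach println // e.g. "[info] push starting", with the prefix wrapped in ANSI color codes
}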