s3_website 2.0.1 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +6 -4
- data/changelog.md +8 -0
- data/lib/s3_website/version.rb +1 -1
- data/src/main/scala/s3/website/Diff.scala +61 -39
- data/src/main/scala/s3/website/S3.scala +23 -15
- data/src/main/scala/s3/website/model/push.scala +5 -5
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e9175154b8b9b1982291616351c8540b0dee659c
+  data.tar.gz: cf87ecd7a6f989c21c98699d9ce6b43fdf025e6a
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5fce5e66cd6fe88902ec1d01441387e8d4bf725420face8c6e2ac8aaeaa226613b19fae7d476b3afa95807e611cb45492e565d385742599f4399cea8e9c8497f
+  data.tar.gz: 105be5233f7d9dc11f704f07e3e0878059751e3e1af3162fe86e51e5fa34a26d5b94437264ff55ddbc9147a0713ae11e990ae6e7f637a8965f222e6e16f90b70
data/README.md
CHANGED
@@ -25,7 +25,7 @@ and [Java](http://java.com) to run.
 Here's how you can get started:
 
 * Create API credentials that have sufficient permissions to S3. More info
-  [here](
+  [here](additional-docs/setting-up-aws-credentials.md).
 * Go to your website directory
 * Run `s3_website cfg create`. This generates a configuration file called `s3_website.yml`.
 * Put your AWS credentials and the S3 bucket name into the file
@@ -85,7 +85,6 @@ syntax information.
 * Let the power users benefit from advanced S3 website features such as
   redirects, Cache-Control headers and gzip support
 * Be as fast as possible. Do in parallel all that can be done in parallel.
-* Maintain 90% backward compatibility with the jekyll-s3 gem
 
 `s3_website` attempts to be a command-line interface tool that is easy to
 understand and use. For example, `s3_website --help` should print you all the
@@ -343,10 +342,13 @@ operation would cause to your live website.
 Please read the [release note](/changelog.md#200) on version 2. It contains
 information on backward incompatible changes.
 
+You can find the v1 branch
+[here](https://github.com/laurilehmijoki/s3_website/tree/1.x). It's in
+maintenance mode. This means that v1 will see only critical bugfix releases.
+
 ## Example configurations
 
-See
-<https://github.com/laurilehmijoki/s3_website/blob/master/additional-docs/example-configurations.md>.
+See [example-configurations](additional-docs/example-configurations.md).
 
 ## On security
 
data/changelog.md
CHANGED
data/lib/s3_website/version.rb
CHANGED
data/src/main/scala/s3/website/Diff.scala
CHANGED
@@ -19,7 +19,7 @@ case class Diff(
 
 object Diff {
 
-  type UploadBatch = Future[Either[ErrorReport, Seq[
+  type UploadBatch = Future[Either[ErrorReport, Seq[Upload]]]
 
   def resolveDiff(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
                  (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Either[ErrorReport, Diff] =
@@ -35,31 +35,31 @@ object Diff {
         val s3Md5Index = s3Files.map(_.md5).toSet
         val siteFiles = Files.listSiteFiles
         val existsOnS3 = (f: File) => s3KeyIndex contains site.resolveS3Key(f)
-        val isChangedOnS3 = (
-        val
-          case file if !existsOnS3(file) =>
+        val isChangedOnS3 = (upload: Upload) => !(s3Md5Index contains upload.md5.get)
+        val newUploads = siteFiles collect {
+          case file if !existsOnS3(file) => Upload(file, NewFile, reasonForUpload = "the file is missing from S3")
         }
-        val
-          case file if existsOnS3(file) =>
+        val changedUploads = siteFiles collect {
+          case file if existsOnS3(file) => Upload(file, FileUpdate, reasonForUpload = "the S3 bucket has different contents for this file")
         } filter isChangedOnS3
         val unchangedFiles = {
-          val newOrChangedFiles = (
+          val newOrChangedFiles = (changedUploads ++ newUploads).map(_.originalFile).toSet
           siteFiles.filterNot(f => newOrChangedFiles contains f)
         }
-        val
+        val recordsAndUploads: Seq[Either[DbRecord, Upload]] = unchangedFiles.map {
          f => Left(DbRecord(f))
-        } ++ (
+        } ++ (changedUploads ++ newUploads).map {
          Right(_)
         }
-        LocalFileDatabase persist
-
+        LocalFileDatabase persist recordsAndUploads
+        recordsAndUploads
       } match {
         case Success(ok) => Right(ok)
         case Failure(err) => Left(ErrorReport(err))
       }
     }
   }
-  def collectResult[B](pf: PartialFunction[Either[DbRecord,
+  def collectResult[B](pf: PartialFunction[Either[DbRecord, Upload],B]) =
     diffAgainstS3.map { errorOrDiffSource =>
       errorOrDiffSource.right map (_ collect pf)
     }
@@ -67,7 +67,7 @@ object Diff {
       case Left(dbRecord) => dbRecord.s3Key
     }
     val uploads: UploadBatch = collectResult {
-      case Right(
+      case Right(upload) => upload
     }
     Right(Diff(unchanged, uploads :: Nil, persistenceError = Future(None)))
   }
@@ -112,26 +112,28 @@ object Diff {
 
   def resolveDiffAgainstLocalDb(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
                                (implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Either[ErrorReport, Diff] = {
-    val localDiff: Either[ErrorReport, Seq[Either[DbRecord,
+    val localDiff: Either[ErrorReport, Seq[Either[DbRecord, Upload]]] =
       (for {
        dbFile <- getOrCreateDbFile
        databaseIndices <- loadDbFromFile(dbFile)
      } yield {
        val siteFiles = Files.listSiteFiles
-        val
+        val recordsOrUploads = siteFiles.foldLeft(Seq(): Seq[Either[DbRecord, Upload]]) { (recordsOrUps, file) =>
          val truncatedKey = TruncatedDbRecord(file)
          val fileIsUnchanged = databaseIndices.truncatedIndex contains truncatedKey
          if (fileIsUnchanged)
-
+            recordsOrUps :+ Left(databaseIndices.fullIndex find (_.truncated == truncatedKey) get)
          else {
+            val isUpdate = databaseIndices.s3KeyIndex contains truncatedKey.s3Key
+
            val uploadType =
-              if (
+              if (isUpdate) FileUpdate
              else NewFile
-
+            recordsOrUps :+ Right(Upload(file, uploadType, reasonForUpload(truncatedKey, databaseIndices, isUpdate)))
          }
        }
-        logger.debug(s"Discovered ${siteFiles.length} files on the local site, of which ${
-
+        logger.debug(s"Discovered ${siteFiles.length} files on the local site, of which ${recordsOrUploads count (_.isRight)} are new or changed")
+        recordsOrUploads
      }) match {
        case Success(ok) => Right(ok)
        case Failure(err) => Left(ErrorReport(err))
@@ -146,7 +148,7 @@ object Diff {
       case Right(f) => f
     }
 
-    val changesMissedByLocalDiff: Future[Either[ErrorReport, Seq[
+    val changesMissedByLocalDiff: Future[Either[ErrorReport, Seq[Upload]]] = s3FilesFuture.map { errorOrS3Files =>
       for (s3Files <- errorOrS3Files.right) yield {
        val remoteS3Keys = s3Files.map(_.s3Key).toSet
        val localS3Keys = unchangedAccordingToLocalDiff.map(_.s3Key).toSet
@@ -154,11 +156,11 @@ object Diff {
        def isChangedOnS3(s3File: S3File) = (localS3Keys contains s3File.s3Key) && !(localMd5 contains s3File.md5)
        val changedOnS3 = s3Files collect {
          case s3File if isChangedOnS3(s3File) =>
-
+            Upload(site resolveFile s3File, FileUpdate, reasonForUpload = "someone else has modified the file on the S3 bucket")
        }
        val missingFromS3 = localS3Keys collect {
          case localS3Key if !(remoteS3Keys contains localS3Key) =>
-
+            Upload(site resolveFile localS3Key, NewFile, reasonForUpload = "someone else has removed the file from the S3 bucket")
 
        }
        changedOnS3 ++ missingFromS3
@@ -177,23 +179,23 @@ object Diff {
     val unchangedFilesFinal = errorOrDiffAgainstS3 map {
      _ fold (
        (error: ErrorReport) => Left(error),
-        (syncResult: (Seq[DbRecord], Seq[
+        (syncResult: (Seq[DbRecord], Seq[Upload])) => Right(syncResult._1)
      )
     }
 
-    val
+    val uploadsAccordingToS3Diff = errorOrDiffAgainstS3.map {
      _ fold (
        (error: ErrorReport) => Left(error),
-        (syncResult: (Seq[DbRecord], Seq[
+        (syncResult: (Seq[DbRecord], Seq[Upload])) => Right(syncResult._2)
      )
     }
     val persistenceError: Future[Either[ErrorReport, _]] = for {
      unchanged <- unchangedFilesFinal
-
+      uploads <- uploadsAccordingToS3Diff
     } yield
      for {
        records1 <- unchanged.right
-        records2 <-
+        records2 <- uploads.right
      } yield
        persist(records1.map(Left(_)) ++ records2.map(Right(_)) ++ uploadsAccordingToLocalDiff.map(Right(_))) match {
          case Success(_) => Unit
@@ -201,12 +203,28 @@ object Diff {
        }
     Diff(
      unchangedFilesFinal map (_.right.map(_ map (_.s3Key))),
-      uploads = Future(Right(uploadsAccordingToLocalDiff)) ::
+      uploads = Future(Right(uploadsAccordingToLocalDiff)) :: uploadsAccordingToS3Diff :: Nil,
      persistenceError = persistenceError map (_.left.toOption)
     )
   }
 }
 
+  private def reasonForUpload(truncatedKey: TruncatedDbRecord, databaseIndices: DbIndices, isUpdate: Boolean) = {
+    if (isUpdate) {
+      val lengthChanged = !(databaseIndices.fileLenghtIndex contains truncatedKey.fileLength)
+      val mtimeChanged = !(databaseIndices.lastModifiedIndex contains truncatedKey.fileModified)
+      if (lengthChanged)
+        "file length has changed according to the local database"
+      else if (mtimeChanged)
+        "file mtime has changed according to the local database"
+      else if (mtimeChanged && lengthChanged)
+        "file mtime and length have changed according to the local database"
+      else
+        "programmer error: faulty logic in inferring the reason for upload"
+    }
+    else "file is new according to the local database"
+  }
+
   private def getOrCreateDbFile(implicit site: Site, logger: Logger) =
     Try {
       val dbFile = new File(getTempDirectory, "s3_website_local_db_" + sha256Hex(site.rootDirectory))
@@ -216,9 +234,11 @@ object Diff {
     }
 
   case class DbIndices(
-    s3KeyIndex:
-
-
+    s3KeyIndex: Set[S3Key],
+    fileLenghtIndex: Set[Long],
+    lastModifiedIndex: Set[Long],
+    truncatedIndex: Set[TruncatedDbRecord],
+    fullIndex: Set[DbRecord]
   )
 
   private def loadDbFromFile(databaseFile: File)(implicit site: Site, logger: Logger): Try[DbIndices] =
@@ -237,23 +257,25 @@ object Diff {
       DbIndices(
        s3KeyIndex = fullIndex map (_.s3Key),
        truncatedIndex = fullIndex map (TruncatedDbRecord(_)),
-        fullIndex
+        fileLenghtIndex = fullIndex map (_.fileLength),
+        lastModifiedIndex = fullIndex map (_.fileModified),
+        fullIndex = fullIndex
      )
     }
 
-  def persist(
+  def persist(recordsOrUploads: Seq[Either[DbRecord, Upload]])(implicit site: Site, logger: Logger): Try[Seq[Either[DbRecord, Upload]]] =
     getOrCreateDbFile flatMap { dbFile =>
      Try {
-        val dbFileContents =
-          val record: DbRecord =
+        val dbFileContents = recordsOrUploads.map { recordOrUpload =>
+          val record: DbRecord = recordOrUpload fold(
            record => record,
-
+            upload => DbRecord(upload.s3Key, upload.originalFile.length, upload.originalFile.lastModified, upload.md5.get)
          )
          record.s3Key :: record.fileLength :: record.fileModified :: record.uploadFileMd5 :: Nil mkString "|"
        } mkString "\n"
 
        write(dbFile, dbFileContents)
-
+        recordsOrUploads
      }
     }
@@ -275,6 +297,6 @@ object Diff {
 
   object DbRecord {
     def apply(original: File)(implicit site: Site): DbRecord =
-      DbRecord(site resolveS3Key original, original.length, original.lastModified,
+      DbRecord(site resolveS3Key original, original.length, original.lastModified, Upload.md5(original).get)
   }
 }
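The Diff.scala changes above replace the old local-file batches with a `Seq[Either[DbRecord, Upload]]`: `Left` marks a file that is unchanged according to the local database, `Right` an upload annotated with a human-readable `reasonForUpload`. Below is a minimal, self-contained sketch of that partitioning idea; the types and the `resolve` helper are simplified stand-ins for illustration, not the gem's real model classes.

```scala
object DiffSketch {
  // Simplified stand-in types, not the gem's real DbRecord and Upload.
  case class DbRecord(s3Key: String, fileLength: Long, fileModified: Long)
  case class Upload(s3Key: String, reasonForUpload: String)

  // Partition local files into unchanged records (Left) and pending uploads (Right),
  // attaching a human-readable reason to each upload, in the spirit of the new
  // reasonForUpload field introduced in 2.1.0.
  def resolve(localFiles: Seq[DbRecord],
              knownKeys: Set[String],
              knownLengths: Set[Long]): Seq[Either[DbRecord, Upload]] =
    localFiles map { file =>
      if (!(knownKeys contains file.s3Key))
        Right(Upload(file.s3Key, "file is new according to the local database"))
      else if (!(knownLengths contains file.fileLength))
        Right(Upload(file.s3Key, "file length has changed according to the local database"))
      else
        Left(file)
    }

  def main(args: Array[String]): Unit = {
    val files = Seq(DbRecord("index.html", 10, 1L), DbRecord("about.html", 20, 2L))
    resolve(files, knownKeys = Set("index.html"), knownLengths = Set(10L)) foreach {
      case Left(record)  => println(s"unchanged: ${record.s3Key}")
      case Right(upload) => println(s"upload ${upload.s3Key}: ${upload.reasonForUpload}")
    }
  }
}
```

The real code derives the reason from the local database's `s3KeyIndex`, `fileLenghtIndex` and `lastModifiedIndex`; the sketch only consults a key set and a length set.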
data/src/main/scala/s3/website/S3.scala
CHANGED
@@ -18,13 +18,13 @@ object S3 {
 
   def uploadRedirect(redirect: Redirect, a: Attempt = 1)
                     (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger) =
-
+    uploadToS3(Right(redirect))
 
-  def uploadFile(
+  def uploadFile(up: Upload, a: Attempt = 1)
                 (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger) =
-
+    uploadToS3(Left(up))
 
-  def
+  def uploadToS3(source: Either[Upload, Redirect], a: Attempt = 1)
                (implicit config: Config, s3Settings: S3Setting, pushMode: PushMode, executor: ExecutionContextExecutor, logger: Logger):
   Future[Either[FailedUpload, SuccessfulUpload]] =
     Future {
@@ -35,7 +35,7 @@ object S3 {
       val report = SuccessfulUpload(
        source.fold(_.s3Key, _.s3Key),
        source.fold(
-
+          upload => Left(SuccessfulNewOrCreatedDetails(upload.uploadType, upload.uploadFile.get.length(), uploadDuration, upload.reasonForUpload)),
          redirect => Right(SuccessfulRedirectDetails(redirect.uploadType, redirect.redirectTarget))
        ),
        putObjectRequest
@@ -44,7 +44,7 @@ object S3 {
       Right(report)
     } recoverWith retry(a)(
      createFailureReport = error => FailedUpload(source.fold(_.s3Key, _.s3Key), error),
-      retryAction = newAttempt => this.
+      retryAction = newAttempt => this.uploadToS3(source, newAttempt)
     )
 
   def delete(s3Key: S3Key, a: Attempt = 1)
@@ -60,18 +60,18 @@ object S3 {
      retryAction = newAttempt => this.delete(s3Key, newAttempt)
     )
 
-  def toPutObjectRequest(source: Either[
+  def toPutObjectRequest(source: Either[Upload, Redirect])(implicit config: Config): Try[PutObjectRequest] =
     source.fold(
-
+      upload =>
        for {
-          uploadFile <-
-          contentType <-
+          uploadFile <- upload.uploadFile
+          contentType <- upload.contentType
        } yield {
          val md = new ObjectMetadata()
          md setContentLength uploadFile.length
          md setContentType contentType
-
-
+          upload.encodingOnS3.map(_ => "gzip") foreach md.setContentEncoding
+          upload.maxAge foreach { seconds =>
            md.setCacheControl(
              if (seconds == 0)
                s"no-cache; max-age=$seconds"
@@ -79,7 +79,7 @@ object S3 {
              s"max-age=$seconds"
          )
        }
-        val req = new PutObjectRequest(config.s3_bucket,
+        val req = new PutObjectRequest(config.s3_bucket, upload.s3Key, new FileInputStream(uploadFile), md)
        config.s3_reduced_redundancy.filter(_ == true) foreach (_ => req setStorageClass ReducedRedundancy)
        req
      }
@@ -146,7 +146,7 @@ object S3 {
   }
 
   case class SuccessfulRedirectDetails(uploadType: UploadType, redirectTarget: String)
-  case class SuccessfulNewOrCreatedDetails(uploadType: UploadType, uploadSize: Long, uploadDuration: Option[Long])
+  case class SuccessfulNewOrCreatedDetails(uploadType: UploadType, uploadSize: Long, uploadDuration: Option[Long], reasonForUpload: String)
 
   case class SuccessfulUpload(s3Key: S3Key,
                               details: Either[SuccessfulNewOrCreatedDetails, SuccessfulRedirectDetails],
@@ -167,12 +167,20 @@ object S3 {
        md.getContentEncoding ::
        putObjectRequest.getStorageClass ::
        Nil map (Option(_)) // AWS SDK may return nulls
-      ) :+ uploadSizeForHumans :+ uploadSpeedForHumans
+      ) :+ uploadSizeForHumans :+ uploadSpeedForHumans :+ uploadReason
      detailFragments.collect {
        case Some(detailFragment) => detailFragment
      }.mkString(" | ")
     }
 
+    lazy val uploadReason =
+      details
+        .fold(uploadDetails => Some(uploadDetails.reasonForUpload), _ => None)
+        .collect {
+          case reasonForUpload if logger.verboseOutput =>
+            s"upload reason: $reasonForUpload"
+        }
+
     lazy val uploadSize = details.fold(
      newOrCreatedDetails => Some(newOrCreatedDetails.uploadSize),
      redirectDetails => None
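In S3.scala, `uploadFile` and `uploadRedirect` now both delegate to a single `uploadToS3` that accepts an `Either[Upload, Redirect]` and folds over it to build the request and the success report. A minimal sketch of that fold follows, again with simplified stand-in types rather than the gem's S3 client code.

```scala
object UploadSketch {
  // Simplified stand-in types; the real Upload and Redirect carry more fields.
  case class Upload(s3Key: String, reasonForUpload: String)
  case class Redirect(s3Key: String, redirectTarget: String)

  // One code path for both kinds of push, selected with Either.fold,
  // mirroring how uploadFile and uploadRedirect both call uploadToS3.
  def describe(source: Either[Upload, Redirect]): String =
    source.fold(
      upload   => s"upload ${upload.s3Key} (${upload.reasonForUpload})",
      redirect => s"redirect ${redirect.s3Key} -> ${redirect.redirectTarget}"
    )

  def main(args: Array[String]): Unit = {
    println(describe(Left(Upload("index.html", "the file is missing from S3"))))
    println(describe(Right(Redirect("old.html", "/new.html"))))
  }
}
```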
data/src/main/scala/s3/website/model/push.scala
CHANGED
@@ -7,7 +7,7 @@ import java.util.zip.GZIPOutputStream
 import org.apache.tika.Tika
 import s3.website.Ruby._
 import s3.website._
-import s3.website.model.
+import s3.website.model.Upload.tika
 import s3.website.model.Encoding.encodingOnS3
 import java.io.File.createTempFile
 import org.apache.commons.io.IOUtils.copy
@@ -50,7 +50,7 @@ case object RedirectFile extends UploadType {
   val pushAction = Redirected
 }
 
-case class
+case class Upload(originalFile: File, uploadType: UploadType, reasonForUpload: String)(implicit site: Site) {
   lazy val s3Key = site.resolveS3Key(originalFile)
 
   lazy val encodingOnS3 = Encoding.encodingOnS3(s3Key)
@@ -60,7 +60,7 @@ case class LocalFile(originalFile: File, uploadType: UploadType)(implicit site:
    *
    * May throw an exception, so remember to call this in a Try or Future monad
    */
-  lazy val uploadFile: Try[File] =
+  lazy val uploadFile: Try[File] = Upload uploadFile originalFile
 
   lazy val contentType: Try[String] = tika map { tika =>
     val mimeType = tika.detect(originalFile)
@@ -92,10 +92,10 @@ case class LocalFile(originalFile: File, uploadType: UploadType)(implicit site:
   /**
    * May throw an exception, so remember to call this in a Try or Future monad
    */
-  lazy val md5 =
+  lazy val md5 = Upload md5 originalFile
 }
 
-object
+object Upload {
   lazy val tika = Try(new Tika())
 
   def md5(originalFile: File)(implicit site: Site): Try[MD5] =
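push.scala renames the old `LocalFile` model to `Upload` and keeps the convention that `md5` and `uploadFile` may throw and should therefore be evaluated inside a `Try` (or `Future`). A small sketch of that convention; it assumes Apache Commons Codec for the digest, whereas the gem's own MD5 helper may be implemented differently.

```scala
import java.io.{File, FileInputStream}
import scala.util.Try
import org.apache.commons.codec.digest.DigestUtils

object Md5Sketch {
  // Wrap the possibly-throwing digest computation in a Try, as the
  // "call this in a Try or Future monad" comment in push.scala advises.
  def md5(file: File): Try[String] = Try {
    val in = new FileInputStream(file)
    try DigestUtils.md5Hex(in) // may throw IOException; Try captures it as a Failure
    finally in.close()
  }

  def main(args: Array[String]): Unit =
    println(md5(new File("index.html"))) // Success(<hex digest>) or Failure(<exception>)
}
```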
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: s3_website
 version: !ruby/object:Gem::Version
-  version: 2.0
+  version: 2.1.0
 platform: ruby
 authors:
 - Lauri Lehmijoki
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2014-06-
+date: 2014-06-06 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: thor