embulk-output-s3_parquet 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +12 -0
- data/.scalafmt.conf +9 -0
- data/CHANGELOG.md +9 -0
- data/LICENSE.txt +21 -0
- data/README.md +122 -0
- data/build.gradle +101 -0
- data/example/config.yml +25 -0
- data/example/data.tsv +5 -0
- data/gradle/wrapper/gradle-wrapper.jar +0 -0
- data/gradle/wrapper/gradle-wrapper.properties +5 -0
- data/gradlew +172 -0
- data/gradlew.bat +84 -0
- data/lib/embulk/output/s3_parquet.rb +3 -0
- data/settings.gradle +1 -0
- data/src/main/scala/org/embulk/output/s3_parquet/S3ParquetOutputPlugin.scala +199 -0
- data/src/main/scala/org/embulk/output/s3_parquet/S3ParquetPageOutput.scala +65 -0
- data/src/main/scala/org/embulk/output/s3_parquet/aws/Aws.scala +45 -0
- data/src/main/scala/org/embulk/output/s3_parquet/aws/AwsClientConfiguration.scala +34 -0
- data/src/main/scala/org/embulk/output/s3_parquet/aws/AwsCredentials.scala +128 -0
- data/src/main/scala/org/embulk/output/s3_parquet/aws/AwsEndpointConfiguration.scala +49 -0
- data/src/main/scala/org/embulk/output/s3_parquet/aws/AwsS3Configuration.scala +56 -0
- data/src/main/scala/org/embulk/output/s3_parquet/aws/HttpProxy.scala +56 -0
- data/src/main/scala/org/embulk/output/s3_parquet/parquet/EmbulkMessageType.scala +59 -0
- data/src/main/scala/org/embulk/output/s3_parquet/parquet/ParquetFileWriteSupport.scala +33 -0
- data/src/main/scala/org/embulk/output/s3_parquet/parquet/ParquetFileWriter.scala +125 -0
- data/src/test/resources/org/embulk/output/s3_parquet/in1.csv +6 -0
- data/src/test/resources/org/embulk/output/s3_parquet/out1.tsv +5 -0
- data/src/test/scala/org/embulk/output/s3_parquet/TestS3ParquetOutputPlugin.scala +140 -0
- metadata +184 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: fac4eca9b96e218930333123d01d82ec5acae146
  data.tar.gz: dc1de337cd4ada9fa86d53239be85b5a154115c1
SHA512:
  metadata.gz: 8451431ba506a80159ad768017268b826e73f6010fcb923804f6b3a8f8cb27e7669e90c4d8f25a6d348d55ce558fd8ea488942451a973c7d8387bd70a825fb77
  data.tar.gz: 835f5d30265595270925e57587bea633a1e6156cfa8f7b660af3eac1539fcf9b40f1e7dfb986ae4a290c67b7eee2638275846cfa86edd217168ff5b2c0672313
data/.gitignore
ADDED
data/.scalafmt.conf
ADDED
data/CHANGELOG.md
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,122 @@
# S3 Parquet output plugin for Embulk

[Embulk](https://github.com/embulk/embulk/) output plugin to dump records as [Apache Parquet](https://parquet.apache.org/) files on S3.

## Overview

* **Plugin type**: output
* **Load all or nothing**: no
* **Resume supported**: no
* **Cleanup supported**: yes

## Configuration

- **bucket**: S3 bucket name (string, required)
- **path_prefix**: prefix of target keys (string, optional)
- **sequence_format**: format of the sequence number of the output files (string, default: `"%03d.%02d."`)
  - **sequence_format** formats the task index and the sequence number within a task.
- **file_ext**: path suffix of the output files (string, default: `"parquet"`)
- **compression_codec**: compression codec for the parquet files (`"uncompressed"`, `"snappy"`, `"gzip"`, `"lzo"`, `"brotli"`, `"lz4"` or `"zstd"`, default: `"uncompressed"`)
- **default_timestamp_format**: default timestamp format (string, default: `"%Y-%m-%d %H:%M:%S.%6N %z"`)
- **default_timezone**: default timezone (string, default: `"UTC"`)
- **column_options**: a map whose keys are column names and whose values are configurations with the following parameters (optional)
  - **timezone**: timezone, if the type of this column is timestamp. If not set, **default_timezone** is used. (string, optional)
  - **format**: timestamp format, if the type of this column is timestamp. If not set, **default_timestamp_format** is used. (string, optional)
- **canned_acl**: grants one of the [canned ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) to created objects (string, default: `private`)
- **block_size**: The block size is the size of a row group being buffered in memory. This limits the memory usage when writing. Larger values improve I/O when reading but consume more memory when writing. (int, default: `134217728` (128MB))
- **page_size**: The page size is for compression. When reading, each page can be decompressed independently. A block is composed of pages. The page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. (int, default: `1048576` (1MB))
- **max_padding_size**: The max size (bytes) to write as padding and the min size of a row group (int, default: `8388608` (8MB))
- **enable_dictionary_encoding**: enable/disable dictionary encoding (boolean, default: `true`)
- **auth_method**: name of the mechanism used to authenticate requests (`"basic"`, `"env"`, `"instance"`, `"profile"`, `"properties"`, `"anonymous"`, `"session"`, `"assume_role"`, or `"default"`, default: `"default"`)
  - `"basic"`: uses **access_key_id** and **secret_access_key** to authenticate.
  - `"env"`: uses the `AWS_ACCESS_KEY_ID` (or `AWS_ACCESS_KEY`) and `AWS_SECRET_KEY` (or `AWS_SECRET_ACCESS_KEY`) environment variables.
  - `"instance"`: uses the EC2 instance profile or an attached ECS task role.
  - `"profile"`: uses credentials written in a file. The format of the file is as follows, where `[...]` is a profile name.
    ```
    [default]
    aws_access_key_id=YOUR_ACCESS_KEY_ID
    aws_secret_access_key=YOUR_SECRET_ACCESS_KEY

    [profile2]
    ...
    ```
  - `"properties"`: uses the `aws.accessKeyId` and `aws.secretKey` Java system properties.
  - `"anonymous"`: uses anonymous access. This auth method can access only public files.
  - `"session"`: uses temporarily generated **access_key_id**, **secret_access_key** and **session_token**.
  - `"assume_role"`: uses temporarily generated credentials obtained by assuming the **role_arn** role (see the sketch after this list).
  - `"default"`: uses the AWS SDK's default strategy to look up available credentials in the runtime environment. This method behaves like the combination of the following methods.
    1. `"env"`
    1. `"properties"`
    1. `"profile"`
    1. `"instance"`
- **profile_file**: path to a profiles file. Optionally used when **auth_method** is `"profile"`. (string, default: given by the `AWS_CREDENTIAL_PROFILES_FILE` environment variable, or `~/.aws/credentials`)
- **profile_name**: name of a profile. Optionally used when **auth_method** is `"profile"`. (string, default: `"default"`)
- **access_key_id**: AWS access key id. Required when **auth_method** is `"basic"` or `"session"`. (string, optional)
- **secret_access_key**: AWS secret access key. Required when **auth_method** is `"basic"` or `"session"`. (string, optional)
- **session_token**: AWS session token. Required when **auth_method** is `"session"`. (string, optional)
- **role_arn**: ARN of the role to assume. Required when **auth_method** is `"assume_role"`. (string, optional)
- **role_session_name**: an identifier for the assumed role session. Required when **auth_method** is `"assume_role"`. (string, optional)
- **role_external_id**: a unique identifier that is used by third parties when assuming roles in their customers' accounts. Optionally used when **auth_method** is `"assume_role"`. (string, optional)
- **role_session_duration_seconds**: duration, in seconds, of the role session. Optionally used when **auth_method** is `"assume_role"`. (int, optional)
- **scope_down_policy**: an IAM policy in JSON format. Optionally used when **auth_method** is `"assume_role"`. (string, optional)
- **endpoint**: the AWS service endpoint (string, optional)
- **region**: the AWS region (string, optional)
- **http_proxy**: settings for the HTTP proxy used when accessing AWS (optional)
  - **host**: proxy host (string, required)
  - **port**: proxy port (int, optional)
  - **protocol**: proxy protocol (string, default: `"https"`)
  - **user**: proxy user (string, optional)
  - **password**: proxy password (string, optional)
- **buffer_dir**: buffer directory for parquet files to be uploaded to S3 (string, default: creates a temporary directory)
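
The following is an illustrative sketch of how the authentication and proxy options above combine; it is not an example shipped with this package, and the bucket name, role ARN, and proxy host are placeholders.

```yaml
out:
  type: s3_parquet
  bucket: my-bucket                                            # placeholder
  path_prefix: logs/events.
  compression_codec: gzip
  auth_method: assume_role
  role_arn: "arn:aws:iam::123456789012:role/embulk-writer"     # placeholder
  role_session_name: embulk-output-s3_parquet
  region: ap-northeast-1
  http_proxy:
    host: proxy.example.com                                    # placeholder
    port: 8080
    protocol: https
```
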
## Example

```yaml
out:
  type: s3_parquet
  bucket: my-bucket
  path_prefix: path/to/my-obj.
  file_ext: snappy.parquet
  compression_codec: snappy
  default_timezone: Asia/Tokyo
  canned_acl: bucket-owner-full-control
```

## Note

* The current implementation does not support [LogicalTypes](https://github.com/apache/parquet-format/blob/2b38663/LogicalTypes.md). I will implement them later as **column_options**. So, currently, **timestamp** type and **json** type are stored as UTF-8 strings. Please be careful.
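
As an illustration of that caveat (a snippet written for this note, not taken from the package), **column_options** is what controls the string form a timestamp column is written in:

```yaml
out:
  type: s3_parquet
  bucket: my-bucket                          # placeholder
  path_prefix: path/to/my-obj.
  column_options:
    t:                                       # assumed timestamp column in the input schema
      format: "%Y-%m-%d %H:%M:%S.%6N %z"     # same as the default, shown explicitly
      timezone: UTC
# each value of "t" is then written as a UTF-8 string,
# e.g. "2019-01-01 12:34:56.000000 +0000"
```
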

## Development

### Run example:

```shell
$ ./gradlew classpath
$ embulk run example/config.yml -Ilib
```

### Run test:

```shell
$ ./gradlew test
```

### Build

```shell
$ ./gradlew gem  # -t to watch change of files and rebuild continuously
```

### Release gem:

Fix [build.gradle](./build.gradle), then:

```shell
$ ./gradlew gemPush
```

## ChangeLog

[CHANGELOG.md](./CHANGELOG.md)
data/build.gradle
ADDED
@@ -0,0 +1,101 @@
plugins {
    id "scala"
    id "com.jfrog.bintray" version "1.1"
    id "com.github.jruby-gradle.base" version "1.5.0"
    id "com.diffplug.gradle.spotless" version "3.13.0"
    id "com.adarshr.test-logger" version "1.6.0" // For Pretty test logging
}
import com.github.jrubygradle.JRubyExec
repositories {
    mavenCentral()
    jcenter()
}
configurations {
    provided
}

version = "0.0.2"

sourceCompatibility = 1.8
targetCompatibility = 1.8

dependencies {
    compile "org.embulk:embulk-core:0.9.12"
    provided "org.embulk:embulk-core:0.9.12"

    compile 'org.scala-lang:scala-library:2.12.8'
    ['s3', 'sts'].each { v ->
        compile "com.amazonaws:aws-java-sdk-${v}:1.11.479"
    }
    ['column', 'common', 'encoding', 'format', 'hadoop', 'jackson'].each { v ->
        compile "org.apache.parquet:parquet-${v}:1.10.0"
    }
    compile 'org.apache.hadoop:hadoop-common:2.9.2'
    compile 'org.xerial.snappy:snappy-java:1.1.7.2'

    testCompile 'org.scalatest:scalatest_2.12:3.0.5'
    testCompile 'org.embulk:embulk-test:0.9.12'
    testCompile 'org.embulk:embulk-standards:0.9.12'
    testCompile 'cloud.localstack:localstack-utils:0.1.15'
    testCompile 'org.apache.parquet:parquet-tools:1.8.0'
    testCompile 'org.apache.hadoop:hadoop-client:2.9.2'
}

testlogger {
    theme "mocha"
}

task classpath(type: Copy, dependsOn: ["jar"]) {
    doFirst { file("classpath").deleteDir() }
    from (configurations.runtime - configurations.provided + files(jar.archivePath))
    into "classpath"
}
clean { delete "classpath" }

task gem(type: JRubyExec, dependsOn: ["gemspec", "classpath"]) {
    jrubyArgs "-S"
    script "gem"
    scriptArgs "build", "${project.name}.gemspec"
    doLast { ant.move(file: "${project.name}-${project.version}.gem", todir: "pkg") }
}

task gemPush(type: JRubyExec, dependsOn: ["gem"]) {
    jrubyArgs "-S"
    script "gem"
    scriptArgs "push", "pkg/${project.name}-${project.version}.gem"
}

task "package"(dependsOn: ["gemspec", "classpath"]) {
    doLast {
        println "> Build succeeded."
        println "> You can run embulk with '-L ${file(".").absolutePath}' argument."
    }
}

task gemspec {
    ext.gemspecFile = file("${project.name}.gemspec")
    inputs.file "build.gradle"
    outputs.file gemspecFile
    doLast { gemspecFile.write($/
Gem::Specification.new do |spec|
  spec.name = "${project.name}"
  spec.version = "${project.version}"
  spec.authors = ["Civitaspo"]
  spec.summary = %[S3 Parquet output plugin for Embulk]
  spec.description = %[Dumps records to S3 Parquet.]
  spec.email = ["civitaspo@gmail.com"]
  spec.licenses = ["MIT"]
  spec.homepage = "https://github.com/civitaspo/embulk-output-s3_parquet"

  spec.files = `git ls-files`.split("\n") + Dir["classpath/*.jar"]
  spec.test_files = spec.files.grep(%r"^(test|spec)/")
  spec.require_paths = ["lib"]

  #spec.add_dependency 'YOUR_GEM_DEPENDENCY', ['~> YOUR_GEM_DEPENDENCY_VERSION']
  spec.add_development_dependency 'bundler', ['~> 1.0']
  spec.add_development_dependency 'rake', ['~> 12.0']
end
/$)
    }
}
clean { delete "${project.name}.gemspec" }
data/example/config.yml
ADDED
@@ -0,0 +1,25 @@
in:
  type: file
  path_prefix: ./example/data.tsv
  parser:
    type: csv
    delimiter: "\t"
    skip_header_lines: 0
    null_string: ""
    columns:
      - { name: id, type: long }
      - { name: description, type: string }
      - { name: name, type: string }
      - { name: t, type: timestamp, format: "%Y-%m-%d %H:%M:%S %z"}
      - { name: payload, type: json}
    stop_on_invalid_record: true

out:
  type: s3_parquet
  bucket: my-bucket
  path_prefix: path/to/my-obj.
  file_ext: snappy.parquet
  compression_codec: snappy
  default_timezone: Asia/Tokyo
  canned_acl: bucket-owner-full-control
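
For reference, a worked example added here (not part of the package) of how this config's output keys are assembled, assuming the key layout implied by the README's **sequence_format** description:

```yaml
# assumed key layout: path_prefix + (sequence_format % [task_index, sequence_in_task]) + file_ext
path_prefix: path/to/my-obj.
sequence_format: "%03d.%02d."      # default, shown explicitly
file_ext: snappy.parquet
# => first file of task 0 would presumably be uploaded as
#    s3://my-bucket/path/to/my-obj.000.00.snappy.parquet
```
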
data/example/data.tsv
ADDED
@@ -0,0 +1,5 @@
0 c20ef94602 c212c89f91 2017-10-24 03:54:35 +0900 {"a":0,"b":"99"}
1 330a9fc33a e25b33b616 2017-10-22 19:53:31 +0900 {"a":1,"b":"a9"}
2 707b3b7588 90823c6a1f 2017-10-23 23:42:43 +0900 {"a":2,"b":"96"}
3 8d8288e66f 2017-10-22 06:12:13 +0900 {"a":3,"b":"86"}
4 c54d8b6481 e56a40571c 2017-10-23 04:59:16 +0900 {"a":4,"b":"d2"}
data/gradle/wrapper/gradle-wrapper.jar
Binary file
data/gradlew
ADDED
@@ -0,0 +1,172 @@
#!/usr/bin/env sh

##############################################################################
##
##  Gradle start up script for UN*X
##
##############################################################################

# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
    ls=`ls -ld "$PRG"`
    link=`expr "$ls" : '.*-> \(.*\)$'`
    if expr "$link" : '/.*' > /dev/null; then
        PRG="$link"
    else
        PRG=`dirname "$PRG"`"/$link"
    fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null

APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`

# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""

# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"

warn () {
    echo "$*"
}

die () {
    echo
    echo "$*"
    echo
    exit 1
}

# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
  CYGWIN* )
    cygwin=true
    ;;
  Darwin* )
    darwin=true
    ;;
  MINGW* )
    msys=true
    ;;
  NONSTOP* )
    nonstop=true
    ;;
esac

CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar

# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
        # IBM's JDK on AIX uses strange locations for the executables
        JAVACMD="$JAVA_HOME/jre/sh/java"
    else
        JAVACMD="$JAVA_HOME/bin/java"
    fi
    if [ ! -x "$JAVACMD" ] ; then
        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
    fi
else
    JAVACMD="java"
    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi

# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
    MAX_FD_LIMIT=`ulimit -H -n`
    if [ $? -eq 0 ] ; then
        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
            MAX_FD="$MAX_FD_LIMIT"
        fi
        ulimit -n $MAX_FD
        if [ $? -ne 0 ] ; then
            warn "Could not set maximum file descriptor limit: $MAX_FD"
        fi
    else
        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
    fi
fi

# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi

# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
    JAVACMD=`cygpath --unix "$JAVACMD"`

    # We build the pattern for arguments to be converted via cygpath
    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
    SEP=""
    for dir in $ROOTDIRSRAW ; do
        ROOTDIRS="$ROOTDIRS$SEP$dir"
        SEP="|"
    done
    OURCYGPATTERN="(^($ROOTDIRS))"
    # Add a user-defined pattern to the cygpath arguments
    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
    fi
    # Now convert the arguments - kludge to limit ourselves to /bin/sh
    i=0
    for arg in "$@" ; do
        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
        CHECK2=`echo "$arg"|egrep -c "^-"`                                 ### Determine if an option

        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition
            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
        else
            eval `echo args$i`="\"$arg\""
        fi
        i=$((i+1))
    done
    case $i in
        (0) set -- ;;
        (1) set -- "$args0" ;;
        (2) set -- "$args0" "$args1" ;;
        (3) set -- "$args0" "$args1" "$args2" ;;
        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
    esac
fi

# Escape application args
save () {
    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
    echo " "
}
APP_ARGS=$(save "$@")

# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"

# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
  cd "$(dirname "$0")"
fi

exec "$JAVACMD" "$@"