embulk-parser-header_based_csv 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA1:
3
+ metadata.gz: '029ace0c5c4d035516de52ffb5e5f706f9c44c42'
4
+ data.tar.gz: fdb2f0a62854079e593948f5d4bfb2a1c8bcfdb5
5
+ SHA512:
6
+ metadata.gz: af14e92ce1071d1341c6ec60d421761a6212c66f0cf2c84af74d0a99956eb04f90db4538dbf7a7c21e10b4af36ea851445622ffac1b408c1fc93e752b1661d42
7
+ data.tar.gz: b3e4a555f6c0f96932a51ca96486693f87d99ba12038f1709e3d203e5688cdff048daaad1c7e7c381346a555ffa2c579b2248de763e511b2432a53309bc45292
data/.gitignore ADDED
@@ -0,0 +1,14 @@
1
+ *~
2
+ /pkg/
3
+ /tmp/
4
+ *.gemspec
5
+ .gradle/
6
+ /classpath/
7
+ build/
8
+ .idea
9
+ /.settings/
10
+ /.metadata/
11
+ .classpath
12
+ .project
13
+ *.iml
14
+ /out/
data/.gitlab-ci.yml ADDED
@@ -0,0 +1,14 @@
1
+ image: openjdk:8u181
2
+
3
+ deploy:
4
+ stage: deploy
5
+ before_script:
6
+ - mkdir ~/.gem
7
+ - echo '---' >> ~/.gem/credentials
8
+ - echo ':rubygems_api_key:' $RUBYGEMS_API_KEY >> ~/.gem/credentials
9
+ - chmod 0600 ~/.gem/credentials
10
+ script:
11
+ - ./gradlew gemPush
12
+ only:
13
+ - tags
14
+ when: manual
data/.scalafmt.conf ADDED
@@ -0,0 +1,7 @@
1
+ style = defaultWithAlign
2
+ danglingParentheses = true
3
+ indentOperator = spray
4
+ maxColumn = 120
5
+ rewrite.rules = [RedundantParens, SortImports, PreferCurlyFors]
6
+ binPack.literalArgumentLists = false
7
+ unindentTopLevelOperators = true
data/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
1
+
2
+ MIT License
3
+
4
+ Permission is hereby granted, free of charge, to any person obtaining
5
+ a copy of this software and associated documentation files (the
6
+ "Software"), to deal in the Software without restriction, including
7
+ without limitation the rights to use, copy, modify, merge, publish,
8
+ distribute, sublicense, and/or sell copies of the Software, and to
9
+ permit persons to whom the Software is furnished to do so, subject to
10
+ the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be
13
+ included in all copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,43 @@
+ # Header Based Csv parser plugin for Embulk
+
+ ## Overview
+
+ * **Plugin type**: parser
+ * **Guess supported**: no
+
+ This parser is an extension of the built-in CSV parser plugin.
+
+ When it parses a file, it matches each header name in the file against the column names in your configuration file and requires an exact match. Because values are resolved by header name, changing the column order in your files does not affect parsing.
+
+ A CSV file must have a header line. The first line is treated as the header unless you set the `skip_header_lines` option.
+
+ ## Configuration
+
+ This plugin has no options of its own (its configuration is the same as the built-in CSV parser plugin's). If you do set `skip_header_lines`, be careful that the skipped lines do not include the header line.
+
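A minimal configuration mirroring the bundled `data/test.yml` might look like this (the `path_prefix` and column definitions are the sample values from that file); the `columns` names are matched against the CSV header by name, not by position:

```
in:
  type: file
  path_prefix: csv
  parser:
    type: header_based_csv
    columns:
      - {name: a, type: long}
      - {name: b, type: string}
      - {name: c, type: double}
out:
  type: stdout
```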
+ ## Install
+
+ ```
+ $ embulk gem install embulk-parser-header_based_csv
+ ```
+
+ ## Build
+
+ ```
+ $ ./gradlew gem   # add -t to watch for file changes and rebuild continuously
+ ```
data/build.gradle ADDED
@@ -0,0 +1,93 @@
1
+ plugins {
2
+ id "com.jfrog.bintray" version "1.1"
3
+ id "com.github.jruby-gradle.base" version "1.5.0"
4
+ id "scala"
5
+ id "com.diffplug.gradle.spotless" version "3.14.0"
6
+ }
7
+ import com.github.jrubygradle.JRubyExec
8
+ repositories {
9
+ mavenCentral()
10
+ jcenter()
11
+ }
12
+ configurations {
13
+ provided
14
+ }
15
+
16
+ version = "0.1.0"
17
+
18
+ sourceCompatibility = 1.8
19
+ targetCompatibility = 1.8
20
+
21
+ dependencies {
22
+ compile "org.embulk:embulk-core:0.9.7"
23
+ compile "org.embulk:embulk-standards:0.9.7"
24
+ compile "org.scala-lang:scala-library:2.12.6"
25
+ testCompile "org.scalatest:scalatest_2.12:3.0.5"
26
+ testCompile "org.embulk:embulk-core:0.9.7"
27
+ provided "org.embulk:embulk-core:0.9.7"
28
+ }
29
+
30
+ spotless {
31
+ scala {
32
+ scalafmt('1.5.1').configFile('.scalafmt.conf')
33
+ }
34
+ }
35
+
36
+ afterEvaluate {
37
+ tasks.getByName('spotlessCheck').dependsOn(tasks.getByName('spotlessApply'))
38
+ }
39
+
40
+ task classpath(type: Copy, dependsOn: ["jar"]) {
41
+ doFirst { file("classpath").deleteDir() }
42
+ from (configurations.runtime - configurations.provided + files(jar.archivePath))
43
+ into "classpath"
44
+ }
45
+ clean { delete "classpath" }
46
+
47
+ task gem(type: JRubyExec, dependsOn: ["gemspec", "classpath"]) {
48
+ jrubyArgs "-S"
49
+ script "gem"
50
+ scriptArgs "build", "${project.name}.gemspec"
51
+ doLast { ant.move(file: "${project.name}-${project.version}.gem", todir: "pkg") }
52
+ }
53
+
54
+ task gemPush(type: JRubyExec, dependsOn: ["gem"]) {
55
+ jrubyArgs "-S"
56
+ script "gem"
57
+ scriptArgs "push", "pkg/${project.name}-${project.version}.gem"
58
+ }
59
+
60
+ task "package"(dependsOn: ["gemspec", "classpath"]) {
61
+ doLast {
62
+ println "> Build succeeded."
63
+ println "> You can run embulk with '-L ${file(".").absolutePath}' argument."
64
+ }
65
+ }
66
+
67
+ task gemspec {
68
+ ext.gemspecFile = file("${project.name}.gemspec")
69
+ inputs.file "build.gradle"
70
+ outputs.file gemspecFile
71
+ doLast { gemspecFile.write($/
72
+ Gem::Specification.new do |spec|
73
+ spec.name = "${project.name}"
74
+ spec.version = "${project.version}"
75
+ spec.authors = ["k_higuchi"]
76
+ spec.summary = %[Header Based Csv parser plugin for Embulk]
77
+ spec.description = %[Parses Header Based Csv files read by other file input plugins.]
78
+ spec.email = ["k_higuchi@septeni-original.co.jp"]
79
+ spec.licenses = ["MIT"]
80
+ # TODO set this: spec.homepage = "https://github.com/k_higuchi/embulk-parser-header_based_csv"
81
+
82
+ spec.files = `git ls-files`.split("\n") + Dir["classpath/*.jar"]
83
+ spec.test_files = spec.files.grep(%r"^(test|spec)/")
84
+ spec.require_paths = ["lib"]
85
+
86
+ #spec.add_dependency 'YOUR_GEM_DEPENDENCY', ['~> YOUR_GEM_DEPENDENCY_VERSION']
87
+ spec.add_development_dependency 'bundler', ['~> 1.0']
88
+ spec.add_development_dependency 'rake', ['>= 10.0']
89
+ end
90
+ /$)
91
+ }
92
+ }
93
+ clean { delete "${project.name}.gemspec" }
data/csv/test_1.csv ADDED
@@ -0,0 +1,3 @@
1
+ b,a,c
2
+ 2,1,3
3
+ 5,4,6
data/csv/test_2.csv ADDED
@@ -0,0 +1,3 @@
1
+ a,b,c
2
+ 1,2,3
3
+ 4,5,6
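The two sample files contain the same columns in different orders (`csv/test_1.csv` is headed `b,a,c`, `csv/test_2.csv` is headed `a,b,c`). Because the parser matches values to columns by header name, both files should yield the same logical records; for the first data row of each:

```
csv/test_1.csv   b,a,c / 2,1,3   ->   a=1, b=2, c=3
csv/test_2.csv   a,b,c / 1,2,3   ->   a=1, b=2, c=3
```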
data/gradle/wrapper/gradle-wrapper.jar ADDED
Binary file
data/gradle/wrapper/gradle-wrapper.properties ADDED
@@ -0,0 +1,6 @@
1
+ #Tue Aug 21 16:25:32 JST 2018
2
+ distributionBase=GRADLE_USER_HOME
3
+ distributionPath=wrapper/dists
4
+ zipStoreBase=GRADLE_USER_HOME
5
+ zipStorePath=wrapper/dists
6
+ distributionUrl=https\://services.gradle.org/distributions/gradle-4.1-all.zip
data/gradlew ADDED
@@ -0,0 +1,172 @@
1
+ #!/usr/bin/env sh
2
+
3
+ ##############################################################################
4
+ ##
5
+ ## Gradle start up script for UN*X
6
+ ##
7
+ ##############################################################################
8
+
9
+ # Attempt to set APP_HOME
10
+ # Resolve links: $0 may be a link
11
+ PRG="$0"
12
+ # Need this for relative symlinks.
13
+ while [ -h "$PRG" ] ; do
14
+ ls=`ls -ld "$PRG"`
15
+ link=`expr "$ls" : '.*-> \(.*\)$'`
16
+ if expr "$link" : '/.*' > /dev/null; then
17
+ PRG="$link"
18
+ else
19
+ PRG=`dirname "$PRG"`"/$link"
20
+ fi
21
+ done
22
+ SAVED="`pwd`"
23
+ cd "`dirname \"$PRG\"`/" >/dev/null
24
+ APP_HOME="`pwd -P`"
25
+ cd "$SAVED" >/dev/null
26
+
27
+ APP_NAME="Gradle"
28
+ APP_BASE_NAME=`basename "$0"`
29
+
30
+ # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
31
+ DEFAULT_JVM_OPTS=""
32
+
33
+ # Use the maximum available, or set MAX_FD != -1 to use that value.
34
+ MAX_FD="maximum"
35
+
36
+ warn () {
37
+ echo "$*"
38
+ }
39
+
40
+ die () {
41
+ echo
42
+ echo "$*"
43
+ echo
44
+ exit 1
45
+ }
46
+
47
+ # OS specific support (must be 'true' or 'false').
48
+ cygwin=false
49
+ msys=false
50
+ darwin=false
51
+ nonstop=false
52
+ case "`uname`" in
53
+ CYGWIN* )
54
+ cygwin=true
55
+ ;;
56
+ Darwin* )
57
+ darwin=true
58
+ ;;
59
+ MINGW* )
60
+ msys=true
61
+ ;;
62
+ NONSTOP* )
63
+ nonstop=true
64
+ ;;
65
+ esac
66
+
67
+ CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
68
+
69
+ # Determine the Java command to use to start the JVM.
70
+ if [ -n "$JAVA_HOME" ] ; then
71
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
72
+ # IBM's JDK on AIX uses strange locations for the executables
73
+ JAVACMD="$JAVA_HOME/jre/sh/java"
74
+ else
75
+ JAVACMD="$JAVA_HOME/bin/java"
76
+ fi
77
+ if [ ! -x "$JAVACMD" ] ; then
78
+ die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
79
+
80
+ Please set the JAVA_HOME variable in your environment to match the
81
+ location of your Java installation."
82
+ fi
83
+ else
84
+ JAVACMD="java"
85
+ which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
86
+
87
+ Please set the JAVA_HOME variable in your environment to match the
88
+ location of your Java installation."
89
+ fi
90
+
91
+ # Increase the maximum file descriptors if we can.
92
+ if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
93
+ MAX_FD_LIMIT=`ulimit -H -n`
94
+ if [ $? -eq 0 ] ; then
95
+ if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
96
+ MAX_FD="$MAX_FD_LIMIT"
97
+ fi
98
+ ulimit -n $MAX_FD
99
+ if [ $? -ne 0 ] ; then
100
+ warn "Could not set maximum file descriptor limit: $MAX_FD"
101
+ fi
102
+ else
103
+ warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
104
+ fi
105
+ fi
106
+
107
+ # For Darwin, add options to specify how the application appears in the dock
108
+ if $darwin; then
109
+ GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
110
+ fi
111
+
112
+ # For Cygwin, switch paths to Windows format before running java
113
+ if $cygwin ; then
114
+ APP_HOME=`cygpath --path --mixed "$APP_HOME"`
115
+ CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
116
+ JAVACMD=`cygpath --unix "$JAVACMD"`
117
+
118
+ # We build the pattern for arguments to be converted via cygpath
119
+ ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
120
+ SEP=""
121
+ for dir in $ROOTDIRSRAW ; do
122
+ ROOTDIRS="$ROOTDIRS$SEP$dir"
123
+ SEP="|"
124
+ done
125
+ OURCYGPATTERN="(^($ROOTDIRS))"
126
+ # Add a user-defined pattern to the cygpath arguments
127
+ if [ "$GRADLE_CYGPATTERN" != "" ] ; then
128
+ OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
129
+ fi
130
+ # Now convert the arguments - kludge to limit ourselves to /bin/sh
131
+ i=0
132
+ for arg in "$@" ; do
133
+ CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
134
+ CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
135
+
136
+ if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
137
+ eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
138
+ else
139
+ eval `echo args$i`="\"$arg\""
140
+ fi
141
+ i=$((i+1))
142
+ done
143
+ case $i in
144
+ (0) set -- ;;
145
+ (1) set -- "$args0" ;;
146
+ (2) set -- "$args0" "$args1" ;;
147
+ (3) set -- "$args0" "$args1" "$args2" ;;
148
+ (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
149
+ (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
150
+ (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
151
+ (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
152
+ (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
153
+ (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
154
+ esac
155
+ fi
156
+
157
+ # Escape application args
158
+ save () {
159
+ for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
160
+ echo " "
161
+ }
162
+ APP_ARGS=$(save "$@")
163
+
164
+ # Collect all arguments for the java command, following the shell quoting and substitution rules
165
+ eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
166
+
167
+ # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
168
+ if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
169
+ cd "$(dirname "$0")"
170
+ fi
171
+
172
+ exec "$JAVACMD" "$@"
data/gradlew.bat ADDED
@@ -0,0 +1,84 @@
1
+ @if "%DEBUG%" == "" @echo off
2
+ @rem ##########################################################################
3
+ @rem
4
+ @rem Gradle startup script for Windows
5
+ @rem
6
+ @rem ##########################################################################
7
+
8
+ @rem Set local scope for the variables with windows NT shell
9
+ if "%OS%"=="Windows_NT" setlocal
10
+
11
+ set DIRNAME=%~dp0
12
+ if "%DIRNAME%" == "" set DIRNAME=.
13
+ set APP_BASE_NAME=%~n0
14
+ set APP_HOME=%DIRNAME%
15
+
16
+ @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
17
+ set DEFAULT_JVM_OPTS=
18
+
19
+ @rem Find java.exe
20
+ if defined JAVA_HOME goto findJavaFromJavaHome
21
+
22
+ set JAVA_EXE=java.exe
23
+ %JAVA_EXE% -version >NUL 2>&1
24
+ if "%ERRORLEVEL%" == "0" goto init
25
+
26
+ echo.
27
+ echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28
+ echo.
29
+ echo Please set the JAVA_HOME variable in your environment to match the
30
+ echo location of your Java installation.
31
+
32
+ goto fail
33
+
34
+ :findJavaFromJavaHome
35
+ set JAVA_HOME=%JAVA_HOME:"=%
36
+ set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37
+
38
+ if exist "%JAVA_EXE%" goto init
39
+
40
+ echo.
41
+ echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42
+ echo.
43
+ echo Please set the JAVA_HOME variable in your environment to match the
44
+ echo location of your Java installation.
45
+
46
+ goto fail
47
+
48
+ :init
49
+ @rem Get command-line arguments, handling Windows variants
50
+
51
+ if not "%OS%" == "Windows_NT" goto win9xME_args
52
+
53
+ :win9xME_args
54
+ @rem Slurp the command line arguments.
55
+ set CMD_LINE_ARGS=
56
+ set _SKIP=2
57
+
58
+ :win9xME_args_slurp
59
+ if "x%~1" == "x" goto execute
60
+
61
+ set CMD_LINE_ARGS=%*
62
+
63
+ :execute
64
+ @rem Setup the command line
65
+
66
+ set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
67
+
68
+ @rem Execute Gradle
69
+ "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
70
+
71
+ :end
72
+ @rem End local scope for the variables with windows NT shell
73
+ if "%ERRORLEVEL%"=="0" goto mainEnd
74
+
75
+ :fail
76
+ rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
77
+ rem the _cmd.exe /c_ return code!
78
+ if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
79
+ exit /b 1
80
+
81
+ :mainEnd
82
+ if "%OS%"=="Windows_NT" endlocal
83
+
84
+ :omega
data/lib/embulk/guess/header_based_csv.rb ADDED
@@ -0,0 +1,61 @@
1
+ module Embulk
2
+ module Guess
3
+
4
+ # TODO implement guess plugin to make this command work:
5
+ # $ embulk guess -g "header_based_csv" partial-config.yml
6
+ #
7
+ # Depending on the file format the plugin uses, you can use choose
8
+ # one of binary guess (GuessPlugin), text guess (TextGuessPlugin),
9
+ # or line guess (LineGuessPlugin).
10
+
11
+ # class HeaderBasedCsv < GuessPlugin
12
+ # Plugin.register_guess("header_based_csv", self)
13
+ #
14
+ # def guess(config, sample_buffer)
15
+ # if sample_buffer[0,2] == GZIP_HEADER
16
+ # guessed = {}
17
+ # guessed["type"] = "header_based_csv"
18
+ # guessed["property1"] = "guessed-value"
19
+ # return {"parser" => guessed}
20
+ # else
21
+ # return {}
22
+ # end
23
+ # end
24
+ # end
25
+
26
+ # class HeaderBasedCsv < TextGuessPlugin
27
+ # Plugin.register_guess("header_based_csv", self)
28
+ #
29
+ # def guess_text(config, sample_text)
30
+ # js = JSON.parse(sample_text) rescue nil
31
+ # if js && js["mykeyword"] == "keyword"
32
+ # guessed = {}
33
+ # guessed["type"] = "header_based_csv"
34
+ # guessed["property1"] = "guessed-value"
35
+ # return {"parser" => guessed}
36
+ # else
37
+ # return {}
38
+ # end
39
+ # end
40
+ # end
41
+
42
+ # class HeaderBasedCsv < LineGuessPlugin
43
+ # Plugin.register_guess("header_based_csv", self)
44
+ #
45
+ # def guess_lines(config, sample_lines)
46
+ # all_line_matched = sample_lines.all? do |line|
47
+ # line =~ /mypattern/
48
+ # end
49
+ # if all_line_matched
50
+ # guessed = {}
51
+ # guessed["type"] = "header_based_csv"
52
+ # guessed["property1"] = "guessed-value"
53
+ # return {"parser" => guessed}
54
+ # else
55
+ # return {}
56
+ # end
57
+ # end
58
+ # end
59
+
60
+ end
61
+ end
data/lib/embulk/parser/header_based_csv.rb ADDED
@@ -0,0 +1,3 @@
1
+ Embulk::JavaPlugin.register_parser(
2
+ "header_based_csv", "org.embulk.parser.header_based_csv.HeaderBasedCsvParserPlugin",
3
+ File.expand_path('../../../../classpath', __FILE__))
data/src/main/scala/helper/GoogleOptionalHelper.scala ADDED
@@ -0,0 +1,11 @@
1
+ package helper
2
+
3
+ import com.google.common.base.Optional
4
+
5
+ object GoogleOptionalHelper {
6
+
7
+ implicit class RichOptional[T](opt: Optional[T]) {
8
+ def toOption: Option[T] = if (opt.isPresent) Some(opt.get()) else None
9
+ }
10
+
11
+ }
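As a small usage sketch (assuming the project's classpath, where Guava is provided via embulk-core; the Optional values below are made-up examples), importing the implicit class lets Guava `Optional`s be handled as Scala `Option`s:

```
import com.google.common.base.Optional
import helper.GoogleOptionalHelper.RichOptional

object GoogleOptionalHelperDemo extends App {
  val present: Optional[String] = Optional.of("NULL")
  val absent: Optional[String]  = Optional.absent()

  println(present.toOption) // Some(NULL)
  println(absent.toOption)  // None

  // The same pattern is used in the plugin, e.g. matching on task.getNullString.toOption.
  present.toOption match {
    case Some(s) => println(s"null_string is configured as '$s'")
    case None    => println("null_string is not configured")
  }
}
```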
data/src/main/scala/org/embulk/parser/header_based_csv/HeaderBasedCsvParserPlugin.scala ADDED
@@ -0,0 +1,136 @@
1
+ package org.embulk.parser.header_based_csv
2
+
3
+ import org.embulk.config._
4
+ import org.embulk.parser.header_based_csv.exception.{ColumnValueNotFoundException, TooManyColumnValuesException}
5
+ import org.embulk.parser.header_based_csv.output.ColumnValueWriter
6
+ import org.embulk.parser.header_based_csv.tokenizer.TokenizerHelper.RichCsvTokenizer
7
+ import org.embulk.spi.util.LineDecoder
8
+ import org.embulk.spi._
9
+ import org.embulk.standards.{CsvParserPlugin, CsvTokenizer}
10
+ import org.slf4j.Logger
11
+ import helper.GoogleOptionalHelper.RichOptional
12
+
13
+ import scala.annotation.tailrec
14
+ import scala.collection.JavaConverters._
15
+ import scala.util.{Failure, Success, Try}
16
+
17
+ class HeaderBasedCsvParserPlugin extends CsvParserPlugin {
18
+
19
+ trait PluginTask extends CsvParserPlugin.PluginTask
20
+
21
+ private val log: Logger = Exec.getLogger(classOf[CsvParserPlugin])
22
+
23
+ override def transaction(config: ConfigSource, control: ParserPlugin.Control): Unit = {
24
+ val task = config.loadConfig(classOf[PluginTask])
25
+
26
+ val skipHeaderLines = task.getSkipHeaderLines
27
+
28
+ // CsvParserPlugin had header_line option (not anymore)
29
+ // super class has backward compatibility code in its "transaction" method and it discards the header line when header-line option is true
30
+ // so you have to override the method and keep the header line
31
+ task.getHeaderLine.toOption match {
32
+ case Some(_) if skipHeaderLines > 0 =>
33
+ throw new ConfigException("'header_line' option is invalid if 'skip_header_lines' is set.")
34
+ case Some(hasHeader) =>
35
+ if (hasHeader)
36
+ task.setSkipHeaderLines(0) // keep the header line (should set 0 not to skip the first line unlike super class)
37
+ else
38
+ throw new ConfigException("this parser needs a header line")
39
+ case None => task.setSkipHeaderLines(0)
40
+ }
41
+
42
+ control.run(task.dump, task.getSchemaConfig.toSchema)
43
+ }
44
+
45
+ override def run(taskSource: TaskSource, schema: Schema, input: FileInput, output: PageOutput): Unit =
46
+ try {
47
+ val task = taskSource.loadTask(classOf[PluginTask])
48
+
49
+ val allowExtraColumnValues = task.getAllowExtraColumns
50
+ val allowOptionalColumns = task.getAllowOptionalColumns
51
+ val stopOnInvalidRecord = task.getStopOnInvalidRecord
52
+
53
+ val skipHeaderLines = task.getSkipHeaderLines
54
+
55
+ val tokenizer = new CsvTokenizer(new LineDecoder(input, task), task)
56
+ val pageBuilder = new PageBuilder(Exec.getBufferAllocator, schema, output)
57
+ val columnValueWriter = new ColumnValueWriter(task, pageBuilder)
58
+
59
+ while (tokenizer.nextFile()) {
60
+
61
+ (1 to skipHeaderLines).foreach(_ => tokenizer.skipHeaderLine())
62
+
63
+ tokenizer.forEachRecord { record =>
64
+ val columns = schema.getColumns.asScala.toList
65
+ val result = writeColumnValues(columns,
66
+ record,
67
+ tokenizer,
68
+ columnValueWriter,
69
+ allowExtraColumnValues,
70
+ allowOptionalColumns)
71
+ skipOrStopIfFailed(result, tokenizer, stopOnInvalidRecord)
72
+ if (result.isSuccess) {
73
+ pageBuilder.addRecord()
74
+ }
75
+ }
76
+ pageBuilder.flush()
77
+ }
78
+ pageBuilder.finish()
79
+ } catch {
80
+ case e: Throwable => e.printStackTrace()
81
+ }
82
+
83
+ private def writeSingleColumnValue(record: Map[String, String],
84
+ column: Column,
85
+ columnValueWriter: ColumnValueWriter,
86
+ columnCount: Int,
87
+ allowOptionalColumns: Boolean): Try[Unit] =
88
+ record.get(column.getName) match {
89
+ case Some(value) =>
90
+ columnValueWriter.writeValue(column, value)
91
+
92
+ case None if allowOptionalColumns && record.size < columnCount =>
93
+ columnValueWriter.writeNull(column)
94
+
95
+ case None =>
96
+ Failure(new ColumnValueNotFoundException(column))
97
+ }
98
+
99
+ private def writeColumnValues(columns: Seq[Column],
100
+ record: Map[String, String],
101
+ tokenizer: CsvTokenizer,
102
+ columnValueWriter: ColumnValueWriter,
103
+ allowExtraColumnValues: Boolean,
104
+ allowOptionalColumns: Boolean): Try[Unit] = {
105
+
106
+ @tailrec def loop(cs: Seq[Column]): Try[Unit] =
107
+ cs match {
108
+ case Nil =>
109
+ Success(())
110
+
111
+ // allowExtraColumns (which renamed to allowExtraColumnValues here because its more suitable)
112
+ // "If true, ignore too many columns. Otherwise, skip the row in case of too many columns"
113
+ case _ :: _ if !allowExtraColumnValues && columns.size < record.size =>
114
+ Failure(new TooManyColumnValuesException(columns.size, record.size))
115
+
116
+ case column :: tail =>
117
+ writeSingleColumnValue(record, column, columnValueWriter, columns.length, allowOptionalColumns) match {
118
+ case Success(_) => loop(tail)
119
+ case f: Failure[_] => f
120
+ }
121
+ }
122
+ loop(columns)
123
+ }
124
+
125
+ private def skipOrStopIfFailed(result: Try[Unit], tokenizer: CsvTokenizer, stopOnInvalidRecord: Boolean): Unit =
126
+ result.failed
127
+ .foreach { e =>
128
+ val skippedLine = tokenizer.skipCurrentLine()
129
+ val lineNumber = tokenizer.getCurrentLineNumber
130
+ if (stopOnInvalidRecord)
131
+ throw new DataException(s"Invalid record at line $lineNumber: $skippedLine", e)
132
+ else
133
+ log.warn(s"Skipped line $lineNumber (${e.getMessage}): $skippedLine")
134
+ }
135
+
136
+ }
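To illustrate the name-based matching that `writeColumnValues` and `writeSingleColumnValue` implement, here is a standalone sketch (the `Column` case class is a stand-in for `org.embulk.spi.Column`, and the sample record uses the `csv/test_1.csv` values):

```
object NameBasedLookupSketch extends App {
  final case class Column(name: String)

  val columns = List(Column("a"), Column("b"), Column("c"))   // the configured schema
  val record  = Map("b" -> "2", "a" -> "1", "c" -> "3")       // header zipped with one CSV row

  // Each configured column is resolved by header name, so the column order in the file is irrelevant.
  val resolved = columns.map(c => c.name -> record.get(c.name))
  println(resolved) // List((a,Some(1)), (b,Some(2)), (c,Some(3)))

  // A missing name yields None, which the plugin turns into either a null value
  // (allow_optional_columns) or a ColumnValueNotFoundException.
  println(record.get("d")) // None
}
```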
data/src/main/scala/org/embulk/parser/header_based_csv/exception/ColumnValueNotFoundException.scala ADDED
@@ -0,0 +1,6 @@
1
+ package org.embulk.parser.header_based_csv.exception
2
+
3
+ import org.embulk.spi.{Column, DataException}
4
+
5
+ class ColumnValueNotFoundException(column: Column)
6
+ extends DataException(s"a value for this column is not found. column name: ${column.getName}")
data/src/main/scala/org/embulk/parser/header_based_csv/exception/TooManyColumnValuesException.scala ADDED
@@ -0,0 +1,8 @@
1
+ package org.embulk.parser.header_based_csv.exception
2
+
3
+ import org.embulk.spi.DataException
4
+
5
+ class TooManyColumnValuesException(columnCount: Int, columnValueCount: Int)
6
+ extends DataException(
7
+ s"this record has more values than columns in the config file. columns: $columnCount values: $columnValueCount"
8
+ )
data/src/main/scala/org/embulk/parser/header_based_csv/output/ColumnValueWriter.scala ADDED
@@ -0,0 +1,57 @@
1
+ package org.embulk.parser.header_based_csv.output
2
+
3
+ import org.embulk.parser.header_based_csv.HeaderBasedCsvParserPlugin
4
+ import org.embulk.spi.`type`._
5
+ import org.embulk.spi.json.JsonParser
6
+ import org.embulk.spi.time.TimestampParser
7
+ import org.embulk.spi.util.Timestamps
8
+ import org.embulk.spi.{Column, PageBuilder}
9
+ import helper.GoogleOptionalHelper.RichOptional
10
+
11
+ import scala.util.{Failure, Try}
12
+
13
+ class ColumnValueWriter(task: HeaderBasedCsvParserPlugin#PluginTask, pageBuilder: PageBuilder) {
14
+
15
+ private lazy val timestampParsers = Timestamps.newTimestampColumnParsers(task, task.getSchemaConfig)
16
+ private lazy val jsonParser = new JsonParser()
17
+
18
+ def writeValue(column: Column, value: String): Try[Unit] = column.getType match {
19
+ case _: LongType => writeLong(column, value)
20
+ case _: DoubleType => writeDouble(column, value)
21
+ case _: StringType => writeString(column, value)
22
+ case _: TimestampType => writeTimestamp(column, value)(timestampParsers)
23
+ case _: JsonType => writeJson(column, value)
24
+ case _ => Failure(new UnsupportedOperationException(""))
25
+ }
26
+
27
+ def writeNull(column: Column): Try[Unit] = Try(pageBuilder.setNull(column))
28
+
29
+ private[output] def writeString(column: Column, value: String): Try[Unit] = {
30
+ task.getNullString.toOption match {
31
+ case Some(strToBeNull) if value.matches(strToBeNull) =>
32
+ Try(pageBuilder.setNull(column))
33
+ case _ =>
34
+ Try(pageBuilder.setString(column, value))
35
+ }
36
+ }
37
+
38
+ private[output] def writeBoolean(column: Column, value: String): Try[Unit] =
39
+ Try(pageBuilder.setBoolean(column, value.toBoolean))
40
+
41
+ private[output] def writeLong(column: Column, value: String): Try[Unit] =
42
+ Try(pageBuilder.setLong(column, value.toLong))
43
+
44
+ private[output] def writeDouble(column: Column, value: String): Try[Unit] =
45
+ Try(pageBuilder.setDouble(column, value.toDouble))
46
+
47
+ private[output] def writeTimestamp(column: Column,
48
+ value: String)(timestampParsers: Array[TimestampParser]): Try[Unit] =
49
+ Try {
50
+ val parser = timestampParsers(column.getIndex)
51
+ pageBuilder.setTimestamp(column, parser.parse(value))
52
+ }
53
+
54
+ private[output] def writeJson(column: Column, value: String): Try[Unit] =
55
+ Try(pageBuilder.setJson(column, jsonParser.parse(value)))
56
+
57
+ }
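The writers above all follow the same pattern: wrap the raw string conversion in `Try` so a malformed value surfaces as a `Failure` rather than an exception. A minimal standalone sketch of that pattern (the `writeLong` here is a simplified stand-in, not the class's method):

```
import scala.util.{Failure, Success, Try}

object TryConversionSketch extends App {
  def writeLong(value: String): Try[Long] = Try(value.toLong)

  println(writeLong("42"))    // Success(42)
  println(writeLong("forty")) // Failure(java.lang.NumberFormatException: For input string: "forty")

  writeLong("forty") match {
    case Success(v) => println(s"would call pageBuilder.setLong(column, $v)")
    case Failure(e) => println(s"row would be skipped or stop the run: ${e.getMessage}")
  }
}
```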
data/src/main/scala/org/embulk/parser/header_based_csv/tokenizer/TokenizerHelper.scala ADDED
@@ -0,0 +1,28 @@
1
+ package org.embulk.parser.header_based_csv.tokenizer
2
+
3
+ import org.embulk.standards.CsvTokenizer
4
+
5
+ import scala.collection.mutable
6
+
7
+ object TokenizerHelper {
8
+
9
+ implicit class RichCsvTokenizer(tokenizer: CsvTokenizer) {
10
+ def forEachRecord(f: Map[String, String] => Unit): Unit = {
11
+ val header: Seq[String] = if (tokenizer.nextRecord()) parseLine(tokenizer) else Seq.empty
12
+ while (tokenizer.nextRecord()) {
13
+ val values = parseLine(tokenizer)
14
+ val record = header.zip(values).toMap
15
+ f(record)
16
+ }
17
+ }
18
+ }
19
+
20
+ private def parseLine(tokenizer: CsvTokenizer): Seq[String] = {
21
+ val values = mutable.ListBuffer.empty[String]
22
+ while (tokenizer.hasNextColumn) {
23
+ values += tokenizer.nextColumn()
24
+ }
25
+ values.reverse.toList
26
+ }
27
+
28
+ }
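To make the header handling concrete, here is a standalone sketch of what `forEachRecord` yields: the first parsed record becomes the header, and every following record is zipped with it into a Map (this uses the rows of `csv/test_1.csv` and bypasses `CsvTokenizer` entirely):

```
object ForEachRecordSketch extends App {
  val lines  = List(Seq("b", "a", "c"), Seq("2", "1", "3"), Seq("5", "4", "6"))
  val header = lines.head
  lines.tail.foreach { values =>
    println(header.zip(values).toMap) // Map(b -> 2, a -> 1, c -> 3), then Map(b -> 5, a -> 4, c -> 6)
  }
}
```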
data/src/test/scala/org/embulk/parser/header_based_csv/TestHeaderBasedCsvParserPluginSpec.scala ADDED
@@ -0,0 +1,3 @@
1
+ package org.embulk.parser.header_based_csv
2
+
3
+ class TestHeaderBasedCsvParserPluginSpec
data/test.yml ADDED
@@ -0,0 +1,11 @@
1
+ in:
2
+ type: file
3
+ path_prefix: csv
4
+ parser:
5
+ type: header_based_csv
6
+ columns:
7
+ - {name: a, type: long}
8
+ - {name: b, type: string}
9
+ - {name: c, type: double}
10
+ out:
11
+ type: stdout
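Assuming the plugin has been built locally (see the README's Build section and the `package` task in build.gradle, which suggests running Embulk with a `-L` argument), this sample config could presumably be exercised from the repository root with something like:

```
$ ./gradlew package
$ embulk preview -L . test.yml    # or: embulk run -L . test.yml
```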
metadata ADDED
@@ -0,0 +1,98 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: embulk-parser-header_based_csv
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.0
5
+ platform: ruby
6
+ authors:
7
+ - k_higuchi
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+ date: 2018-08-27 00:00:00.000000000 Z
12
+ dependencies:
13
+ - !ruby/object:Gem::Dependency
14
+ requirement: !ruby/object:Gem::Requirement
15
+ requirements:
16
+ - - "~>"
17
+ - !ruby/object:Gem::Version
18
+ version: '1.0'
19
+ name: bundler
20
+ prerelease: false
21
+ type: :development
22
+ version_requirements: !ruby/object:Gem::Requirement
23
+ requirements:
24
+ - - "~>"
25
+ - !ruby/object:Gem::Version
26
+ version: '1.0'
27
+ - !ruby/object:Gem::Dependency
28
+ requirement: !ruby/object:Gem::Requirement
29
+ requirements:
30
+ - - ">="
31
+ - !ruby/object:Gem::Version
32
+ version: '10.0'
33
+ name: rake
34
+ prerelease: false
35
+ type: :development
36
+ version_requirements: !ruby/object:Gem::Requirement
37
+ requirements:
38
+ - - ">="
39
+ - !ruby/object:Gem::Version
40
+ version: '10.0'
41
+ description: Parses Header Based Csv files read by other file input plugins.
42
+ email:
43
+ - k_higuchi@septeni-original.co.jp
44
+ executables: []
45
+ extensions: []
46
+ extra_rdoc_files: []
47
+ files:
48
+ - ".gitignore"
49
+ - ".gitlab-ci.yml"
50
+ - ".scalafmt.conf"
51
+ - LICENSE.txt
52
+ - README.md
53
+ - build.gradle
54
+ - classpath/commons-compress-1.10.jar
55
+ - classpath/embulk-parser-header_based_csv-0.1.0.jar
56
+ - classpath/embulk-standards-0.9.7.jar
57
+ - classpath/scala-library-2.12.6.jar
58
+ - csv/test_1.csv
59
+ - csv/test_2.csv
60
+ - gradle/wrapper/gradle-wrapper.jar
61
+ - gradle/wrapper/gradle-wrapper.properties
62
+ - gradlew
63
+ - gradlew.bat
64
+ - lib/embulk/guess/header_based_csv.rb
65
+ - lib/embulk/parser/header_based_csv.rb
66
+ - src/main/scala/helper/GoogleOptionalHelper.scala
67
+ - src/main/scala/org/embulk/parser/header_based_csv/HeaderBasedCsvParserPlugin.scala
68
+ - src/main/scala/org/embulk/parser/header_based_csv/exception/ColumnValueNotFoundException.scala
69
+ - src/main/scala/org/embulk/parser/header_based_csv/exception/TooManyColumnValuesException.scala
70
+ - src/main/scala/org/embulk/parser/header_based_csv/output/ColumnValueWriter.scala
71
+ - src/main/scala/org/embulk/parser/header_based_csv/tokenizer/TokenizerHelper.scala
72
+ - src/test/scala/org/embulk/parser/header_based_csv/TestHeaderBasedCsvParserPluginSpec.scala
73
+ - test.yml
74
+ homepage:
75
+ licenses:
76
+ - MIT
77
+ metadata: {}
78
+ post_install_message:
79
+ rdoc_options: []
80
+ require_paths:
81
+ - lib
82
+ required_ruby_version: !ruby/object:Gem::Requirement
83
+ requirements:
84
+ - - ">="
85
+ - !ruby/object:Gem::Version
86
+ version: '0'
87
+ required_rubygems_version: !ruby/object:Gem::Requirement
88
+ requirements:
89
+ - - ">="
90
+ - !ruby/object:Gem::Version
91
+ version: '0'
92
+ requirements: []
93
+ rubyforge_project:
94
+ rubygems_version: 2.6.8
95
+ signing_key:
96
+ specification_version: 4
97
+ summary: Header Based Csv parser plugin for Embulk
98
+ test_files: []