dataMetaByteSer 1.0.4 → 1.0.6

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 998d9dae64a2fc1570164a16321daece8d9fcf87
-  data.tar.gz: f530f6555ab927b67f3ac903bf64455eca93ab0a
+  metadata.gz: 4ac6e0dbe32386d286c0b4d71a43b8b95e69a0b2
+  data.tar.gz: dd090b22b2dc22fc3959bbb477f1a725b2b6f7a4
 SHA512:
-  metadata.gz: a65bce5a7bd19c3979d3340ae94b8db47e9ca067f93e5df9ae5504b415f63cef0886725cea2ea97cfe2493cf490457c6b88ab3bb1475e72532c86e558333f92a
-  data.tar.gz: eef113b2c9a3b325eaec22f85f49d0b202720a6f493ad9499d0779dc85e4df451b0ea917464d4d31c88ab698211023daab1190c69f6d4044e58a0b5f15cc3ffc
+  metadata.gz: 7e47bc0ff4c61a4c5db7cffab3e42b4cf0d0a62515fb18793bf74f220a1c5f4bbec2affb0674d235c022c9e5fe91e9b5287e5129154ebc202c7e57b0d6e904de
+  data.tar.gz: 40d1ce4473189409bc5f9ad00afccd39634039c26323bf923ad0e0474a364abb69b8bfd35f01436745887bdff182552912211c7d0eae9cf6c1c629e086184362
data/History.md CHANGED
@@ -1,5 +1,13 @@
 # `dataMetaByteSer` Release history:
 
+## `1.0.6` - `2018-05-13 Sun` by [`mub`](http://github.com/mub)
+* Update:
+    * Support for Enum serialization
+
+## `1.0.5` - `2017-05-27 Sat` by [`mub`](http://github.com/mub)
+* Update:
+    * Altered the Read Switch generator code to keep existing files.
+
 ## `1.0.4` - `2017-04-04 Tue` by [`mub`](http://github.com/mub)
 * Update:
     * Upgraded to the core version `1.0.6`
@@ -14,7 +14,7 @@ For command line details either check the new method's source or the README.rdoc
 =end
 module DataMetaByteSer
     # Current version
-    VERSION = '1.0.4'
+    VERSION = '1.0.6'
     include DataMetaDom, DataMetaDom::PojoLexer
 
 =begin rdoc
@@ -106,12 +106,11 @@ HDFS Reader and Writer the raw data type, the byte array.
 =end
     RAW_RW_METHODS = RwHolder.new(
         lambda { |ctx|
-            aggrNotSupported(ctx.fld, 'Raw Data') if ctx.fld.aggr
-            ctx.rw.call('readByteArray(in)')
+            ctx.fld.aggr ? ctx.rw.call("read#{aggrBaseName(aggrJavaFull(ctx.fld.aggr))}Enum(in, #{ctx.fld.dataType.type}.class)") : ctx.rw.call('readByteArray(in)')
         },
         lambda { |ctx|
             aggrNotSupported(ctx.fld, 'Raw Data') if ctx.fld.aggr
-            "writeByteArray(out, val.#{ctx.valGetter})" }
+            ctx.fld.aggr ? "write#{aggrBaseName(aggrJavaFull(ctx.fld.aggr))}ZonedDateTime(out, val.#{ctx.valGetter})" : "writeByteArray(out, val.#{ctx.valGetter})" }
     )
 
 =begin rdoc
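An `RwHolder` pairs a read lambda with a write lambda; each yields a Java expression string (the read side wrapped through `ctx.rw`) that the generator appears to splice into the serializers it emits. In this raw-data hunk the new aggregate branches reuse helper names from other type handlers (`…Enum` on the read side, `…ZonedDateTime` on the write side), and the unchanged `aggrNotSupported` guard still runs before the write-side ternary. A minimal sketch of how the non-aggregate branch evaluates, with a hypothetical context object and getter name (the real `ctx` comes from the generator and is not part of this diff):

```ruby
# Illustrative stand-in only: field, getter and rw behaviour are assumptions,
# not the gem's actual context API.
require 'ostruct'

ctx = OpenStruct.new(
  fld: OpenStruct.new(aggr: nil),   # nil => plain (non-aggregate) byte-array field
  valGetter: 'getPayload()',
  rw: ->(expr) { expr }             # the real ctx.rw wraps the read expression; identity here
)

read_expr  = ctx.fld.aggr ? '/* aggregate helper call */' : ctx.rw.call('readByteArray(in)')
write_expr = "writeByteArray(out, val.#{ctx.valGetter})"

puts read_expr    # readByteArray(in)
puts write_expr   # writeByteArray(out, val.getPayload())
```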
@@ -125,12 +124,10 @@ HDFS Reader and Writer the Java Enums.
 =end
     ENUM_RW_METHODS = RwHolder.new(
         lambda{|ctx|
-            aggrNotSupported(ctx.fld, 'Enums') if ctx.fld.aggr
-            "#{DataMetaDom.condenseType(ctx.fType.type, ctx.pckg)}.forOrd(readVInt(in))"
+            ctx.fld.aggr ? "read#{aggrBaseName(aggrJavaFull(ctx.fld.aggr))}Enum(in, #{ctx.fType.type}.class)" : "#{DataMetaDom.condenseType(ctx.fType.type, ctx.pckg)}.forOrd(readVInt(in))"
         },
         lambda { |ctx|
-            aggrNotSupported(ctx.fld, 'Enums') if ctx.fld.aggr
-            "writeVInt(out, val.#{ctx.valGetter}.ordinal())"
+            ctx.fld.aggr ? "write#{aggrBaseName(aggrJavaFull(ctx.fld.aggr))}Enum(out, val.#{ctx.valGetter})" : "writeVInt(out, val.#{ctx.valGetter}.ordinal())"
         }
     )
 
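The scalar enum path keeps the 1.0.4 wire format: the ordinal is written as a variable-length int and read back through `forOrd`. Aggregates of enums now route to `read…Enum`/`write…Enum` helpers whose base name comes from `aggrBaseName(aggrJavaFull(...))`. A rough sketch of the two shapes of generated expressions, using a hypothetical enum field (class, getter and helper base names below are illustrative, not taken from the gem):

```ruby
# Scalar branch (ctx.fld.aggr is nil) -- same expressions as 1.0.4, for a
# hypothetical enum com.acme.Color with getter getColor():
scalar_read  = 'com.acme.Color.forOrd(readVInt(in))'
scalar_write = 'writeVInt(out, val.getColor().ordinal())'

# Aggregate branch -- assuming aggrBaseName(aggrJavaFull(...)) resolves to "Set"
# for a java.util.Set aggregate with getter getColors():
aggr_read  = 'readSetEnum(in, com.acme.Color.class)'
aggr_write = 'writeSetEnum(out, val.getColors())'
```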
@@ -175,6 +172,7 @@ Read/write methods for the standard data types.
             NUMERIC => NUMERIC_RW_METHODS,
             URL => URL_RW_METHODS
     }
+
     # DataMeta DOM object renderer
     RECORD_RW_METHODS = RwHolder.new(
         lambda { |ctx|
@@ -29,11 +29,11 @@ Builds a class name for a InOutable.
     end
 
     def mapsNotSupported(fld)
-        raise ArgumentError, "Field #{fld.name}: maps are not currently supported on Hadoop layer"
+        raise ArgumentError, "Field #{fld.name}: maps are not currently supported for Byte Array format"
     end
 
     def aggrNotSupported(fld, forWhat)
-        raise ArgumentError, "Field #{fld.name}: aggregate types are not supported for #{forWhat} on Hadoop layer"
+        raise ArgumentError, "Field #{fld.name}: aggregate types are not supported for #{forWhat} for Byte Array format"
     end
 
 =begin rdoc
@@ -40,9 +40,19 @@ Generates Versioned Read switch that channels the read to the proper migration s
     javaClassName = "Read__Switch_v#{ver1.toVarName}_to_v#{ver2.toVarName}"
     destDir = File.join(outRoot, packagePath)
     FileUtils.mkdir_p destDir
-    IO::write(File.join(destDir, "#{javaClassName}.java"),
-        ERB.new(IO.read(File.join(File.dirname(__FILE__), '../../tmpl/readSwitch.erb')),
-            $SAFE, '%<>').result(binding), mode: 'wb')
+    javaDestFile = File.join(destDir, "#{javaClassName}.java")
+
+    skippedCount = 0
+    if File.file?(javaDestFile)
+        skippedCount += 1
+        $stderr.puts %<Read switch target "#{javaDestFile} present, therefore skipped">
+    else
+        IO::write(javaDestFile,
+            ERB.new(IO.read(File.join(File.dirname(__FILE__), '../../tmpl/readSwitch.erb')),
+                $SAFE, '%<>').result(binding), mode: 'wb')
+    end
+
+    $stderr.puts %<Read Switch targets skipped: #{skippedCount}> if skippedCount > 0
 end
 module_function :genVerReadSwitch
 end
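Per the `1.0.5` changelog entry above, the generator now leaves an existing read-switch source in place and only reports how many targets it skipped, instead of regenerating the file on every run. Stripped of the ERB machinery, the guard is a write-unless-present pattern; a minimal standalone sketch with made-up paths and content:

```ruby
# Minimal sketch of the keep-existing-files behaviour, outside the generator:
# skip targets that already exist and count the skips.
require 'fileutils'

def write_unless_present(path, content)
  if File.file?(path)
    $stderr.puts %<Target "#{path}" present, therefore skipped>
    return false
  end
  FileUtils.mkdir_p(File.dirname(path))
  IO.write(path, content, mode: 'wb')
  true
end

skipped = 0
skipped += 1 unless write_unless_present('out/Read__Switch_v1_to_v2.java', '// generated')
$stderr.puts %<Targets skipped: #{skipped}> if skipped > 0
```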
data/tmpl/readSwitch.erb CHANGED
@@ -29,7 +29,7 @@ public class <%=javaClassName%> {
         if(ver.equals(<%=vars.baseName%>.VERSION)) {
             return <%=vars.baseName%>_InOutable.getInstance().read(in);
 <%
-    while vars.versCases.length > 1 # loop through the case statement - a version per each
+    while vars.versCases.length > 0 # loop through the case statement - a version per each
     vars.switchTargVer = vars.versCases.shift
     vars.brackets = ''
     caseObjName = flipVer(trgE.name, ver2.toVarName, vars.switchTargVer.toVarName)
@@ -38,7 +38,7 @@ public class <%=javaClassName%> {
 %>
         }
         else if(ver.equals(<%=caseObjName%>.VERSION)){<% vars.versMigr = vers.clone.select{|v| v <= ver2}.sort{|x, y| y<=>x}%>
-            return <% while vars.versMigr.length > 1 # migration steps loop nested in the case statement loop
+            return <% while vars.versMigr.length > 0 # migration steps loop nested in the case statement loop
     vars.brackets << ')'
     vars.migrTargVer = vars.versMigr.shift # target version for migration loop
     vars.srcVer = vars.versMigr[0]
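Both guard changes in this template relax the same loop boundary: shifting while `length > 1` stops before the last queued version, whereas `length > 0` drains the queue, so the final version in each loop (the last case and the last migration step) is now handled too. A tiny illustration with a made-up version list:

```ruby
# Made-up version list, just to show which elements each guard processes.
versions = %w[v3 v2 v1]

emitted_old = []
queue = versions.dup
emitted_old << queue.shift while queue.length > 1   # old guard: stops with one element left
# emitted_old == ["v3", "v2"]

emitted_new = []
queue = versions.dup
emitted_new << queue.shift while queue.length > 0   # new guard: processes every element
# emitted_new == ["v3", "v2", "v1"]
```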
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: dataMetaByteSer
 version: !ruby/object:Gem::Version
-  version: 1.0.4
+  version: 1.0.6
 platform: ruby
 authors:
 - Michael Bergens
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-04-04 00:00:00.000000000 Z
+date: 2018-05-13 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: dataMetaDom
@@ -72,9 +72,9 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 requirements:
 - Hadoop libraries
 rubyforge_project:
-rubygems_version: 2.5.1
+rubygems_version: 2.5.2.1
 signing_key:
 specification_version: 4
-summary: DataMeta Byte Array Serializers Gen
+summary: DataMeta Byte Array Serializers Gem
 test_files:
 - test/test_dataMetaByteSer.rb