dbc 1.1.2 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/bin/dbcparse.rb +33 -28
- data/lib/dbc/ctokenizer.rb +113 -122
- data/lib/dbc/ctype.rb +2 -2
- data/lib/dbc/dbc.rb +6 -0
- data/lib/dbc/define.rb +14 -16
- data/lib/dbc/ocl.rb +34 -32
- data/lib/dbc/parameters.rb +6 -6
- data/lib/dbc/parseerrorhandler.rb +9 -6
- data/lib/dbc/preprocessor.rb +1152 -912
- metadata +2 -2
data/bin/dbcparse.rb
CHANGED
@@ -16,9 +16,9 @@ require 'getoptlong'
 
 command_dir = File.dirname($0)
 command = File.basename($0)
-
-
-
+if $0 != command
+  $:.unshift(File.join(command_dir,'..','lib'))
+end
 require 'dbc/searchpath'
 require 'dbc/preprocessor'
 require 'dbc/expand_function'
@@ -32,11 +32,11 @@ class GetoptLong
 end
 
 opts = GetoptLong.new(
+  [ "--always-output", "-a", GetoptLong::NO_ARGUMENT ],
   [ "--docs", "-d", GetoptLong::NO_ARGUMENT ],
   [ "--help", "-h", GetoptLong::NO_ARGUMENT ],
   [ "--no_line", "-n", GetoptLong::NO_ARGUMENT ],
   [ "--preprocess-only","-p", GetoptLong::NO_ARGUMENT ],
-  [ "--quiet", "-q", GetoptLong::NO_ARGUMENT ],
   # [ "--trace", "-t", GetoptLong::NO_ARGUMENT ],
   [ "--check_level", "-c", GetoptLong::REQUIRED_ARGUMENT ],
   [ "--output", "-o", GetoptLong::REQUIRED_ARGUMENT ],
@@ -46,18 +46,19 @@ opts = GetoptLong.new(
 )
 
 # initialize with defaults
-line_info = true
-quiet = false
 dest_file = nil
 src_file = nil
-# could make overwrite an option in the future
-overwrite = true
-docs = nil
-preprocess_only = nil
-check_level = nil
 search_path = SearchPath.new
 defines = []
 
+always_output = false
+check_level = nil
+docs = nil
+line_info = true
+preprocess_only = nil
+
+overwrite = true # unused
+
 begin
   i = 1
   opts.each do |opt, arg|
@@ -88,20 +89,20 @@ begin
        opts.error("multiple output files give")
      end
      dest_file = arg
-    when "--
-    when "--
-    when "--
-    when "--
+    when "--always-output" then always_output = true
+    when "--docs" then docs = true
+    when "--no_line" then line_info = false
+    when "--preprocess-only" then preprocess_only = true
     when "--help"
       puts "Converts OCL design by contract tags to C code."
       puts "Usage:"
       puts "\t#{command} [options] [commands] input_file"
       puts "Options:"
+      puts "\t-a, --always-output : always output to destination file"
       puts "\t-d, --docs : generate Doxygen documentation"
-      puts "\t-p, --preprocess-only : preprocess input files only"
       puts "\t-h, --help : print this message"
       puts "\t-n, --no_line : do not output '\#line' directives"
-      puts "\t-
+      puts "\t-p, --preprocess-only : preprocess input files only"
       puts "Commands:"
       puts "\t-o, --output <file>"
       puts "\t-c, --check_level <0,1,2>"
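For orientation, a typical invocation of the updated command line might look like the following. The flags come from the option table and help text above; the file names are hypothetical.

    # Hypothetical examples (file names are made up):
    # convert contracts at check level 2, writing checked.c even if an error occurs
    dbcparse.rb --always-output --check_level 2 --output checked.c stack.c

    # preprocess only, suppress #line directives, result goes to stdout
    dbcparse.rb --preprocess-only --no_line stack.c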
@@ -193,21 +194,16 @@ defines << ['__STDC__', nil, '1']
 # included files
 includes = {}
 
-if dest_file
-  out = File.new(dest_file, 'w')
-else
-  out = STDOUT
-end
-
 # Cache Tokens => Preprocessor => Parse OCL => Parse C Types
 # Cached tokens are output.
 if not docs and check_level == DBC::NONE
-
+  out_str = text
 else
   begin
     if docs
-
+      out_str = DBC.parse_docs(CTokenizer::Lexer.new(text, src_file))
     elsif preprocess_only
+      out_str = ''
       preproc = Preprocessor::Parser.new(text, src_file) do |f|
         if inc_text = includes[f]
           inc_text
@@ -221,9 +217,10 @@ else
      end
      defines.each { |d,p,v| preproc.define(d, p, v) }
      preproc.each do |t|
-
+        out_str << t[1]
      end
    else
+      out_str = ''
      # cache statements
      cache = DBC::Cache.new(text, src_file)
      # preprocesses all tokens
@@ -262,11 +259,11 @@ else
        unless context.first
          raise CTokenizer.error(nil, line, "unmatched braket")
        end
-
+        out_str << expand_function(source.conditions, context.first, \
          stmt, line, line_info)
      else
        stmt.each do |t|
-
+          out_str << t[1]
        end
      end
    end
@@ -274,6 +271,14 @@ else
  rescue CTokenizer::Error, CType::EvaluationError
    warn $!
    exit(-1)
+  ensure
+    if !$! or always_output
+      if dest_file
+        File.open(dest_file, 'w') { |f| f.write(out_str) }
+      else
+        $stdout.write(out_str)
+      end
+    end
  end
 end
 
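Taken together, the dbcparse.rb changes replace direct writes to an open file handle with a buffered out_str that is flushed in an ensure block, so the destination is written only when processing succeeds or when --always-output is set. A minimal sketch of that pattern, reusing the variable names from the diff (the processing step is a stand-in):

    out_str = ''
    begin
      out_str << process(text)   # stand-in for the real preprocessing work
    rescue StandardError
      warn $!
      exit(-1)
    ensure
      # $! is nil when no exception is pending; -a/--always-output forces the write
      if !$! or always_output
        if dest_file
          File.open(dest_file, 'w') { |f| f.write(out_str) }
        else
          $stdout.write(out_str)
        end
      end
    end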
data/lib/dbc/ctokenizer.rb
CHANGED
@@ -3,7 +3,50 @@
 # THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
 # See included LICENCE file.
 
+require 'strscan'
+
 module CTokenizer
+  EOF_TOKEN = [false, false].freeze
+
+  module Expression
+    NEWLINE = /\r\n|\n\r|\r|\n/
+
+    SPACE_1 = /[\t ]+/
+    SPACE_2 = /\\[\t ]*#{NEWLINE}/
+    SPACE = %r(#{SPACE_1}|#{SPACE_2})
+
+    IDENTIFIER = /[a-zA-Z_]\w*/
+
+    COMMENT_1 = /\/\*.*?\*\//m
+    # scarry comment - bad style - beward of '\' at end of line...
+    COMMENT_2 = /\/\/(?:\\[ \t]*#{NEWLINE}|[^\r\n])+/m
+    COMMENT = %r(#{COMMENT_1}|#{COMMENT_2})m
+
+    SYMBOL_1 = /\+=|\-=|\*=|\/=|%=|\&=|\^=|\|=|<<=|>>=|##|\.\.\./
+    SYMBOL_2 = /==|!=|<=|>=|->|\&\&|\|\||<<|>>|\+\+|\-\-|<:|:>|<%|%>/
+    SYMBOL_3 = /[\(\)\[\]\{\}\|\&\+\-\/\*%<>\.,=!:;\?\^~#]/
+    SYMBOL = %r(#{SYMBOL_1}|#{SYMBOL_2}|#{SYMBOL_3})
+
+    CHARACTER = /L?'(?:[^']|\\.)*'/
+    STRING = /L?"(?:[^"]|\\.)*"/
+
+    # Note: FLOAT should come before INTEGER
+    f_e = /[eE][-+]?[0-9]+/
+    f_s = /[fFlL]/
+    FLOAT_1 = /[0-9]+#{f_e}#{f_s}?/
+    FLOAT_2 = /[0-9]*\.[0-9]+#{f_e}?#{f_s}?/
+    # FLOAT_3 causes ambiguities... :(
+    #FLOAT_3 = /[0-9]+\.[0-9]*#{f_e}?#{f_s}?/
+    #FLOAT = %r(#{FLOAT_1}|#{FLOAT_2}|#{FLOAT_3})
+    FLOAT = %r(#{FLOAT_1}|#{FLOAT_2})
+
+    i_s = /[uU]?[lL]|[lL][uU]?/
+    INTEGER_1 = /0[xX][0-9a-fA-F]+#{i_s}?/
+    INTEGER_2 = /0[0-7]+#{i_s}?/
+    INTEGER_3 = /[0-9]+#{i_s}?/
+    INTEGER = %r(#{INTEGER_1}|#{INTEGER_2}|#{INTEGER_3})
+  end # Expression
+
   class Error < StandardError
     def initialize(file, line)
       @file = file
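The new Expression constants are written for StringScanner, which anchors every match at the current scan position, so the \A prefixes of the old split_token regexps are no longer needed. A self-contained illustration of the idea, using simplified stand-ins for the constants above rather than the gem's own code:

    require 'strscan'

    # Simplified stand-ins for CTokenizer::Expression patterns (not the gem's definitions)
    SPACE      = /[\t ]+/
    IDENTIFIER = /[a-zA-Z_]\w*/
    INTEGER    = /[0-9]+/

    s = StringScanner.new('x = 42')
    s.scan(IDENTIFIER)  # => "x"
    s.scan(SPACE)       # => " "
    s.scan(/=/)         # => "="
    s.scan(SPACE)       # => " "
    s.scan(INTEGER)     # => "42"
    s.eos?              # => true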
@@ -18,78 +61,17 @@ module CTokenizer
     raise CTokenizer::Error.new(file, line), msg
   end
 
-  def CTokenizer.check_string(str)
-    raise "expecting a String: #{str.class}" unless str.class <= String
-  end
   def CTokenizer.check_token(t)
     raise "expecting a Array[2]: #{t.inspect}" \
       unless t.class <= Array and t.length == 2
   end
 
-  def CTokenizer.create_newlines(start, finish)
-    newlines = ''
-    (finish - start).times { newlines << "\n" }
-    [:NEWLINE, newlines.freeze].freeze
-  end
-
   def CTokenizer.line_count(str)
     count = 0
-    str.scan(
+    str.scan(Expression::NEWLINE) { count += 1 } if str.class == String
     count
   end
 
-  # tokens are immutable
-  def CTokenizer.split_token(str)
-    check_string(str)
-    # would be easier if '\n' was the only kind of newline....
-    token = case str
-    when /\A[\t ]+/o
-      [:SPACE, $&]
-    when /\A(?:\r\n|\n\r|\r|\n)/o
-      [:NEWLINE, $&]
-    when /\A\\[\t ]*(?:\r\n|\n\r|\r|\n)/o
-      [:SPACE, $&]
-    when /\A\/\*.*?\*\//m
-      [:COMMENT, $&]
-    when /\A\/\/(?:\\[ \t]*(?:\r\n|\n\r|\r|\n)|[^\r\n])+/o
-      # scarry comment - bad style - beward of line \ at end of line...
-      [:COMMENT, $&]
-    when /\A(?:\+=|\-=|\*=|\/=|%=|\&=|\^=|\|=|<<=|>>=|##|\.\.\.)/
-      [:SYMBOL, $&]
-    when /\A(?:==|!=|<=|>=|->|\&\&|\|\||<<|>>|\+\+|\-\-)/o
-      [:SYMBOL, $&]
-    when /\A(?:<:|:>|<%|%>)/o
-      [:SYMBOL, $&]
-    when /\A[\(\)\[\]\{\}\|\&\+\-\/\*%<>\.,=!:;\?\^~#]/o
-      [:SYMBOL, $&]
-    when /\AL?'(?:[^']|\\.)*'/o
-      [:CHARACTER, $&]
-    when /\AL?"(?:[^"]|\\.)*"/o
-      [:STRING, $&]
-    when /\A[a-zA-Z_]\w*/o
-      [:IDENTIFIER, $&]
-    # FLOAT should come before INTEGER
-    when /\A(?:[0-9]*\.[0-9]+)|(?:[0-9]+\.)[eE][-+]?[0-9]+?[fFlL]?/o
-      [:FLOAT, $&]
-    when /\A[0-9]+[eE][-+]?[0-9]+[fFlL]?/o
-      [:FLOAT, $&]
-    when /\A0[xX][0-9a-fA-F]+(?:(?:[uU][lL]?)|(?:[lL][uU]?)?)/o
-      [:INTEGER, $&]
-    when /\A0[0-7]+(?:(?:[uU][lL]?)|(?:[lL][uU]?)?)/o
-      [:INTEGER, $&]
-    when /\A\d+(?:(?:[uU][lL]?)|(?:[lL][uU]?)?)/o
-      [:INTEGER, $&]
-    when /\A\Z/o
-      [false, false] # end of file
-    when /\A./m
-      [:UNKNOWN, $&]
-    else
-      raise "shouldn't get here!"
-    end # case
-    token[1].freeze
-    [token.freeze, $']
-  end
-
   def CTokenizer.whitespace?(t)
     case t[0]
     when :SPACE, :NEWLINE, :COMMENT
@@ -98,23 +80,6 @@ module CTokenizer
       false
     end
   end
-
-  def CTokenizer.split(str)
-    tokens = []
-    until str.empty?
-      t, str = CTokenizer.split_token(str)
-      tokens << t
-    end # until
-    tokens
-  end
-
-  def CTokenizer.join(tokens)
-    str = ''
-    tokens.each do |t|
-      str << t[1]
-    end
-    str
-  end
 
   def error(msg)
     CTokenizer.error(file, line, msg)
@@ -155,37 +120,56 @@ module CTokenizer
     ary
   end
 
-  class Lexer
-    # C Lexer
+  class Lexer < StringScanner
+    # C Lexer
     include CTokenizer
 
     def initialize(str, file=nil, line=1)
-
-
+      str.freeze
+      super(str, false) # DO NOT dup str
       @file = file
       @line = line
     end
 
     attr_reader :file, :line
 
-
-      t = nil
-      tmp_rest = @rest # @rest is unchanged
-      loop do
-        t, tmp_rest = CTokenizer.split_token(tmp_rest)
-        break unless CTokenizer.whitespace?(t)
-      end
-      t
-    end
-
-    def empty?
-      @rest.empty?
-    end
+    alias empty? eos?
 
     def shift
-      t
-
-
+      # don't need \A in regexp's cause StringScanner does this automatically.
+      t = case
+      when m = scan(Expression::SPACE)
+        @line += CTokenizer.line_count(m)
+        [:SPACE, m]
+      when m = scan(Expression::IDENTIFIER)
+        [:IDENTIFIER, m]
+      when m = scan(Expression::COMMENT)
+        @line += CTokenizer.line_count(m)
+        [:COMMENT, m]
+      when m = scan(Expression::SYMBOL)
+        [:SYMBOL, m]
+      when m = scan(Expression::NEWLINE)
+        @line += CTokenizer.line_count(m)
+        [:NEWLINE, m]
+      # FLOAT should come before INTEGER
+      when m = scan(Expression::FLOAT)
+        [:FLOAT, m]
+      when m = scan(Expression::INTEGER)
+        [:INTEGER, m]
+      when m = scan(Expression::CHARACTER)
+        [:CHARACTER, m]
+      when m = scan(Expression::STRING)
+        [:STRING, m]
+      when eos?
+        EOF_TOKEN # end of file, \Z don't work with StringScanner
+      when m = getch
+        @line += CTokenizer.line_count(m)
+        [:UNKNOWN, m]
+      else
+        raise "shouldn't get here!"
+      end # case
+      m.freeze
+      t.freeze
     end
 
   end
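After this rewrite, Lexer is a StringScanner subclass and shift returns one frozen [kind, text] pair per call, yielding EOF_TOKEN ([false, false]) at end of input. A hedged usage sketch; the require path is assumed from the gem's lib/ layout:

    require 'dbc/ctokenizer'   # assumed entry point for this file

    lexer = CTokenizer::Lexer.new('int x = 10;', 'example.c')
    tokens = []
    loop do
      kind, text = lexer.shift
      break unless kind          # EOF_TOKEN has kind == false
      tokens << [kind, text]
    end
    # tokens now holds pairs such as [:IDENTIFIER, "int"], [:SPACE, " "],
    # [:IDENTIFIER, "x"], ..., [:INTEGER, "10"], [:SYMBOL, ";"]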
@@ -239,6 +223,20 @@ module CTokenizer
     end
   end
 
+  attr_reader :source
+
+  def scan(regexp)
+    @source.scan(regexp)
+  end
+
+  def match?(regexp)
+    @source.match?(regexp)
+  end
+
+  def post_match
+    @source.post_match
+  end
+
   def file
     @source.file
   end
@@ -246,14 +244,8 @@ module CTokenizer
     @source.line
   end
 
-  def peek_nonspace
-    @source.peek_nonspace
-  end
-
   def shift
-
-    CTokenizer.check_token(t)
-    t
+    @source.shift
   end
 
   def empty?
@@ -279,7 +271,6 @@ module CTokenizer
   end
   def shift
     t = @cache.shift
-    CTokenizer.check_token(t)
     @line += CTokenizer.line_count(t[1])
     t
   end
@@ -330,30 +321,29 @@ module CTokenizer
 
   # C Lexer
   class CLexer < LexerBase
-
+
     def CLexer.reserved_word?(str)
       str =~ /\A(?:auto|break|case|char|const|continue|default|do|double|else|enum|extern|float|for|goto|if|inline|int|long|register|return|short|signed|sizeof|static|struct|switch|typedef|union|unsigned|void|volatile|while)\Z/o
     end
 
     def CLexer.convert_token(t)
+      str = t[1]
       case t[0]
      when :SYMBOL
-
+        case str
        when '<:'
-          '['
+          str = '['
        when ':>'
-          ']'
+          str = ']'
        when '<%'
-          '{'
+          str = '{'
        when '%>'
-          '}'
-        else
-          t[1]
+          str = '}'
        end # case
-        t = [
+        t = [str, str].freeze
      when :IDENTIFIER
-        if CLexer.reserved_word?(
-          t = [
+        if CLexer.reserved_word?(str)
+          t = [str, str].freeze
        end
      end #case
      t
@@ -373,19 +363,20 @@ module CTokenizer
     end
 
     def CPLexer.convert_token(t)
+      str = t[1]
       case t[0]
      when :SYMBOL
-        case
+        case str
        when '!', '*', '/', '%', '+', '-', \
          '<<', '>>', '(', ')', \
          '<', '<=', '>', '>=', '==', '!=', \
          '&', '^', '|', '&&', '||', \
          '?', ':', '#', '##', '...', ','
-          t = [
+          t = [str, str].freeze
        end # case
      when :IDENTIFIER
-        if CPLexer.reserved_word?(
-          t = [
+        if CPLexer.reserved_word?(str)
+          t = [str, str].freeze
        end
      end # case
      t