dbc 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,281 @@
1
+ #!/usr/bin/env ruby
2
+
3
+ # Copyright (c) 2004 Charles M Mills
4
+ # This document is licenced under The MIT Licence.
5
+ # THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
6
+ # See included LICENCE file.
7
+
8
# Compatibility shim for Ruby 1.6: the $stdin/$stdout/$stderr globals did not
# exist there, so alias them to the constant stream objects and warn the user.
if RUBY_VERSION =~ /\A1\.6/
  warn("attempting to run with Ruby 1.6.* (should be using Ruby 1.8+)")
  $stdin  = STDIN
  $stdout = STDOUT
  $stderr = STDERR
end
14
+
15
+ require 'getoptlong'
16
+
17
# Resolve this command's own directory and basename ($0 is the invoked path);
# the basename is reused in the --help usage text.
command_dir = File.dirname($0)
command = File.basename($0)
# Historically the command directory was prepended to the load path:
# if $0 != command
#   $:.unshift(command_dir)
# end
22
+ require 'dbc/searchpath'
23
+ require 'dbc/preprocessor'
24
+ require 'dbc/expand_function'
25
+ require 'dbc/ctype'
26
+ require 'dbc/dbc'
27
+
28
# Re-open GetoptLong so the option-handling code below can report usage
# problems through the same GetoptLong::Error channel the parser itself uses.
class GetoptLong
  def error(message)
    raise GetoptLong::Error, message
  end
end
33
+
34
# Command-line interface: every long/short option pair accepted by dbc.
option_spec = [
  ["--docs",            "-d", GetoptLong::NO_ARGUMENT],
  ["--help",            "-h", GetoptLong::NO_ARGUMENT],
  ["--no_line",         "-n", GetoptLong::NO_ARGUMENT],
  ["--preprocess-only", "-p", GetoptLong::NO_ARGUMENT],
  ["--quiet",           "-q", GetoptLong::NO_ARGUMENT],
  # ["--trace",         "-t", GetoptLong::NO_ARGUMENT],
  ["--check_level",     "-c", GetoptLong::REQUIRED_ARGUMENT],
  ["--output",          "-o", GetoptLong::REQUIRED_ARGUMENT],
  # cc compatibility options
  ["--define",          "-D", GetoptLong::REQUIRED_ARGUMENT],
  ["--include",         "-I", GetoptLong::REQUIRED_ARGUMENT]
]
opts = GetoptLong.new(*option_spec)
47
+
48
# Initialize all run-time settings with their defaults.
line_info       = true   # emit '#line' directives (disabled by --no_line)
quiet           = false  # set by --quiet
dest_file       = nil    # --output target; nil means write to STDOUT
src_file        = nil    # input path; nil means read from STDIN
# could make overwrite an option in the future
overwrite       = true
docs            = nil    # --docs mode flag
preprocess_only = nil    # --preprocess-only mode flag
check_level     = nil    # DBC check level; defaults to DBC::ALL later
search_path     = SearchPath.new  # include-file search path (-I entries)
defines         = []     # [name, params, value] macro triples (-D entries)
60
+
61
# Process every command-line option, translating each into the settings
# initialized above. Any usage problem raises GetoptLong::Error, which is
# reported with the 1-based index of the offending option and exits with 2.
# Fixes misspelled user-facing messages: "give" -> "given",
# "supress" -> "suppress", "vaild" -> "valid".
begin
  i = 1  # index of the option currently being processed (for diagnostics)
  opts.each do |opt, arg|
    case opt
    when "--check_level"
      check_level = arg.to_i
      unless DBC.valid_check_level?(check_level)
        opts.error("invalid check level: #{arg}")
      end
    when "--include"
      # should support including file in the future
      begin
        search_path << arg
      rescue
        opts.error($!)
      end
    when "--define"
      # accepts NAME, NAME=value and NAME(params)=value forms, as cc does
      unless arg =~ /\A([A-Za-z_]\w*)(?:\(([\w,\s]*)\))?(?:=(.*))?\Z/
        opts.error("macro names must be identifiers: #{arg}")
      end
      macro = $1
      opts.error("cannot use 'defined' as a macro name") if macro == 'defined'
      value = $3 || '1'  # a bare -DNAME defaults to the value 1
      params = $2.split(/\s*,\s*/) if $2
      defines << [macro, params, value]
    when "--output"
      if dest_file
        opts.error("multiple output files given")
      end
      dest_file = arg
    when "--docs" then docs = true
    when "--preprocess-only" then preprocess_only = true
    when "--quiet" then quiet = true
    when "--no_line" then line_info = false
    when "--help"
      puts "Converts OCL design by contract tags to C code."
      puts "Usage:"
      puts "\t#{command} [options] [commands] input_file"
      puts "Options:"
      puts "\t-d, --docs : generate Doxygen documentation"
      puts "\t-p, --preprocess-only : preprocess input files only"
      puts "\t-h, --help : print this message"
      puts "\t-n, --no_line : do not output '\#line' directives"
      puts "\t-q, --quiet : suppress error messages"
      puts "Commands:"
      puts "\t-o, --output <file>"
      puts "\t-c, --check_level <0,1,2>"
      puts "\t-D, --define <macro(=value)>"
      puts "\t-I, --include <directory>"
      puts ""
      puts "Check levels: 0 = none, 1 = preconditions only, 2 = all conditions."
      puts "Reads from STDIN if input file is '-' or none is given."
      puts "Outputs to STDOUT if no output file is given."
      exit(0)
    else raise GetoptLong::InvalidOption, "unrecognized option '#{opt}'"
    end # case
    i += 1
  end # each
rescue GetoptLong::InvalidOption, GetoptLong::Error
  msg = $!.message
  warn "<command line>:#{i}: #{msg} (-h will show valid options)"
  exit(2)
end
124
+
125
# Reject mutually exclusive modes: at most one of --check_level,
# --preprocess-only and --docs may be selected.
# Fix: Array#nitems (count of non-nil elements) was removed in Ruby 1.9;
# compact.size is the portable equivalent.
if [check_level, preprocess_only, docs].compact.size > 1
  warn %q{conflicting options: only one of the following may be selected:
	--check_level
	--preprocess-only
	--docs}
  exit(2)
end

# default condition is all
check_level = DBC::ALL unless check_level
135
+
136
# Validate the positional arguments: exactly one input file is accepted;
# '-' (or no argument at all) means "read from standard input".
if ARGV.length > 1
  warn "expecting one input file, got #{ARGV.length}"
  exit(2)
end

input = ARGV.first
unless input.nil? || input == '-'
  src_file = input
  unless File.file?(src_file)
    warn "file does not exist: #{src_file}"
    exit(2)
  end
end
148
+
149
# Sanity checks on the --output destination.
# Fix: File.exists? was deprecated and removed in Ruby 3.2; use File.exist?.
if dest_file
  if File.exist?(dest_file) and not overwrite
    warn "destination file exists: #{dest_file}"
    exit(2)
  end
  # this is always bad - so don't allow it even if overwrite is true
  if src_file and File.expand_path(dest_file) == File.expand_path(src_file)
    warn "destination file and source file are the same"
    exit(2)
  end
end
160
+
161
# Slurp the entire input: the named source file when one was given,
# otherwise standard input.
text =
  if src_file
    File.read(src_file)
  else
    STDIN.read
  end
168
+
169
### Compatibility with various compilers ###
# Fix: File.exists? was removed in Ruby 3.2; use File.exist?.
search_path.unshift('/usr/include') if File.exist?('/usr/include')
search_path.unshift(File.dirname(src_file)) if src_file
# search for /usr/lib/gcc*/<system>/<version>/include
Dir['/usr/lib/gcc*/*/*/include'].each do |gcc_inc|
  search_path << gcc_inc if File.directory?(gcc_inc)
end
# Pre-define platform macros the preprocessor would otherwise miss.
case RUBY_PLATFORM
when /i[0-9]86|cygwin|mingw/
  defines << ['__i386__', nil, nil]
  # I have found that these settings work best with GNU C libs;
  # experience may vary :)
  defines << ['__GLIBC_HAVE_LONG_LONG', nil, '1']
  defines << ['__extension__', nil, ' ']
when /powerpc|darwin/
  defines << ['__ppc__', nil, nil]
end
# for gcc 3.4+ compatibility
defines << ['__builtin_va_list', nil, 'int *']
defines << ['SHLIB_COMPAT', ['arg'], '(0)']
#############################################
190
+
191
# Main transformation pass.
# Pipeline: Cache Tokens => Preprocessor => Parse OCL => Parse C Types;
# the cached tokens are what is ultimately written out.
# Fix: misspelled error message "unmatched braket" -> "unmatched bracket".

# output file text
outstr = ''
# included files, keyed by include name => cached file contents
includes = {}

if not docs and check_level == DBC::NONE
  # no instrumentation and no docs requested: pass input through untouched
  outstr = text
else
  begin
    if docs
      # documentation mode: emit Doxygen output from the lexed source
      outstr = DBC.parse_docs(CTokenizer::Lexer.new(text, src_file))
    elsif preprocess_only
      preproc = Preprocessor::Parser.new(text, src_file) do |f|
        # include handler: serve cached contents or read via the search path
        if inc_text = includes[f]
          inc_text
        else
          begin
            File.open(search_path.find(f)) { |in_f| includes[f] = in_f.read }
          rescue ArgumentError
            preproc.error($!)
          end
        end
      end
      defines.each { |d,p,v| preproc.define(d, p, v) }
      preproc.each do |t|
        outstr << t[1]
      end
    else
      # cache statements
      cache = DBC::Cache.new(text, src_file)
      # preprocesses all tokens
      preproc = Preprocessor::Parser.new(cache) do |f|
        # open included files
        if inc_text = includes[f]
          inc_text
        else
          begin
            File.open(search_path.find(f)) { |in_f| includes[f] = in_f.read }
          rescue ArgumentError
            preproc.error($!)
          end
        end
      end
      # define macros passed on the command line
      defines.each { |d,p,v| preproc.define(d, p, v) }

      # extracts DBC conditions
      source = DBC::OCLParser.new(preproc)

      # extract all C statements
      DBC.parse(source) do |context, f_body|
        stmt = cache.reset
        line = stmt.line
        stmt = stmt.cache
        # remove end of file token if it exists
        stmt.pop unless stmt.empty? or stmt.last[0]

        # define C types
        context.each do |ctxt|
          CType[ctxt.identifier] = ctxt if ctxt.typedef?
        end

        if f_body and preproc.base?
          unless context.first
            raise CTokenizer.error(nil, line, "unmatched bracket")
          end
          outstr << expand_function(source.conditions, context.first, \
            stmt, line, line_info)
        else
          stmt.each do |t|
            outstr << t[1]
          end
        end
      end
    end
  rescue CTokenizer::Error, CType::EvaluationError
    # lexing / type-evaluation problems are reported, not re-raised
    warn $!
    exit(-1)
  end
end
273
+
274
# Emit the transformed text: to the --output file when given, else to STDOUT.
if dest_file
  File.open(dest_file, 'w') { |out| out.write(outstr) }
else
  STDOUT.write(outstr)
end
281
+
@@ -0,0 +1,379 @@
1
+ # Copyright (c) 2004 Charles M Mills
2
+ # This document is licenced under The MIT Licence.
3
+ # THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
4
+ # See included LICENCE file.
5
+
6
+ module CTokenizer
7
# Tokenizer error tagged with the source location it originated from.
class Error < StandardError
  def initialize(file, line)
    @file = file
    @line = line
  end

  # Prefix the underlying message with "file:line: "; the file part is
  # omitted when no file name is known.
  def to_s
    location = @file ? "#{@file}:#{@line}" : @line.to_s
    "#{location}: #{super}"
  end
end
16
+
17
# Raise a CTokenizer::Error carrying the given location and message.
def CTokenizer.error(file, line, msg)
  raise CTokenizer::Error.new(file, line), msg
end

# Sanity check: argument must be a String (or String subclass).
def CTokenizer.check_string(str)
  unless str.class <= String
    raise "expecting a String: #{str.class}"
  end
end

# Sanity check: a token is a two-element [type, text] array.
def CTokenizer.check_token(t)
  unless t.class <= Array and t.length == 2
    raise "expecting a Array[2]: #{t.inspect}"
  end
end
28
+
29
# Build a single frozen :NEWLINE token containing one "\n" per line between
# start and finish (empty when finish <= start, matching Integer#times).
def CTokenizer.create_newlines(start, finish)
  count = finish - start
  newlines = count > 0 ? "\n" * count : ''
  [:NEWLINE, newlines.freeze].freeze
end
34
+
35
# Count line terminators (\r\n, \n\r, \n or \r) in str; returns 0 for
# anything that is not exactly a String (e.g. the false EOF token text).
def CTokenizer.line_count(str)
  return 0 unless str.class == String
  str.scan(/\r\n|\n\r|\n|\r/).size
end
40
+
41
# tokens are immutable
# Split one token off the front of str. Returns [token, rest] where token is
# a frozen [type, text] pair and rest is the unconsumed remainder ($' of the
# regex that matched). An empty string yields the [false, false] EOF token.
# NOTE: the `when` order is significant — e.g. FLOAT before INTEGER, and the
# multi-character operators before their single-character prefixes.
def CTokenizer.split_token(str)
  check_string(str)
  # would be easier if '\n' was the only kind of newline....
  token = case str
    when /\A[\t ]+/o
      [:SPACE, $&]
    when /\A(?:\r\n|\n\r|\r|\n)/o
      [:NEWLINE, $&]
    when /\A\\[\t ]*(?:\r\n|\n\r|\r|\n)/o
      # a backslash-continued line counts as plain whitespace
      [:SPACE, $&]
    when /\A\/\*.*?\*\//m
      [:COMMENT, $&]
    when /\A\/\/(?:\\[ \t]*(?:\r\n|\n\r|\r|\n)|[^\r\n])+/o
      # scary comment - bad style - beware of a line-continuation \ at end of line...
      [:COMMENT, $&]
    when /\A(?:\+=|\-=|\*=|\/=|%=|\&=|\^=|\|=|<<=|>>=|##|\.\.\.)/
      # three-char / compound-assignment operators first
      [:SYMBOL, $&]
    when /\A(?:==|!=|<=|>=|->|\&\&|\|\||<<|>>|\+\+|\-\-)/o
      # two-character operators
      [:SYMBOL, $&]
    when /\A(?:<:|:>|<%|%>)/o
      # ISO C digraphs (normalized later by CLexer.convert_token)
      [:SYMBOL, $&]
    when /\A[\(\)\[\]\{\}\|\&\+\-\/\*%<>\.,=!:;\?\^~#]/o
      # single-character punctuation
      [:SYMBOL, $&]
    when /\AL?'(?:[^']|\\.)*'/o
      # (optionally wide) character constant
      [:CHARACTER, $&]
    when /\AL?"(?:[^"]|\\.)*"/o
      # (optionally wide) string literal
      [:STRING, $&]
    when /\A[a-zA-Z_]\w*/o
      [:IDENTIFIER, $&]
    # FLOAT should come before INTEGER
    when /\A(?:[0-9]*\.[0-9]+)|(?:[0-9]+\.)[eE][-+]?[0-9]+?[fFlL]?/o
      [:FLOAT, $&]
    when /\A[0-9]+[eE][-+]?[0-9]+[fFlL]?/o
      [:FLOAT, $&]
    when /\A0[xX][0-9a-fA-F]+(?:(?:[uU][lL]?)|(?:[lL][uU]?)?)/o
      # hexadecimal literal with optional integer suffix
      [:INTEGER, $&]
    when /\A0[0-7]+(?:(?:[uU][lL]?)|(?:[lL][uU]?)?)/o
      # octal literal with optional integer suffix
      [:INTEGER, $&]
    when /\A\d+(?:(?:[uU][lL]?)|(?:[lL][uU]?)?)/o
      # decimal literal with optional integer suffix
      [:INTEGER, $&]
    when /\A\Z/o
      [false, false] # end of file
    when /\A./m
      # anything else: pass through one character as UNKNOWN
      [:UNKNOWN, $&]
    else
      raise "shouldn't get here!"
    end # case
  token[1].freeze
  [token.freeze, $']
end
92
+
93
# Tokenize an entire string into an array of tokens (the trailing
# [false, false] EOF token included).
def CTokenizer.split(str)
  tokens = []
  rest = str
  until rest.empty?
    token, rest = CTokenizer.split_token(rest)
    tokens << token
  end
  tokens
end
101
+
102
# Concatenate the text of every token back into a single source string.
def CTokenizer.join(tokens)
  tokens.inject('') { |str, t| str << t[1] }
end
109
+
110
# Raise a located CTokenizer::Error at the current file/line.
def error(msg)
  CTokenizer.error(file, line, msg)
end

# Report a token the tokenizer could not classify.
def token_error(token)
  error("unrecognized token: #{token}")
end

# Report a token the parser could not handle.
def parse_error(token)
  error("parse error on token: #{token}")
end

# Non-fatal diagnostic on stderr, tagged with the current location.
def warning(msg)
  warn "#{file + ':' if file}#{line}: #{msg}"
end
125
+
126
# Drain the token stream into an array (relies on empty?/shift).
def to_a
  result = []
  result << shift until empty?
  result
end

# Yield each remaining token in order; returns self.
def each
  yield(shift) until empty?
  self
end

# Map each remaining token through the block, returning the results.
def collect
  mapped = []
  mapped << yield(shift) until empty?
  mapped
end
148
+
149
# C Lexer which keeps ALL tokens (whitespace and comments included).
class Lexer
  include CTokenizer

  # str:  source text to tokenize (must be a String)
  # file: optional file name used in diagnostics
  # line: starting line number (defaults to 1)
  def initialize(str, file=nil, line=1)
    CTokenizer.check_string(str)
    @rest = str
    @file = file
    @line = line
  end

  attr_reader :file, :line

  # True once the whole input has been consumed.
  def empty?
    @rest.empty?
  end

  # Produce the next token, advancing the line counter past any newlines
  # contained in the token's text.
  def shift
    token, @rest = CTokenizer.split_token(@rest)
    @line += CTokenizer.line_count(token[1])
    token
  end
end
174
+
175
# provides a way of keeping track of the current scope
# Mixin expects the including class to initialize @scope, @macro and
# @start_line before process_scope is called.
module Scoped
  attr_reader :scope

  # True when no significant token has been seen yet on the current line.
  def start_of_line?
    @start_line
  end

  # True while inside a preprocessor directive (from '#' to end of line).
  def macro?
    @macro
  end

  # Update the scope-tracking state from one token; returns the token.
  def process_scope(t)
    case t[0]
    when :SPACE, :COMMENT
      # layout tokens never affect scope state
    when :NEWLINE
      @start_line = true
      @macro = false
    else
      unless @macro
        # these tokens are ignored if inside of a macro
        case t[1]
        when '#' then @macro = @start_line
        when '{' then @scope += 1
        when '}' then @scope -= 1
        end
      end
      @start_line = false
    end
    t
  end
end # Scoped
211
+
212
# wraps a lexer and uses that to produce new tokens
class LexerBase
  include CTokenizer

  # Accepts either raw source text (wrapped in a Lexer) or any token
  # source responding to shift/empty?/file/line.
  def initialize(str, file=nil, line=1)
    @source = if str.class <= String
      Lexer.new(str, file, line)
    else
      str
    end
  end

  # Location queries are delegated to the wrapped source.
  def file
    @source.file
  end

  def line
    @source.line
  end

  # Pull the next token from the wrapped source, verifying its shape.
  def shift
    token = @source.shift
    CTokenizer.check_token(token)
    token
  end

  def empty?
    @source.empty?
  end
end # LexerBase
242
+
243
# FIFO token buffer that also tracks the source location of its head.
class Cache
  include CTokenizer

  def initialize(file=nil, line=1)
    @cache = []
    @file = file
    @line = line
  end

  attr_reader :cache, :file, :line

  # Re-point the cache at a new source location.
  def reset(file, line)
    @file = file
    @line = line
  end

  # Append one token to the buffer.
  def <<(t)
    @cache << t
  end

  # Remove and return the oldest token, advancing the line counter.
  # NOTE(review): shifting an empty cache passes nil to check_token,
  # which raises — presumably the intended guard.
  def shift
    t = @cache.shift
    CTokenizer.check_token(t)
    @line += CTokenizer.line_count(t[1])
    t
  end

  def empty?
    @cache.empty?
  end
end # Cache
268
+
269
# Skips macro tokens - NOT lossy - converts macro into a single newline token
# Fix: removed the dead local `start = self.line`, which was assigned but
# never read.
class SkipMacros < LexerBase
  include Scoped

  def initialize(str, file=nil, line=1)
    super(str, file, line)
    @scope = 0
    @macro = false
    @start_line = true
  end

  # Shift the next token; when a macro begins, consume the entire macro and
  # return it collapsed into one :NEWLINE token whose text preserves the
  # original characters (so line numbering is not lost).
  def shift
    t = process_scope(super)
    if macro?
      token = t[1].dup
      while macro? and not empty?
        t = process_scope(super)
        token << t[1]
      end
      self.error("expecting newline") unless empty? or t[0] == :NEWLINE
      t = [:NEWLINE, token.freeze].freeze
    end
    t
  end
end # SkipMacros
293
+
294
# writes tokens to some output source
# may not be useful...
class Splitter < LexerBase
  def initialize(out, source, file=nil, line=1)
    super(source, file, line)
    @out = out
  end

  # Forward every shifted token to the output sink as a side effect.
  def shift
    token = super
    @out << token
    token
  end
end
308
+
309
# C Lexer
class CLexer < LexerBase
  # True when str is an ANSI C reserved word.
  def CLexer.reserved_word?(str)
    str =~ /\A(?:auto|break|case|char|const|continue|default|do|double|else|enum|extern|float|for|goto|if|inline|int|long|register|return|short|signed|sizeof|static|struct|switch|typedef|union|unsigned|void|volatile|while)\Z/o
  end

  # Normalize a raw token for C parsing: digraphs are replaced by their
  # canonical symbols, and symbols/reserved words are re-typed with their
  # own text so the parser can match on it directly.
  def CLexer.convert_token(t)
    case t[0]
    when :SYMBOL
      text = case t[1]
             when '<:' then '['
             when ':>' then ']'
             when '<%' then '{'
             when '%>' then '}'
             else t[1]
             end # case
      t = [text, text].freeze
    when :IDENTIFIER
      t = [t[1], t[1]].freeze if CLexer.reserved_word?(t[1])
    end # case
    t
  end

  def shift
    CLexer.convert_token(super)
  end
end # CLexer
345
+
346
# C Preprocessor Lexer
class CPLexer < LexerBase
  # True when str is a preprocessor directive or operator keyword.
  def CPLexer.reserved_word?(str)
    str =~ /\A(?:if|ifdef|ifndef|else|elif|endif|include|include_next|define|defined|undef|line|error|pragma)\Z/
  end

  # Symbols meaningful in preprocessor expressions and reserved words are
  # re-typed with their own text; every other token passes through as-is.
  def CPLexer.convert_token(t)
    case t[0]
    when :SYMBOL
      case t[1]
      when '!', '*', '/', '%', '+', '-', \
          '<<', '>>', '(', ')', \
          '<', '<=', '>', '>=', '==', '!=', \
          '&', '^', '|', '&&', '||', \
          '?', ':', '#', '##', '...', ','
        t = [t[1], t[1]].freeze
      end # case
    when :IDENTIFIER
      t = [t[1], t[1]].freeze if CPLexer.reserved_word?(t[1])
    end # case
    t
  end

  def shift
    CPLexer.convert_token(super)
  end
end # CPLexer
377
+
378
+ end # CTokenizer
379
+