codnar 0.1.64
- data/ChangeLog +165 -0
- data/LICENSE +19 -0
- data/README.rdoc +32 -0
- data/Rakefile +66 -0
- data/bin/codnar-split +5 -0
- data/bin/codnar-weave +5 -0
- data/codnar.html +10945 -0
- data/doc/logo.png +0 -0
- data/doc/root.html +22 -0
- data/doc/story.markdown +180 -0
- data/doc/system.markdown +671 -0
- data/lib/codnar.rb +41 -0
- data/lib/codnar/application.rb +92 -0
- data/lib/codnar/cache.rb +61 -0
- data/lib/codnar/data/contents.js +113 -0
- data/lib/codnar/data/control_chunks.js +44 -0
- data/lib/codnar/data/style.css +95 -0
- data/lib/codnar/data/sunlight/README.txt +4 -0
- data/lib/codnar/data/sunlight/css-min.js +1 -0
- data/lib/codnar/data/sunlight/default.css +236 -0
- data/lib/codnar/data/sunlight/javascript-min.js +1 -0
- data/lib/codnar/data/sunlight/min.js +1 -0
- data/lib/codnar/data/sunlight/ruby-min.js +1 -0
- data/lib/codnar/data/yui/README.txt +3 -0
- data/lib/codnar/data/yui/base.css +132 -0
- data/lib/codnar/data/yui/reset.css +142 -0
- data/lib/codnar/formatter.rb +180 -0
- data/lib/codnar/grouper.rb +28 -0
- data/lib/codnar/gvim.rb +132 -0
- data/lib/codnar/hash_extensions.rb +41 -0
- data/lib/codnar/markdown.rb +47 -0
- data/lib/codnar/merger.rb +138 -0
- data/lib/codnar/rake.rb +41 -0
- data/lib/codnar/rake/split_task.rb +71 -0
- data/lib/codnar/rake/weave_task.rb +59 -0
- data/lib/codnar/rdoc.rb +9 -0
- data/lib/codnar/reader.rb +121 -0
- data/lib/codnar/scanner.rb +216 -0
- data/lib/codnar/split.rb +58 -0
- data/lib/codnar/split_configurations.rb +367 -0
- data/lib/codnar/splitter.rb +32 -0
- data/lib/codnar/string_extensions.rb +25 -0
- data/lib/codnar/sunlight.rb +17 -0
- data/lib/codnar/version.rb +8 -0
- data/lib/codnar/weave.rb +58 -0
- data/lib/codnar/weave_configurations.rb +48 -0
- data/lib/codnar/weaver.rb +105 -0
- data/lib/codnar/writer.rb +38 -0
- data/test/cache_computations.rb +41 -0
- data/test/deep_merge.rb +29 -0
- data/test/embed_images.rb +12 -0
- data/test/expand_markdown.rb +27 -0
- data/test/expand_rdoc.rb +20 -0
- data/test/format_code_gvim_configurations.rb +55 -0
- data/test/format_code_sunlight_configurations.rb +37 -0
- data/test/format_comment_configurations.rb +86 -0
- data/test/format_lines.rb +72 -0
- data/test/group_lines.rb +31 -0
- data/test/gvim_highlight_syntax.rb +49 -0
- data/test/identify_chunks.rb +32 -0
- data/test/lib/test_with_configurations.rb +15 -0
- data/test/merge_lines.rb +133 -0
- data/test/rake_tasks.rb +38 -0
- data/test/read_chunks.rb +110 -0
- data/test/run_application.rb +56 -0
- data/test/run_split.rb +38 -0
- data/test/run_weave.rb +75 -0
- data/test/scan_lines.rb +78 -0
- data/test/split_chunk_configurations.rb +55 -0
- data/test/split_code.rb +109 -0
- data/test/split_code_configurations.rb +73 -0
- data/test/split_combined_configurations.rb +114 -0
- data/test/split_complex_comment_configurations.rb +73 -0
- data/test/split_documentation.rb +92 -0
- data/test/split_documentation_configurations.rb +97 -0
- data/test/split_simple_comment_configurations.rb +50 -0
- data/test/sunlight_highlight_syntax.rb +25 -0
- data/test/weave_configurations.rb +144 -0
- data/test/write_chunks.rb +28 -0
- metadata +363 -0
data/lib/codnar/rake.rb
ADDED
@@ -0,0 +1,41 @@
require "rake"
require "rake/tasklib"

require "codnar"
require "codnar/rake/split_task"
require "codnar/rake/weave_task"

module Codnar

  # This module contains all the Codnar Rake tasks code.
  module Rake

    class << self

      # The root folder to store all chunk files under.
      attr_accessor :chunks_dir

      # The list of split chunk files for later weaving.
      attr_accessor :chunk_files

    end

    Rake.chunk_files = []
    Rake.chunks_dir = "chunks"

    # Compute options for invoking an application.
    def self.application_options(output, configurations)
      options = [ "-o", output ]
      options += configurations.map { |configuration| [ "-c", configuration.to_s ] }.flatten
      return options
    end

    # Return the list of actual configuration files (as opposed to names of
    # built-in configurations) for use as dependencies.
    def self.configuration_files(configurations)
      return configurations.find_all { |configuration| File.exists?(configuration.to_s) }
    end

  end

end
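For illustration, the options helper simply expands the output name and configuration names into command-line flags; a minimal sketch of what it returns (the file names here are arbitrary):

  Codnar::Rake.application_options("codnar.html", [ "markdown", "codnar.yaml" ])
  # => [ "-o", "codnar.html", "-c", "markdown", "-c", "codnar.yaml" ]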
data/lib/codnar/rake/split_task.rb
ADDED
@@ -0,0 +1,71 @@
module Codnar

  module Rake

    # A Rake task for splitting source files to chunks.
    class SplitTask < ::Rake::TaskLib

      # Create a new Rake task for splitting source files to chunks. Each of
      # the specified disk files is split using the specified set of
      # configurations.
      def initialize(paths, configurations)
        @configurations = configurations
        paths.each do |path|
          define_tasks(path)
        end
      end

      protected

      # Define the tasks for splitting a single source file to chunks.
      def define_tasks(path)
        output = Rake.chunks_dir + "/" + path
        define_split_file_task(path, output)
        SplitTask.define_common_tasks
        SplitTask.connect_common_tasks(output)
      end

      # Define the actual task for splitting the source file.
      def define_split_file_task(path, output)
        ::Rake::FileTask.define_task(output => [ path ] + Rake.configuration_files(@configurations)) do
          run_split_application(path, output)
        end
      end

      # Run the Split application for a single source file.
      def run_split_application(path, output)
        options = Rake.application_options(output, @configurations)
        options << path
        status = Application.with_argv(options) { Split.new.run }
        raise "Codnar split errors" unless status == 0
      end

      # Define common Rake split tasks. This method may be invoked several
      # times; only the first invocation actually defines the tasks. The common
      # tasks are codnar_split (for splitting all the source files) and
      # clean_codnar (for getting rid of the chunks directory).
      def self.define_common_tasks
        @defined_common_tasks ||= SplitTask.create_common_tasks
      end

      # Actually create common Rake split tasks.
      def self.create_common_tasks
        desc "Split all files into chunks"
        ::Rake::Task.define_task("codnar_split")
        desc "Clean all split chunks"
        ::Rake::Task.define_task("clean_codnar") { rm_rf(Rake.chunks_dir) }
        ::Rake::Task.define_task(:clean => "clean_codnar")
      end

      # Connect the task for splitting a single source file to the common task
      # of splitting all source files.
      def self.connect_common_tasks(output)
        ::Rake::Task.define_task("codnar_split" => output)
        Rake::chunk_files << output
      end

    end

  end

end
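For context, a SplitTask would normally be created from a Rakefile; a minimal sketch, assuming a project Rakefile in which the source file list and the configuration name are placeholders rather than part of the gem:

  # Hypothetical Rakefile fragment.
  require "codnar/rake"

  # Split each listed source file into chunk files under the "chunks" directory;
  # "classify_source_code" stands in for whatever split configurations the project uses.
  Codnar::Rake::SplitTask.new(FileList["lib/**/*.rb"], [ "classify_source_code" ])

  # This also defines the common codnar_split and clean_codnar tasks.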
data/lib/codnar/rake/weave_task.rb
ADDED
@@ -0,0 +1,59 @@
module Codnar

  module Rake

    # A Rake task for weaving chunks to a single HTML.
    class WeaveTask < ::Rake::TaskLib

      # Create a Rake task for weaving chunks to a single HTML. The root source
      # file is expected to embed all the chunks into the output HTML. The
      # chunks are loaded from the results of all the previously created
      # SplitTask-s.
      def initialize(root, configurations, output = "codnar.html")
        @root = Rake.chunks_dir + "/" + root
        @output = output
        @configurations = configurations
        define_tasks
      end

      protected

      # Define the tasks for weaving the chunks to a single HTML.
      def define_tasks
        define_weave_task
        connect_common_tasks
      end

      # Define the actual task for weaving the chunks to a single HTML.
      def define_weave_task
        desc "Weave chunks into HTML" unless ::Rake.application.last_comment
        ::Rake::Task.define_task("codnar_weave" => @output)
        ::Rake::FileTask.define_task(@output => Rake.chunk_files + Rake.configuration_files(@configurations)) do
          run_weave_application
        end
      end

      # Run the Weave application for a single source file.
      def run_weave_application
        options = Rake.application_options(@output, @configurations)
        options << @root
        options += Rake.chunk_files.reject { |chunk| chunk == @root }
        status = Application.with_argv(options) { Weave.new.run }
        raise "Codnar weave errors" unless status == 0
      end

      # Connect the task for cleaning up after weaving (+clobber_codnar+) to the
      # common task of cleaning up everything (+clobber+).
      def connect_common_tasks
        desc "Build the code narrative HTML"
        ::Rake::Task.define_task(:codnar => "codnar_weave")
        desc "Remove woven HTML documentation"
        ::Rake::Task.define_task("clobber_codnar") { rm_rf(@output) }
        ::Rake::Task.define_task(:clobber => "clobber_codnar")
      end

    end

  end

end
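A matching WeaveTask sketch, again assuming a project Rakefile; the root document name and the configuration name are placeholders:

  # Hypothetical Rakefile fragment.
  require "codnar/rake"

  # Weave the previously split chunks, starting from the project's root document,
  # into the default codnar.html output; "weave_include" is an illustrative
  # configuration name, not necessarily one shipped with the gem.
  Codnar::Rake::WeaveTask.new("doc/root.html", [ "weave_include" ])

  # "rake codnar" then builds the narrative HTML; "rake clobber_codnar" removes it.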
data/lib/codnar/reader.rb
ADDED
@@ -0,0 +1,121 @@
module Codnar

  # Read chunks from disk files.
  class Reader

    # Load all chunks from the specified disk files to memory for later access
    # by name.
    def initialize(errors, paths)
      @errors = errors
      @chunks = {}
      @used = {}
      paths.each do |path|
        read_path_chunks(path)
      end
    end

    # Fetch a chunk by its name.
    def [](name)
      id = name.to_id
      @used[id] = true
      return @chunks[id] ||= (
        @errors << "Missing chunk: #{name}"
        Reader.fake_chunk(name)
      )
    end

    # Collect errors for unused chunks.
    def collect_unused_chunk_errors
      @chunks.each do |id, chunk|
        @errors.push("#{$0}: Unused chunk: #{chunk.name} #{Reader.locations_message(chunk)}") unless @used[id]
      end
    end

    protected

    # Load and merge all chunks from a disk file into memory.
    def read_path_chunks(path)
      @errors.in_path(path) do
        chunks = load_path_chunks(path)
        next unless chunks
        merge_loaded_chunks(chunks)
        @root_chunk ||= chunks[0].name
      end
    end

    # Load all chunks from a disk file into memory.
    def load_path_chunks(path)
      chunks = YAML.load_file(path)
      @errors << "Invalid chunks data" unless chunks
      # TODO: A bit more validation would be nice.
      return chunks
    end

    # Merge an array of chunks into memory.
    def merge_loaded_chunks(chunks)
      chunks.each do |new_chunk|
        old_chunk = @chunks[id = new_chunk.name.to_id]
        if old_chunk.nil?
          @chunks[id] = new_chunk
        elsif Reader.same_chunk?(old_chunk, new_chunk)
          Reader.merge_same_chunks(old_chunk, new_chunk)
        else
          @errors.push(Reader.different_chunks_error(old_chunk, new_chunk))
        end
      end
    end

    # Merge a new "same" chunk into an old one.
    def self.merge_same_chunks(old_chunk, new_chunk)
      old_chunk.locations = \
        (old_chunk.locations + new_chunk.locations).uniq.sort \
      do |first_location, second_location|
        [ first_location.file.to_id, first_location.line ] \
          <=> [ second_location.file.to_id, second_location.line ]
      end
      old_chunk.containers = \
        (old_chunk.containers + new_chunk.containers).uniq.sort \
      do |first_name, second_name|
        first_name.to_id <=> second_name.to_id
      end
    end

    # Check whether two chunks contain the same "stuff".
    def self.same_chunk?(old_chunk, new_chunk)
      return Reader.chunk_payload(old_chunk) == Reader.chunk_payload(new_chunk)
    end

    # Return just the actual payload of a chunk for equality comparison.
    def self.chunk_payload(chunk)
      chunk = chunk.reject { |key, value| [ "locations", "name", "containers" ].include?(key) }
      chunk.contained.map! { |name| name.to_id }
      return chunk
    end

    # Error message when two different chunks have the same name.
    def self.different_chunks_error(old_chunk, new_chunk)
      old_location = Reader.locations_message(old_chunk)
      new_location = Reader.locations_message(new_chunk)
      return "#{$0}: Chunk: #{old_chunk.name} is different #{new_location}, and #{old_location}"
    end

    # Format a chunk's location for an error message.
    def self.locations_message(chunk)
      locations = chunk.locations.map { |location| "in file: #{location.file} at line: #{location.line}" }
      return locations.join(" or ")
    end

    # Return a fake chunk for the specified name.
    def self.fake_chunk(name)
      return {
        "name" => name,
        "locations" => [ { "file" => "MISSING" } ],
        "contained" => [],
        "containers" => [],
        "html" => "<div class='missing chunk error'>\nMISSING\n</div>"
      }
    end

  end

end
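For orientation, each chunks file the Reader loads is a YAML array of chunk records; a minimal sketch of one record in the structure the code above expects (all values are made up, and field access by method name relies on codnar's Hash extensions):

  # Hypothetical chunk record, as it would appear after YAML.load_file:
  chunk = {
    "name" => "Introduction",
    "locations" => [ { "file" => "README.rdoc", "line" => 1 } ],
    "containers" => [],
    "contained" => [ "Usage" ],
    "html" => "<div class='chunk'>...</div>"
  }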
data/lib/codnar/scanner.rb
ADDED
@@ -0,0 +1,216 @@
module Codnar

  # Scan a file into classified lines.
  class Scanner

    # Construct a scanner based on a syntax in the following structure:
    #
    #   patterns:
    #     <name>:
    #       name: <name>
    #       kind: <kind>
    #       regexp: <regexp>
    #       groups:
    #       - <name>
    #   states:
    #     <name>:
    #       name: <name>
    #       transitions:
    #       - pattern: <pattern>
    #         kind: <kind>
    #         next_state: <state>
    #   start_state: <state>
    #
    # To allow for cleaner YAML files to specify the syntax, the following
    # shorthands are supported:
    #
    # - A pattern or state reference can be presented by the string name of the
    #   pattern or state.
    # - The name field of a state or pattern can be omitted. If specified, it
    #   must be identical to the key in the states or patterns mapping.
    # - The kind field of a pattern can be omitted; by default it is assumed
    #   to be identical to the pattern name.
    # - A pattern regexp can be presented by a plain string.
    # - The pattern groups field can be omitted or contain +nil+ if it is
    #   equal to [ "indentation", "payload" ].
    # - The kind field of a transition can be omitted; by default it is
    #   assumed to be identical to the pattern kind.
    # - The next state of a transition can be omitted; by default it is
    #   assumed to be identical to the containing state.
    # - The start state can be omitted; by default it is assumed to be named
    #   +start+.
    #
    # When the Scanner is constructed, a deep clone of the syntax object is
    # created and modified to expand all the above shorthands. Any problems
    # detected during this process are pushed into the errors.
    def initialize(errors, syntax)
      @errors = errors
      @syntax = syntax.deep_clone
      @syntax.patterns.each { |name, pattern| expand_pattern_shorthands(name, pattern) }
      @syntax.states.each { |name, state| expand_state_shorthands(name, state) }
      @syntax.start_state = resolve_start_state
    end

    # Scan a disk file into classified lines in the following format (where the
    # groups contain the text extracted by the matching pattern):
    #
    #   - kind: <kind>
    #     line: <text>
    #     <group>: <text>
    #
    # By convention, each classified line has a "payload" group that contains
    # the "main" content of the line (chunk name for begin/end/nested chunk
    # lines, clean comment text for comment lines, etc.). In addition, most
    # classified lines have an "indentation" group that contains the leading
    # white space (which is not included in the payload).
    #
    # If at some state, a file line does not match any pattern, the scanner
    # will push a message into the errors. In addition it will classify the
    # line as follows:
    #
    #   - kind: error
    #     state: <name>
    #     line: <text>
    #     indentation: <leading white space>
    #     payload: <line text following the indentation>
    def lines(path)
      @path = path
      @lines = []
      @state = @syntax.start_state
      @errors.in_path(path) { scan_path }
      return @lines
    end

    protected

    # {{{ Scanner pattern shorthands

    # Expand all the shorthands used in the pattern.
    def expand_pattern_shorthands(name, pattern)
      pattern.kind ||= fill_name(name, pattern, "Pattern")
      pattern.groups ||= [ "indentation", "payload" ]
      pattern.regexp = convert_to_regexp(name, pattern.regexp)
    end

    # Convert a string regexp to a real Regexp.
    def convert_to_regexp(name, regexp)
      return regexp if Regexp === regexp
      begin
        return Regexp.new(regexp)
      rescue
        @errors << "Invalid pattern: #{name} regexp: #{regexp} error: #{$!}"
      end
    end

    # Fill in the name field for state or pattern object.
    def fill_name(name, data, type)
      data_name = data.name ||= name
      @errors << "#{type}: #{name} has wrong name: #{data_name}" if data_name != name
      return data_name
    end

    # }}}

    # {{{ Scanner state shorthands

    # Expand all the shorthands used in the state.
    def expand_state_shorthands(name, state)
      fill_name(name, state, "State")
      state.transitions.each do |transition|
        pattern = transition.pattern = lookup(@syntax.patterns, "pattern", transition.pattern)
        transition.kind ||= pattern.andand.kind
        transition.next_state = lookup(@syntax.states, "state", transition.next_state || state)
      end
    end

    # Convert a string name to an actual data reference.
    def lookup(mapping, type, reference)
      return reference unless String === reference
      data = mapping[reference]
      @errors << "Reference to a missing #{type}: #{reference}" unless data
      return data
    end

    # Resolve the start state reference.
    def resolve_start_state
      return lookup(@syntax.states, "state", @syntax.start_state || "start") || {
        "name" => "missing_start_state",
        "kind" => "error",
        "transitions" => []
      }
    end

    # }}}

    # {{{ Scanner file processing

    # Scan a disk file.
    def scan_path
      File.open(@path, "r") do |file|
        scan_file(file)
      end
    end

    # Scan an opened file.
    def scan_file(file)
      @line_number = 0
      file.read.each_line do |line|
        @errors.at_line(@line_number += 1)
        scan_line(line.chomp)
      end
    end

    # Scan the next file line.
    def scan_line(line)
      @state.transitions.each do |transition|
        return if transition.pattern && transition.next_state && classify_matching_line(line, transition)
      end
      unclassified_line(line, @state.name)
    end

    # }}}

    # {{{ Scanner line processing

    # Handle a file line, only if it matches the pattern.
    def classify_matching_line(line, transition)
      match = (pattern = transition.pattern).regexp.match(line)
      return false unless match
      @lines << Scanner.extracted_groups(match, pattern.groups).update({
        "line" => line,
        "kind" => transition.kind,
        "number" => @line_number
      })
      @state = transition.next_state
      return true
    end

    # Extract named groups from a match. As a special case, indentation is
    # deleted if there is no payload.
    def self.extracted_groups(match, groups)
      extracted = {}
      groups.each_with_index do |group, index|
        extracted[group] = match[index + 1]
      end
      extracted.delete("indentation") if match[0] == ""
      return extracted
    end

    # Handle a file line that couldn't be classified.
    def unclassified_line(line, state_name)
      @lines << {
        "line" => line,
        "indentation" => line.indentation,
        "payload" => line.unindent,
        "kind" => "error",
        "state" => state_name,
        "number" => @line_number
      }
      @errors << "State: #{state_name} failed to classify line: #{@lines.last.payload}"
    end

    # }}}

  end

end
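To make the documented syntax structure concrete, here is a minimal sketch of a syntax object, written as a Ruby hash rather than a YAML file and relying on the shorthands the constructor expands; the pattern names and regexps are invented for illustration, and field access by method name assumes codnar's Hash extensions:

  # Hypothetical syntax: two patterns and a single implicit "start" state.
  syntax = {
    "patterns" => {
      "comment" => { "regexp" => "^(\\s*)#\\s*(.*)$" },
      "code"    => { "regexp" => "^(\\s*)(.*)$" }
    },
    "states" => {
      "start" => {
        "transitions" => [
          { "pattern" => "comment" },
          { "pattern" => "code" }
        ]
      }
    }
  }
  # scanner = Codnar::Scanner.new(errors, syntax)  # errors: a Codnar errors collector
  # lines = scanner.lines("lib/example.rb")        # => array of classified line hashes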