redsnow 0.0.8
- checksums.yaml +7 -0
- data/.gitignore +34 -0
- data/.gitmodules +3 -0
- data/.travis.yml +20 -0
- data/CHANGELOG.md +4 -0
- data/Gemfile +4 -0
- data/LICENSE +21 -0
- data/README.md +62 -0
- data/Rakefile +36 -0
- data/Vagrantfile +20 -0
- data/ext/snowcrash/Makefile +64 -0
- data/ext/snowcrash/Vagrantfile +20 -0
- data/ext/snowcrash/bin/snowcrash +0 -0
- data/ext/snowcrash/common.gypi +163 -0
- data/ext/snowcrash/config.gypi +10 -0
- data/ext/snowcrash/config.mk +5 -0
- data/ext/snowcrash/configure +213 -0
- data/ext/snowcrash/provisioning.sh +15 -0
- data/ext/snowcrash/snowcrash.gyp +141 -0
- data/ext/snowcrash/src/ActionParser.h +503 -0
- data/ext/snowcrash/src/AssetParser.h +215 -0
- data/ext/snowcrash/src/BlockUtility.h +186 -0
- data/ext/snowcrash/src/Blueprint.h +283 -0
- data/ext/snowcrash/src/BlueprintParser.h +347 -0
- data/ext/snowcrash/src/BlueprintParserCore.h +190 -0
- data/ext/snowcrash/src/BlueprintSection.h +140 -0
- data/ext/snowcrash/src/BlueprintUtility.h +126 -0
- data/ext/snowcrash/src/CBlueprint.cc +600 -0
- data/ext/snowcrash/src/CBlueprint.h +354 -0
- data/ext/snowcrash/src/CSourceAnnotation.cc +140 -0
- data/ext/snowcrash/src/CSourceAnnotation.h +106 -0
- data/ext/snowcrash/src/CodeBlockUtility.h +189 -0
- data/ext/snowcrash/src/DescriptionSectionUtility.h +156 -0
- data/ext/snowcrash/src/HTTP.cc +46 -0
- data/ext/snowcrash/src/HTTP.h +105 -0
- data/ext/snowcrash/src/HeaderParser.h +289 -0
- data/ext/snowcrash/src/ListBlockUtility.h +273 -0
- data/ext/snowcrash/src/ListUtility.h +95 -0
- data/ext/snowcrash/src/MarkdownBlock.cc +176 -0
- data/ext/snowcrash/src/MarkdownBlock.h +93 -0
- data/ext/snowcrash/src/MarkdownParser.cc +266 -0
- data/ext/snowcrash/src/MarkdownParser.h +88 -0
- data/ext/snowcrash/src/ParameterDefinitonParser.h +570 -0
- data/ext/snowcrash/src/ParametersParser.h +252 -0
- data/ext/snowcrash/src/Parser.cc +71 -0
- data/ext/snowcrash/src/Parser.h +29 -0
- data/ext/snowcrash/src/ParserCore.cc +120 -0
- data/ext/snowcrash/src/ParserCore.h +82 -0
- data/ext/snowcrash/src/PayloadParser.h +672 -0
- data/ext/snowcrash/src/Platform.h +54 -0
- data/ext/snowcrash/src/RegexMatch.h +32 -0
- data/ext/snowcrash/src/ResourceGroupParser.h +195 -0
- data/ext/snowcrash/src/ResourceParser.h +584 -0
- data/ext/snowcrash/src/SectionUtility.h +142 -0
- data/ext/snowcrash/src/Serialize.cc +52 -0
- data/ext/snowcrash/src/Serialize.h +69 -0
- data/ext/snowcrash/src/SerializeJSON.cc +601 -0
- data/ext/snowcrash/src/SerializeJSON.h +21 -0
- data/ext/snowcrash/src/SerializeYAML.cc +336 -0
- data/ext/snowcrash/src/SerializeYAML.h +21 -0
- data/ext/snowcrash/src/SourceAnnotation.h +177 -0
- data/ext/snowcrash/src/StringUtility.h +109 -0
- data/ext/snowcrash/src/SymbolTable.h +83 -0
- data/ext/snowcrash/src/UriTemplateParser.cc +195 -0
- data/ext/snowcrash/src/UriTemplateParser.h +243 -0
- data/ext/snowcrash/src/Version.h +39 -0
- data/ext/snowcrash/src/csnowcrash.cc +23 -0
- data/ext/snowcrash/src/csnowcrash.h +38 -0
- data/ext/snowcrash/src/posix/RegexMatch.cc +99 -0
- data/ext/snowcrash/src/snowcrash.cc +18 -0
- data/ext/snowcrash/src/snowcrash.h +41 -0
- data/ext/snowcrash/src/snowcrash/snowcrash.cc +170 -0
- data/ext/snowcrash/src/win/RegexMatch.cc +78 -0
- data/ext/snowcrash/sundown/CONTRIBUTING.md +10 -0
- data/ext/snowcrash/sundown/Makefile +83 -0
- data/ext/snowcrash/sundown/Makefile.win +33 -0
- data/ext/snowcrash/sundown/examples/smartypants.c +72 -0
- data/ext/snowcrash/sundown/examples/sundown.c +80 -0
- data/ext/snowcrash/sundown/html/houdini.h +37 -0
- data/ext/snowcrash/sundown/html/houdini_href_e.c +108 -0
- data/ext/snowcrash/sundown/html/houdini_html_e.c +84 -0
- data/ext/snowcrash/sundown/html/html.c +647 -0
- data/ext/snowcrash/sundown/html/html.h +77 -0
- data/ext/snowcrash/sundown/html/html_smartypants.c +389 -0
- data/ext/snowcrash/sundown/html_block_names.txt +25 -0
- data/ext/snowcrash/sundown/src/autolink.c +297 -0
- data/ext/snowcrash/sundown/src/autolink.h +51 -0
- data/ext/snowcrash/sundown/src/buffer.c +225 -0
- data/ext/snowcrash/sundown/src/buffer.h +96 -0
- data/ext/snowcrash/sundown/src/html_blocks.h +206 -0
- data/ext/snowcrash/sundown/src/markdown.c +2701 -0
- data/ext/snowcrash/sundown/src/markdown.h +147 -0
- data/ext/snowcrash/sundown/src/src_map.c +200 -0
- data/ext/snowcrash/sundown/src/src_map.h +58 -0
- data/ext/snowcrash/sundown/src/stack.c +81 -0
- data/ext/snowcrash/sundown/src/stack.h +29 -0
- data/ext/snowcrash/sundown/sundown.def +20 -0
- data/ext/snowcrash/tools/gyp/AUTHORS +11 -0
- data/ext/snowcrash/tools/gyp/DEPS +24 -0
- data/ext/snowcrash/tools/gyp/OWNERS +1 -0
- data/ext/snowcrash/tools/gyp/PRESUBMIT.py +120 -0
- data/ext/snowcrash/tools/gyp/buildbot/buildbot_run.py +190 -0
- data/ext/snowcrash/tools/gyp/codereview.settings +10 -0
- data/ext/snowcrash/tools/gyp/data/win/large-pdb-shim.cc +12 -0
- data/ext/snowcrash/tools/gyp/gyp +8 -0
- data/ext/snowcrash/tools/gyp/gyp.bat +5 -0
- data/ext/snowcrash/tools/gyp/gyp_main.py +18 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/MSVSNew.py +340 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/MSVSProject.py +208 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/MSVSSettings.py +1063 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/MSVSToolFile.py +58 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/MSVSUserFile.py +147 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/MSVSUtil.py +267 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/MSVSVersion.py +409 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/__init__.py +537 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/__init__.pyc +0 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/common.py +521 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/common.pyc +0 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/easy_xml.py +157 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/flock_tool.py +49 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/__init__.py +0 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/__init__.pyc +0 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/android.py +1069 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/cmake.py +1143 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/dump_dependency_json.py +81 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/eclipse.py +335 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/gypd.py +87 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/gypsh.py +56 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/make.py +2181 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/make.pyc +0 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/msvs.py +3335 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/ninja.py +2156 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/xcode.py +1224 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/generator/xcode.pyc +0 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/input.py +2809 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/input.pyc +0 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/mac_tool.py +510 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/msvs_emulation.py +972 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/ninja_syntax.py +160 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/ordered_dict.py +289 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/win_tool.py +292 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/xcode_emulation.py +1440 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/xcode_emulation.pyc +0 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/xcodeproj_file.py +2889 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/xcodeproj_file.pyc +0 -0
- data/ext/snowcrash/tools/gyp/pylib/gyp/xml_fix.py +69 -0
- data/ext/snowcrash/tools/gyp/pylintrc +307 -0
- data/ext/snowcrash/tools/gyp/samples/samples +81 -0
- data/ext/snowcrash/tools/gyp/samples/samples.bat +5 -0
- data/ext/snowcrash/tools/gyp/setup.py +19 -0
- data/ext/snowcrash/tools/gyp/tools/Xcode/Specifications/gyp.pbfilespec +27 -0
- data/ext/snowcrash/tools/gyp/tools/Xcode/Specifications/gyp.xclangspec +226 -0
- data/ext/snowcrash/tools/gyp/tools/emacs/gyp.el +252 -0
- data/ext/snowcrash/tools/gyp/tools/graphviz.py +100 -0
- data/ext/snowcrash/tools/gyp/tools/pretty_gyp.py +155 -0
- data/ext/snowcrash/tools/gyp/tools/pretty_sln.py +168 -0
- data/ext/snowcrash/tools/gyp/tools/pretty_vcproj.py +329 -0
- data/ext/snowcrash/tools/homebrew/snowcrash.rb +11 -0
- data/ext/snowcrash/vcbuild.bat +184 -0
- data/lib/redsnow.rb +31 -0
- data/lib/redsnow/binding.rb +132 -0
- data/lib/redsnow/blueprint.rb +365 -0
- data/lib/redsnow/object.rb +18 -0
- data/lib/redsnow/parseresult.rb +107 -0
- data/lib/redsnow/version.rb +4 -0
- data/provisioning.sh +20 -0
- data/redsnow.gemspec +35 -0
- data/test/_helper.rb +15 -0
- data/test/fixtures/sample-api-ast.json +97 -0
- data/test/fixtures/sample-api.apib +20 -0
- data/test/redsnow_binding_test.rb +35 -0
- data/test/redsnow_parseresult_test.rb +50 -0
- data/test/redsnow_test.rb +285 -0
- metadata +358 -0
data/ext/snowcrash/tools/gyp/tools/graphviz.py
@@ -0,0 +1,100 @@
#!/usr/bin/env python

# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""

import collections
import json
import sys


def ParseTarget(target):
  target, _, suffix = target.partition('#')
  filename, _, target = target.partition(':')
  return filename, target, suffix


def LoadEdges(filename, targets):
  """Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their depedendents."""

  file = open('dump.json')
  edges = json.load(file)
  file.close()

  # Copy out only the edges we're interested in from the full edge list.
  target_edges = {}
  to_visit = targets[:]
  while to_visit:
    src = to_visit.pop()
    if src in target_edges:
      continue
    target_edges[src] = edges[src]
    to_visit.extend(edges[src])

  return target_edges


def WriteGraph(edges):
  """Print a graphviz graph to stdout.
  |edges| is a map of target to a list of other targets it depends on."""

  # Bucket targets by file.
  files = collections.defaultdict(list)
  for src, dst in edges.items():
    build_file, target_name, toolset = ParseTarget(src)
    files[build_file].append(src)

  print 'digraph D {'
  print '  fontsize=8'  # Used by subgraphs.
  print '  node [fontsize=8]'

  # Output nodes by file.  We must first write out each node within
  # its file grouping before writing out any edges that may refer
  # to those nodes.
  for filename, targets in files.items():
    if len(targets) == 1:
      # If there's only one node for this file, simplify
      # the display by making it a box without an internal node.
      target = targets[0]
      build_file, target_name, toolset = ParseTarget(target)
      print '  "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
                                                     target_name)
    else:
      # Group multiple nodes together in a subgraph.
      print '  subgraph "cluster_%s" {' % filename
      print '    label = "%s"' % filename
      for target in targets:
        build_file, target_name, toolset = ParseTarget(target)
        print '    "%s" [label="%s"]' % (target, target_name)
      print '  }'

  # Now that we've placed all the nodes within subgraphs, output all
  # the edges between nodes.
  for src, dsts in edges.items():
    for dst in dsts:
      print '  "%s" -> "%s"' % (src, dst)

  print '}'


def main():
  if len(sys.argv) < 2:
    print >>sys.stderr, __doc__
    print >>sys.stderr
    print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
    return 1

  edges = LoadEdges('dump.json', sys.argv[1:])

  WriteGraph(edges)
  return 0


if __name__ == '__main__':
  sys.exit(main())
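A note on input: LoadEdges above reads a `dump.json` in which every key is a target string of the form `build_file:target#toolset` (see ParseTarget) and every value is the list of targets it depends on; every listed dependency must itself appear as a key, since LoadEdges walks `edges[src]` transitively. A minimal sketch of producing such a file — Python 3 here, with target names invented purely for illustration (the script itself is Python 2):

```python
import json

# Toy dependency map in the shape LoadEdges expects: every element of every
# value list must itself be a key, because LoadEdges follows edges[src].
edges = {
    "snowcrash.gyp:snowcrash#target": ["snowcrash.gyp:sundown#target"],  # invented names
    "snowcrash.gyp:sundown#target": [],
}

with open("dump.json", "w") as f:
    json.dump(edges, f)

# With dump.json in place, the script above would be invoked (under Python 2)
# roughly as:
#   python graphviz.py "snowcrash.gyp:snowcrash#target" | dot -Tpng -o deps.png
```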
data/ext/snowcrash/tools/gyp/tools/pretty_gyp.py
@@ -0,0 +1,155 @@
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Pretty-prints the contents of a GYP file."""

import sys
import re


# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')

# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceeded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)


def comment_replace(matchobj):
  return matchobj.group(1) + matchobj.group(2) + '#' * len(matchobj.group(3))


def mask_comments(input):
  """Mask the quoted strings so we skip braces inside quoted strings."""
  search_re = re.compile(r'(.*?)(#)(.*)')
  return [search_re.sub(comment_replace, line) for line in input]


def quote_replace(matchobj):
  return "%s%s%s%s" % (matchobj.group(1),
                       matchobj.group(2),
                       'x'*len(matchobj.group(3)),
                       matchobj.group(2))


def mask_quotes(input):
  """Mask the quoted strings so we skip braces inside quoted strings."""
  search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
  return [search_re.sub(quote_replace, line) for line in input]


def do_split(input, masked_input, search_re):
  output = []
  mask_output = []
  for (line, masked_line) in zip(input, masked_input):
    m = search_re.match(masked_line)
    while m:
      split = len(m.group(1))
      line = line[:split] + r'\n' + line[split:]
      masked_line = masked_line[:split] + r'\n' + masked_line[split:]
      m = search_re.match(masked_line)
    output.extend(line.split(r'\n'))
    mask_output.extend(masked_line.split(r'\n'))
  return (output, mask_output)


def split_double_braces(input):
  """Masks out the quotes and comments, and then splits appropriate
  lines (lines that matche the double_*_brace re's above) before
  indenting them below.

  These are used to split lines which have multiple braces on them, so
  that the indentation looks prettier when all laid out (e.g. closing
  braces make a nice diagonal line).
  """
  double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
  double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')

  masked_input = mask_quotes(input)
  masked_input = mask_comments(masked_input)

  (output, mask_output) = do_split(input, masked_input, double_open_brace_re)
  (output, mask_output) = do_split(output, mask_output, double_close_brace_re)

  return output


def count_braces(line):
  """keeps track of the number of braces on a given line and returns the result.

  It starts at zero and subtracts for closed braces, and adds for open braces.
  """
  open_braces = ['[', '(', '{']
  close_braces = [']', ')', '}']
  closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
  cnt = 0
  stripline = COMMENT_RE.sub(r'', line)
  stripline = QUOTE_RE.sub(r"''", stripline)
  for char in stripline:
    for brace in open_braces:
      if char == brace:
        cnt += 1
    for brace in close_braces:
      if char == brace:
        cnt -= 1

  after = False
  if cnt > 0:
    after = True

  # This catches the special case of a closing brace having something
  # other than just whitespace ahead of it -- we don't want to
  # unindent that until after this line is printed so it stays with
  # the previous indentation level.
  if cnt < 0 and closing_prefix_re.match(stripline):
    after = True
  return (cnt, after)


def prettyprint_input(lines):
  """Does the main work of indenting the input based on the brace counts."""
  indent = 0
  basic_offset = 2
  last_line = ""
  for line in lines:
    if COMMENT_RE.match(line):
      print line
    else:
      line = line.strip('\r\n\t ')  # Otherwise doesn't strip \r on Unix.
      if len(line) > 0:
        (brace_diff, after) = count_braces(line)
        if brace_diff != 0:
          if after:
            print " " * (basic_offset * indent) + line
            indent += brace_diff
          else:
            indent += brace_diff
            print " " * (basic_offset * indent) + line
        else:
          print " " * (basic_offset * indent) + line
      else:
        print ""
    last_line = line


def main():
  if len(sys.argv) > 1:
    data = open(sys.argv[1]).read().splitlines()
  else:
    data = sys.stdin.read().splitlines()
  # Split up the double braces.
  lines = split_double_braces(data)

  # Indent and print the output.
  prettyprint_input(lines)
  return 0


if __name__ == '__main__':
  sys.exit(main())
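The core idea of prettyprint_input and count_braces above is simple: keep a running count of unmatched brackets and indent each line by that depth, unindenting closing-bracket lines before they are printed. A simplified Python 3 re-sketch of that idea (not the script itself — it skips the quote/comment masking and the double-brace line splitting the real tool performs):

```python
def indent_lines(lines, offset=2):
    """Indent each line by the running count of unmatched [, {, ( seen so far."""
    out, depth = [], 0
    for raw in lines:
        line = raw.strip()
        diff = sum(line.count(c) for c in "[{(") - sum(line.count(c) for c in "]})")
        if line.startswith(("]", "}", ")")):
            depth += diff                        # unindent before printing a closing line
            out.append(" " * (offset * depth) + line)
        else:
            out.append(" " * (offset * depth) + line)
            depth += diff                        # deeper indentation starts on the next line
    return out

sample = ["{", "'targets': [", "{", "'target_name': 'snowcrash',", "},", "],", "}"]
print("\n".join(indent_lines(sample)))
# {
#   'targets': [
#     {
#       'target_name': 'snowcrash',
#     },
#   ],
# }
```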
data/ext/snowcrash/tools/gyp/tools/pretty_sln.py
@@ -0,0 +1,168 @@
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Prints the information in a sln file in a diffable way.

   It first outputs each projects in alphabetical order with their
   dependencies.

   Then it outputs a possible build order.
"""

__author__ = 'nsylvain (Nicolas Sylvain)'

import os
import re
import sys
import pretty_vcproj

def BuildProject(project, built, projects, deps):
  # if all dependencies are done, we can build it, otherwise we try to build the
  # dependency.
  # This is not infinite-recursion proof.
  for dep in deps[project]:
    if dep not in built:
      BuildProject(dep, built, projects, deps)
  print project
  built.append(project)

def ParseSolution(solution_file):
  # All projects, their clsid and paths.
  projects = dict()

  # A list of dependencies associated with a project.
  dependencies = dict()

  # Regular expressions that matches the SLN format.
  # The first line of a project definition.
  begin_project = re.compile(('^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                              '}"\) = "(.*)", "(.*)", "(.*)"$'))
  # The last line of a project definition.
  end_project = re.compile('^EndProject$')
  # The first line of a dependency list.
  begin_dep = re.compile('ProjectSection\(ProjectDependencies\) = postProject$')
  # The last line of a dependency list.
  end_dep = re.compile('EndProjectSection$')
  # A line describing a dependency.
  dep_line = re.compile(' *({.*}) = ({.*})$')

  in_deps = False
  solution = open(solution_file)
  for line in solution:
    results = begin_project.search(line)
    if results:
      # Hack to remove icu because the diff is too different.
      if results.group(1).find('icu') != -1:
        continue
      # We remove "_gyp" from the names because it helps to diff them.
      current_project = results.group(1).replace('_gyp', '')
      projects[current_project] = [results.group(2).replace('_gyp', ''),
                                   results.group(3),
                                   results.group(2)]
      dependencies[current_project] = []
      continue

    results = end_project.search(line)
    if results:
      current_project = None
      continue

    results = begin_dep.search(line)
    if results:
      in_deps = True
      continue

    results = end_dep.search(line)
    if results:
      in_deps = False
      continue

    results = dep_line.search(line)
    if results and in_deps and current_project:
      dependencies[current_project].append(results.group(1))
      continue

  # Change all dependencies clsid to name instead.
  for project in dependencies:
    # For each dependencies in this project
    new_dep_array = []
    for dep in dependencies[project]:
      # Look for the project name matching this cldis
      for project_info in projects:
        if projects[project_info][1] == dep:
          new_dep_array.append(project_info)
    dependencies[project] = sorted(new_dep_array)

  return (projects, dependencies)

def PrintDependencies(projects, deps):
  print "---------------------------------------"
  print "Dependencies for all projects"
  print "---------------------------------------"
  print "-- --"

  for (project, dep_list) in sorted(deps.items()):
    print "Project : %s" % project
    print "Path : %s" % projects[project][0]
    if dep_list:
      for dep in dep_list:
        print "  - %s" % dep
    print ""

  print "-- --"

def PrintBuildOrder(projects, deps):
  print "---------------------------------------"
  print "Build order "
  print "---------------------------------------"
  print "-- --"

  built = []
  for (project, _) in sorted(deps.items()):
    if project not in built:
      BuildProject(project, built, projects, deps)

  print "-- --"

def PrintVCProj(projects):

  for project in projects:
    print "-------------------------------------"
    print "-------------------------------------"
    print project
    print project
    print project
    print "-------------------------------------"
    print "-------------------------------------"

    project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                projects[project][2]))

    pretty = pretty_vcproj
    argv = [ '',
             project_path,
             '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
           ]
    argv.extend(sys.argv[3:])
    pretty.main(argv)

def main():
  # check if we have exactly 1 parameter.
  if len(sys.argv) < 2:
    print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
    return 1

  (projects, deps) = ParseSolution(sys.argv[1])
  PrintDependencies(projects, deps)
  PrintBuildOrder(projects, deps)

  if '--recursive' in sys.argv:
    PrintVCProj(projects)
  return 0


if __name__ == '__main__':
  sys.exit(main())
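BuildProject above is a depth-first, dependency-first walk: a project is printed only after everything it depends on has been printed, and (as its comment notes) it is not cycle-proof. A minimal Python 3 sketch of the same ordering on a toy dependency map — the project names here are made up for illustration:

```python
def build_order(deps):
    """Return projects ordered so that each appears after its dependencies."""
    built = []

    def visit(project):
        # Build (emit) every unbuilt dependency first, then the project itself.
        for dep in deps[project]:
            if dep not in built:
                visit(dep)
        if project not in built:
            built.append(project)

    for project in sorted(deps):
        visit(project)
    return built

deps = {
    "app": ["libsnowcrash", "libsundown"],   # illustrative names only
    "libsnowcrash": ["libsundown"],
    "libsundown": [],
}
print(build_order(deps))  # ['libsundown', 'libsnowcrash', 'app']
```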
data/ext/snowcrash/tools/gyp/tools/pretty_vcproj.py
@@ -0,0 +1,329 @@
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Make the format of a vcproj really pretty.

   This script normalize and sort an xml. It also fetches all the properties
   inside linked vsprops and include them explicitly in the vcproj.

   It outputs the resulting xml to stdout.
"""

__author__ = 'nsylvain (Nicolas Sylvain)'

import os
import sys

from xml.dom.minidom import parse
from xml.dom.minidom import Node

REPLACEMENTS = dict()
ARGUMENTS = None


class CmpTuple(object):
  """Compare function between 2 tuple."""
  def __call__(self, x, y):
    return cmp(x[0], y[0])


class CmpNode(object):
  """Compare function between 2 xml nodes."""

  def __call__(self, x, y):
    def get_string(node):
      node_string = "node"
      node_string += node.nodeName
      if node.nodeValue:
        node_string += node.nodeValue

      if node.attributes:
        # We first sort by name, if present.
        node_string += node.getAttribute("Name")

        all_nodes = []
        for (name, value) in node.attributes.items():
          all_nodes.append((name, value))

        all_nodes.sort(CmpTuple())
        for (name, value) in all_nodes:
          node_string += name
          node_string += value

      return node_string

    return cmp(get_string(x), get_string(y))


def PrettyPrintNode(node, indent=0):
  if node.nodeType == Node.TEXT_NODE:
    if node.data.strip():
      print '%s%s' % (' '*indent, node.data.strip())
    return

  if node.childNodes:
    node.normalize()
  # Get the number of attributes
  attr_count = 0
  if node.attributes:
    attr_count = node.attributes.length

  # Print the main tag
  if attr_count == 0:
    print '%s<%s>' % (' '*indent, node.nodeName)
  else:
    print '%s<%s' % (' '*indent, node.nodeName)

    all_attributes = []
    for (name, value) in node.attributes.items():
      all_attributes.append((name, value))
      all_attributes.sort(CmpTuple())
    for (name, value) in all_attributes:
      print '%s  %s="%s"' % (' '*indent, name, value)
    print '%s>' % (' '*indent)
  if node.nodeValue:
    print '%s  %s' % (' '*indent, node.nodeValue)

  for sub_node in node.childNodes:
    PrettyPrintNode(sub_node, indent=indent+2)
  print '%s</%s>' % (' '*indent, node.nodeName)


def FlattenFilter(node):
  """Returns a list of all the node and sub nodes."""
  node_list = []

  if (node.attributes and
      node.getAttribute('Name') == '_excluded_files'):
    # We don't add the "_excluded_files" filter.
    return []

  for current in node.childNodes:
    if current.nodeName == 'Filter':
      node_list.extend(FlattenFilter(current))
    else:
      node_list.append(current)

  return node_list


def FixFilenames(filenames, current_directory):
  new_list = []
  for filename in filenames:
    if filename:
      for key in REPLACEMENTS:
        filename = filename.replace(key, REPLACEMENTS[key])
      os.chdir(current_directory)
      filename = filename.strip('"\' ')
      if filename.startswith('$'):
        new_list.append(filename)
      else:
        new_list.append(os.path.abspath(filename))
  return new_list


def AbsoluteNode(node):
  """Makes all the properties we know about in this node absolute."""
  if node.attributes:
    for (name, value) in node.attributes.items():
      if name in ['InheritedPropertySheets', 'RelativePath',
                  'AdditionalIncludeDirectories',
                  'IntermediateDirectory', 'OutputDirectory',
                  'AdditionalLibraryDirectories']:
        # We want to fix up these paths
        path_list = value.split(';')
        new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
        node.setAttribute(name, ';'.join(new_list))
      if not value:
        node.removeAttribute(name)


def CleanupVcproj(node):
  """For each sub node, we call recursively this function."""
  for sub_node in node.childNodes:
    AbsoluteNode(sub_node)
    CleanupVcproj(sub_node)

  # Normalize the node, and remove all extranous whitespaces.
  for sub_node in node.childNodes:
    if sub_node.nodeType == Node.TEXT_NODE:
      sub_node.data = sub_node.data.replace("\r", "")
      sub_node.data = sub_node.data.replace("\n", "")
      sub_node.data = sub_node.data.rstrip()

  # Fix all the semicolon separated attributes to be sorted, and we also
  # remove the dups.
  if node.attributes:
    for (name, value) in node.attributes.items():
      sorted_list = sorted(value.split(';'))
      unique_list = []
      for i in sorted_list:
        if not unique_list.count(i):
          unique_list.append(i)
      node.setAttribute(name, ';'.join(unique_list))
      if not value:
        node.removeAttribute(name)

  if node.childNodes:
    node.normalize()

  # For each node, take a copy, and remove it from the list.
  node_array = []
  while node.childNodes and node.childNodes[0]:
    # Take a copy of the node and remove it from the list.
    current = node.childNodes[0]
    node.removeChild(current)

    # If the child is a filter, we want to append all its children
    # to this same list.
    if current.nodeName == 'Filter':
      node_array.extend(FlattenFilter(current))
    else:
      node_array.append(current)


  # Sort the list.
  node_array.sort(CmpNode())

  # Insert the nodes in the correct order.
  for new_node in node_array:
    # But don't append empty tool node.
    if new_node.nodeName == 'Tool':
      if new_node.attributes and new_node.attributes.length == 1:
        # This one was empty.
        continue
    if new_node.nodeName == 'UserMacro':
      continue
    node.appendChild(new_node)


def GetConfiguationNodes(vcproj):
  #TODO(nsylvain): Find a better way to navigate the xml.
  nodes = []
  for node in vcproj.childNodes:
    if node.nodeName == "Configurations":
      for sub_node in node.childNodes:
        if sub_node.nodeName == "Configuration":
          nodes.append(sub_node)

  return nodes


def GetChildrenVsprops(filename):
  dom = parse(filename)
  if dom.documentElement.attributes:
    vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
    return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
  return []

def SeekToNode(node1, child2):
  # A text node does not have properties.
  if child2.nodeType == Node.TEXT_NODE:
    return None

  # Get the name of the current node.
  current_name = child2.getAttribute("Name")
  if not current_name:
    # There is no name. We don't know how to merge.
    return None

  # Look through all the nodes to find a match.
  for sub_node in node1.childNodes:
    if sub_node.nodeName == child2.nodeName:
      name = sub_node.getAttribute("Name")
      if name == current_name:
        return sub_node

  # No match. We give up.
  return None


def MergeAttributes(node1, node2):
  # No attributes to merge?
  if not node2.attributes:
    return

  for (name, value2) in node2.attributes.items():
    # Don't merge the 'Name' attribute.
    if name == 'Name':
      continue
    value1 = node1.getAttribute(name)
    if value1:
      # The attribute exist in the main node. If it's equal, we leave it
      # untouched, otherwise we concatenate it.
      if value1 != value2:
        node1.setAttribute(name, ';'.join([value1, value2]))
    else:
      # The attribute does nto exist in the main node. We append this one.
      node1.setAttribute(name, value2)

    # If the attribute was a property sheet attributes, we remove it, since
    # they are useless.
    if name == 'InheritedPropertySheets':
      node1.removeAttribute(name)


def MergeProperties(node1, node2):
  MergeAttributes(node1, node2)
  for child2 in node2.childNodes:
    child1 = SeekToNode(node1, child2)
    if child1:
      MergeProperties(child1, child2)
    else:
      node1.appendChild(child2.cloneNode(True))


def main(argv):
  """Main function of this vcproj prettifier."""
  global ARGUMENTS
  ARGUMENTS = argv

  # check if we have exactly 1 parameter.
  if len(argv) < 2:
    print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
           '[key2=value2]' % argv[0])
    return 1

  # Parse the keys
  for i in range(2, len(argv)):
    (key, value) = argv[i].split('=')
    REPLACEMENTS[key] = value

  # Open the vcproj and parse the xml.
  dom = parse(argv[1])

  # First thing we need to do is find the Configuration Node and merge them
  # with the vsprops they include.
  for configuration_node in GetConfiguationNodes(dom.documentElement):
    # Get the property sheets associated with this configuration.
    vsprops = configuration_node.getAttribute('InheritedPropertySheets')

    # Fix the filenames to be absolute.
    vsprops_list = FixFilenames(vsprops.strip().split(';'),
                                os.path.dirname(argv[1]))

    # Extend the list of vsprops with all vsprops contained in the current
    # vsprops.
    for current_vsprops in vsprops_list:
      vsprops_list.extend(GetChildrenVsprops(current_vsprops))

    # Now that we have all the vsprops, we need to merge them.
    for current_vsprops in vsprops_list:
      MergeProperties(configuration_node,
                      parse(current_vsprops).documentElement)

  # Now that everything is merged, we need to cleanup the xml.
  CleanupVcproj(dom.documentElement)

  # Finally, we use the prett xml function to print the vcproj back to the
  # user.
  #print dom.toprettyxml(newl="\n")
  PrettyPrintNode(dom.documentElement)
  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv))