coffee-script 0.1.6 → 0.2.0

lib/coffee_script/rewriter.rb ADDED
@@ -0,0 +1,208 @@
+ module CoffeeScript
+
+   # In order to keep the grammar simple, the stream of tokens that the Lexer
+   # emits is rewritten by the Rewriter, smoothing out ambiguities, mis-nested
+   # indentation, and single-line flavors of expressions.
+   class Rewriter
+
+     # Tokens that must be balanced.
+     BALANCED_PAIRS = [['(', ')'], ['[', ']'], ['{', '}'], [:INDENT, :OUTDENT]]
+
+     # Tokens that signal the start of a balanced pair.
+     EXPRESSION_START = BALANCED_PAIRS.map {|pair| pair.first }
+
+     # Tokens that signal the end of a balanced pair.
+     EXPRESSION_TAIL = BALANCED_PAIRS.map {|pair| pair.last }
+
+     # Tokens that indicate the close of a clause of an expression.
+     EXPRESSION_CLOSE = [:CATCH, :WHEN, :ELSE, :FINALLY] + EXPRESSION_TAIL
+
+     # The inverse mappings of token pairs we're trying to fix up.
+     INVERSES = BALANCED_PAIRS.inject({}) do |memo, pair|
+       memo[pair.first] = pair.last
+       memo[pair.last] = pair.first
+       memo
+     end
+
+     # Single-line flavors of block expressions that have unclosed endings.
+     # The grammar can't disambiguate them, so we insert the implicit indentation.
+     SINGLE_LINERS = [:ELSE, "=>", :TRY, :FINALLY, :THEN]
+     SINGLE_CLOSERS = ["\n", :CATCH, :FINALLY, :ELSE, :OUTDENT, :LEADING_WHEN]
+
+     # Rewrite the token stream in multiple passes, one logical filter at
+     # a time. This could certainly be changed into a single pass through the
+     # stream, with a big ol' efficient switch, but it's much nicer like this.
+     def rewrite(tokens)
+       @tokens = tokens
+       adjust_comments
+       remove_mid_expression_newlines
+       move_commas_outside_outdents
+       add_implicit_indentation
+       ensure_balance(*BALANCED_PAIRS)
+       rewrite_closing_parens
+       @tokens
+     end
+
+     # Rewrite the token stream, looking one token ahead and behind.
+     # Allow the return value of the block to tell us how many tokens to move
+     # forwards (or backwards) in the stream, to make sure we don't miss anything
+     # as the stream changes length under our feet.
+     def scan_tokens
+       i = 0
+       loop do
+         break unless @tokens[i]
+         move = yield(@tokens[i - 1], @tokens[i], @tokens[i + 1], i)
+         i += move
+       end
+     end
+
+     # Massage newlines and indentations so that comments don't have to be
+     # correctly indented, or appear on their own line.
+     def adjust_comments
+       scan_tokens do |prev, token, post, i|
+         next 1 unless token[0] == :COMMENT
+         before, after = @tokens[i - 2], @tokens[i + 2]
+         if before && after &&
+             ((before[0] == :INDENT && after[0] == :OUTDENT) ||
+              (before[0] == :OUTDENT && after[0] == :INDENT)) &&
+             before[1] == after[1]
+           @tokens.delete_at(i + 2)
+           @tokens.delete_at(i - 2)
+           next 0
+         elsif !["\n", :INDENT, :OUTDENT].include?(prev[0])
+           @tokens.insert(i, ["\n", Value.new("\n", token[1].line)])
+           next 2
+         else
+           next 1
+         end
+       end
+     end
+
+     # Some blocks occur in the middle of expressions -- when we're expecting
+     # this, remove their trailing newlines.
+     def remove_mid_expression_newlines
+       scan_tokens do |prev, token, post, i|
+         next 1 unless post && EXPRESSION_CLOSE.include?(post[0]) && token[0] == "\n"
+         @tokens.delete_at(i)
+         next 0
+       end
+     end
+
+     # Make sure that we don't accidentally break trailing commas, which need
+     # to go on the outside of expression closers.
+     def move_commas_outside_outdents
+       scan_tokens do |prev, token, post, i|
+         if token[0] == :OUTDENT && prev[0] == ','
+           @tokens.delete_at(i)
+           @tokens.insert(i - 1, token)
+         end
+         next 1
+       end
+     end
+
+     # Because our grammar is LALR(1), it can't handle some single-line
+     # expressions that lack ending delimiters. Use the lexer to add the implicit
+     # blocks, so it doesn't need to.
+     # ')' can close a single-line block, but we need to make sure it's balanced.
+     def add_implicit_indentation
+       scan_tokens do |prev, token, post, i|
+         next 1 unless SINGLE_LINERS.include?(token[0]) && post[0] != :INDENT &&
+                       !(token[0] == :ELSE && post[0] == :IF) # Elsifs shouldn't get blocks.
+         line = token[1].line
+         @tokens.insert(i + 1, [:INDENT, Value.new(2, line)])
+         idx = i + 1
+         parens = 0
+         loop do
+           idx += 1
+           tok = @tokens[idx]
+           if !tok || SINGLE_CLOSERS.include?(tok[0]) ||
+              (tok[0] == ')' && parens == 0)
+             @tokens.insert(idx, [:OUTDENT, Value.new(2, line)])
+             break
+           end
+           parens += 1 if tok[0] == '('
+           parens -= 1 if tok[0] == ')'
+         end
+         next 1 unless token[0] == :THEN
+         @tokens.delete_at(i)
+         next 0
+       end
+     end
+
+     # Ensure that all listed pairs of tokens are correctly balanced throughout
+     # the course of the token stream.
+     def ensure_balance(*pairs)
+       levels = Hash.new(0)
+       scan_tokens do |prev, token, post, i|
+         pairs.each do |pair|
+           open, close = *pair
+           levels[open] += 1 if token[0] == open
+           levels[open] -= 1 if token[0] == close
+           raise ParseError.new(token[0], token[1], nil) if levels[open] < 0
+         end
+         next 1
+       end
+       unclosed = levels.detect {|k, v| v > 0 }
+       raise SyntaxError, "unclosed '#{unclosed[0]}'" if unclosed
+     end
+
+     # We'd like to support syntax like this:
+     #     el.click(event =>
+     #       el.hide())
+     # In order to accomplish this, move outdents that follow closing parens
+     # inwards, safely. The steps to accomplish this are:
+     #
+     # 1. Check that all paired tokens are balanced and in order.
+     # 2. Rewrite the stream with a stack: if you see an '(' or INDENT, add it
+     #    to the stack. If you see an ')' or OUTDENT, pop the stack and replace
+     #    it with the inverse of what we've just popped.
+     # 3. Keep track of "debt" for tokens that we fake, to make sure we end
+     #    up balanced in the end.
+     #
+     def rewrite_closing_parens
+       verbose = ENV['VERBOSE']
+       stack, debt = [], Hash.new(0)
+       stack_stats = lambda { "stack: #{stack.inspect} debt: #{debt.inspect}\n\n" }
+       puts "rewrite_closing_original: #{@tokens.inspect}" if verbose
+       scan_tokens do |prev, token, post, i|
+         tag, inv = token[0], INVERSES[token[0]]
+         # Push openers onto the stack.
+         if EXPRESSION_START.include?(tag)
+           stack.push(token)
+           puts "pushing #{tag} #{stack_stats[]}" if verbose
+           next 1
+         # The end of an expression, check stack and debt for a pair.
+         elsif EXPRESSION_TAIL.include?(tag)
+           puts @tokens[i..-1].inspect if verbose
+           # If the tag is already in our debt, swallow it.
+           if debt[inv] > 0
+             debt[inv] -= 1
+             @tokens.delete_at(i)
+             puts "tag in debt #{tag} #{stack_stats[]}" if verbose
+             next 0
+           else
+             # Pop the stack of open delimiters.
+             match = stack.pop
+             mtag = match[0]
+             # Continue onwards if it's the expected tag.
+             if tag == INVERSES[mtag]
+               puts "expected tag #{tag} #{stack_stats[]}" if verbose
+               next 1
+             else
+               # Unexpected close, insert correct close, adding to the debt.
+               debt[mtag] += 1
+               puts "unexpected #{tag}, replacing with #{INVERSES[mtag]} #{stack_stats[]}" if verbose
+               val = mtag == :INDENT ? match[1] : INVERSES[mtag]
+               @tokens.insert(i, [INVERSES[mtag], Value.new(val, token[1].line)])
+               next 1
+             end
+           end
+         else
+           # Uninteresting token:
+           next 1
+         end
+       end
+     end
+
+   end
+ end
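
The three numbered steps in the rewrite_closing_parens comment are easier to follow on a concrete stream. Below is a standalone sketch of the same stack-and-debt idea, not code from the gem: tokens are plain strings instead of the gem's [tag, Value] pairs, and the helper name rewrite_closers is made up for the illustration.

    # Move the OUTDENT that closes the implicit "=>" block inside the ')', as in
    #     el.click(event =>
    #       el.hide())
    INVERSES = { '(' => ')', ')' => '(', 'INDENT' => 'OUTDENT', 'OUTDENT' => 'INDENT' }
    OPENERS  = ['(', 'INDENT']
    CLOSERS  = [')', 'OUTDENT']

    def rewrite_closers(tokens)
      stack, debt = [], Hash.new(0)
      i = 0
      while (tok = tokens[i])
        if OPENERS.include?(tok)
          stack.push(tok)                  # step 2: remember every opener
          i += 1
        elsif CLOSERS.include?(tok)
          if debt[INVERSES[tok]] > 0       # step 3: a close we already faked
            debt[INVERSES[tok]] -= 1
            tokens.delete_at(i)            # swallow it; the stream shrank under us
          elsif INVERSES[stack.last] == tok
            stack.pop                      # the close we expected
            i += 1
          else                             # mis-nested: emit the close we owe
            opener = stack.pop
            debt[opener] += 1
            tokens.insert(i, INVERSES[opener])
            i += 1                         # re-examine the same close next turn
          end
        else
          i += 1
        end
      end
      tokens
    end

    stream = ['el', '.', 'click', '(', 'event', '=>', 'INDENT',
              'el', '.', 'hide', '(', ')', ')', 'OUTDENT']
    p rewrite_closers(stream)
    # => ["el", ".", "click", "(", "event", "=>", "INDENT",
    #     "el", ".", "hide", "(", ")", "OUTDENT", ")"]

The real pass differs only in bookkeeping: it runs under scan_tokens, relies on ensure_balance having run first (step 1), and builds real OUTDENT tokens with Value objects so line numbers survive.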
lib/coffee_script/scope.rb CHANGED
@@ -5,18 +5,19 @@ module CoffeeScript
    # whether a variable has been seen before or if it needs to be declared.
    class Scope

-     attr_reader :parent, :variables, :temp_variable
+     attr_reader :parent, :expressions, :variables, :temp_variable

-     # Initialize a scope with its parent, for lookups up the chain.
-     def initialize(parent=nil)
-       @parent = parent
+     # Initialize a scope with its parent, for lookups up the chain,
+     # as well as the Expressions body where it should declare its variables.
+     def initialize(parent, expressions)
+       @parent, @expressions = parent, expressions
        @variables = {}
-       @temp_variable = @parent ? @parent.temp_variable : '__a'
+       @temp_variable = @parent ? @parent.temp_variable.dup : '__a'
      end

      # Look up a variable in lexical scope, or declare it if not found.
      def find(name, remote=false)
-       found = check(name, remote)
+       found = check(name)
        return found if found || remote
        @variables[name.to_sym] = :var
        found
@@ -29,9 +30,9 @@ module CoffeeScript
      end

      # Just check to see if a variable has already been declared.
-     def check(name, remote=false)
+     def check(name)
        return true if @variables[name.to_sym]
-       @parent && @parent.find(name, true)
+       !!(@parent && @parent.check(name))
      end

      # You can reset a found variable on the immediate scope.
@@ -46,8 +47,8 @@ module CoffeeScript
        @temp_variable.dup
      end

-     def any_declared?
-       !declared_variables.empty?
+     def declarations?(body)
+       !declared_variables.empty? && body == @expressions
      end

      # Return the list of variables first declared in current scope.
@@ -55,6 +56,10 @@ module CoffeeScript
        @variables.select {|k, v| v == :var }.map {|pair| pair[0].to_s }.sort
      end

+     def inspect
+       "<Scope:#{__id__} #{@variables.inspect}>"
+     end
+
    end

  end
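
For a sense of what the Scope changes do in practice, here is a small usage sketch. It assumes the installed gem's scope.rb can be required on its own, and it passes nil where the compiler would hand in an Expressions node -- enough to exercise find and check, though declarations? needs the real body. The .dup added to temp_variable presumably keeps a child scope from mutating its parent's counter string in place.

    require 'coffee_script/scope'

    outer = CoffeeScript::Scope.new(nil, nil)
    inner = CoffeeScript::Scope.new(outer, nil)

    outer.find('x')       # declares x as a :var in the outer scope
    inner.check('x')      # => true  -- walks the parent chain without declaring
    inner.check('y')      # => false -- a strict boolean now, courtesy of the !!
    inner.find('y')       # declares y in the inner scope only
    outer.check('y')      # => false -- lookups never leak downwards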
data/package.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "name": "coffee-script",
+   "lib": "lib/coffee_script/narwhal/lib",
+   "preload": ["coffee-script/loader"],
+   "description": "Unfancy JavaScript",
+   "keywords": ["javascript", "language"],
+   "author": "Jeremy Ashkenas",
+   "version": "0.2.0"
+ }
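
The descriptor is ordinary JSON, so nothing CoffeeScript-specific is needed to read it; a trivial check from Ruby, assuming the json library is available and the file is read from the unpacked gem:

    require 'json'

    pkg = JSON.parse(File.read('package.json'))
    puts pkg['version']            # => 0.2.0
    puts pkg['preload'].inspect    # => ["coffee-script/loader"]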
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: coffee-script
  version: !ruby/object:Gem::Version
-   version: 0.1.6
+   version: 0.2.0
  platform: ruby
  authors:
  - Jeremy Ashkenas
@@ -9,7 +9,7 @@ autorequire:
  bindir: bin
  cert_chain: []

- date: 2009-12-27 00:00:00 -08:00
+ date: 2010-01-05 00:00:00 -05:00
  default_executable:
  dependencies: []

@@ -36,20 +36,20 @@ files:
  - lib/coffee_script/grammar.y
  - lib/coffee_script/lexer.rb
  - lib/coffee_script/narwhal/coffee-script.coffee
- - lib/coffee_script/narwhal/js/coffee-script.js
- - lib/coffee_script/narwhal/js/launcher.js
- - lib/coffee_script/narwhal/js/loader.js
- - lib/coffee_script/narwhal/launcher.coffee
+ - lib/coffee_script/narwhal/lib/coffee-script/loader.js
+ - lib/coffee_script/narwhal/lib/coffee-script.js
  - lib/coffee_script/narwhal/loader.coffee
  - lib/coffee_script/nodes.rb
  - lib/coffee_script/parse_error.rb
  - lib/coffee_script/parser.output
  - lib/coffee_script/parser.rb
+ - lib/coffee_script/rewriter.rb
  - lib/coffee_script/scope.rb
  - lib/coffee_script/value.rb
  - coffee-script.gemspec
  - LICENSE
  - README
+ - package.json
  has_rdoc: false
  homepage: http://jashkenas.github.com/coffee-script/
  licenses: []
lib/coffee_script/narwhal/js/launcher.js DELETED
@@ -1,3 +0,0 @@
- (function(){
-   require("./coffee-script").run(system.args);
- })();
lib/coffee_script/narwhal/launcher.coffee DELETED
@@ -1 +0,0 @@
- require("./coffee-script").run(system.args)