shell-lite 0.5-py3-none-any.whl → 0.5.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- shell_lite/interpreter.py +1 -1
- shell_lite/lexer.py +48 -12
- shell_lite/main.py +1 -1
- shell_lite/parser_gbp.py +41 -13
- shell_lite-0.5.2.dist-info/LICENSE +688 -0
- shell_lite-0.5.2.dist-info/METADATA +478 -0
- shell_lite-0.5.2.dist-info/RECORD +20 -0
- {shell_lite-0.5.dist-info → shell_lite-0.5.2.dist-info}/WHEEL +1 -1
- {shell_lite-0.5.dist-info → shell_lite-0.5.2.dist-info}/top_level.txt +0 -1
- shell_lite-0.5.dist-info/LICENSE +0 -21
- shell_lite-0.5.dist-info/METADATA +0 -93
- shell_lite-0.5.dist-info/RECORD +0 -33
- tests/__init__.py +0 -1
- tests/benchmark_driver.py +0 -43
- tests/compare_parsers.py +0 -31
- tests/debug_jit.py +0 -49
- tests/generate_actual_graph.py +0 -84
- tests/generate_perf_graph.py +0 -68
- tests/generate_runtime_graph.py +0 -58
- tests/run_jit.py +0 -70
- tests/test_gbp_standalone.py +0 -37
- tests/test_interpreter.py +0 -8
- tests/test_lexer.py +0 -8
- tests/test_parser.py +0 -8
- tests/test_stdlib.py +0 -8
- {shell_lite-0.5.dist-info → shell_lite-0.5.2.dist-info}/entry_points.txt +0 -0
shell_lite/interpreter.py
CHANGED
|
@@ -1502,7 +1502,7 @@ class Interpreter:
|
|
|
1502
1502
|
self.wfile.write(str(e).encode())
|
|
1503
1503
|
except: pass
|
|
1504
1504
|
server = ReusableHTTPServer(('0.0.0.0', port_val), ShellLiteHandler)
|
|
1505
|
-
print(f"\n ShellLite Server v0.
|
|
1505
|
+
print(f"\n ShellLite Server v0.5.2 is running!")
|
|
1506
1506
|
print(f" \u001b[1;36m➜\u001b[0m Local: \u001b[1;4;36mhttp://localhost:{port_val}/\u001b[0m\n")
|
|
1507
1507
|
try: server.serve_forever()
|
|
1508
1508
|
except KeyboardInterrupt:
|
shell_lite/lexer.py
CHANGED
|
@@ -15,6 +15,8 @@ class Lexer:
|
|
|
15
15
|
self.current_char_index = 0
|
|
16
16
|
self.line_number = 1
|
|
17
17
|
self.indent_stack = [0]
|
|
18
|
+
self.bracket_depth = 0
|
|
19
|
+
|
|
18
20
|
def tokenize(self) -> List[Token]:
|
|
19
21
|
source = self._remove_multiline_comments(self.source_code)
|
|
20
22
|
lines = source.split('\n')
|
|
@@ -27,21 +29,49 @@ class Lexer:
|
|
|
27
29
|
if stripped_line.startswith('#'):
|
|
28
30
|
continue
|
|
29
31
|
if indent_level > self.indent_stack[-1]:
|
|
30
|
-
self.
|
|
31
|
-
|
|
32
|
+
if self.bracket_depth == 0:
|
|
33
|
+
self.indent_stack.append(indent_level)
|
|
34
|
+
self.tokens.append(Token('INDENT', '', self.line_number, indent_level + 1))
|
|
32
35
|
elif indent_level < self.indent_stack[-1]:
|
|
33
|
-
|
|
34
|
-
self.indent_stack
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
36
|
+
if self.bracket_depth == 0:
|
|
37
|
+
while indent_level < self.indent_stack[-1]:
|
|
38
|
+
self.indent_stack.pop()
|
|
39
|
+
self.tokens.append(Token('DEDENT', '', self.line_number, indent_level + 1))
|
|
40
|
+
if indent_level != self.indent_stack[-1]:
|
|
41
|
+
raise IndentationError(f"Unindent does not match any outer indentation level on line {self.line_number}")
|
|
38
42
|
self.tokenize_line(stripped_line, indent_level + 1)
|
|
39
|
-
|
|
43
|
+
if self.bracket_depth == 0:
|
|
44
|
+
self.tokens.append(Token('NEWLINE', '', self.line_number, len(line) + 1))
|
|
40
45
|
while len(self.indent_stack) > 1:
|
|
41
46
|
self.indent_stack.pop()
|
|
42
47
|
self.tokens.append(Token('DEDENT', '', self.line_number, 1))
|
|
43
48
|
self.tokens.append(Token('EOF', '', self.line_number, 1))
|
|
49
|
+
# Post-process: Convert BEGIN/END to INDENT/DEDENT
|
|
50
|
+
self.tokens = self._convert_begin_end(self.tokens)
|
|
44
51
|
return self.tokens
|
|
52
|
+
|
|
53
|
+
def _convert_begin_end(self, tokens: List[Token]) -> List[Token]:
|
|
54
|
+
"""Convert BEGIN/END keywords to INDENT/DEDENT for uniform parsing."""
|
|
55
|
+
result = []
|
|
56
|
+
i = 0
|
|
57
|
+
while i < len(tokens):
|
|
58
|
+
token = tokens[i]
|
|
59
|
+
if token.type == 'BEGIN':
|
|
60
|
+
# Remove preceding NEWLINE if present (since begin is on its own line)
|
|
61
|
+
if result and result[-1].type == 'NEWLINE':
|
|
62
|
+
result.pop()
|
|
63
|
+
# Just add INDENT (the newline was already there from previous line)
|
|
64
|
+
result.append(Token('INDENT', '', token.line, token.column))
|
|
65
|
+
elif token.type == 'END':
|
|
66
|
+
# Add DEDENT
|
|
67
|
+
result.append(Token('DEDENT', '', token.line, token.column))
|
|
68
|
+
# Skip the NEWLINE after end if present
|
|
69
|
+
if i + 1 < len(tokens) and tokens[i + 1].type == 'NEWLINE':
|
|
70
|
+
i += 1 # Skip the next NEWLINE
|
|
71
|
+
else:
|
|
72
|
+
result.append(token)
|
|
73
|
+
i += 1
|
|
74
|
+
return result
|
|
45
75
|
def _remove_multiline_comments(self, source: str) -> str:
|
|
46
76
|
result = []
|
|
47
77
|
i = 0
|
|
@@ -150,6 +180,15 @@ class Lexer:
|
|
|
150
180
|
}
|
|
151
181
|
if char in single_char_tokens:
|
|
152
182
|
self.tokens.append(Token(single_char_tokens[char], char, self.line_number, current_col))
|
|
183
|
+
|
|
184
|
+
# Track bracket depth here too
|
|
185
|
+
if char in '([{':
|
|
186
|
+
self.bracket_depth += 1
|
|
187
|
+
elif char in ')]}':
|
|
188
|
+
self.bracket_depth -= 1
|
|
189
|
+
if self.bracket_depth < 0:
|
|
190
|
+
self.bracket_depth = 0
|
|
191
|
+
|
|
153
192
|
pos += 1
|
|
154
193
|
continue
|
|
155
194
|
if char.isalpha() or char == '_':
|
|
@@ -167,6 +206,7 @@ class Lexer:
|
|
|
167
206
|
'check': 'CHECK',
|
|
168
207
|
'unless': 'UNLESS', 'when': 'WHEN', 'otherwise': 'OTHERWISE',
|
|
169
208
|
'then': 'THEN', 'do': 'DO',
|
|
209
|
+
'begin': 'BEGIN', 'end': 'END',
|
|
170
210
|
'print': 'PRINT', 'say': 'SAY', 'show': 'SAY',
|
|
171
211
|
'input': 'INPUT', 'ask': 'ASK',
|
|
172
212
|
'to': 'TO', 'can': 'TO',
|
|
@@ -246,8 +286,4 @@ class Lexer:
|
|
|
246
286
|
self.tokens.append(Token(token_type, value, self.line_number, current_col))
|
|
247
287
|
pos += len(value)
|
|
248
288
|
continue
|
|
249
|
-
if char in single_char_tokens:
|
|
250
|
-
self.tokens.append(Token(single_char_tokens[char], char, self.line_number, current_col))
|
|
251
|
-
pos += 1
|
|
252
|
-
continue
|
|
253
289
|
raise SyntaxError(f"Illegal character '{char}' at line {self.line_number}")
|
shell_lite/main.py
CHANGED
|
@@ -150,7 +150,7 @@ def install_globally():
|
|
|
150
150
|
return
|
|
151
151
|
ps_cmd = f'$oldPath = [Environment]::GetEnvironmentVariable("Path", "User"); if ($oldPath -notlike "*ShellLite*") {{ [Environment]::SetEnvironmentVariable("Path", "$oldPath;{install_dir}", "User") }}'
|
|
152
152
|
subprocess.run(["powershell", "-Command", ps_cmd], capture_output=True)
|
|
153
|
-
print(f"\n[SUCCESS] ShellLite (v0.
|
|
153
|
+
print(f"\n[SUCCESS] ShellLite (v0.5.2) is installed!")
|
|
154
154
|
print(f"Location: {install_dir}")
|
|
155
155
|
print("\nIMPORTANT STEP REQUIRED:")
|
|
156
156
|
print("1. Close ALL open terminal windows (CMD, PowerShell, VS Code).")
|
shell_lite/parser_gbp.py
CHANGED
|
@@ -11,6 +11,7 @@ class GeoNode:
|
|
|
11
11
|
tokens: List[Token] = field(default_factory=list)
|
|
12
12
|
children: List['GeoNode'] = field(default_factory=list)
|
|
13
13
|
parent: Optional['GeoNode'] = None
|
|
14
|
+
|
|
14
15
|
def __repr__(self):
|
|
15
16
|
return f"GeoNode(line={self.line}, indent={self.indent_level}, head={self.head_token.type})"
|
|
16
17
|
class GeometricBindingParser:
|
|
@@ -39,19 +40,6 @@ class GeometricBindingParser:
|
|
|
39
40
|
Phase 1: Scans tokens to build GeoNodes.
|
|
40
41
|
Phase 2: Links them into a tree based on nesting.
|
|
41
42
|
"""
|
|
42
|
-
logical_lines = []
|
|
43
|
-
current_line_tokens = []
|
|
44
|
-
for token in self.tokens:
|
|
45
|
-
if token.type == 'NEWLINE':
|
|
46
|
-
if current_line_tokens:
|
|
47
|
-
logical_lines.append(current_line_tokens)
|
|
48
|
-
current_line_tokens = []
|
|
49
|
-
elif token.type in ('INDENT', 'DEDENT', 'EOF'):
|
|
50
|
-
pass # logic handled below in a smarter way?
|
|
51
|
-
else:
|
|
52
|
-
current_line_tokens.append(token)
|
|
53
|
-
if current_line_tokens:
|
|
54
|
-
logical_lines.append(current_line_tokens)
|
|
55
43
|
node_stack: List[GeoNode] = [] # The active parents
|
|
56
44
|
current_tokens_accumulator = []
|
|
57
45
|
current_node: Optional[GeoNode] = None
|
|
@@ -103,6 +91,10 @@ class GeometricBindingParser:
|
|
|
103
91
|
return self.bind_return(node)
|
|
104
92
|
elif head_type == 'REPEAT':
|
|
105
93
|
return self.bind_repeat(node)
|
|
94
|
+
elif head_type == 'START':
|
|
95
|
+
return self.bind_start(node)
|
|
96
|
+
elif head_type == 'LISTEN':
|
|
97
|
+
return self.bind_listen(node)
|
|
106
98
|
elif head_type == 'ID':
|
|
107
99
|
if any(t.type == 'ASSIGN' for t in node.tokens):
|
|
108
100
|
return self.bind_assignment(node)
|
|
@@ -151,6 +143,17 @@ class GeometricBindingParser:
|
|
|
151
143
|
return Assign(name, value)
|
|
152
144
|
def bind_expression_stmt(self, node: GeoNode) -> Any:
|
|
153
145
|
return self.parse_expr_iterative(node.tokens)
|
|
146
|
+
def bind_start(self, node: GeoNode) -> Listen:
|
|
147
|
+
# 'start website' -> Listen(8080)
|
|
148
|
+
# We could parse args if needed, but for now we assume default
|
|
149
|
+
return Listen(Number(8080))
|
|
150
|
+
def bind_listen(self, node: GeoNode) -> Listen:
|
|
151
|
+
# 'listen 8080' or 'listen port 8080'
|
|
152
|
+
expr_tokens = self._extract_expr_tokens(node.tokens, start=1)
|
|
153
|
+
if expr_tokens and expr_tokens[0].type == 'PORT':
|
|
154
|
+
expr_tokens.pop(0)
|
|
155
|
+
port = self.parse_expr_iterative(expr_tokens)
|
|
156
|
+
return Listen(port)
|
|
154
157
|
def bind_func(self, node: GeoNode) -> FunctionDef:
|
|
155
158
|
start = 1
|
|
156
159
|
if node.tokens[0].type == 'DEFINE': start = 2
|
|
@@ -199,6 +202,31 @@ class GeometricBindingParser:
|
|
|
199
202
|
values.append(Number(int(t.value) if '.' not in t.value else float(t.value)))
|
|
200
203
|
elif t.type == 'STRING':
|
|
201
204
|
values.append(String(t.value))
|
|
205
|
+
elif t.type == 'LBRACKET':
|
|
206
|
+
# Consumed nested list
|
|
207
|
+
depth = 1
|
|
208
|
+
j = i + 1
|
|
209
|
+
elements_tokens = []
|
|
210
|
+
current_elem = []
|
|
211
|
+
while j < len(tokens):
|
|
212
|
+
if tokens[j].type == 'LBRACKET': depth += 1
|
|
213
|
+
elif tokens[j].type == 'RBRACKET': depth -= 1
|
|
214
|
+
|
|
215
|
+
if depth == 0:
|
|
216
|
+
if current_elem: elements_tokens.append(current_elem)
|
|
217
|
+
break
|
|
218
|
+
|
|
219
|
+
if tokens[j].type == 'COMMA' and depth == 1:
|
|
220
|
+
elements_tokens.append(current_elem)
|
|
221
|
+
current_elem = []
|
|
222
|
+
else:
|
|
223
|
+
current_elem.append(tokens[j])
|
|
224
|
+
j += 1
|
|
225
|
+
|
|
226
|
+
# Parse elements
|
|
227
|
+
items = [self.parse_expr_iterative(elem) for elem in elements_tokens if elem]
|
|
228
|
+
values.append(ListVal(items))
|
|
229
|
+
i = j # Advance past list
|
|
202
230
|
elif t.type == 'ID':
|
|
203
231
|
if i+1 < len(tokens) and tokens[i+1].type == 'LPAREN':
|
|
204
232
|
values.append(VarAccess(t.value))
|