shell-lite 0.4.2-py3-none-any.whl → 0.4.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
shell_lite/lexer.py CHANGED
@@ -223,9 +223,16 @@ class Lexer:
             'count': 'COUNT', 'many': 'MANY', 'how': 'HOW',
             'field': 'FIELD', 'submit': 'SUBMIT', 'named': 'NAMED',
             'placeholder': 'PLACEHOLDER',
-            'app': 'APP', 'title': 'ID', 'size': 'ID',
-            'column': 'ID', 'row': 'ID',
-            'button': 'ID', 'heading': 'HEADING', 'text': 'ID',
+            'app': 'APP', 'title': 'ID', 'size': 'SIZE',
+            'column': 'COLUMN', 'row': 'ROW',
+            'button': 'BUTTON', 'heading': 'HEADING', 'text': 'TEXT',
+            'sum': 'SUM', 'upper': 'UPPER', 'lower': 'LOWER',
+            'only': 'ONLY', 'letters': 'LETTERS',
+            'numbers': 'NUMBERS', 'digits': 'DIGITS',
+            'that': 'THAT', 'are': 'ARE', 'prime': 'PRIME',
+            'increment': 'INCREMENT', 'decrement': 'DECREMENT',
+            'multiply': 'MULTIPLY', 'divide': 'DIVIDE',
+            'be': 'BE', 'by': 'BY',
         }
         token_type = keywords.get(value, 'ID')
         self.tokens.append(Token(token_type, value, self.line_number, current_col))
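The practical effect of this hunk is that words such as size, column, row, button, and text no longer fall through to the generic ID token, and the new arithmetic/filter vocabulary (sum, increment, by, prime, ...) becomes reserved. A minimal sketch of the lookup involved, with a trimmed stand-in for the real table (the names below are illustrative, not part of the package):

# Sketch of the keyword classification done in the lexer above.
# 'keywords' is a trimmed stand-in for the full dict; the fallback to 'ID'
# mirrors keywords.get(value, 'ID') in the diff.
keywords = {'app': 'APP', 'size': 'SIZE', 'button': 'BUTTON', 'increment': 'INCREMENT'}

def classify(word: str) -> str:
    return keywords.get(word, 'ID')

print(classify('button'))    # BUTTON in 0.4.3; was ID in 0.4.2
print(classify('username'))  # ID, unchanged fallback for ordinary identifiers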
@@ -0,0 +1,252 @@
+import re
+from dataclasses import dataclass
+from typing import List, Optional
+@dataclass
+@dataclass
+class Token:
+    type: str
+    value: str
+    line: int
+    column: int = 1
+class Lexer:
+    def __init__(self, source_code: str):
+        self.source_code = source_code
+        self.tokens: List[Token] = []
+        self.current_char_index = 0
+        self.line_number = 1
+        self.indent_stack = [0]
+    def tokenize(self) -> List[Token]:
+        source = self._remove_multiline_comments(self.source_code)
+        lines = source.split('\n')
+        for line_num, line in enumerate(lines, 1):
+            self.line_number = line_num
+            stripped_line = line.strip()
+            if not stripped_line:
+                continue
+            indent_level = len(line) - len(line.lstrip())
+            if stripped_line.startswith('#'):
+                # self.tokens.append(Token('COMMENT', stripped_line, self.line_number, indent_level + 1))
+                # self.tokens.append(Token('NEWLINE', '', self.line_number, len(line) + 1))
+                continue
+            if indent_level > self.indent_stack[-1]:
+                self.indent_stack.append(indent_level)
+                self.tokens.append(Token('INDENT', '', self.line_number, indent_level + 1))
+            elif indent_level < self.indent_stack[-1]:
+                while indent_level < self.indent_stack[-1]:
+                    self.indent_stack.pop()
+                    self.tokens.append(Token('DEDENT', '', self.line_number, indent_level + 1))
+                if indent_level != self.indent_stack[-1]:
+                    raise IndentationError(f"Unindent does not match any outer indentation level on line {self.line_number}")
+            self.tokenize_line(stripped_line, indent_level + 1)
+            self.tokens.append(Token('NEWLINE', '', self.line_number, len(line) + 1))
+        while len(self.indent_stack) > 1:
+            self.indent_stack.pop()
+            self.tokens.append(Token('DEDENT', '', self.line_number, 1))
+        self.tokens.append(Token('EOF', '', self.line_number, 1))
+        return self.tokens
+    def _remove_multiline_comments(self, source: str) -> str:
+        result = []
+        i = 0
+        while i < len(source):
+            if source[i:i+2] == '/*':
+                end = source.find('*/', i + 2)
+                if end == -1:
+                    raise SyntaxError("Unterminated multi-line comment")
+                comment = source[i:end+2]
+                result.append('\n' * comment.count('\n'))
+                i = end + 2
+            else:
+                result.append(source[i])
+                i += 1
+        return ''.join(result)
+    def tokenize_line(self, line: str, start_col: int = 1):
+        pos = 0
+        while pos < len(line):
+            match = None
+            current_col = start_col + pos
+            if line[pos] == '#':
+                self.tokens.append(Token('COMMENT', line[pos:], self.line_number, current_col))
+                break
+            if line[pos].isspace():
+                pos += 1
+                continue
+            if line[pos].isdigit():
+                match = re.match(r'^\d+(\.\d+)?', line[pos:])
+                if match:
+                    value = match.group(0)
+                    self.tokens.append(Token('NUMBER', value, self.line_number, current_col))
+                    pos += len(value)
+                    continue
+            if line[pos:pos+3] in ('"""', "'''"):
+                quote_char = line[pos:pos+3]
+                pass
+            if line[pos] in ('"', "'"):
+                quote_char = line[pos]
+                end_quote = line.find(quote_char, pos + 1)
+                if end_quote == -1:
+                    raise SyntaxError(f"Unterminated string on line {self.line_number}")
+                value = line[pos+1:end_quote]
+                value = value.replace("\\n", "\n").replace("\\t", "\t").replace("\\r", "\r").replace("\\\"", "\"").replace("\\\'", "\'")
+                self.tokens.append(Token('STRING', value, self.line_number, current_col))
+                pos = end_quote + 1
+                continue
+            if line[pos:pos+3] == '...':
+                self.tokens.append(Token('DOTDOTDOT', '...', self.line_number, current_col))
+                pos += 3
+                continue
+            two_char = line[pos:pos+2]
+            two_char_tokens = {
+                '=>': 'ARROW', '==': 'EQ', '!=': 'NEQ',
+                '<=': 'LE', '>=': 'GE', '+=': 'PLUSEQ',
+                '-=': 'MINUSEQ', '*=': 'MULEQ', '/=': 'DIVEQ',
+                '%=': 'MODEQ'
+            }
+            if two_char in two_char_tokens:
+                self.tokens.append(Token(two_char_tokens[two_char], two_char, self.line_number, current_col))
+                pos += 2
+                continue
+            char = line[pos]
+            rest_of_line = line[pos:]
+            if rest_of_line.startswith('is at least '):
+                self.tokens.append(Token('GE', '>=', self.line_number, current_col))
+                pos += 12
+                continue
+            elif rest_of_line.startswith('is exactly '):
+                self.tokens.append(Token('EQ', '==', self.line_number, current_col))
+                pos += 11
+                continue
+            elif rest_of_line.startswith('is less than '):
+                self.tokens.append(Token('LT', '<', self.line_number, current_col))
+                pos += 13
+                continue
+            elif rest_of_line.startswith('is more than '):
+                self.tokens.append(Token('GT', '>', self.line_number, current_col))
+                pos += 13
+                continue
+            if rest_of_line.startswith('the') and (len(rest_of_line) == 3 or not rest_of_line[3].isalnum()):
+                pos += 3
+                continue
+            if char == '/':
+                last_type = self.tokens[-1].type if self.tokens else None
+                is_division = False
+                if last_type in ('NUMBER', 'STRING', 'ID', 'RPAREN', 'RBRACKET'):
+                    is_division = True
+                if not is_division:
+                    end_slash = line.find('/', pos + 1)
+                    if end_slash != -1:
+                        pattern = line[pos+1:end_slash]
+                        flags = ""
+                        k = end_slash + 1
+                        while k < len(line) and line[k].isalpha():
+                            flags += line[k]
+                            k += 1
+                        self.tokens.append(Token('REGEX', pattern, self.line_number, current_col))
+                        pos = k
+                        continue
+            single_char_tokens = {
+                '+': 'PLUS', '-': 'MINUS', '*': 'MUL', '/': 'DIV',
+                '%': 'MOD', '=': 'ASSIGN', '>': 'GT', '<': 'LT',
+                '?': 'QUESTION', '(': 'LPAREN', ')': 'RPAREN',
+                '[': 'LBRACKET', ']': 'RBRACKET', ':': 'COLON',
+                '{': 'LBRACE', '}': 'RBRACE', ',': 'COMMA', '.': 'DOT'
+            }
+            if char in single_char_tokens:
+                self.tokens.append(Token(single_char_tokens[char], char, self.line_number, current_col))
+                pos += 1
+                continue
+            if char.isalpha() or char == '_':
+                match = re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*', line[pos:])
+                if match:
+                    value = match.group(0)
+                    keywords = {
+                        'if': 'IF', 'else': 'ELSE', 'elif': 'ELIF',
+                        'for': 'FOR', 'in': 'IN', 'range': 'RANGE',
+                        'loop': 'LOOP', 'times': 'TIMES',
+                        'while': 'WHILE', 'until': 'UNTIL',
+                        'repeat': 'REPEAT', 'forever': 'FOREVER',
+                        'stop': 'STOP', 'skip': 'SKIP', 'exit': 'EXIT',
+                        'each': 'FOR',
+                        'unless': 'UNLESS', 'when': 'WHEN', 'otherwise': 'OTHERWISE',
+                        'then': 'THEN', 'do': 'DO',
+                        'print': 'PRINT', 'say': 'SAY', 'show': 'SAY',
+                        'input': 'INPUT', 'ask': 'ASK',
+                        'to': 'TO', 'can': 'TO',
+                        'return': 'RETURN', 'give': 'RETURN',
+                        'fn': 'FN',
+                        'structure': 'STRUCTURE', 'thing': 'STRUCTURE', 'class': 'STRUCTURE',
+                        'has': 'HAS', 'with': 'WITH',
+                        'is': 'IS', 'extends': 'EXTENDS', 'from': 'FROM',
+                        'make': 'MAKE', 'new': 'MAKE',
+                        'yes': 'YES', 'no': 'NO',
+                        'true': 'YES', 'false': 'NO',
+                        'const': 'CONST',
+                        'and': 'AND', 'or': 'OR', 'not': 'NOT',
+                        'try': 'TRY', 'catch': 'CATCH', 'always': 'ALWAYS',
+                        'use': 'USE', 'as': 'AS', 'share': 'SHARE',
+                        'execute': 'EXECUTE', 'run': 'EXECUTE',
+                        'alert': 'ALERT', 'prompt': 'PROMPT', 'confirm': 'CONFIRM',
+                        'spawn': 'SPAWN', 'await': 'AWAIT',
+                        'matches': 'MATCHES',
+                        'on': 'ON',
+                        'download': 'DOWNLOAD',
+                        'compress': 'COMPRESS', 'extract': 'EXTRACT', 'folder': 'FOLDER',
+                        'load': 'LOAD', 'save': 'SAVE', 'csv': 'CSV',
+                        'copy': 'COPY', 'paste': 'PASTE', 'clipboard': 'CLIPBOARD',
+                        'press': 'PRESS', 'type': 'TYPE', 'click': 'CLICK', 'at': 'AT',
+                        'notify': 'NOTIFY',
+                        'date': 'ID', 'today': 'ID', 'after': 'AFTER', 'before': 'BEFORE',
+                        'list': 'LIST', 'set': 'SET', 'unique': 'UNIQUE', 'of': 'OF',
+                        'wait': 'WAIT',
+                        'convert': 'CONVERT', 'json': 'JSON',
+                        'listen': 'LISTEN', 'port': 'PORT',
+                        'every': 'EVERY', 'minute': 'MINUTE', 'minutes': 'MINUTE',
+                        'second': 'SECOND', 'seconds': 'SECOND',
+                        'progress': 'PROGRESS',
+                        'bold': 'BOLD',
+                        'red': 'RED', 'green': 'GREEN', 'blue': 'BLUE',
+                        'yellow': 'YELLOW', 'cyan': 'CYAN', 'magenta': 'MAGENTA',
+                        'serve': 'SERVE', 'static': 'STATIC',
+                        'write': 'WRITE', 'append': 'APPEND', 'read': 'READ', 'file': 'FILE',
+                        'write': 'WRITE', 'append': 'APPEND', 'read': 'READ', 'file': 'FILE',
+                        'db': 'DB', 'database': 'DB',
+                        'query': 'QUERY', 'open': 'OPEN', 'close': 'CLOSE', 'exec': 'EXEC',
+                        'middleware': 'MIDDLEWARE', 'before': 'BEFORE',
+                        'when': 'WHEN', 'someone': 'SOMEONE', 'visits': 'VISITS',
+                        'submits': 'SUBMITS', 'start': 'START', 'server': 'SERVER',
+                        'files': 'FILES',
+                        'define': 'DEFINE', 'page': 'PAGE', 'called': 'CALLED',
+                        'using': 'USING', 'component': 'PAGE',
+                        'heading': 'HEADING', 'paragraph': 'PARAGRAPH',
+                        'image': 'IMAGE',
+                        'add': 'ADD', 'put': 'ADD', 'into': 'INTO',
+                        'count': 'COUNT', 'many': 'MANY', 'how': 'HOW',
+                        'field': 'FIELD', 'submit': 'SUBMIT', 'named': 'NAMED',
+                        'placeholder': 'PLACEHOLDER',
+                        'app': 'APP', 'title': 'ID', 'size': 'SIZE',
+                        'column': 'COLUMN', 'row': 'ROW',
+                        'button': 'BUTTON', 'heading': 'HEADING',
+                        'sum': 'SUM', 'upper': 'UPPER', 'lower': 'LOWER',
+                        'increment': 'INCREMENT', 'decrement': 'DECREMENT',
+                        'multiply': 'MULTIPLY', 'divide': 'DIVIDE',
+                        'be': 'BE', 'by': 'BY',
+                        'plus': 'PLUS', 'minus': 'MINUS', 'divided': 'DIV',
+                        'greater': 'GREATER', 'less': 'LESS', 'equal': 'EQUAL',
+                        'define': 'DEFINE', 'function': 'FUNCTION',
+                        'contains': 'CONTAINS', 'empty': 'EMPTY',
+                        'remove': 'REMOVE',
+                        'than': 'THAN',
+                        'doing': 'DOING',
+                        'make': 'MAKE', 'be': 'BE',
+                        'as': 'AS', 'long': 'LONG',
+                        'otherwise': 'OTHERWISE',
+                        'ask': 'ASK',
+                    }
+                    token_type = keywords.get(value, 'ID')
+                    self.tokens.append(Token(token_type, value, self.line_number, current_col))
+                    pos += len(value)
+                    continue
+            if char in single_char_tokens:
+                self.tokens.append(Token(single_char_tokens[char], char, self.line_number, current_col))
+                pos += 1
+                continue
+            raise SyntaxError(f"Illegal character '{char}' at line {self.line_number}")
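The file added above (252 lines, not named in this view, though it matches the lexer_new module that main.py now imports) is self-contained: a Token dataclass plus an indentation-tracking Lexer. A small usage sketch, assuming that import path:

# Usage sketch only; the module path is inferred from main.py's
# `from .lexer_new import Lexer` and is an assumption, not shown in this diff.
from shell_lite.lexer_new import Lexer

source = 'say "hello"\nincrement score by 2\n'
for tok in Lexer(source).tokenize():
    print(tok.type, repr(tok.value), tok.line, tok.column)
# Expected token types: SAY, STRING, NEWLINE for line 1;
# INCREMENT, ID, BY, NUMBER, NEWLINE for line 2; then EOF.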
shell_lite/main.py CHANGED
@@ -5,13 +5,18 @@ import urllib.request
 import zipfile
 import io
 import subprocess
-from .lexer import Lexer
-from .parser import Parser
-from .interpreter import Interpreter
+from .lexer_new import Lexer
+from .parser_new import Parser
+from .interpreter_new import Interpreter
 from .ast_nodes import *
 import json
 def execute_source(source: str, interpreter: Interpreter):
     lines = source.split('\n')
+    import sys
+    if 'shell_lite.interpreter' in sys.modules:
+        print(f"DEBUG: Loaded interpreter from {sys.modules['shell_lite.interpreter'].__file__}")
+    else:
+        print("DEBUG: shell_lite.interpreter not in sys.modules yet?")
     import difflib
     try:
         lexer = Lexer(source)
@@ -52,6 +57,12 @@ def run_file(filename: str):
     if not os.path.exists(filename):
         print(f"Error: File '{filename}' not found.")
         return
+    import sys
+    if 'shell_lite.interpreter' in sys.modules:
+        print(f"DEBUG: shell_lite.interpreter file: {sys.modules['shell_lite.interpreter'].__file__}")
+    from .interpreter_final import Interpreter
+    print(f"DEBUG: Interpreter class: {Interpreter}")
+
     with open(filename, 'r', encoding='utf-8') as f:
         source = f.read()
     interpreter = Interpreter()
@@ -61,7 +72,7 @@ def run_repl():
     print("\n" + "="*40)
     print(" ShellLite REPL - English Syntax")
     print("="*40)
-    print("Version: v0.04.2 | Made by Shrey Naithani")
+    print("Version: v0.04.3 | Made by Shrey Naithani")
     print("Commands: Type 'exit' to quit, 'help' for examples.")
     print("Note: Terminal commands (like 'shl install') must be run in CMD/PowerShell, not here.")
 
@@ -192,7 +203,7 @@ def install_globally():
     ps_cmd = f'$oldPath = [Environment]::GetEnvironmentVariable("Path", "User"); if ($oldPath -notlike "*ShellLite*") {{ [Environment]::SetEnvironmentVariable("Path", "$oldPath;{install_dir}", "User") }}'
     subprocess.run(["powershell", "-Command", ps_cmd], capture_output=True)
 
-    print(f"\n[SUCCESS] ShellLite (v0.04.1) is installed!")
+    print(f"\n[SUCCESS] ShellLite (v0.04.3) is installed!")
     print(f"Location: {install_dir}")
     print("\nIMPORTANT STEP REQUIRED:")
     print("1. Close ALL open terminal windows (CMD, PowerShell, VS Code).")
@@ -0,0 +1,25 @@
+
+class Environment:
+    def __init__(self, parent=None):
+        self.variables = {}
+        self.parent = parent
+    def get(self, name):
+        if name in self.variables: return self.variables[name]
+        if self.parent: return self.parent.get(name)
+        raise NameError(f"Var '{name}' not found")
+    def set(self, name, val):
+        self.variables[name] = val
+
+class Interpreter:
+    def __init__(self):
+        print("DEBUG: MINIMAL INTERPRETER LOADED")
+        self.global_env = Environment()
+        self.current_env = self.global_env
+        self.functions = {}
+        self.builtins = {
+            'str': str,
+            'print': print
+        }
+    def visit(self, node):
+        print(f"Visiting {type(node).__name__}")
+        return None
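The Environment added above (in a new module whose path is not shown in this view) resolves names through a parent chain, so a child scope sees outer variables until it defines its own; a quick sketch against the class exactly as written:

# Scope-chain behaviour of the Environment class added above
# (the class is assumed to be importable; its module path is not shown).
outer = Environment()
outer.set('greeting', 'hi')

inner = Environment(parent=outer)
inner.set('name', 'Ada')

print(inner.get('name'))      # 'Ada'  (found locally)
print(inner.get('greeting'))  # 'hi'   (falls back to the parent scope)
# inner.get('missing') raises NameError("Var 'missing' not found")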
shell_lite/parser.py CHANGED
@@ -133,6 +133,16 @@ class Parser:
             return self.parse_heading()
         elif self.check('PARAGRAPH'):
             return self.parse_paragraph()
+        elif self.check('INCREMENT'):
+            return self.parse_increment()
+        elif self.check('DECREMENT'):
+            return self.parse_decrement()
+        elif self.check('MULTIPLY'):
+            return self.parse_multiply()
+        elif self.check('DIVIDE'):
+            return self.parse_divide()
+        elif self.check('SET'):
+            return self.parse_set()
         else:
             return self.parse_expression_stmt()
     def parse_alert(self) -> Alert:
@@ -307,9 +317,17 @@ class Parser:
         self.consume('NEWLINE')
         return node
 
-    def parse_make_expr(self) -> Make:
+    def parse_make_expr(self) -> Node:
         token = self.consume('MAKE')
         class_name = self.consume('ID').value
+
+        if self.check('BE'):
+            self.consume('BE')
+            value = self.parse_expression()
+            node = Assign(class_name, value)  # class_name is actually variable name here
+            node.line = token.line
+            return node
+
         args = []
         if self.check('LPAREN'):
             self.consume('LPAREN')
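With the BE branch above, a statement such as `make total be 0` now parses as a plain assignment rather than an object construction; the node built is simply:

# Result of parse_make_expr for "make total be 0" after this change
# (Assign as used in the hunk; Number for the literal is an assumption).
node = Assign('total', Number(0))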
@@ -1087,6 +1105,16 @@ class Parser:
             return node
         first_expr = self.parse_expression()
         skip_formatted()
+
+        if self.check('TO'):
+            self.consume('TO')
+            end_val = self.parse_expression()
+            skip_formatted()
+            self.consume('RBRACKET')
+            node = Call('range', [first_expr, end_val])
+            node.line = token.line
+            return node
+
         if self.check('FOR'):
             self.consume('FOR')
             var_name = self.consume('ID').value
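After this hunk, a bracketed literal like `[1 to 5]` is rewritten into a range call instead of being read as individual list elements; the node built is:

# Result of the new TO branch above for "[1 to 5]"
# (Number nodes for the two literals are an assumption).
node = Call('range', [Number(1), Number(5)])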
@@ -1407,11 +1435,18 @@ class Parser:
             return node
         elif token.type == 'READ':
             token = self.consume('READ')
-            self.consume('FILE')
+            if self.check('FILE'):
+                self.consume('FILE')
             path = self.parse_factor()
             node = FileRead(path)
             node.line = token.line
             return node
+        elif token.type == 'SUM':
+            return self.parse_sum()
+        elif token.type == 'UPPER':
+            return self.parse_upper()
+        elif token.type == 'NUMBERS':
+            return self.parse_numbers_range()
         elif token.type == 'DATE':
             token = self.consume('DATE')
             s = self.consume('STRING').value
@@ -1814,3 +1849,245 @@ class Parser:
         node = FileWrite(path, content, 'a')
         node.line = token.line
         return node
+
+
+    def parse_increment(self) -> Assign:
+
+        token = self.consume('INCREMENT')
+
+        name = self.consume('ID').value
+
+        amount = Number(1)
+
+        if self.check('BY'):
+
+            self.consume('BY')
+
+            amount = self.parse_expression()
+
+        self.consume('NEWLINE')
+
+        node = Assign(name, BinOp(VarAccess(name), '+', amount))
+
+        node.line = token.line
+
+        return node
+
+
+
+    def parse_decrement(self) -> Assign:
+
+        token = self.consume('DECREMENT')
+
+        name = self.consume('ID').value
+
+        amount = Number(1)
+
+        if self.check('BY'):
+
+            self.consume('BY')
+
+            amount = self.parse_expression()
+
+        self.consume('NEWLINE')
+
+        node = Assign(name, BinOp(VarAccess(name), '-', amount))
+
+        node.line = token.line
+
+        return node
+
+
+
+    def parse_multiply(self) -> Assign:
+
+        token = self.consume('MULTIPLY')
+
+        name = self.consume('ID').value
+
+        self.consume('BY')
+
+        amount = self.parse_expression()
+
+        self.consume('NEWLINE')
+
+        node = Assign(name, BinOp(VarAccess(name), '*', amount))
+
+        node.line = token.line
+
+        return node
+
+
+
+    def parse_divide(self) -> Assign:
+
+        token = self.consume('DIVIDE')
+
+        name = self.consume('ID').value
+
+        self.consume('BY')
+
+        amount = self.parse_expression()
+
+        self.consume('NEWLINE')
+
+        node = Assign(name, BinOp(VarAccess(name), '/', amount))
+
+        node.line = token.line
+
+        return node
+
+
+
+    def parse_set(self) -> Assign:
+
+        token = self.consume('SET')
+
+        name = self.consume('ID').value
+
+        self.consume('TO')
+
+        value = self.parse_expression()
+
+        self.consume('NEWLINE')
+
+        node = Assign(name, value)
+
+        node.line = token.line
+
+        return node
+
+
+
+    def parse_sum(self) -> Node:
+
+        token = self.consume('SUM')
+
+        self.consume('OF')
+
+
+
+        # Check for 'numbers from ...'
+
+        if self.check('NUMBERS'):
+
+            range_node = self.parse_numbers_range()
+
+            # range_node is Call('range_list', ...)
+
+            # We want Call('sum', [range_node])
+
+            node = Call('sum', [range_node])
+
+            node.line = token.line
+
+            return node
+
+
+
+        expr = self.parse_expression()
+
+        node = Call('sum', [expr])
+
+        node.line = token.line
+
+        return node
+
+
+
+    def parse_upper(self) -> Node:
+
+        token = self.consume('UPPER')
+
+        expr = self.parse_expression()
+
+        only_letters = Boolean(False)
+
+        if self.check('ONLY'):
+
+            self.consume('ONLY')
+
+            if self.check('LETTERS'):
+
+                self.consume('LETTERS')
+
+                only_letters = Boolean(True)
+
+        node = Call('upper', [expr, only_letters])
+
+        node.line = token.line
+
+        return node
+
+
+
+    def parse_numbers_range(self) -> Node:
+
+        token = self.consume('NUMBERS')
+
+        self.consume('FROM')
+
+        start = self.parse_expression()
+
+        self.consume('TO')
+
+        end = self.parse_expression()
+
+
+
+        condition = None
+
+        if self.check('THAT'):
+
+            self.consume('THAT')
+
+            self.consume('ARE')
+
+            if self.check('PRIME'):
+
+                self.consume('PRIME')
+
+                condition = String('prime')
+
+            elif self.check('DIGITS'):
+
+                self.consume('DIGITS')
+
+                condition = String('digits')
+
+            elif self.check('WHEN'):
+
+                self.consume('WHEN')
+
+                # 'when even' -> check for ID 'even' or expression?
+
+                # User example: 'when even'. Implicit variable?
+
+                # Let's verify repro: 'when even'
+
+                if self.check('ID') and self.peek().value == 'even':
+
+                    self.consume()
+
+                    condition = String('even')
+
+                elif self.check('ID') and self.peek().value == 'odd':
+
+                    self.consume()
+
+                    condition = String('odd')
+
+                else:
+
+                    # TODO: handle generic expression filter if needed
+
+                    pass
+
+
+
+        node = Call('range_list', [start, end, condition if condition else Boolean(False)])
+
+        node.line = token.line
+
+        return node
+
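Taken together, the parse_* methods added above desugar the new English-style statements into the same Assign, BinOp, and Call nodes the parser already emits. A hand-written sketch of the mappings, with the AST constructors used exactly as in the hunk (the import path is an assumption, not shown in this diff):

# Desugarings produced by the new parser methods, written out by hand.
from shell_lite.ast_nodes import Assign, BinOp, VarAccess, Number, Call, String

# "increment score by 2"  ->  score = score + 2
inc = Assign('score', BinOp(VarAccess('score'), '+', Number(2)))

# "divide total by 4"     ->  total = total / 4
div = Assign('total', BinOp(VarAccess('total'), '/', Number(4)))

# "set limit to 10"       ->  limit = 10
setter = Assign('limit', Number(10))

# "sum of numbers from 1 to 20 that are prime"
primes = Call('range_list', [Number(1), Number(20), String('prime')])
total = Call('sum', [primes])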