cinderx 2026.1.16.2__cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. __static__/__init__.py +641 -0
  2. __static__/compiler_flags.py +8 -0
  3. __static__/enum.py +160 -0
  4. __static__/native_utils.py +77 -0
  5. __static__/type_code.py +48 -0
  6. __strict__/__init__.py +39 -0
  7. _cinderx.so +0 -0
  8. cinderx/__init__.py +577 -0
  9. cinderx/__pycache__/__init__.cpython-314.pyc +0 -0
  10. cinderx/_asyncio.py +156 -0
  11. cinderx/compileall.py +710 -0
  12. cinderx/compiler/__init__.py +40 -0
  13. cinderx/compiler/__main__.py +137 -0
  14. cinderx/compiler/config.py +7 -0
  15. cinderx/compiler/consts.py +72 -0
  16. cinderx/compiler/debug.py +70 -0
  17. cinderx/compiler/dis_stable.py +283 -0
  18. cinderx/compiler/errors.py +151 -0
  19. cinderx/compiler/flow_graph_optimizer.py +1287 -0
  20. cinderx/compiler/future.py +91 -0
  21. cinderx/compiler/misc.py +32 -0
  22. cinderx/compiler/opcode_cinder.py +18 -0
  23. cinderx/compiler/opcode_static.py +100 -0
  24. cinderx/compiler/opcodebase.py +158 -0
  25. cinderx/compiler/opcodes.py +991 -0
  26. cinderx/compiler/optimizer.py +547 -0
  27. cinderx/compiler/pyassem.py +3711 -0
  28. cinderx/compiler/pycodegen.py +7660 -0
  29. cinderx/compiler/pysourceloader.py +62 -0
  30. cinderx/compiler/static/__init__.py +1404 -0
  31. cinderx/compiler/static/compiler.py +629 -0
  32. cinderx/compiler/static/declaration_visitor.py +335 -0
  33. cinderx/compiler/static/definite_assignment_checker.py +280 -0
  34. cinderx/compiler/static/effects.py +160 -0
  35. cinderx/compiler/static/module_table.py +666 -0
  36. cinderx/compiler/static/type_binder.py +2176 -0
  37. cinderx/compiler/static/types.py +10580 -0
  38. cinderx/compiler/static/util.py +81 -0
  39. cinderx/compiler/static/visitor.py +91 -0
  40. cinderx/compiler/strict/__init__.py +69 -0
  41. cinderx/compiler/strict/class_conflict_checker.py +249 -0
  42. cinderx/compiler/strict/code_gen_base.py +409 -0
  43. cinderx/compiler/strict/common.py +507 -0
  44. cinderx/compiler/strict/compiler.py +352 -0
  45. cinderx/compiler/strict/feature_extractor.py +130 -0
  46. cinderx/compiler/strict/flag_extractor.py +97 -0
  47. cinderx/compiler/strict/loader.py +827 -0
  48. cinderx/compiler/strict/preprocessor.py +11 -0
  49. cinderx/compiler/strict/rewriter/__init__.py +5 -0
  50. cinderx/compiler/strict/rewriter/remove_annotations.py +84 -0
  51. cinderx/compiler/strict/rewriter/rewriter.py +975 -0
  52. cinderx/compiler/strict/runtime.py +77 -0
  53. cinderx/compiler/symbols.py +1754 -0
  54. cinderx/compiler/unparse.py +414 -0
  55. cinderx/compiler/visitor.py +194 -0
  56. cinderx/jit.py +230 -0
  57. cinderx/opcode.py +202 -0
  58. cinderx/static.py +113 -0
  59. cinderx/strictmodule.py +6 -0
  60. cinderx/test_support.py +341 -0
  61. cinderx-2026.1.16.2.dist-info/METADATA +15 -0
  62. cinderx-2026.1.16.2.dist-info/RECORD +68 -0
  63. cinderx-2026.1.16.2.dist-info/WHEEL +6 -0
  64. cinderx-2026.1.16.2.dist-info/licenses/LICENSE +21 -0
  65. cinderx-2026.1.16.2.dist-info/top_level.txt +5 -0
  66. opcodes/__init__.py +0 -0
  67. opcodes/assign_opcode_numbers.py +272 -0
  68. opcodes/cinderx_opcodes.py +121 -0
@@ -0,0 +1,1287 @@
1
+ # Portions copyright (c) Meta Platforms, Inc. and affiliates.
2
+
3
+ # pyre-strict
4
+
5
+ from __future__ import annotations
6
+
7
+ import operator
8
+ from typing import cast
9
+
10
+ # pyre-fixme[21]: Could not find name `INTRINSIC_1` in `cinderx.compiler.opcodes`.
11
+ from .opcodes import find_op_idx, INTRINSIC_1
12
+ from .optimizer import PyLimits, safe_lshift, safe_mod, safe_multiply, safe_power
13
+
14
+ TYPE_CHECKING = False
15
+ if TYPE_CHECKING:
16
+ from typing import Callable
17
+
18
+ from .pyassem import Block, Instruction, PyFlowGraph
19
+
20
+ Handler = Callable[
21
+ [
22
+ "FlowGraphOptimizer",
23
+ int,
24
+ Instruction,
25
+ Instruction | None,
26
+ Instruction | None,
27
+ Block,
28
+ ],
29
+ int | None,
30
+ ]
31
+
32
+
33
# Comparison-operator codes (values mirror CPython's PyCmp_* constants).
PyCmp_LT = 0
PyCmp_LE = 1
PyCmp_EQ = 2
PyCmp_NE = 3
PyCmp_GT = 4
PyCmp_GE = 5
PyCmp_IN = 6
PyCmp_NOT_IN = 7
PyCmp_IS = 8
PyCmp_IS_NOT = 9
PyCmp_EXC_MATCH = 10

# Don't fold sequences that would use more than this much stack space.
STACK_USE_GUIDELINE = 30
# Smallest constant list/set worth folding into a pre-built constant.
MIN_CONST_SEQUENCE_SIZE = 3


# Constant-folding evaluators for unary opcodes.
UNARY_OPS: dict[str, object] = {
    "UNARY_INVERT": operator.invert,
    "UNARY_NEGATIVE": operator.neg,
    "UNARY_POSITIVE": operator.pos,
}

# Constant-folding evaluators for binary opcodes, keyed by NB_* index.
# The safe_* helpers bound the size of folded results via PyLimits.
BINARY_OPS: dict[int, Callable[[object, object], object]] = {
    find_op_idx("NB_POWER"): lambda lhs, rhs: safe_power(lhs, rhs, PyLimits),
    find_op_idx("NB_MULTIPLY"): lambda lhs, rhs: safe_multiply(lhs, rhs, PyLimits),
    find_op_idx("NB_TRUE_DIVIDE"): operator.truediv,
    find_op_idx("NB_FLOOR_DIVIDE"): operator.floordiv,
    find_op_idx("NB_REMAINDER"): lambda lhs, rhs: safe_mod(lhs, rhs, PyLimits),
    find_op_idx("NB_ADD"): operator.add,
    find_op_idx("NB_SUBTRACT"): operator.sub,
    find_op_idx("NB_SUBSCR"): operator.getitem,
    find_op_idx("NB_LSHIFT"): lambda lhs, rhs: safe_lshift(lhs, rhs, PyLimits),
    find_op_idx("NB_RSHIFT"): operator.rshift,
    find_op_idx("NB_AND"): operator.and_,
    find_op_idx("NB_XOR"): operator.xor,
    find_op_idx("NB_OR"): operator.or_,
}

# Opcodes whose order may be exchanged by apply_static_swaps.
SWAPPABLE: set[str] = {"STORE_FAST", "STORE_FAST_MAYBE_NULL", "POP_TOP"}
72
+
73
+
74
class FlowGraphOptimizer:
    """Version-agnostic peephole optimizer for a bytecode flow graph.

    Subclasses supply the Python-version-specific pieces: the unconditional
    jump opcode (``JUMP_ABS``), how an instruction is neutralized
    (``set_to_nop``), jump threading, and a ``handlers`` table that maps
    opcode names to per-instruction optimization callbacks.
    """

    # Subclasses override this with the version's unconditional jump opcode
    # (JUMP_ABSOLUTE on 3.10, JUMP on 3.12).
    JUMP_ABS: str = "<INVALID JUMP OPCODE>"  # Set to different opcodes in 3.10 and 3.12

    def __init__(self, graph: PyFlowGraph) -> None:
        self.graph = graph

    def optimize_basic_block(self, block: Block) -> None:
        """Run the peephole pass over one basic block (subclass hook)."""
        raise NotImplementedError()

    def set_to_nop(self, instr: Instruction) -> None:
        """Turn *instr* into a NOP (subclass hook).

        Bug fix: the original ``raise NotImplemented()`` raised
        ``TypeError: 'NotImplementedType' object is not callable`` because
        ``NotImplemented`` is a sentinel value, not an exception type.
        """
        raise NotImplementedError()

    def dispatch_instr(
        self,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Invoke the handler registered for *instr*'s opcode, if any.

        Returns the next instruction index to visit, or None to advance by
        one (handlers are stored as plain functions, so ``self`` is passed
        explicitly).
        """
        handler = self.handlers.get(instr.opname)
        if handler is not None:
            return handler(self, instr_index, instr, next_instr, target, block)

    def skip_nops(self, next_block: Block, lineno: int) -> bool:
        """Hook: may the NOP-cleaner look through *next_block*? Base: no."""
        return False

    def clean_basic_block(self, block: Block, prev_lineno: int) -> bool:
        """Remove all NOPs from a function when legal.

        A NOP may be dropped only when removing it cannot change line
        tracing events. Returns True if any instruction was removed.
        """
        new_instrs = []
        num_instrs = len(block.insts)
        for idx in range(num_instrs):
            instr = block.insts[idx]
            if instr.opname == "NOP":
                lineno = instr.lineno
                # Eliminate no-op if it doesn't have a line number
                if lineno < 0:
                    continue
                # or, if the previous instruction had the same line number.
                if prev_lineno == lineno:
                    continue
                # or, if the next instruction has same line number or no line number
                if idx < num_instrs - 1:
                    next_instr = block.insts[idx + 1]
                    next_lineno = next_instr.lineno
                    if next_lineno == lineno:
                        continue
                    elif next_lineno < 0:
                        # Transfer the location onto the next instruction so
                        # the line is still traced, then drop the NOP.
                        next_instr.loc = instr.loc
                        continue
                else:
                    next_block = block.next
                    while next_block and len(next_block.insts) == 0:
                        next_block = next_block.next
                    # or if last instruction in BB and next BB has same line number
                    if next_block:
                        if lineno == next_block.insts[0].lineno or self.skip_nops(
                            next_block, lineno
                        ):
                            continue
            new_instrs.append(instr)
            prev_lineno = instr.lineno
        cleaned = len(block.insts) != len(new_instrs)
        block.insts = new_instrs
        return cleaned

    def jump_thread(
        self, block: Block, instr: Instruction, target: Instruction, opname: str
    ) -> int:
        """Retarget a jump-to-jump (subclass hook); see subclass docstrings."""
        raise NotImplementedError()

    def get_const_loading_instrs(
        self, block: Block, start: int, size: int
    ) -> list[Instruction] | None:
        """Collect the instructions loading *size* constants (subclass hook)."""
        raise NotImplementedError()

    def opt_jump_if_false_or_pop(
        self,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Thread or simplify JUMP_IF_FALSE_OR_POP based on its target."""
        assert target is not None
        if target.opname == "POP_JUMP_IF_FALSE":
            return instr_index + self.jump_thread(
                block, instr, target, "POP_JUMP_IF_FALSE"
            )
        elif target.opname in (self.JUMP_ABS, "JUMP_FORWARD", "JUMP_IF_FALSE_OR_POP"):
            return instr_index + self.jump_thread(
                block, instr, target, "JUMP_IF_FALSE_OR_POP"
            )
        elif target.opname in ("JUMP_IF_TRUE_OR_POP", "POP_JUMP_IF_TRUE"):
            # The condition can never transfer control: fall through into
            # the block after the target instead.
            if instr.lineno == target.lineno:
                target_block = instr.target
                assert target_block and target_block != target_block.next
                instr.opname = "POP_JUMP_IF_FALSE"
                instr.target = target_block.next
                return instr_index
            return instr_index + 1

    def opt_jump_if_true_or_pop(
        self,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Mirror of opt_jump_if_false_or_pop for JUMP_IF_TRUE_OR_POP."""
        assert target is not None
        if target.opname == "POP_JUMP_IF_TRUE":
            return instr_index + self.jump_thread(
                block, instr, target, "POP_JUMP_IF_TRUE"
            )
        elif target.opname in (self.JUMP_ABS, "JUMP_FORWARD", "JUMP_IF_TRUE_OR_POP"):
            return instr_index + self.jump_thread(
                block, instr, target, "JUMP_IF_TRUE_OR_POP"
            )
        elif target.opname in ("JUMP_IF_FALSE_OR_POP", "POP_JUMP_IF_FALSE"):
            if instr.lineno == target.lineno:
                target_block = instr.target
                assert target_block and target_block != target_block.next
                instr.opname = "POP_JUMP_IF_TRUE"
                instr.target = target_block.next
                return instr_index
            return instr_index + 1

    def opt_pop_jump_if(
        self,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Thread POP_JUMP_IF_* through an unconditional jump target."""
        assert target is not None
        if target.opname in (self.JUMP_ABS, "JUMP_FORWARD", "JUMP"):
            return instr_index + self.jump_thread(block, instr, target, instr.opname)

    def opt_jump(
        self,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Thread an unconditional jump through another unconditional jump."""
        assert target is not None
        if target.opname in (self.JUMP_ABS, "JUMP_FORWARD"):
            return instr_index + self.jump_thread(block, instr, target, self.JUMP_ABS)

    def opt_for_iter(
        self,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Thread FOR_ITER whose loop-exit target is a JUMP_FORWARD."""
        assert target is not None
        if target.opname == "JUMP_FORWARD":
            return instr_index + self.jump_thread(block, instr, target, "FOR_ITER")

    def opt_rot_n(
        self,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Lower ROT_N to a specialized rotation, then try to cancel runs
        of rotations that compose to the identity."""
        if instr.ioparg < 2:
            # Rotating 0 or 1 items is a no-op.
            self.set_to_nop(instr)
            return
        elif instr.ioparg == 2:
            instr.opname = "ROT_TWO"
        elif instr.ioparg == 3:
            instr.opname = "ROT_THREE"
        elif instr.ioparg == 4:
            instr.opname = "ROT_FOUR"
        if instr_index >= instr.ioparg - 1:
            self.fold_rotations(
                block.insts[instr_index - instr.ioparg + 1 : instr_index + 1],
                instr.ioparg,
            )

    def fold_rotations(self, instrs: list[Instruction], n: int) -> None:
        """NOP out *instrs* if they are all n-rotations: n of them in a row
        restore the original stack order."""
        for instr in instrs:
            if instr.opname == "ROT_N":
                rot = instr.ioparg
            elif instr.opname == "ROT_FOUR":
                rot = 4
            elif instr.opname == "ROT_THREE":
                rot = 3
            elif instr.opname == "ROT_TWO":
                rot = 2
            else:
                return
            if rot != n:
                return
        for instr in instrs:
            self.set_to_nop(instr)

    def opt_load_const(
        self,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Fold LOAD_CONST followed by a conditional jump into either an
        unconditional jump or nothing, depending on the constant's truth."""
        # Remove LOAD_CONST const; conditional jump
        const = instr.oparg
        if next_instr is None:
            return

        if next_instr.opname in (
            "POP_JUMP_IF_FALSE",
            "POP_JUMP_IF_TRUE",
            "JUMP_IF_FALSE",
            "JUMP_IF_TRUE",
        ):
            is_true = bool(const)
            if (
                next_instr.opname == "POP_JUMP_IF_FALSE"
                or next_instr.opname == "POP_JUMP_IF_TRUE"
            ):
                # The POP variants consume the constant, so the load itself
                # is dead either way.
                self.set_to_nop(block.insts[instr_index])
            jump_if_true = (
                next_instr.opname == "POP_JUMP_IF_TRUE"
                or next_instr.opname == "JUMP_IF_TRUE"
            )
            if is_true == jump_if_true:
                # Always taken: degrade to an unconditional jump.
                next_instr.opname = self.JUMP_ABS
                block.has_fallthrough = False
            else:
                # Never taken: drop the jump entirely.
                next_instr.target = None
                self.set_to_nop(next_instr)
        elif next_instr.opname in ("JUMP_IF_FALSE_OR_POP", "JUMP_IF_TRUE_OR_POP"):
            is_true = bool(const)
            jump_if_true = next_instr.opname == "JUMP_IF_TRUE_OR_POP"
            if is_true == jump_if_true:
                # Taken: the constant stays on the stack, jump unconditionally.
                next_instr.opname = self.JUMP_ABS
                block.has_fallthrough = False
            else:
                # Not taken: both the load and the jump/pop are dead.
                self.set_to_nop(block.insts[instr_index])
                self.set_to_nop(next_instr)

    def fold_tuple_on_constants(
        self, instr_index: int, instr: Instruction, block: Block
    ) -> None:
        """Replace BUILD_TUPLE over ioparg LOAD_CONSTs with one LOAD_CONST
        of the pre-built tuple; bail if any operand is not a constant."""
        load_const_instrs = []
        for i in range(instr_index - instr.ioparg, instr_index):
            maybe_load_const = block.insts[i]
            if maybe_load_const.opname != "LOAD_CONST":
                return
            load_const_instrs.append(maybe_load_const)
        newconst = tuple(lc.oparg for lc in load_const_instrs)
        for lc in load_const_instrs:
            self.set_to_nop(lc)
        instr.opname = "LOAD_CONST"
        instr.oparg = newconst
        instr.ioparg = self.graph.convertArg("LOAD_CONST", newconst)

    def opt_return_value(
        self,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Everything after RETURN_VALUE in this block is dead; drop it."""
        block.insts = block.insts[: instr_index + 1]

    # Opcode-name -> optimization callback. Stored as plain functions;
    # dispatch_instr passes self explicitly.
    handlers: dict[str, Handler] = {
        "JUMP_IF_FALSE_OR_POP": opt_jump_if_false_or_pop,
        "JUMP_IF_TRUE_OR_POP": opt_jump_if_true_or_pop,
        "POP_JUMP_IF_FALSE": opt_pop_jump_if,
        "POP_JUMP_IF_TRUE": opt_pop_jump_if,
        "JUMP_FORWARD": opt_jump,
        "FOR_ITER": opt_for_iter,
        "ROT_N": opt_rot_n,
        "LOAD_CONST": opt_load_const,
        "RETURN_VALUE": opt_return_value,
    }
362
+
363
+
364
class FlowGraphOptimizer310(FlowGraphOptimizer):
    """Python 3.10-specific optimizations.

    (Also fixes the docstring typo "3.10-specifc".)
    """

    def opt_build_tuple(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Cancel BUILD_TUPLE/UNPACK_SEQUENCE pairs (an unpack of a fresh
        tuple is just a rotation), else try constant-tuple folding."""
        if (
            next_instr
            and next_instr.opname == "UNPACK_SEQUENCE"
            and instr.ioparg == next_instr.ioparg
        ):
            if instr.ioparg == 1:
                # Pack/unpack of one item is a no-op.
                self.set_to_nop(instr)
                self.set_to_nop(next_instr)
            elif instr.ioparg == 2:
                instr.opname = "ROT_TWO"
                self.set_to_nop(next_instr)
            elif instr.ioparg == 3:
                instr.opname = "ROT_THREE"
                next_instr.opname = "ROT_TWO"
            return
        if instr_index >= instr.ioparg:
            self.fold_tuple_on_constants(instr_index, instr, block)

    JUMP_ABS = "JUMP_ABSOLUTE"
    handlers: dict[str, Handler] = {
        **FlowGraphOptimizer.handlers,
        JUMP_ABS: FlowGraphOptimizer.opt_jump,
        "BUILD_TUPLE": opt_build_tuple,
    }

    def set_to_nop(self, instr: Instruction) -> None:
        # 3.10: a NOP is just an opname change; the location is kept.
        instr.opname = "NOP"

    def jump_thread(
        self, block: Block, instr: Instruction, target: Instruction, opname: str
    ) -> int:
        """Attempt to eliminate jumps to jumps by updating inst to jump to
        target->i_target using the provided opcode. Return 0 if successful, 1 if
        not; this makes it easier for our callers to revisit the same
        instruction again only if we changed it."""
        assert instr.is_jump(self.graph.opcode)
        assert target.is_jump(self.graph.opcode)
        if instr.lineno == target.lineno and instr.target != target.target:
            instr.target = target.target
            instr.opname = opname
            return 0
        return 1

    def optimize_basic_block(self, block: Block) -> None:
        """Drive the peephole pass over *block*, dispatching each
        instruction to its handler and honoring handler-returned indices."""
        instr_index = 0

        while instr_index < len(block.insts):
            instr = block.insts[instr_index]

            target_instr: Instruction | None = None
            if instr.is_jump(self.graph.opcode):
                target = instr.target
                assert target is not None
                # Skip over empty basic blocks.
                while len(target.insts) == 0:
                    instr.target = target.next
                    target = instr.target
                    assert target is not None
                target_instr = target.insts[0]

            next_instr = (
                block.insts[instr_index + 1]
                if instr_index + 1 < len(block.insts)
                else None
            )

            new_index = self.dispatch_instr(
                instr_index, instr, next_instr, target_instr, block
            )
            instr_index = instr_index + 1 if new_index is None else new_index
445
+
446
+
447
# Opcodes that push a constant onto the stack.
LOAD_CONST_INSTRS = ("LOAD_CONST", "LOAD_SMALL_INT")
448
+
449
+
450
class FlowGraphOptimizer312(FlowGraphOptimizer):
    """Python 3.12-specific optimizations.

    (Also fixes the docstring typo "3.12-specifc".)
    """

    JUMP_ABS = "JUMP"

    def set_to_nop(self, instr: Instruction) -> None:
        # 3.12: the Instruction knows how to neutralize itself.
        instr.set_to_nop()

    def optimize_basic_block(self, block: Block) -> None:
        """Drive the peephole pass over *block*.

        A COPY 1 of a LOAD_CONST is treated as if it were the LOAD_CONST
        itself, so constant-directed handlers still fire on the copy.
        """
        instr_index = 0
        opt_instr: Instruction | None = None
        # The handler needs to be called with the original instr and index
        # because the instr_index may be updated after the first call to
        # dispatch_instr.
        while instr_index < len(block.insts):
            instr = block.insts[instr_index]
            target_instr: Instruction | None = None

            is_copy_of_load_const = (
                opt_instr is not None
                and opt_instr.opname == "LOAD_CONST"
                and instr.opname == "COPY"
                and instr.ioparg == 1
            )

            if not is_copy_of_load_const:
                opt_instr = instr
                if instr.is_jump(self.graph.opcode):
                    target = instr.target
                    assert target is not None
                    assert target.insts, f"{instr} {target.label} {target.bid}"
                    # Skip over empty basic blocks.
                    while len(target.insts) == 0:
                        instr.target = target.next
                        target = instr.target
                        assert target is not None
                    target_instr = target.insts[0]

            next_instr = (
                block.insts[instr_index + 1]
                if instr_index + 1 < len(block.insts)
                else None
            )

            assert opt_instr is not None
            new_index = self.dispatch_instr(
                instr_index, opt_instr, next_instr, target_instr, block
            )

            instr_index = instr_index + 1 if new_index is None else new_index

    def try_opt_return_const(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> bool:
        """Fuse LOAD_CONST; RETURN_VALUE into RETURN_CONST. Returns True
        if the fusion was applied."""
        if next_instr and next_instr.opname == "RETURN_VALUE":
            next_instr.opname = "RETURN_CONST"
            next_instr.oparg = instr.oparg
            next_instr.ioparg = instr.ioparg
            block.insts[instr_index].set_to_nop()
            return True

        return False

    def opt_load_const_is(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Fold LOAD_CONST None; IS_OP; POP_JUMP_IF_* into the dedicated
        POP_JUMP_IF_(NOT_)NONE opcodes."""
        jmp_op = (
            block.insts[instr_index + 2] if instr_index + 2 < len(block.insts) else None
        )
        if (
            jmp_op is not None
            and jmp_op.opname in ("POP_JUMP_IF_FALSE", "POP_JUMP_IF_TRUE")
            and instr.oparg is None
        ):
            # IS_OP's oparg==1 means `is not`; xor with the jump sense to
            # pick the right specialized opcode.
            nextarg = next_instr.oparg == 1
            instr.set_to_nop()
            next_instr.set_to_nop()
            jmp_op.opname = (
                "POP_JUMP_IF_NOT_NONE"
                if nextarg ^ (jmp_op.opname == "POP_JUMP_IF_FALSE")
                else "POP_JUMP_IF_NONE"
            )
            return instr_index + 2

    def opt_load_const(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """3.12 LOAD_CONST folding: RETURN_CONST and None-test fusions,
        then the shared LOAD_CONST+jump folding from the base class."""
        assert isinstance(self, FlowGraphOptimizer312)
        if self.try_opt_return_const(instr_index, instr, next_instr, target, block):
            return
        if next_instr is not None and next_instr.opname == "IS_OP":
            return self.opt_load_const_is(instr_index, instr, next_instr, target, block)
        else:
            # The rest of the optimizations are common to 3.10 and 3.12
            return super().opt_load_const(instr_index, instr, next_instr, target, block)

    def opt_push_null(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Merge PUSH_NULL into a following LOAD_GLOBAL by setting the
        low oparg bit, when that bit is not already set."""
        if next_instr is None:
            return

        if next_instr.opname == "LOAD_GLOBAL" and (next_instr.ioparg & 1) == 0:
            instr.set_to_nop()
            next_instr.oparg = (next_instr.oparg, 1)
            next_instr.ioparg |= 1

    def opt_build_tuple(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Cancel BUILD_TUPLE/UNPACK_SEQUENCE pairs (3.12 uses SWAP instead
        of rotations), else try constant-tuple folding."""
        if (
            next_instr
            and next_instr.opname == "UNPACK_SEQUENCE"
            and instr.ioparg == next_instr.ioparg
        ):
            if instr.ioparg == 1:
                instr.set_to_nop()
                next_instr.set_to_nop()
                return
            elif instr.ioparg == 2 or instr.ioparg == 3:
                instr.set_to_nop()
                next_instr.opname = "SWAP"
                return
        if instr_index >= instr.ioparg:
            self.fold_tuple_on_constants(instr_index, instr, block)

    def opt_swap(
        self,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Drop trivial SWAP 1, then canonicalize SWAP/NOP runs and apply
        swaps statically where possible."""
        if instr.oparg == 1:
            instr.set_to_nop()
            return

        new_index = self.swaptimize(instr_index, block)
        self.apply_static_swaps(new_index or instr_index, block)
        return new_index

    def next_swappable_instruction(self, i: int, block: Block, lineno: int) -> int:
        """Index of the next instruction after *i* whose order may be
        exchanged, or -1 if a barrier is reached first.

        A non-negative *lineno* additionally forbids crossing onto a
        different line.
        """
        while i + 1 < len(block.insts):
            i += 1
            inst = block.insts[i]
            if lineno >= 0 and inst.lineno != lineno:
                # Optimizing across this instruction could cause user-visible
                # changes in the names bound between line tracing events!
                return -1
            elif inst.opname == "NOP":
                continue
            elif inst.opname in SWAPPABLE:
                return i
            # Any other opcode is an optimization barrier.
            return -1

        return -1

    # Attempt to apply SWAPs statically by swapping *instructions* rather than
    # stack items. For example, we can replace SWAP(2), POP_TOP, STORE_FAST(42)
    # with the more efficient NOP, STORE_FAST(42), POP_TOP.
    def apply_static_swaps(self, instr_index: int, block: Block) -> None:
        # SWAPs are to our left, and potential swaperands are to our right:
        for i in range(instr_index, -1, -1):
            swap = block.insts[i]
            if swap.opname != "SWAP":
                if swap.opname == "NOP" or swap.opname in SWAPPABLE:
                    # Nope, but we know how to handle these. Keep looking:
                    continue
                # We can't reason about what this instruction does. Bail:
                return

            j = self.next_swappable_instruction(i, block, -1)
            if j < 0:
                return

            k = j
            lineno = block.insts[j].lineno
            for _i in range(swap.ioparg - 1, 0, -1):
                k = self.next_swappable_instruction(k, block, lineno)
                if k < 0:
                    return

            # The reordering is not safe if the two instructions to be swapped
            # store to the same location, or if any intervening instruction stores
            # to the same location as either of them.
            store_j = block.insts[j].stores_to
            store_k = block.insts[k].stores_to
            if store_j is not None or store_k is not None:
                if store_j == store_k:
                    return
                for idx in range(j + 1, k):
                    store_idx = block.insts[idx].stores_to
                    if store_idx is not None and (
                        store_idx == store_j or store_idx == store_k
                    ):
                        return

            swap.set_to_nop()
            temp = block.insts[j]
            block.insts[j] = block.insts[k]
            block.insts[k] = temp

    def swaptimize(self, instr_index: int, block: Block) -> int | None:
        """Replace an arbitrary run of SWAPs and NOPs with an optimal one that has the
        same effect."""

        # Find the length of the current sequence of SWAPs and NOPs, and record the
        # maximum depth of the stack manipulations:
        instructions = block.insts
        depth = instructions[instr_index].ioparg
        more = False
        cnt = 0
        for cnt in range(instr_index + 1, len(instructions)):
            opname = instructions[cnt].opname
            if opname == "SWAP":
                depth = max(depth, instructions[cnt].ioparg)
                more = True
            elif opname != "NOP":
                break

        if not more:
            return None

        # Create an array with elements {0, 1, 2, ..., depth - 1}:
        stack = list(range(depth))
        # Simulate the combined effect of these instructions by "running" them on
        # our "stack":
        for i in range(instr_index, cnt):
            if block.insts[i].opname == "SWAP":
                oparg = instructions[i].ioparg
                top = stack[0]
                # SWAPs are 1-indexed:
                stack[0] = stack[oparg - 1]
                stack[oparg - 1] = top

        # Now we can begin! Our approach here is based on a solution to a closely
        # related problem (https://cs.stackexchange.com/a/13938). It's easiest to
        # think of this algorithm as determining the steps needed to efficiently
        # "un-shuffle" our stack. By performing the moves in *reverse* order,
        # though, we can efficiently *shuffle* it! For this reason, we will be
        # replacing instructions starting from the *end* of the run. Since the
        # solution is optimal, we don't need to worry about running out of space:
        current = cnt - instr_index - 1
        VISITED = -1
        for i in range(depth):
            if stack[i] == VISITED or stack[i] == i:
                continue

            # Okay, we've found an item that hasn't been visited. It forms a cycle
            # with other items; traversing the cycle and swapping each item with
            # the next will put them all in the correct place. The weird
            # loop-and-a-half is necessary to insert 0 into every cycle, since we
            # can only swap from that position:
            j = i
            while True:
                # Skip the actual swap if our item is zero, since swapping the top
                # item with itself is pointless:
                if j:
                    # SWAPs are 1-indexed:
                    instructions[current + instr_index].opname = "SWAP"
                    instructions[current + instr_index].ioparg = j + 1
                    current -= 1

                if stack[j] == VISITED:
                    # Completed the cycle:
                    assert j == i
                    break

                next_j = stack[j]
                stack[j] = VISITED
                j = next_j

        # Any leftover slots in the run become NOPs.
        while 0 <= current:
            instructions[current + instr_index].set_to_nop()
            current -= 1

        return cnt

    def jump_thread(
        self, block: Block, instr: Instruction, target: Instruction, opname: str
    ) -> int:
        """Attempt to eliminate jumps to jumps by updating inst to jump to
        target->i_target using the provided opcode. Return 0 if successful, 1 if
        not; this makes it easier for our callers to revisit the same
        instruction again only if we changed it."""
        assert instr.is_jump(self.graph.opcode)
        assert target.is_jump(self.graph.opcode)
        # Unlike 3.10, a target with no location (lineno == -1) may also be
        # threaded through.
        if (
            instr.lineno == target.lineno or target.lineno == -1
        ) and instr.target != target.target:
            instr.target = target.target
            instr.opname = opname
            return 0
        return 1

    handlers: dict[str, Handler] = {
        **FlowGraphOptimizer.handlers,
        JUMP_ABS: FlowGraphOptimizer.opt_jump,
        "LOAD_CONST": opt_load_const,
        "PUSH_NULL": opt_push_null,
        "BUILD_TUPLE": opt_build_tuple,
        "SWAP": cast("Handler", opt_swap),
        "POP_JUMP_IF_NONE": FlowGraphOptimizer.opt_pop_jump_if,
        "POP_JUMP_IF_NOT_NONE": FlowGraphOptimizer.opt_pop_jump_if,
    }
782
+
783
+
784
def is_small_int(const: object) -> bool:
    """Return True if *const* is an exact ``int`` (not a subclass such as
    ``bool``) in the cached small-int range [0, 256)."""
    if type(const) is not int:
        return False
    return 0 <= const < 256
786
+
787
+
788
+ class BaseFlowGraphOptimizer314(FlowGraphOptimizer312):
789
+ def skip_nops(self, next_block: Block, lineno: int) -> bool:
790
+ next_lineno = -1
791
+ for next_instr in next_block.insts:
792
+ # pyre-ignore[16]: no lineno
793
+ if next_instr.opname == "NOP" and next_instr.loc.lineno < 0:
794
+ # Skip over NOPs without a location, they will be removed
795
+ continue
796
+ # pyre-ignore[16]: no lineno
797
+ next_lineno = next_instr.loc.lineno
798
+ return lineno == next_lineno
799
+
800
+ def opt_jump_if(
801
+ self: FlowGraphOptimizer,
802
+ instr_index: int,
803
+ instr: Instruction,
804
+ next_instr: Instruction | None,
805
+ target: Instruction | None,
806
+ block: Block,
807
+ ) -> int | None:
808
+ assert target is not None
809
+ if target.opname in ("JUMP", instr.opname):
810
+ return instr_index + self.jump_thread(block, instr, target, instr.opname)
811
+ elif target.opname == (
812
+ "JUMP_IF_FALSE" if instr.opname == "JUMP_IF_TRUE" else "JUMP_IF_TRUE"
813
+ ):
814
+ # No need to check for loops here, a block's b_next
815
+ # cannot point to itself.
816
+ assert instr.target is not None
817
+ instr.target = instr.target.next
818
+ return instr_index - 1
819
+
820
+ def get_const_loading_instrs(
821
+ self, block: Block, start: int, size: int
822
+ ) -> list[Instruction] | None:
823
+ """Return a list of instructions that load the first `size` constants
824
+ starting at `start`. Returns None if we don't have size constants."""
825
+ const_loading_instrs = []
826
+ if not size:
827
+ return const_loading_instrs
828
+ i = start
829
+ while i >= 0:
830
+ instr = block.insts[i]
831
+ if instr.opname in LOAD_CONST_INSTRS:
832
+ const_loading_instrs.append(instr)
833
+ if len(const_loading_instrs) == size:
834
+ const_loading_instrs.reverse()
835
+ return const_loading_instrs
836
+ elif instr.opname != "NOP":
837
+ return
838
+ i -= 1
839
+
840
    def optimize_lists_and_sets(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Fold BUILD_LIST/BUILD_SET over constant loads into a single constant,
        or (when only used for iteration/containment) a cheaper container."""
        assert isinstance(self, FlowGraphOptimizer314)
        # The built container is only consumed for membership tests/iteration.
        contains_or_iter = next_instr is not None and (
            next_instr.opname == "GET_ITER" or next_instr.opname == "CONTAINS_OP"
        )
        seq_size = instr.oparg
        assert isinstance(seq_size, int)
        # Skip huge sequences (stack pressure) and tiny ones that aren't worth
        # folding unless they feed a contains/iter.
        if seq_size > STACK_USE_GUIDELINE or (
            seq_size < MIN_CONST_SEQUENCE_SIZE and not contains_or_iter
        ):
            return

        const_loading_instrs = self.get_const_loading_instrs(
            block, instr_index - 1, seq_size
        )
        if const_loading_instrs is None:
            # If we're doing contains/iterating over a sequence we
            # know nothing will need to mutate it and a tuple is a
            # suitable container.
            if contains_or_iter and instr.opname == "BUILD_LIST":
                instr.opname = "BUILD_TUPLE"
            return

        # All elements are constants: materialize the collection now.
        newconst = tuple(i.oparg for i in const_loading_instrs)
        if instr.opname == "BUILD_SET":
            newconst = frozenset(newconst)

        index = self.graph.convertArg("LOAD_CONST", newconst)
        for lc in const_loading_instrs:
            lc.set_to_nop_no_loc()

        if contains_or_iter:
            # The tuple/frozenset constant fully replaces the build.
            # NOTE(review): oparg is set to the const *index* here, while
            # make_load_const stores the const *value* in oparg — confirm no
            # later pass re-reads this oparg as a value.
            instr.opname = "LOAD_CONST"
            instr.ioparg = instr.oparg = index
        else:
            # Rewrite as: BUILD_LIST/SET 0; LOAD_CONST <tuple>; LIST_EXTEND/
            # SET_UPDATE 1 — reusing two of the now-NOP'd const-load slots.
            assert instr_index >= 2
            assert instr.opname == "BUILD_LIST" or instr.opname == "BUILD_SET"

            block.insts[instr_index - 2].loc = instr.loc
            block.insts[instr_index - 2].opname = instr.opname
            block.insts[instr_index - 2].oparg = block.insts[instr_index - 2].ioparg = 0

            block.insts[instr_index - 1].opname = "LOAD_CONST"
            block.insts[instr_index - 1].ioparg = block.insts[instr_index - 1].oparg = (
                index
            )

            instr.opname = (
                "LIST_EXTEND" if instr.opname == "BUILD_LIST" else "SET_UPDATE"
            )
            instr.oparg = instr.ioparg = 1
898
+
899
+ def fold_tuple_on_constants(
900
+ self, instr_index: int, instr: Instruction, block: Block
901
+ ) -> None:
902
+ load_const_instrs = self.get_const_loading_instrs(
903
+ block, instr_index - 1, instr.ioparg
904
+ )
905
+ if load_const_instrs is None:
906
+ return
907
+ newconst = tuple(lc.oparg for lc in load_const_instrs)
908
+ for lc in load_const_instrs:
909
+ lc.set_to_nop_no_loc()
910
+ instr.opname = "LOAD_CONST"
911
+ instr.oparg = newconst
912
+ instr.ioparg = self.graph.convertArg("LOAD_CONST", newconst)
913
+
914
+ def optimize_compare_op(
915
+ self: FlowGraphOptimizer,
916
+ instr_index: int,
917
+ instr: Instruction,
918
+ next_instr: Instruction | None,
919
+ target: Instruction | None,
920
+ block: Block,
921
+ ) -> int | None:
922
+ if next_instr and next_instr.opname == "TO_BOOL":
923
+ next_instr.opname = "COMPARE_OP"
924
+ next_instr.oparg = next_instr.ioparg = instr.ioparg | 16
925
+ instr.set_to_nop()
926
+
927
+ def optimize_load_global(
928
+ self: FlowGraphOptimizer,
929
+ instr_index: int,
930
+ instr: Instruction,
931
+ next_instr: Instruction | None,
932
+ target: Instruction | None,
933
+ block: Block,
934
+ ) -> int | None:
935
+ if next_instr is not None and next_instr.opname == "PUSH_NULL":
936
+ instr.oparg = (instr.oparg, 1)
937
+ instr.ioparg |= 1
938
+ next_instr.set_to_nop()
939
+
940
+ def opt_jump_no_interrupt(
941
+ self: FlowGraphOptimizer,
942
+ instr_index: int,
943
+ instr: Instruction,
944
+ next_instr: Instruction | None,
945
+ target: Instruction | None,
946
+ block: Block,
947
+ ) -> int | None:
948
+ assert target is not None
949
+ if target.opname == "JUMP":
950
+ return instr_index + self.jump_thread(block, instr, target, "JUMP")
951
+ elif target.opname == "JUMP_NO_INTERRUPT":
952
+ return instr_index + self.jump_thread(
953
+ block, instr, target, "JUMP_NO_INTERRUPT"
954
+ )
955
+
956
+ def jump_thread(
957
+ self, block: Block, instr: Instruction, target: Instruction, opname: str
958
+ ) -> int:
959
+ """Attempt to eliminate jumps to jumps by updating inst to jump to
960
+ target->i_target using the provided opcode. Return 0 if successful, 1 if
961
+ not; this makes it easier for our callers to revisit the same
962
+ instruction again only if we changed it."""
963
+ assert instr.is_jump(self.graph.opcode)
964
+ assert target.is_jump(self.graph.opcode)
965
+ if instr.target != target.target:
966
+ instr.set_to_nop()
967
+ assert block.insts[-1] == instr
968
+ block.append_instr(opname, 0, target=target.target, loc=target.loc)
969
+
970
+ return 1
971
+
972
+ def opt_store_fast(
973
+ self: FlowGraphOptimizer,
974
+ instr_index: int,
975
+ instr: Instruction,
976
+ next_instr: Instruction | None,
977
+ target: Instruction | None,
978
+ block: Block,
979
+ ) -> int | None:
980
+ if not next_instr:
981
+ return
982
+ # Remove the store fast instruction if it is storing the same local as the
983
+ # next instruction
984
+ if (
985
+ next_instr.opname == "STORE_FAST"
986
+ and next_instr.ioparg == instr.ioparg
987
+ # pyre-ignore[16]: `Instruction` has no attribute `loc`.
988
+ and next_instr.loc.lineno == instr.loc.lineno
989
+ ):
990
+ instr.opname = "POP_TOP"
991
+ instr.oparg = instr.ioparg = 0
992
+
993
+ def optimize_contains_is_op(
994
+ self: FlowGraphOptimizer,
995
+ instr_index: int,
996
+ instr: Instruction,
997
+ next_instr: Instruction | None,
998
+ target: Instruction | None,
999
+ block: Block,
1000
+ ) -> int | None:
1001
+ if next_instr is None:
1002
+ return
1003
+ if next_instr.opname == "TO_BOOL":
1004
+ next_instr.opname = instr.opname
1005
+ next_instr.oparg = instr.oparg
1006
+ next_instr.ioparg = instr.ioparg
1007
+ instr.set_to_nop()
1008
+ elif next_instr.opname == "UNARY_NOT":
1009
+ next_instr.opname = instr.opname
1010
+ next_instr.oparg = instr.oparg
1011
+ next_instr.ioparg = instr.ioparg ^ 1
1012
+ instr.set_to_nop()
1013
+
1014
+ def make_load_const(self, instr: Instruction, const: object) -> None:
1015
+ if is_small_int(const):
1016
+ assert isinstance(const, int)
1017
+ instr.opname = "LOAD_SMALL_INT"
1018
+ instr.ioparg = instr.oparg = const
1019
+ else:
1020
+ instr.opname = "LOAD_CONST"
1021
+ instr.oparg = const
1022
+ instr.ioparg = self.graph.convertArg("LOAD_CONST", const)
1023
+
1024
    def optimize_binary_op(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Constant-fold a BINARY_OP whose two operands are constant loads."""
        assert isinstance(self, FlowGraphOptimizer314)
        # Both operand pushes must be (possibly NOP-separated) constant loads.
        consts = self.get_const_loading_instrs(block, instr_index - 1, 2)
        if consts is None:
            return
        lhs = consts[0].oparg
        rhs = consts[1].oparg

        # BINARY_OP's ioparg selects the operator; unmapped ops are skipped.
        op = BINARY_OPS.get(instr.ioparg)
        if op is None:
            return

        try:
            res = op(lhs, rhs)
        except (ArithmeticError, TypeError, ValueError, IndexError):
            # Folding failed (e.g. 1/0, "a" - 1): leave it to raise at runtime.
            return

        # Drop the operand loads and replace the op with a load of the result.
        consts[0].set_to_nop_no_loc()
        consts[1].set_to_nop_no_loc()
        self.make_load_const(instr, res)
1051
+
1052
+ def optimize_one_unary(
1053
+ self,
1054
+ instr_index: int,
1055
+ instr: Instruction,
1056
+ block: Block,
1057
+ op: Callable[[object], object],
1058
+ ) -> object:
1059
+ consts = self.get_const_loading_instrs(block, instr_index - 1, 1)
1060
+ if consts is None:
1061
+ return
1062
+
1063
+ try:
1064
+ res = op(consts[0].oparg)
1065
+ except (ArithmeticError, TypeError, ValueError, IndexError):
1066
+ return
1067
+
1068
+ consts[0].set_to_nop_no_loc()
1069
+ self.make_load_const(instr, res)
1070
+
1071
    def optimize_unary_invert(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Constant-fold UNARY_INVERT (`~x`) when its operand is a constant.

        Always returns None; the fold happens in place via optimize_one_unary.
        """
        assert isinstance(self, FlowGraphOptimizer314)
        self.optimize_one_unary(instr_index, instr, block, operator.inv)
1081
+
1082
    def optimize_unary_negative(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Constant-fold UNARY_NEGATIVE (`-x`) when its operand is a constant.

        Always returns None; the fold happens in place via optimize_one_unary.
        """
        assert isinstance(self, FlowGraphOptimizer314)
        self.optimize_one_unary(instr_index, instr, block, operator.neg)
1092
+
1093
+ def optimize_unary_not(
1094
+ self: FlowGraphOptimizer,
1095
+ instr_index: int,
1096
+ instr: Instruction,
1097
+ next_instr: Instruction | None,
1098
+ target: Instruction | None,
1099
+ block: Block,
1100
+ ) -> int | None:
1101
+ if next_instr is not None and next_instr.opname == "TO_BOOL":
1102
+ instr.set_to_nop()
1103
+ next_instr.opname = "UNARY_NOT"
1104
+ return
1105
+ if next_instr is not None and next_instr.opname == "UNARY_NOT":
1106
+ instr.set_to_nop()
1107
+ next_instr.set_to_nop()
1108
+ return
1109
+
1110
+ assert isinstance(self, FlowGraphOptimizer314)
1111
+ self.optimize_one_unary(instr_index, instr, block, operator.not_)
1112
+
1113
    def fold_constrant_intrinsic_list_to_tuple(
        self, block: Block, instr_index: int
    ) -> None:
        """Fold a constant-only `BUILD_LIST 0; (LOAD_CONST; LIST_APPEND 1)*`
        sequence feeding the CALL_INTRINSIC_1 LIST_TO_TUPLE at `instr_index`
        into a single load of the finished tuple.

        Scans backwards validating the alternating append/const pattern; any
        unexpected instruction aborts the fold. (Method name typo "constrant"
        is long-standing; callers use it, so it is kept.)
        """
        consts_found = 0  # pattern instructions matched so far (bookkeeping only)
        expect_append = True  # pattern alternates LIST_APPEND / const load
        for i in range(instr_index - 1, -1, -1):
            instr = block.insts[i]
            opcode = instr.opname
            oparg = instr.oparg
            if opcode == "NOP":
                # NOPs left by earlier passes are transparent.
                continue

            if opcode == "BUILD_LIST" and oparg == 0:
                if not expect_append:
                    # Not a start sequence
                    return

                # Sequence start, we are done.
                consts = []
                # NOTE(review): this inner check repeats the enclosing
                # condition and is always true here.
                if opcode == "BUILD_LIST" and oparg == 0:
                    # Replay the range, collecting constant values and NOPing
                    # out every instruction (appends and the BUILD_LIST too).
                    for newpos in range(instr_index - 1, i - 1, -1):
                        instr = block.insts[newpos]
                        if instr.opname in LOAD_CONST_INSTRS:
                            const = instr.oparg
                            consts.append(const)
                        instr.set_to_nop_no_loc()

                    # Collected in reverse (back-to-front scan).
                    consts.reverse()
                    self.make_load_const(block.insts[instr_index], tuple(consts))
                return

            if expect_append:
                if opcode != "LIST_APPEND" or oparg != 1:
                    return
            elif opcode not in LOAD_CONST_INSTRS:
                return
            consts_found += 1
            expect_append = not expect_append
1151
+
1152
    def optimize_call_instrinsic_1(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Peephole CALL_INTRINSIC_1 based on which intrinsic it invokes."""
        assert isinstance(self, FlowGraphOptimizer314)
        # pyre-fixme[16]: Module `opcodes` has no attribute `INTRINSIC_1`.
        intrins = INTRINSIC_1[instr.ioparg]
        if intrins == "INTRINSIC_LIST_TO_TUPLE":
            if next_instr is not None and next_instr.opname == "GET_ITER":
                # Iteration works on the list just as well; drop the
                # list->tuple conversion entirely.
                instr.set_to_nop()
            else:
                # Try folding a constant-only list build into a tuple const.
                self.fold_constrant_intrinsic_list_to_tuple(block, instr_index)
        if intrins == "INTRINSIC_UNARY_POSITIVE":
            # `+x` over a constant operand folds at compile time.
            self.optimize_one_unary(instr_index, instr, block, operator.pos)
1170
+
1171
+ def optimize_swap(
1172
+ self: FlowGraphOptimizer,
1173
+ instr_index: int,
1174
+ instr: Instruction,
1175
+ next_instr: Instruction | None,
1176
+ target: Instruction | None,
1177
+ block: Block,
1178
+ ) -> int | None:
1179
+ if instr.oparg == 1:
1180
+ instr.set_to_nop()
1181
+
1182
    # Per-opcode peephole handlers for 3.14, layered on the 3.12 table.
    # Values are unbound functions called as
    # handler(self, instr_index, instr, next_instr, target, block).
    handlers: dict[str, Handler] = {
        **FlowGraphOptimizer312.handlers,
        "JUMP_IF_FALSE": opt_jump_if,
        "JUMP_IF_TRUE": opt_jump_if,
        "BUILD_LIST": optimize_lists_and_sets,
        "BUILD_SET": optimize_lists_and_sets,
        "COMPARE_OP": optimize_compare_op,
        "CONTAINS_OP": optimize_contains_is_op,
        "IS_OP": optimize_contains_is_op,
        "LOAD_GLOBAL": optimize_load_global,
        "JUMP_NO_INTERRUPT": opt_jump_no_interrupt,
        "STORE_FAST": opt_store_fast,
        "BINARY_OP": optimize_binary_op,
        "UNARY_INVERT": optimize_unary_invert,
        "UNARY_NEGATIVE": optimize_unary_negative,
        "UNARY_NOT": optimize_unary_not,
        "CALL_INTRINSIC_1": optimize_call_instrinsic_1,
        "SWAP": optimize_swap,
    }
    # Inherited 3.12 handlers that don't apply here: PUSH_NULL folding is done
    # from the LOAD_GLOBAL side above, and constant loads are handled by the
    # separate const-optimizer pass (see FlowGraphConstOptimizer314.handlers).
    del handlers["PUSH_NULL"]
    del handlers["LOAD_CONST"]
1203
+
1204
+
1205
class FlowGraphOptimizer314(BaseFlowGraphOptimizer314):
    """3.14 flow-graph optimizer: runs the shared per-opcode peephole pass,
    then a SWAP-focused cleanup over each basic block."""

    def optimize_basic_block(self, block: Block) -> None:
        super().optimize_basic_block(block)
        i = 0
        while i < len(block.insts):
            inst = block.insts[i]
            if inst.opname == "SWAP":
                new_i = self.swaptimize(i, block)
                # Bug fix: this used `new_i or i`, which wrongly ignored a
                # returned index of 0 (falsy). Check for None explicitly,
                # consistent with the check below.
                self.apply_static_swaps(new_i if new_i is not None else i, block)
                if new_i is not None:
                    i = new_i
            i += 1
1217
+
1218
+
1219
+ class FlowGraphConstOptimizer314(BaseFlowGraphOptimizer314):
1220
    def opt_load_const(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction | None,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Peephole optimizations keyed on a constant load (3.14 variant)."""
        assert isinstance(self, FlowGraphConstOptimizer314)
        # Canonicalize small-int constants to the dedicated LOAD_SMALL_INT op,
        # which carries the value directly in its oparg.
        if instr.opname == "LOAD_CONST" and is_small_int(instr.oparg):
            assert isinstance(instr.oparg, int)
            instr.ioparg = instr.oparg
            instr.opname = "LOAD_SMALL_INT"

        if next_instr is None:
            return

        if next_instr.opname == "TO_BOOL":
            # const; TO_BOOL -> load the constant's truthiness directly.
            val = bool(instr.oparg)
            index = self.graph.convertArg("LOAD_CONST", val)
            instr.set_to_nop_no_loc()
            next_instr.opname = "LOAD_CONST"
            next_instr.oparg = val
            next_instr.ioparg = index
        elif next_instr.opname == "IS_OP":
            # `x is [not] None` checks become specialized conditional jumps.
            return self.opt_load_const_is(instr_index, instr, next_instr, target, block)
        else:
            # The rest of the optimizations are common to 3.10 and 3.12
            return FlowGraphOptimizer.opt_load_const(
                self, instr_index, instr, next_instr, target, block
            )
1251
+
1252
    def opt_load_const_is(
        self: FlowGraphOptimizer,
        instr_index: int,
        instr: Instruction,
        next_instr: Instruction,
        target: Instruction | None,
        block: Block,
    ) -> int | None:
        """Rewrite `LOAD_CONST None; IS_OP; [TO_BOOL;] POP_JUMP_IF_{TRUE,FALSE}`
        into the specialized POP_JUMP_IF_[NOT_]NONE jump."""
        # The conditional jump normally sits two slots past the const load
        # (instr_index + 1 is the IS_OP in next_instr).
        jmp_op = (
            block.insts[instr_index + 2] if instr_index + 2 < len(block.insts) else None
        )
        if jmp_op is not None and jmp_op.opname == "TO_BOOL":
            # IS_OP already produces a bool, so the TO_BOOL is redundant and
            # the jump is one slot further on.
            jmp_op.set_to_nop()
            if instr_index + 3 >= len(block.insts):
                return
            jmp_op = block.insts[instr_index + 3]

        if (
            jmp_op is not None
            and jmp_op.opname in ("POP_JUMP_IF_FALSE", "POP_JUMP_IF_TRUE")
            # Only `is None` / `is not None` comparisons are specialized.
            and instr.oparg is None
        ):
            # IS_OP oparg 1 means `is not`.
            nextarg = next_instr.oparg == 1
            instr.set_to_nop()
            next_instr.set_to_nop()
            # XOR of the `is not` flag with the jump polarity selects whether
            # the jump fires on None or on not-None.
            jmp_op.opname = (
                "POP_JUMP_IF_NOT_NONE"
                if nextarg ^ (jmp_op.opname == "POP_JUMP_IF_FALSE")
                else "POP_JUMP_IF_NONE"
            )
            # Resume scanning after the (now NOP'd) IS_OP pair.
            return instr_index + 2
1283
+
1284
    # This pass only reacts to constant loads; both spellings of a constant
    # load share the same handler.
    handlers: dict[str, Handler] = {
        "LOAD_CONST": opt_load_const,
        "LOAD_SMALL_INT": opt_load_const,
    }