angr 9.2.171__cp310-abi3-manylinux_2_28_aarch64.whl → 9.2.173__cp310-abi3-manylinux_2_28_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of angr might be problematic.
Files changed (27)
  1. angr/__init__.py +1 -1
  2. angr/analyses/calling_convention/fact_collector.py +6 -1
  3. angr/analyses/cfg/cfg_fast.py +12 -10
  4. angr/analyses/cfg/indirect_jump_resolvers/jumptable.py +132 -9
  5. angr/analyses/decompiler/block_simplifier.py +23 -5
  6. angr/analyses/decompiler/clinic.py +2 -3
  7. angr/analyses/decompiler/decompiler.py +7 -1
  8. angr/analyses/decompiler/optimization_passes/__init__.py +2 -0
  9. angr/analyses/decompiler/optimization_passes/peephole_simplifier.py +75 -0
  10. angr/analyses/decompiler/peephole_optimizations/__init__.py +2 -0
  11. angr/analyses/decompiler/peephole_optimizations/cas_intrinsics.py +15 -5
  12. angr/analyses/decompiler/peephole_optimizations/inlined_wstrcpy.py +162 -84
  13. angr/analyses/decompiler/peephole_optimizations/inlined_wstrcpy_consolidation.py +113 -0
  14. angr/analyses/decompiler/presets/basic.py +2 -0
  15. angr/analyses/decompiler/presets/fast.py +2 -0
  16. angr/analyses/decompiler/presets/full.py +2 -0
  17. angr/analyses/decompiler/presets/preset.py +2 -2
  18. angr/analyses/decompiler/structured_codegen/c.py +57 -41
  19. angr/analyses/s_reaching_definitions/s_rda_view.py +3 -0
  20. angr/knowledge_plugins/cfg/indirect_jump.py +74 -8
  21. angr/rustylib.abi3.so +0 -0
  22. {angr-9.2.171.dist-info → angr-9.2.173.dist-info}/METADATA +5 -5
  23. {angr-9.2.171.dist-info → angr-9.2.173.dist-info}/RECORD +27 -25
  24. {angr-9.2.171.dist-info → angr-9.2.173.dist-info}/WHEEL +0 -0
  25. {angr-9.2.171.dist-info → angr-9.2.173.dist-info}/entry_points.txt +0 -0
  26. {angr-9.2.171.dist-info → angr-9.2.173.dist-info}/licenses/LICENSE +0 -0
  27. {angr-9.2.171.dist-info → angr-9.2.173.dist-info}/top_level.txt +0 -0
angr/__init__.py CHANGED
@@ -2,7 +2,7 @@
  # pylint: disable=wrong-import-position
  from __future__ import annotations

- __version__ = "9.2.171"
+ __version__ = "9.2.173"

  if bytes is str:
  raise Exception(
angr/analyses/calling_convention/fact_collector.py CHANGED
@@ -622,10 +622,15 @@ class FactCollector(Analysis):

  stack_offset_created = set()
  ret_addr_offset = 0 if not self.project.arch.call_pushes_ret else self.project.arch.bytes
+ # handle shadow stack args
+ cc_cls = default_cc(
+ self.project.arch.name, platform=self.project.simos.name if self.project.simos is not None else None
+ )
+ stackarg_sp_buff = cc_cls.STACKARG_SP_BUFF if cc_cls is not None else 0
  for state in end_states:
  for offset, size in state.stack_reads.items():
  offset = u2s(offset, self.project.arch.bits)
- if offset - ret_addr_offset > 0:
+ if offset - ret_addr_offset > stackarg_sp_buff:
  if offset in stack_offset_created or offset in callee_saved_reg_stack_offsets:
  continue
  stack_offset_created.add(offset)
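
The new guard accounts for the shadow space that some calling conventions reserve above the return address (SimCC.STACKARG_SP_BUFF), so reads into that region are no longer treated as stack arguments. A minimal sketch of the same lookup done standalone, assuming a loaded angr project bound to `proj` (the variable name is illustrative):

from angr.calling_conventions import default_cc

# Resolve the default calling-convention class for this arch/OS, as the hunk
# above does, then read its shadow-space size (e.g. 32 bytes on Win64).
cc_cls = default_cc(proj.arch.name, platform=proj.simos.name if proj.simos is not None else None)
stackarg_sp_buff = cc_cls.STACKARG_SP_BUFF if cc_cls is not None else 0
print(stackarg_sp_buff)
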
angr/analyses/cfg/cfg_fast.py CHANGED
@@ -3237,22 +3237,24 @@ class CFGFast(ForwardAnalysis[CFGNode, CFGNode, CFGJob, int], CFGBase): # pylin
  # Fill in the jump_tables dict
  self.jump_tables[jump.addr] = jump
  # occupy the jump table region
- if jump.jumptable_addr is not None:
- self._seg_list.occupy(jump.jumptable_addr, jump.jumptable_size, "data")
+ for jumptable_info in jump.jumptables:
+ if jumptable_info.addr is None:
+ continue
+ self._seg_list.occupy(jumptable_info.addr, jumptable_info.size, "data")
  if self._collect_data_ref:
- if jump.jumptable_addr in self._memory_data:
- memory_data = self._memory_data[jump.jumptable_addr]
- memory_data.size = jump.jumptable_size
- memory_data.max_size = jump.jumptable_size
+ if jumptable_info.addr in self._memory_data:
+ memory_data = self._memory_data[jumptable_info.addr]
+ memory_data.size = jumptable_info.size
+ memory_data.max_size = jumptable_info.size
  memory_data.sort = MemoryDataSort.Unknown
  else:
  memory_data = MemoryData(
- jump.jumptable_addr,
- jump.jumptable_size,
+ jumptable_info.addr,
+ jumptable_info.size,
  MemoryDataSort.Unknown,
- max_size=jump.jumptable_size,
+ max_size=jumptable_info.size,
  )
- self._memory_data[jump.jumptable_addr] = memory_data
+ self._memory_data[jumptable_info.addr] = memory_data

  jump.resolved_targets = targets
  all_targets = set(targets)
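
With IndirectJump now able to carry several tables (see angr/knowledge_plugins/cfg/indirect_jump.py in the file list above), CFGFast marks every table region as data instead of just one. A rough sketch of how the new shape could be consumed, assuming `cfg` is a finished CFGFast analysis and using the attribute names visible in this hunk:

for jump_addr, ij in cfg.jump_tables.items():
    for jt in ij.jumptables:  # primary table plus any secondary tables
        if jt.addr is None:
            continue
        print(f"indirect jump at {jump_addr:#x}: table at {jt.addr:#x}, {jt.size} bytes")
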
angr/analyses/cfg/indirect_jump_resolvers/jumptable.py CHANGED
@@ -780,7 +780,7 @@ class JumpTableResolver(IndirectJumpResolver):
  self._find_bss_region()

  def filter(self, cfg, addr, func_addr, block, jumpkind):
- if pcode is not None and isinstance(block.vex, pcode.lifter.IRSB):
+ if pcode is not None and isinstance(block.vex, pcode.lifter.IRSB): # type:ignore
  if once("pcode__indirect_jump_resolver"):
  l.warning("JumpTableResolver does not support P-Code IR yet; CFG may be incomplete.")
  return False
@@ -1049,6 +1049,7 @@ class JumpTableResolver(IndirectJumpResolver):

  # Get the jumping targets
  for r in simgr.found:
+ jt2, jt2_addr, jt2_entrysize, jt2_size = None, None, None, None
  if load_stmt is not None:
  ret = self._try_resolve_targets_load(
  r,
@@ -1064,7 +1065,18 @@
  if ret is None:
  # Try the next state
  continue
- jump_table, jumptable_addr, entry_size, jumptable_size, all_targets, sort = ret
+ (
+ jump_table,
+ jumptable_addr,
+ entry_size,
+ jumptable_size,
+ all_targets,
+ sort,
+ jt2,
+ jt2_addr,
+ jt2_entrysize,
+ jt2_size,
+ ) = ret
  if sort == "jumptable":
  ij_type = IndirectJumpType.Jumptable_AddressLoadedFromMemory
  elif sort == "vtable":
@@ -1116,15 +1128,14 @@
  ij.jumptable = True
  else:
  ij.jumptable = False
- ij.jumptable_addr = jumptable_addr
- ij.jumptable_size = jumptable_size
- ij.jumptable_entry_size = entry_size
+ ij.add_jumptable(jumptable_addr, jumptable_size, entry_size, jump_table, is_primary=True)
  ij.resolved_targets = set(jump_table)
- ij.jumptable_entries = jump_table
  ij.type = ij_type
  else:
  ij.jumptable = False
  ij.resolved_targets = set(jump_table)
+ if jt2 is not None and jt2_addr is not None and jt2_size is not None and jt2_entrysize is not None:
+ ij.add_jumptable(jt2_addr, jt2_size, jt2_entrysize, jt2, is_primary=False)

  return True, all_targets
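
The former jumptable_addr/jumptable_size/jumptable_entry_size/jumptable_entries fields are replaced by add_jumptable() calls: the main recovered jump table is recorded as primary, and a secondary table discovered in the address-transformation chain (see the hunks further down) is appended as non-primary. A hedged illustration of the call shape with made-up values, assuming `ij` is an existing IndirectJump record:

# Illustrative values only; real call sites pass data recovered by the resolver.
primary_entries = [0x401000, 0x401020, 0x401040, 0x401060]   # entries of the main table
remap_entries = [0, 1, 1, 2]                                  # hypothetical secondary table read via a later load
ij.add_jumptable(0x402000, len(primary_entries) * 4, 4, primary_entries, is_primary=True)
ij.add_jumptable(0x402100, len(remap_entries) * 1, 1, remap_entries, is_primary=False)
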
 
@@ -1560,7 +1571,9 @@ class JumpTableResolver(IndirectJumpResolver):
  stmt_whitelist = annotatedcfg.get_whitelisted_statements(block_addr)
  assert isinstance(stmt_whitelist, list)
  try:
- engine.process(state, block=block, whitelist=stmt_whitelist)
+ engine.process(
+ state, block=block, whitelist=set(stmt_whitelist) if stmt_whitelist is not None else None
+ )
  except (claripy.ClaripyError, SimError, AngrError):
  # anything can happen
  break
@@ -1789,7 +1802,18 @@ class JumpTableResolver(IndirectJumpResolver):
  )
  else:
  l.debug("Table at %#x has %d plausible targets", table_base_addr, num_targets)
- return jump_table, table_base_addr, load_size, num_targets * load_size, jump_table, sort
+ return (
+ jump_table,
+ table_base_addr,
+ load_size,
+ num_targets * load_size,
+ jump_table,
+ sort,
+ None,
+ None,
+ None,
+ None,
+ )

  # We resolved too many targets for this indirect jump. Something might have gone wrong.
  l.debug(
@@ -1848,6 +1872,7 @@ class JumpTableResolver(IndirectJumpResolver):
  # Adjust entries inside the jump table
  mask = (2**self.project.arch.bits) - 1
  transformation_list = list(reversed([v for v in transformations.values() if not v.first_load]))
+ jt_2nd_memloads: dict[int, int] = {}
  if transformation_list:

  def handle_signed_ext(a):
@@ -1872,6 +1897,10 @@ class JumpTableResolver(IndirectJumpResolver):
  return (a + con) & mask

  def handle_load(size, a):
+ if a not in jt_2nd_memloads:
+ jt_2nd_memloads[a] = size
+ else:
+ jt_2nd_memloads[a] = max(jt_2nd_memloads[a], size)
  return cfg._fast_memory_load_pointer(a, size=size)

  invert_conversion_ops = []
@@ -1936,6 +1965,31 @@ class JumpTableResolver(IndirectJumpResolver):
  l.debug("Could not recover jump table")
  return None

+ # there might be a secondary jumptable
+ jt_2nd = self._get_secondary_jumptable_from_transformations(transformation_list)
+ jt_2nd_entries: list[int] | None = None
+ jt_2nd_baseaddr: int | None = None
+ jt_2nd_entrysize: int | None = None
+ jt_2nd_size: int | None = None
+ if jt_2nd is not None and jt_2nd_memloads:
+ # determine the size of the secondary jump table
+ jt_2nd_baseaddr, jt_2nd_entrysize = jt_2nd
+ if jt_2nd_baseaddr in jt_2nd_memloads:
+ jt_2nd_size = max(jt_2nd_memloads) - jt_2nd_baseaddr + jt_2nd_entrysize
+ if jt_2nd_size % jt_2nd_entrysize == 0:
+ jt_2nd_entrycount = jt_2nd_size // jt_2nd_entrysize
+ if jt_2nd_entrycount <= len(all_targets):
+ # we found it!
+ jt_2nd_entries = []
+ for i in range(jt_2nd_entrycount):
+ target = cfg._fast_memory_load_pointer(
+ jt_2nd_baseaddr + i * jt_2nd_entrysize,
+ size=jt_2nd_entrysize,
+ )
+ if target is None:
+ break
+ jt_2nd_entries.append(target)
+
  # Finally... all targets are ready
  illegal_target_found = False
  for target in all_targets:
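
The size of the secondary table is recovered arithmetically from the addresses the transformation replay actually loaded from: the span from the table base to the highest observed load, plus one entry. A worked example with invented numbers:

# Hypothetical secondary table at 0x404000 with 1-byte entries, where the highest
# address recorded in jt_2nd_memloads during replay was 0x40401f:
jt_2nd_baseaddr, jt_2nd_entrysize = 0x404000, 1
highest_load = 0x40401F
jt_2nd_size = highest_load - jt_2nd_baseaddr + jt_2nd_entrysize   # 0x20 -> 32 bytes
jt_2nd_entrycount = jt_2nd_size // jt_2nd_entrysize               # 32 entries
print(hex(jt_2nd_size), jt_2nd_entrycount)
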
@@ -1953,7 +2007,18 @@
  if illegal_target_found:
  return None

- return jump_table, min_jumptable_addr, load_size, total_cases * load_size, all_targets, sort
+ return (
+ jump_table,
+ min_jumptable_addr,
+ load_size,
+ total_cases * load_size,
+ all_targets,
+ sort,
+ jt_2nd_entries,
+ jt_2nd_baseaddr,
+ jt_2nd_entrysize,
+ jt_2nd_size,
+ )

  def _try_resolve_targets_ite(
  self, r, addr, cfg, annotatedcfg, ite_stmt: pyvex.IRStmt.WrTmp
@@ -2279,6 +2344,64 @@ class JumpTableResolver(IndirectJumpResolver):
  return None
  return jump_addr

+ def _get_secondary_jumptable_from_transformations(
+ self, transformations: list[AddressTransformation]
+ ) -> tuple[int, int] | None:
+ """
+ Find the potential secondary "jump table" from a list of transformations.
+
+ :param transformations: A list of address transformations.
+ :return: A tuple of [jump_table_addr, entry_size] if a secondary jump table is found. None otherwise.
+ """
+
+ # find all add-(add-)load sequence
+
+ for i in range(len(transformations) - 1):
+ prev_tran = transformations[i - 1] if i - 1 >= 0 else None
+ tran = transformations[i]
+ if not (
+ tran.op == AddressTransformationTypes.Add
+ and (prev_tran is None or prev_tran.op != AddressTransformationTypes.Add)
+ ):
+ continue
+ next_tran = transformations[i + 1]
+ add_tran, load_tran = None, None
+ if next_tran.op == AddressTransformationTypes.Load:
+ add_tran = None
+ load_tran = next_tran
+ elif next_tran.op == AddressTransformationTypes.Add:
+ next2_tran = transformations[i + 2] if i + 2 < len(transformations) else None
+ if next2_tran is not None and next2_tran.op == AddressTransformationTypes.Load:
+ add_tran = next_tran
+ load_tran = next2_tran
+
+ if load_tran is None:
+ continue
+ # we have found an add-(add-)load sequence
+ jumptable_base_addr = None
+ if isinstance(tran.operands[0], AddressOperand) and isinstance(tran.operands[1], int):
+ jumptable_base_addr = tran.operands[1]
+ elif isinstance(tran.operands[1], AddressOperand) and isinstance(tran.operands[0], int):
+ jumptable_base_addr = tran.operands[0]
+ else:
+ # unsupported first add
+ continue
+
+ if add_tran is not None:
+ mask = (1 << self.project.arch.bits) - 1
+ if isinstance(add_tran.operands[0], AddressOperand) and isinstance(add_tran.operands[1], int):
+ jumptable_base_addr = (jumptable_base_addr + add_tran.operands[1]) & mask
+ elif isinstance(add_tran.operands[1], AddressOperand) and isinstance(add_tran.operands[0], int):
+ jumptable_base_addr = (jumptable_base_addr + add_tran.operands[0]) & mask
+ else:
+ # unsupported second add
+ continue
+
+ load_size = load_tran.operands[1]
+ # we have a potential secondary jump table!
+ return jumptable_base_addr, load_size
+ return None
+

  def _sp_moved_up(self, block) -> bool:
  """
angr/analyses/decompiler/block_simplifier.py CHANGED
@@ -303,6 +303,23 @@ class BlockSimplifier(Analysis):
  return block.copy(statements=new_statements)

  def _eliminate_dead_assignments(self, block):
+
+ def _statement_has_calls(stmt: Statement) -> bool:
+ """
+ Check if a statement has any Call expressions.
+ """
+ walker = HasCallExprWalker()
+ walker.walk_statement(stmt)
+ return walker.has_call_expr
+
+ def _expression_has_calls(expr: Expression) -> bool:
+ """
+ Check if an expression has any Call expressions.
+ """
+ walker = HasCallExprWalker()
+ walker.walk_expression(expr)
+ return walker.has_call_expr
+
  new_statements = []
  if not block.statements:
  return block
@@ -325,8 +342,11 @@
  # micro optimization: if all statements that use a tmp are going to be removed, we remove this tmp as well
  for tmp, used_locs in rd.all_tmp_uses[block_loc].items():
  used_at = {stmt_idx for _, stmt_idx in used_locs}
- if used_at.issubset(dead_defs_stmt_idx):
- continue
+ if used_at.issubset(dead_defs_stmt_idx): # noqa:SIM102
+ # cannot remove this tmp if any use sites involve call expressions; this is basically a duplicate of
+ # the logic in the larger loop below
+ if all(not _statement_has_calls(block.statements[i]) for i in used_at):
+ continue
  used_tmps.add(tmp.tmp_idx)

  # Remove dead assignments
@@ -337,9 +357,7 @@
  # is it assigning to an unused tmp or a dead virgin?

  # does .src involve any Call expressions? if so, we cannot remove it
- walker = HasCallExprWalker()
- walker.walk_expression(stmt.src)
- if not walker.has_call_expr:
+ if not _expression_has_calls(stmt.src):
  continue

  if type(stmt.dst) is Tmp and isinstance(stmt.src, Call):
angr/analyses/decompiler/clinic.py CHANGED
@@ -2158,10 +2158,9 @@ class Clinic(Analysis):
  # custom string?
  if hasattr(expr, "custom_string") and expr.custom_string is True:
  s = self.kb.custom_strings[expr.value]
+ ty = expr.type if hasattr(expr, "type") else SimTypePointer(SimTypeChar()).with_arch(self.project.arch)
  expr.tags["reference_values"] = {
- SimTypePointer(SimTypeChar().with_arch(self.project.arch)).with_arch(self.project.arch): s.decode(
- "latin-1"
- ),
+ ty: s,
  }
  else:
  # global variable?
angr/analyses/decompiler/decompiler.py CHANGED
@@ -562,7 +562,13 @@ class Decompiler(Analysis):
  continue

  pass_ = timethis(pass_)
- a = pass_(self.func, seq=seq_node, scratch=self._optimization_scratch, **kwargs)
+ a = pass_(
+ self.func,
+ seq=seq_node,
+ scratch=self._optimization_scratch,
+ peephole_optimizations=self._peephole_optimizations,
+ **kwargs,
+ )
  if a.out_seq:
  seq_node = a.out_seq

angr/analyses/decompiler/optimization_passes/__init__.py CHANGED
@@ -35,6 +35,7 @@ from .switch_reused_entry_rewriter import SwitchReusedEntryRewriter
  from .condition_constprop import ConditionConstantPropagation
  from .determine_load_sizes import DetermineLoadSizes
  from .eager_std_string_concatenation import EagerStdStringConcatenationPass
+ from .peephole_simplifier import PostStructuringPeepholeOptimizationPass

  if TYPE_CHECKING:
  from angr.analyses.decompiler.presets import DecompilationPreset
@@ -72,6 +73,7 @@ ALL_OPTIMIZATION_PASSES = [
  ConditionConstantPropagation,
  DetermineLoadSizes,
  EagerStdStringConcatenationPass,
+ PostStructuringPeepholeOptimizationPass,
  ]

  # these passes may duplicate code to remove gotos or improve the structure of the graph
angr/analyses/decompiler/optimization_passes/peephole_simplifier.py ADDED
@@ -0,0 +1,75 @@
+ from __future__ import annotations
+
+ from angr import ailment
+ from angr.analyses.decompiler.utils import (
+     peephole_optimize_expr,
+ )
+ from angr.analyses.decompiler.sequence_walker import SequenceWalker
+ from angr.analyses.decompiler.peephole_optimizations import (
+     PeepholeOptimizationExprBase,
+     EXPR_OPTS,
+ )
+ from .optimization_pass import OptimizationPassStage, SequenceOptimizationPass
+
+
+ class ExpressionSequenceWalker(SequenceWalker):
+     """
+     Walks sequences with generic expression handling.
+     """
+
+     def _handle(self, node, **kwargs):
+         if isinstance(node, ailment.Expr.Expression):
+             handler = self._handlers.get(ailment.Expr.Expression, None)
+             if handler:
+                 return handler(node, **kwargs)
+         return super()._handle(node, **kwargs)
+
+
+ class PostStructuringPeepholeOptimizationPass(SequenceOptimizationPass):
+     """
+     Perform a post-structuring peephole optimization pass to simplify node statements and expressions.
+     """
+
+     ARCHES = None
+     PLATFORMS = None
+     STAGE = OptimizationPassStage.AFTER_STRUCTURING
+     NAME = "Post-Structuring Peephole Optimization"
+     DESCRIPTION = (__doc__ or "").strip()
+
+     def __init__(self, func, peephole_optimizations=None, **kwargs):
+         super().__init__(func, **kwargs)
+         self._peephole_optimizations = peephole_optimizations
+         self._expr_peephole_opts = [
+             cls(self.project, self.kb, self._func.addr)
+             for cls in (self._peephole_optimizations or EXPR_OPTS)
+             if issubclass(cls, PeepholeOptimizationExprBase)
+         ]
+         self.analyze()
+
+     def _check(self):
+         return True, None
+
+     def _analyze(self, cache=None):
+         walker = ExpressionSequenceWalker(
+             handlers={ailment.Expr.Expression: self._optimize_expr, ailment.Block: self._optimize_block}
+         )
+         walker.walk(self.seq)
+         self.out_seq = self.seq
+
+     def _optimize_expr(self, expr, **_):
+         new_expr = peephole_optimize_expr(expr, self._expr_peephole_opts)
+         return new_expr if expr != new_expr else None
+
+     def _optimize_block(self, block, **_):
+         old_block, new_block = None, block
+         while old_block != new_block:
+             old_block = new_block
+             # Note: AILBlockSimplifier updates expressions in place
+             simp = self.project.analyses.AILBlockSimplifier(
+                 new_block,
+                 func_addr=self._func.addr,
+                 peephole_optimizations=self._peephole_optimizations,
+             )
+             assert simp.result_block is not None
+             new_block = simp.result_block
+         return new_block if block != new_block else None
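
The pass runs after structuring and re-applies expression-level peephole optimizations to the structured output. A rough usage sketch, assuming an existing project `proj`, CFG `cfg`, and function `func`; passing optimization_passes explicitly like this bypasses the preset list, so it is shown for illustration only:

from angr.analyses.decompiler.optimization_passes import PostStructuringPeepholeOptimizationPass

dec = proj.analyses.Decompiler(
    func,
    cfg=cfg.model,
    optimization_passes=[PostStructuringPeepholeOptimizationPass],
)
print(dec.codegen.text)
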
angr/analyses/decompiler/peephole_optimizations/__init__.py CHANGED
@@ -47,6 +47,7 @@ from .inlined_memcpy import InlinedMemcpy
  from .inlined_strcpy import InlinedStrcpy
  from .inlined_strcpy_consolidation import InlinedStrcpyConsolidation
  from .inlined_wstrcpy import InlinedWstrcpy
+ from .inlined_wstrcpy_consolidation import InlinedWstrcpyConsolidation
  from .cmpord_rewriter import CmpORDRewriter
  from .coalesce_adjacent_shrs import CoalesceAdjacentShiftRights
  from .a_mul_const_sub_a import AMulConstSubA
@@ -104,6 +105,7 @@ ALL_PEEPHOLE_OPTS: list[type[PeepholeOptimizationExprBase]] = [
  InlinedStrcpy,
  InlinedStrcpyConsolidation,
  InlinedWstrcpy,
+ InlinedWstrcpyConsolidation,
  CmpORDRewriter,
  CoalesceAdjacentShiftRights,
  ShlToMul,
angr/analyses/decompiler/peephole_optimizations/cas_intrinsics.py CHANGED
@@ -1,7 +1,7 @@
  # pylint:disable=arguments-differ,too-many-boolean-expressions
  from __future__ import annotations

- from angr.ailment.expression import BinaryOp, Load
+ from angr.ailment.expression import BinaryOp, Load, Expression, Tmp
  from angr.ailment.statement import CAS, ConditionalJump, Statement, Assignment, Call

  from .base import PeepholeOptimizationMultiStmtBase
@@ -60,11 +60,13 @@ class CASIntrinsics(PeepholeOptimizationMultiStmtBase):
  and next_stmt.ins_addr == cas_stmt.ins_addr
  ):
  addr = cas_stmt.addr
+ expd_lo = self._resolve_tmp_expr(cas_stmt.expd_lo, block)
+ next_stmt_cond_op1 = self._resolve_tmp_expr(next_stmt.condition.operands[1], block)
  if (
- isinstance(cas_stmt.expd_lo, Load)
- and cas_stmt.expd_lo.addr.likes(addr)
- and isinstance(next_stmt.condition.operands[1], Load)
- and next_stmt.condition.operands[1].addr.likes(addr)
+ isinstance(expd_lo, Load)
+ and expd_lo.addr.likes(addr)
+ and isinstance(next_stmt_cond_op1, Load)
+ and next_stmt_cond_op1.addr.likes(addr)
  and cas_stmt.old_lo.likes(next_stmt.condition.operands[0])
  and cas_stmt.old_hi is None
  ):
@@ -113,3 +115,11 @@ class CASIntrinsics(PeepholeOptimizationMultiStmtBase):
  os = "Linux"
  return _INTRINSICS_NAMES[mnemonic][os]
  return mnemonic
+
+ @staticmethod
+ def _resolve_tmp_expr(expr: Expression, block) -> Expression:
+ if isinstance(expr, Tmp):
+ for stmt in block.statements:
+ if isinstance(stmt, Assignment) and stmt.dst.likes(expr):
+ return stmt.src
+ return expr