certora-cli-beta-mirror 7.29.2__py3-none-any.whl → 7.30.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- certora_cli/CertoraProver/certoraBuild.py +176 -62
- certora_cli/CertoraProver/certoraContextAttributes.py +33 -0
- certora_cli/CertoraProver/erc7201.py +45 -0
- certora_cli/CertoraProver/storageExtension.py +386 -0
- certora_cli/EquivalenceCheck/Eq_default.conf +0 -1
- certora_cli/EquivalenceCheck/Eq_sanity.conf +0 -1
- certora_cli/Mutate/mutateApp.py +10 -3
- certora_cli/Shared/certoraUtils.py +1 -0
- certora_cli/Shared/proverCommon.py +300 -0
- certora_cli/Shared/rustProverCommon.py +62 -0
- certora_cli/certoraEVMProver.py +2 -1
- certora_cli/certoraRanger.py +4 -36
- certora_cli/certoraRun.py +62 -166
- certora_cli/certoraSolanaProver.py +39 -134
- certora_cli/certoraSorobanProver.py +31 -224
- {certora_cli_beta_mirror-7.29.2.dist-info → certora_cli_beta_mirror-7.30.1.dist-info}/METADATA +2 -2
- {certora_cli_beta_mirror-7.29.2.dist-info → certora_cli_beta_mirror-7.30.1.dist-info}/RECORD +23 -19
- certora_jars/CERTORA-CLI-VERSION-METADATA.json +1 -1
- certora_jars/Typechecker.jar +0 -0
- {certora_cli_beta_mirror-7.29.2.dist-info → certora_cli_beta_mirror-7.30.1.dist-info}/LICENSE +0 -0
- {certora_cli_beta_mirror-7.29.2.dist-info → certora_cli_beta_mirror-7.30.1.dist-info}/WHEEL +0 -0
- {certora_cli_beta_mirror-7.29.2.dist-info → certora_cli_beta_mirror-7.30.1.dist-info}/entry_points.txt +0 -0
- {certora_cli_beta_mirror-7.29.2.dist-info → certora_cli_beta_mirror-7.30.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,386 @@
|
|
|
1
|
+
# The Certora Prover
|
|
2
|
+
# Copyright (C) 2025 Certora Ltd.
|
|
3
|
+
#
|
|
4
|
+
# This program is free software: you can redistribute it and/or modify
|
|
5
|
+
# it under the terms of the GNU General Public License as published by
|
|
6
|
+
# the Free Software Foundation, version 3 of the License.
|
|
7
|
+
#
|
|
8
|
+
# This program is distributed in the hope that it will be useful,
|
|
9
|
+
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10
|
+
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11
|
+
# GNU General Public License for more details.
|
|
12
|
+
#
|
|
13
|
+
# You should have received a copy of the GNU General Public License
|
|
14
|
+
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
15
|
+
|
|
16
|
+
import re
|
|
17
|
+
import time
|
|
18
|
+
import hashlib
|
|
19
|
+
import sys
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
import logging
|
|
22
|
+
from typing import Any, Dict, List, Optional, Set, Tuple
|
|
23
|
+
try:
|
|
24
|
+
from typing import TypeAlias
|
|
25
|
+
except ImportError:
|
|
26
|
+
from typing_extensions import TypeAlias
|
|
27
|
+
|
|
28
|
+
scripts_dir_path = Path(__file__).parent.parent.resolve() # containing directory
|
|
29
|
+
sys.path.insert(0, str(scripts_dir_path))
|
|
30
|
+
|
|
31
|
+
from CertoraProver.certoraBuildDataClasses import ContractInSDC
|
|
32
|
+
from CertoraProver import erc7201
|
|
33
|
+
from Shared import certoraUtils as Util
|
|
34
|
+
from CertoraProver.certoraBuildDataClasses import SDC
|
|
35
|
+
from CertoraProver.Compiler.CompilerCollectorFactory import get_relevant_compiler
|
|
36
|
+
from CertoraProver.certoraContextClass import CertoraContext
|
|
37
|
+
|
|
38
|
+
NameSpacedStorage: TypeAlias = Tuple[str, str]
|
|
39
|
+
NewStorageFields = List[Dict[str, Any]]
|
|
40
|
+
NewStorageTypes = Dict[str, Any]
|
|
41
|
+
NewStorageInfo = Tuple[NewStorageFields, NewStorageTypes]
|
|
42
|
+
|
|
43
|
+
storage_extension_logger = logging.getLogger("storage_extension")
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def erc7201_of_node(n: Dict[str, Any]) -> Optional[NameSpacedStorage]:
    """
    If n is a StructDefinition node, try and extract the @custom:storage-location
    structured documentation, if it exists.

    Args:
        n: A solc AST node, represented as a dict.

    Returns:
        (type, namespace) where 'type' is the canonical name of the annotated
        struct and 'namespace' is the erc7201 namespace string, or None when n
        is not a StructDefinition carrying the annotation.
    """
    if n.get("nodeType") != "StructDefinition":
        return None
    typeName = n.get("canonicalName")
    doc = n.get("documentation")
    if doc is None or doc.get("nodeType") != "StructuredDocumentation" or typeName is None:
        return None
    storage_location_regex = r'@custom:storage-location erc7201:([a-zA-Z.0-9]+)'
    # A StructuredDocumentation node may still lack a "text" entry; default to ""
    # so re.search does not raise TypeError on a None argument.
    match = re.search(storage_location_regex, doc.get("text") or "")
    if match is None:
        return None
    ns = match.group(1)
    return (typeName, ns)
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def generate_harness_name(original_file: str) -> str:
    """
    Generate a unique name for the harness contract based on the original file name.

    The name is the file's stem plus an 8-hex-digit salt hashed from the path and
    the current time, truncated so the whole identifier stays within 31 characters
    and sanitized into a valid identifier.

    Args:
        original_file (str): The path to the original file used to generate the harness name.

    Returns:
        str: A unique name for the harness contract.
    """
    # Salt from path + current time keeps repeated runs unique.
    salt = hashlib.sha1(f"{original_file}{time.time()}".encode()).hexdigest()[:8]
    tail = f"_{salt}_Harness"
    # Reserve room for the suffix so the final identifier never exceeds 31 chars.
    budget = 31 - len(tail)
    base = Path(original_file).stem[:budget]
    # Replace anything outside [A-Za-z0-9_] so the name is a valid identifier.
    candidate = re.sub(r'[^A-Za-z0-9_]', '_', base + tail)
    # Identifiers must start with a letter; substitute 'H' for a bad first char.
    return candidate if candidate[0].isalpha() else f"H{candidate[1:]}"
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def get_next_file_index(self_file_to_sdc_name: Dict[Path, str], max_index: int = 1000) -> int:
    """
    Gets the next available file index for temporary files to avoid naming conflicts.

    SDC names are assumed to follow the format "some_prefix_NUMBER". The numeric
    tail of every registered name is collected and the maximum plus one is
    returned. If the mapping is empty, no numeric tails exist, or anything
    unexpected happens, the default ``max_index`` is returned instead.

    Args:
        self_file_to_sdc_name (Dict[Path, str]): A dictionary mapping file paths to their SDC names.
        max_index (int): The maximum index to return if no valid indices are found. Default is 1000.

    Returns:
        int: The next available file index, or ``max_index`` if none could be determined.
    """
    try:
        if not self_file_to_sdc_name:
            # Nothing registered yet: start from the default index.
            return max_index

        found: List[int] = []
        for name in self_file_to_sdc_name.values():
            pieces = name.split("_")
            # A usable name has at least one underscore and a numeric tail.
            if len(pieces) < 2:
                continue
            try:
                found.append(int(pieces[-1]))
            except ValueError:
                # Non-numeric tail: ignore this entry.
                continue

        if not found:
            storage_extension_logger.debug(f"No valid indices found in file_to_sdc_name, using default value of {max_index}")
            return max_index

        return max(found) + 1
    except Exception:
        # Defensive fallback: index bookkeeping must never break the build.
        storage_extension_logger.debug(f"Error determining next file index, using default value of {max_index}")
        return max_index
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def write_harness_contract(tmp_file: Any,
                           harness_name: str,
                           ns_storage: Set[NameSpacedStorage]) -> Dict[str, str]:
    """
    Write the harness contract with dummy fields to the temporary file.

    Args:
        tmp_file: The temporary file to write to
        harness_name: Name of the harness contract
        ns_storage: Set of namespace storage declarations

    Returns:
        Dict[str, str]: Mapping from variable names to their slots
    """
    var_to_slot: Dict[str, str] = {}

    tmp_file.write(f"contract {harness_name} {{\n")
    for type_name, namespace in ns_storage:
        # Prefix with 'ext_' and replace dots so the identifier is valid in
        # Solidity (no leading digits, no '.' characters).
        field = f"ext_{namespace.replace('.', '_')}"
        # ERC-7201 formula yields the canonical slot for this namespace;
        # UTF-8 is the standard encoding for Ethereum and Solidity.
        var_to_slot[field] = str(erc7201.erc7201(namespace.encode('utf-8')))
        tmp_file.write(f"\t{type_name} {field};\n")
    tmp_file.write("}\n")

    return var_to_slot
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def extract_harness_contract_layout(sdcs: List[SDC], harness_name: str) -> Dict[str, Any]:
    """
    Extract the storage layout of the harness contract.

    Args:
        sdcs: List of SDCs containing the compiled contracts
        harness_name: Name of the harness contract

    Returns:
        Dict[str, Any]: The storage layout of the harness contract

    Raises:
        RuntimeError: If the harness contract cannot be found in any SDC, or if
            its storage layout is missing the 'storage' key.
    """
    # Scan every SDC until one of them knows the harness contract.
    harness = None
    for sdc in sdcs:
        candidate = sdc.find_contract(harness_name)
        if candidate:
            harness = candidate
            break

    if not harness:
        raise RuntimeError(f"Could not find harness contract {harness_name} in compiled output")

    layout = harness.storage_layout
    # A usable layout must at least carry the 'storage' section.
    if not layout or 'storage' not in layout:
        raise RuntimeError(f"Invalid storage layout for harness contract {harness_name}")
    return layout
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
def remapped_fields_from_layout(layout: Dict[str, Any], var_to_slot: Dict[str, str]) -> NewStorageFields:
    """
    Remap the fields in the storage layout according to the variable to slot mapping.

    Args:
        layout: The storage layout of the harness contract
        var_to_slot: Mapping from variable names to their slots

    Returns:
        List[Dict[str, Any]]: A list of remapped fields with updated slot
        information, sorted by (numeric) slot. Fields whose label has no entry
        in var_to_slot are dropped with a warning.
    """
    result = []
    for entry in layout['storage']:
        # Clone so the caller's layout is never mutated in place.
        item = dict(entry)
        label = item["label"]
        if label not in var_to_slot:
            storage_extension_logger.warning(f"Skipping adding variable {label} not found in variable to slot mapping")
            continue
        item["slot"] = var_to_slot[label]
        result.append(item)

    # Slots are decimal strings; compare numerically, not lexicographically.
    result.sort(key=lambda item: int(item["slot"]))
    return result
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
def get_namespace_storage_from_ast(def_node: Dict[str, Any]) -> Set[NameSpacedStorage]:
    """
    Extracts namespaced storage information from the AST nodes.

    Args:
        def_node: The AST node representing the contract definition.

    Returns:
        Set[NameSpacedStorage]: A set of namespaced storage information.
    """
    found: Set[NameSpacedStorage] = set()
    # Missing/empty "nodes" simply yields an empty set.
    for child in (def_node.get("nodes") or []):
        info = erc7201_of_node(child)
        if info is None:
            continue
        storage_extension_logger.debug(f"Found namespaced storage: {info}")
        found.add(info)
    return found
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
def apply_extensions(target_contract: ContractInSDC,
                     extensions: Set[str],
                     to_add: Dict[str, NewStorageInfo]) -> None:
    """
    Apply the fields from each extension to the target contract,
    mutating target_contract.storage_layout in place.

    @param target_contract contract to which to apply extensions
    @param extensions set of extension contract names
    @param to_add maps extension name in extensions to (storage layouts, new types)

    Raises Util.CertoraUserInputError (via validate_new_fields) when two
    extensions, or an extension and the target itself, claim the same slot or
    variable name.
    """
    storage_layout = target_contract.storage_layout
    # Check if the target contract has a storage layout; without one there is
    # nothing to extend, so bail out with a warning.
    if not storage_layout:
        storage_extension_logger.warning(f"Target contract {target_contract.name} has no storage layout")
        return

    # Check if the target contract has a storage layout with 'storage' key.
    # Normalize a missing or malformed 'storage' entry to an empty list so the
    # extension fields below can always be appended.
    if "storage" not in storage_layout:
        storage_extension_logger.warning(f"Target contract {target_contract.name} storage layout does not contain 'storage' key")
        storage_layout["storage"] = []
    elif not isinstance(storage_layout["storage"], list):
        storage_extension_logger.warning(f"Target contract {target_contract.name} 'storage' is not a list but {type(storage_layout['storage']).__name__}: {storage_layout['storage']}")
        storage_layout["storage"] = []

    # Check if the target contract has a storage layout with 'types' key; same
    # normalization as above, but to an empty dict.
    if "types" not in storage_layout:
        storage_extension_logger.warning(f"Target contract {target_contract.name} storage layout does not contain 'types' key")
        storage_layout["types"] = {}
    elif not isinstance(storage_layout["types"], dict):
        storage_extension_logger.warning(f"Target contract {target_contract.name} 'types' is not a dict but {type(storage_layout['types']).__name__}: {storage_layout['types']}")
        storage_layout["types"] = {}

    # Snapshot the slots/variables the target already declares, taken BEFORE any
    # extension fields are appended, so extensions are only checked against the
    # original layout plus each other.
    target_slots = {storage["slot"] for storage in storage_layout["storage"]}
    target_vars = {storage["label"] for storage in storage_layout["storage"]}
    # Keep track of slots we've added, and error if we
    # find two extensions extending the same slot
    added_slots: Dict[str, str] = {}   # slot -> extension name that added it
    added_vars: Dict[str, str] = {}    # label -> extension name that added it
    for ext in extensions:
        # Check if the extension is in the to_add mapping
        if ext not in to_add:
            storage_extension_logger.warning(f"Extension {ext} not found in to_add mapping")
            continue
        (new_fields, new_types) = to_add[ext]
        for field in new_fields:
            # See if any of the new fields is a slot or variable name we've already added
            slot = field["slot"]
            var = field["label"]
            validate_new_fields(
                target_contract,
                ext,
                slot,
                var,
                added_slots,
                added_vars,
                target_slots,
                target_vars
            )

            added_slots[slot] = ext
            added_vars[var] = ext

        # Add the fields to the storage layout
        storage_layout["storage"].extend(new_fields)
        storage_extension_logger.debug(f"Added {len(new_fields)} fields from extension {ext} to contract {target_contract.name}: {[field['label'] for field in new_fields]}")
        # Merge the extension's type table; first writer wins on duplicate ids.
        for (new_id, new_ty) in new_types.items():
            if new_id in storage_layout["types"]:
                continue
            storage_layout["types"][new_id] = new_ty
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
def validate_new_fields(
        target_contract: ContractInSDC,
        ext: str,
        slot: str,
        var: str,
        added_slots: Dict[str, str],
        added_vars: Dict[str, str],
        target_slots: Set[str],
        target_vars: Set[str]) -> None:
    """
    Validate that the new fields being added to the target contract
    do not conflict with existing fields or variables.

    Args:
        target_contract: The target contract to which the fields are being added
        ext: The name of the extension contract
        slot: The slot being added
        var: The variable being added
        added_slots: Dictionary of slots already added
        added_vars: Dictionary of variables already added
        target_slots: Set of slots in the target contract
        target_vars: Set of variables in the target contract

    Raises:
        Util.CertoraUserInputError: When the slot or variable collides with one
            added by a previous extension or already present on the target.
    """
    # First check collisions with fields other extensions already contributed.
    prior = added_slots.get(slot)
    if prior is not None:
        raise Util.CertoraUserInputError(f"Slot {slot} added to {target_contract.name} by {ext} was already added by {prior}")
    prior = added_vars.get(var)
    if prior is not None:
        raise Util.CertoraUserInputError(f"Var '{var}' added to {target_contract.name} by {ext} was already added by {prior}")
    # Then check collisions with the target contract's own declarations.
    if slot in target_slots:
        raise Util.CertoraUserInputError(f"Slot {slot} added to {target_contract.name} by {ext} is already mapped by {target_contract.name}")
    if var in target_vars:
        raise Util.CertoraUserInputError(f"Var '{var}' added to {target_contract.name} by {ext} is already declared by {target_contract.name}")
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
def add_harness_to_compiler_map(original_file: str, harness_file: Any, context: CertoraContext) -> None:
    """
    Associates the generated harness file with the same compiler version as the original file.

    This ensures the harness contract is compiled with the same Solidity compiler version
    as the contract it's extending, maintaining compatibility.

    Args:
        original_file (str): Path to the original source file
        harness_file (Any): File-like object representing the generated harness file
        context (CertoraContext): The context object containing compiler mapping information

    Returns:
        None
    """
    # No per-file compiler map configured: nothing to register, the default
    # compiler will be used for the harness.
    if context.compiler_map is None:
        storage_extension_logger.debug("Cannot add compiler for harness: compiler_map is None (not a dict). Using the default compiler")
        return

    # Resolve the compiler the original file is built with.
    version = get_relevant_compiler(Path(original_file), context)

    # Register the harness under a glob pattern keyed by its basename so the
    # same compiler is picked regardless of the harness's directory.
    pattern = f"*{Path(harness_file.name).name}"
    context.compiler_map[pattern] = version

    storage_extension_logger.debug(f"Added compiler mapping: {pattern} -> {version}")
|
certora_cli/Mutate/mutateApp.py
CHANGED
|
@@ -44,7 +44,9 @@ from CertoraProver.certoraContextValidator import KEY_ENV_VAR
|
|
|
44
44
|
from Mutate import mutateConstants as MConstants
|
|
45
45
|
from Shared import certoraUtils as Util
|
|
46
46
|
from Shared.certoraLogging import LoggingManager
|
|
47
|
-
from certoraRun import run_certora
|
|
47
|
+
from certoraRun import run_certora
|
|
48
|
+
from Shared.proverCommon import CertoraRunResult, CertoraFoundViolations
|
|
49
|
+
from certoraSorobanProver import run_soroban_prover
|
|
48
50
|
from Shared import certoraValidateFuncs as Vf
|
|
49
51
|
from CertoraProver.Compiler.CompilerCollectorFactory import get_relevant_compiler
|
|
50
52
|
from Mutate import mutateUtil as MutUtil
|
|
@@ -1385,6 +1387,8 @@ class MutateApp:
|
|
|
1385
1387
|
args = ["--compilation_steps_only"]
|
|
1386
1388
|
if self.prover_version:
|
|
1387
1389
|
args += ['--prover_version', self.prover_version]
|
|
1390
|
+
if self.server:
|
|
1391
|
+
args += ['--server', self.server]
|
|
1388
1392
|
run_certora([str(self.conf)] + args)
|
|
1389
1393
|
except CertoraFoundViolations: # violations should not stop execution
|
|
1390
1394
|
pass
|
|
@@ -1416,7 +1420,10 @@ class MutateApp:
|
|
|
1416
1420
|
certora_args.extend(["--disable_local_typechecking"])
|
|
1417
1421
|
mutation_logger.debug(f"Running the Prover: {certora_args}")
|
|
1418
1422
|
try:
|
|
1419
|
-
|
|
1423
|
+
if self.is_soroban_run():
|
|
1424
|
+
certora_run_result = run_soroban_prover(certora_args)
|
|
1425
|
+
else:
|
|
1426
|
+
certora_run_result = run_certora(certora_args)
|
|
1420
1427
|
except CertoraFoundViolations as e:
|
|
1421
1428
|
assert e.results, "expect e.results not to be None"
|
|
1422
1429
|
certora_run_result = e.results
|
|
@@ -1893,7 +1900,7 @@ class MutateApp:
|
|
|
1893
1900
|
common_flags.extend(['--optimize'])
|
|
1894
1901
|
|
|
1895
1902
|
if hasattr(self.prover_context, MConstants.SOLC_ALLOW_PATH):
|
|
1896
|
-
common_flags.extend([
|
|
1903
|
+
common_flags.extend(['--allow-paths', '/'])
|
|
1897
1904
|
|
|
1898
1905
|
if hasattr(self.prover_context, MConstants.SOLC_EVM_VERSION):
|
|
1899
1906
|
common_flags.extend(['--evm-version', self.prover_context.solc_evm_version])
|