mm_web3-0.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mm_web3/__init__.py ADDED
@@ -0,0 +1,21 @@
+ from mm_web3.account import PrivateKeyMap as PrivateKeyMap
+ from mm_web3.calcs import calc_decimal_expression as calc_decimal_expression
+ from mm_web3.calcs import calc_expression_with_vars as calc_expression_with_vars
+ from mm_web3.calcs import convert_value_with_units as convert_value_with_units
+ from mm_web3.config import Web3CliConfig as Web3CliConfig
+ from mm_web3.log import init_loguru as init_loguru
+ from mm_web3.network import Network as Network
+ from mm_web3.network import NetworkType as NetworkType
+ from mm_web3.node import Nodes as Nodes
+ from mm_web3.node import random_node as random_node
+ from mm_web3.proxy import Proxies as Proxies
+ from mm_web3.proxy import fetch_proxies as fetch_proxies
+ from mm_web3.proxy import fetch_proxies_sync as fetch_proxies_sync
+ from mm_web3.proxy import is_valid_proxy_url as is_valid_proxy_url
+ from mm_web3.proxy import random_proxy as random_proxy
+ from mm_web3.retry import retry_with_node_and_proxy as retry_with_node_and_proxy
+ from mm_web3.retry import retry_with_proxy as retry_with_proxy
+ from mm_web3.utils import read_items_from_file as read_items_from_file
+ from mm_web3.utils import read_lines_from_file as read_lines_from_file
+ from mm_web3.validators import ConfigValidators as ConfigValidators
+ from mm_web3.validators import Transfer as Transfer
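Note: the package re-exports its public API at the top level, so callers do not need the submodule paths. A minimal sketch of that flat namespace (URLs below are placeholders, not defaults shipped with the package):

from mm_web3 import Network, random_node

print(Network.ETHEREUM)  # "ethereum" (StrEnum value)
print(random_node(["https://rpc-a.example/", "https://rpc-b.example"]))  # one of the two URLs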
mm_web3/account.py ADDED
@@ -0,0 +1,88 @@
+ from __future__ import annotations
+
+ import contextlib
+ from collections.abc import Callable
+ from pathlib import Path
+
+ from pydantic import GetCoreSchemaHandler, ValidationInfo
+ from pydantic_core import core_schema
+
+
+ class PrivateKeyMap(dict[str, str]):
+     """Map of addresses to private keys with fast lookup by address."""
+
+     def contains_all_addresses(self, addresses: list[str]) -> bool:
+         """Check if all addresses are in the map."""
+         return set(addresses) <= set(self.keys())
+
+     @classmethod
+     def __get_pydantic_core_schema__(cls, _source: object, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
+         # Use the dict schema as the basis.
+         return core_schema.with_info_after_validator_function(
+             cls.validate,  # our function that converts a dict to PrivateKeyMap
+             handler(dict),  # get the schema for a plain dict
+         )
+
+     @classmethod
+     def validate(cls, value: object, _info: ValidationInfo) -> PrivateKeyMap:
+         """
+         Convert and validate an input value into a PrivateKeyMap.
+
+         - If the input is already a PrivateKeyMap, return it.
+         - If it is a dict, check that all keys and values are strings and
+           then return a PrivateKeyMap.
+         - Otherwise, raise a TypeError.
+         """
+         if isinstance(value, cls):
+             return value
+         if isinstance(value, dict):
+             # Optionally, ensure all keys and values are strings.
+             if not all(isinstance(k, str) for k in value):
+                 raise TypeError("All keys in PrivateKeyMap must be strings")
+             if not all(isinstance(v, str) for v in value.values()):
+                 raise TypeError("All values in PrivateKeyMap must be strings")
+             return cls(value)
+         raise TypeError("Invalid type for PrivateKeyMap. Expected dict or PrivateKeyMap.")
+
+     @staticmethod
+     def from_list(private_keys: list[str], address_from_private: Callable[[str], str]) -> PrivateKeyMap:
+         """Create a dictionary of private keys with addresses as keys.
+
+         Args:
+             private_keys: List of private keys. Must be fully valid:
+                 - No empty strings
+                 - No whitespace-only strings
+                 - No duplicates
+             address_from_private: Function to derive address from private key
+
+         Raises:
+             ValueError: if any private key is invalid
+         """
+         # Check for duplicates
+         if len(private_keys) != len(set(private_keys)):
+             raise ValueError("duplicate private keys found")
+
+         result = PrivateKeyMap()
+         for private_key in private_keys:
+             address = None
+             with contextlib.suppress(Exception):
+                 address = address_from_private(private_key)
+             if address is None:
+                 raise ValueError("invalid private key")
+             result[address] = private_key
+         return result
+
+     @staticmethod
+     def from_file(private_keys_file: Path, address_from_private: Callable[[str], str]) -> PrivateKeyMap:
+         """Create a dictionary of private keys with addresses as keys from a file.
+
+         Raises:
+             ValueError: If the file cannot be read or any private key is invalid.
+         """
+         private_keys_file = private_keys_file.expanduser()
+         try:
+             content = private_keys_file.read_text().strip()
+         except OSError as e:
+             raise ValueError(f"can't read from the file: {private_keys_file}") from e
+
+         private_keys = content.split("\n") if content else []
+         return PrivateKeyMap.from_list(private_keys, address_from_private)
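Usage sketch (not part of the package): building a PrivateKeyMap from a list of keys. The derivation function and key strings below are illustrative stand-ins; a real application would pass a chain-specific derivation (for example, one based on eth_account for EVM keys).

from mm_web3 import PrivateKeyMap

def fake_address_from_private(private_key: str) -> str:
    # Stand-in derivation for illustration only; real code derives the on-chain address from the key.
    return "addr:" + private_key

keys = PrivateKeyMap.from_list(["k1", "k2"], fake_address_from_private)
print(keys["addr:k1"])                             # "k1"
print(keys.contains_all_addresses(["addr:k1"]))    # True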
mm_web3/calcs.py ADDED
@@ -0,0 +1,217 @@
+ import random
+ import re
+ from decimal import Decimal
+
+ from mm_std import random_decimal
+
+
+ def calc_decimal_expression(expression: str) -> Decimal:
+     """Calculate decimal value from string expression.
+
+     Supports:
+     - Plain numbers: "123.45", "-0.5"
+     - Random function: "random(min, max)" returns random decimal between min and max
+
+     Args:
+         expression: String expression to calculate
+
+     Returns:
+         Calculated decimal value
+
+     Raises:
+         ValueError: If expression format is invalid or random range is invalid (min > max)
+     """
+     expression = expression.lower().strip()
+     if expression.startswith("random(") and expression.endswith(")"):
+         arr = expression.lstrip("random(").rstrip(")").split(",")
+         if len(arr) != 2:
+             raise ValueError(f"wrong expression, random part: {expression}")
+         try:
+             from_value = Decimal(arr[0])
+             to_value = Decimal(arr[1])
+         except Exception as e:
+             raise ValueError(f"wrong expression, random part: {expression}") from e
+         if from_value > to_value:
+             raise ValueError(f"wrong expression, random part: {expression}")
+         return random_decimal(from_value, to_value)
+
+     try:
+         return Decimal(expression)
+     except Exception as e:
+         raise ValueError(f"invalid decimal expression: {expression}") from e
+
+
+ def convert_value_with_units(value: str, unit_decimals: dict[str, int]) -> int:
+     """Convert value with units to base integer units.
+
+     Converts values like "1.5eth" to base units (wei) using decimal places mapping.
+
+     Args:
+         value: String value to convert (e.g., "123.45eth", "100")
+         unit_decimals: Mapping of unit suffixes to decimal places (e.g., {"eth": 18})
+
+     Returns:
+         Value converted to base integer units
+
+     Raises:
+         ValueError: If value is negative or unit suffix is not recognized
+     """
+     value = value.lower().strip()
+     if value.startswith("-"):
+         raise ValueError(f"negative value is illegal: {value}")
+     if value.isdigit():
+         return int(value)
+     unit_decimals = {k.lower(): v for k, v in unit_decimals.items()}
+     for suffix in unit_decimals:
+         if value.endswith(suffix):
+             value = value.removesuffix(suffix)
+             return int(Decimal(value) * 10 ** unit_decimals[suffix])
+
+     raise ValueError(f"illegal value: {value}")
+
+
+ def calc_expression_with_vars(
+     expression: str, variables: dict[str, int] | None = None, unit_decimals: dict[str, int] | None = None
+ ) -> int:
+     """Calculate complex integer expression with variables, units and random values.
+
+     Supports:
+     - Arithmetic operations: "+", "-"
+     - Variables with multipliers: "balance", "0.5balance"
+     - Unit conversions: "1.5eth", "100gwei"
+     - Random function: "random(1eth, 2eth)"
+     - Mixed expressions: "0.2balance + random(1gwei, 2gwei) - 100"
+
+     Args:
+         expression: String expression to calculate
+         variables: Mapping of variable names to their integer values
+         unit_decimals: Mapping of unit suffixes to decimal places
+
+     Returns:
+         Calculated integer value in base units
+
+     Raises:
+         ValueError: If expression format is invalid
+         TypeError: If expression is not a string
+     """
+     if not isinstance(expression, str):
+         raise TypeError(f"expression is not str: {expression}")
+     expression = expression.lower().strip()
+     if unit_decimals is None:
+         unit_decimals = {}
+     if variables is None:
+         variables = {}
+     unit_decimals = {k.lower(): v for k, v in unit_decimals.items()}
+     variables = {k.lower(): v for k, v in variables.items()}
+
+     # Check for conflicts between variable names and unit suffixes
+     for var_name in variables:
+         if var_name in unit_decimals:
+             raise ValueError(f"variable name conflicts with unit suffix: {var_name}")
+
+     try:
+         result = 0
+         for token in _split_on_plus_minus_tokens(expression.lower()):
+             operator = token[0]
+             term = token[1:]
+             suffix = _get_suffix(term, unit_decimals)
+
+             if term.isdigit():
+                 term_value = int(term)
+             elif suffix is not None:
+                 term_value = convert_value_with_units(term, unit_decimals)
+             elif variables:
+                 # Check if term ends with any variable name
+                 matched_var = None
+                 for var_name in variables:
+                     if term.endswith(var_name):
+                         matched_var = var_name
+                         break
+
+                 if matched_var:
+                     multiplier_part = term.removesuffix(matched_var)
+                     multiplier = Decimal(multiplier_part) if multiplier_part else Decimal(1)
+                     term_value = int(multiplier * variables[matched_var])
+                 # Check for random function
+                 elif term.startswith("random(") and term.endswith(")"):
+                     term_value = _parse_random_function(term, unit_decimals)
+                 else:
+                     raise ValueError(f"unrecognized term: {term}")  # noqa: TRY301
+             elif term.startswith("random(") and term.endswith(")"):
+                 term_value = _parse_random_function(term, unit_decimals)
+             else:
+                 raise ValueError(f"unrecognized term: {term}")  # noqa: TRY301
+
+             if operator == "+":
+                 result += term_value
+             if operator == "-":
+                 result -= term_value
+
+         return result  # noqa: TRY300
+     except Exception as e:
+         raise ValueError(e) from e
+
+
+ def _parse_random_function(term: str, unit_decimals: dict[str, int]) -> int:
+     """Extract random function parameters and generate random value within range.
+
+     Supports unit conversion in random bounds to ensure consistent base units.
+     """
+     content = term.lstrip("random(").rstrip(")")
+     parts = content.split(",")
+     if len(parts) != 2:
+         raise ValueError(f"random function must have exactly 2 arguments: {term}")
+
+     from_value = convert_value_with_units(parts[0].strip(), unit_decimals)
+     to_value = convert_value_with_units(parts[1].strip(), unit_decimals)
+
+     if from_value > to_value:
+         raise ValueError(f"random range invalid, min > max: {term}")
+
+     return random.randint(from_value, to_value)
+
+
+ def _get_suffix(item: str, unit_decimals: dict[str, int]) -> str | None:
+     """Find unit suffix in term to enable unit conversion.
+
+     Returns first matching suffix to avoid ambiguity in complex expressions.
+     """
+     for suffix in unit_decimals:
+         if item.endswith(suffix):
+             return suffix
+     return None
+
+
+ def _split_on_plus_minus_tokens(value: str) -> list[str]:
+     """Split expression into signed terms for sequential evaluation.
+
+     Normalizes input by removing spaces and adding leading '+' when needed.
+     Each token contains operator (+ or -) followed by the term value.
+     """
+     value = "".join(value.split())
+     if not value:
+         raise ValueError("value is empty")
+     if "++" in value:
+         raise ValueError("++ in value")
+     if "--" in value:
+         raise ValueError("-- in value")
+     if value.endswith("-"):
+         raise ValueError("ends with -")
+     if value.endswith("+"):
+         raise ValueError("ends with +")
+
+     if not value.startswith("+") and not value.startswith("-"):
+         value = "+" + value
+
+     result: list[str] = []
+     rest_value = value
+     while True:
+         if not rest_value:
+             return result
+         items = re.split(r"[+\-]", rest_value)
+         if rest_value.startswith("+"):
+             result.append("+" + items[1])
+             rest_value = rest_value.removeprefix("+" + items[1])
+         elif rest_value.startswith("-"):
+             result.append("-" + items[1])
+             rest_value = rest_value.removeprefix("-" + items[1])
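Usage sketch (not part of the package): the expression helpers with concrete numbers. The "balance" variable and the unit map are caller-supplied assumptions, not package defaults.

from mm_web3 import calc_decimal_expression, calc_expression_with_vars, convert_value_with_units

units = {"eth": 18, "gwei": 9}

print(calc_decimal_expression("123.45"))            # Decimal("123.45")
print(calc_decimal_expression("random(1.0, 2.0)"))  # random Decimal between 1.0 and 2.0

print(convert_value_with_units("1.5eth", units))    # 1500000000000000000

# "0.5balance" takes half of the supplied balance (in wei), then 1 gwei is added.
value = calc_expression_with_vars("0.5balance + 1gwei", variables={"balance": 10**18}, unit_decimals=units)
print(value)                                        # 500000001000000000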
mm_web3/config.py ADDED
@@ -0,0 +1,160 @@
+ import sys
+ import tomllib
+ from pathlib import Path
+ from typing import Any, NoReturn, Self, TypeVar
+ from zipfile import ZipFile
+
+ import mm_print
+ from mm_result import Result
+ from pydantic import BaseModel, ConfigDict, ValidationError
+
+ T = TypeVar("T", bound="Web3CliConfig")
+
+
+ class Web3CliConfig(BaseModel):
+     """Base configuration class for cryptocurrency CLI tools.
+
+     Provides TOML file loading with optional ZIP archive support,
+     validation error handling, and debug printing capabilities.
+     """
+
+     model_config = ConfigDict(extra="forbid")
+
+     def print_and_exit(self, exclude: set[str] | None = None, count: set[str] | None = None) -> NoReturn:
+         """Print config as JSON and exit the program.
+
+         Args:
+             exclude: Fields to exclude from output
+             count: Fields to show as length instead of full content
+         """
+         data = self.model_dump(exclude=exclude)
+         if count:
+             for k in count:
+                 data[k] = len(data[k])
+         mm_print.json(data)
+         sys.exit(0)
+
+     @classmethod
+     def read_toml_config_or_exit(cls, config_path: Path, zip_password: str = "") -> Self:  # nosec
+         """Read TOML config file, exit on error.
+
+         Args:
+             config_path: Path to TOML file or ZIP archive
+             zip_password: Password for encrypted ZIP archives
+
+         Returns:
+             Validated config instance
+         """
+         res: Result[Self] = cls.read_toml_config(config_path, zip_password)
+         if res.is_ok():
+             return res.unwrap()
+         cls._print_error_and_exit(res)
+
+     @classmethod
+     async def read_toml_config_or_exit_async(cls, config_path: Path, zip_password: str = "") -> Self:  # nosec
+         """Read TOML config file with async validation, exit on error.
+
+         Args:
+             config_path: Path to TOML file or ZIP archive
+             zip_password: Password for encrypted ZIP archives
+
+         Returns:
+             Validated config instance
+         """
+         res: Result[Self] = await cls.read_toml_config_async(config_path, zip_password)
+         if res.is_ok():
+             return res.unwrap()
+         cls._print_error_and_exit(res)
+
+     @classmethod
+     def _load_toml_data(cls, config_path: Path, zip_password: str = "") -> dict[str, Any]:  # nosec
+         """Load TOML data from file or ZIP archive.
+
+         Args:
+             config_path: Path to TOML file or ZIP archive
+             zip_password: Password for encrypted ZIP archives
+
+         Returns:
+             Parsed TOML data as dictionary
+         """
+         config_path = config_path.expanduser()
+         if config_path.name.endswith(".zip"):
+             return tomllib.loads(read_text_from_zip_archive(config_path, password=zip_password))
+         with config_path.open("rb") as f:
+             return tomllib.load(f)
+
+     @classmethod
+     def read_toml_config(cls, config_path: Path, zip_password: str = "") -> Result[Self]:  # nosec
+         """Read and validate TOML config file.
+
+         Args:
+             config_path: Path to TOML file or ZIP archive
+             zip_password: Password for encrypted ZIP archives
+
+         Returns:
+             Result containing validated config or error details
+         """
+         try:
+             data = cls._load_toml_data(config_path, zip_password)
+             return Result.ok(cls(**data))
+         except ValidationError as e:
+             return Result.err(("validator_error", e), extra={"errors": e.errors()})
+         except Exception as e:
+             return Result.err(e)
+
+     @classmethod
+     async def read_toml_config_async(cls, config_path: Path, zip_password: str = "") -> Result[Self]:  # nosec
+         """Read and validate TOML config file with async validators.
+
+         Use this method when your config has async model validators that
+         need to perform network requests or database queries.
+
+         Args:
+             config_path: Path to TOML file or ZIP archive
+             zip_password: Password for encrypted ZIP archives
+
+         Returns:
+             Result containing validated config or error details
+         """
+         try:
+             data = cls._load_toml_data(config_path, zip_password)
+             model = await cls.model_validate(data)  # type: ignore[misc]
+             return Result.ok(model)
+         except ValidationError as e:
+             return Result.err(("validator_error", e), extra={"errors": e.errors()})
+         except Exception as e:
+             return Result.err(e)
+
+     @classmethod
+     def _print_error_and_exit(cls, res: Result[Any]) -> NoReturn:
+         """Print validation errors and exit with status code 1.
+
+         Args:
+             res: Failed Result containing error information
+         """
+         if res.error == "validator_error" and res.extra:
+             mm_print.plain("config validation errors")
+             for e in res.extra["errors"]:
+                 loc = e["loc"]
+                 field = ".".join(str(lo) for lo in loc) if len(loc) > 0 else ""
+                 mm_print.plain(f"{field} {e['msg']}")
+         else:
+             mm_print.plain(f"can't parse config file: {res.error} {res.extra}")
+         sys.exit(1)
+
+
+ def read_text_from_zip_archive(zip_archive_path: Path, filename: str | None = None, password: str | None = None) -> str:
+     """Read text content from ZIP archive.
+
+     Args:
+         zip_archive_path: Path to ZIP archive
+         filename: Specific file to read (first file if None)
+         password: Archive password if encrypted
+
+     Returns:
+         Decoded text content of the file
+     """
+     with ZipFile(zip_archive_path) as zipfile:
+         if filename is None:
+             filename = zipfile.filelist[0].filename
+         return zipfile.read(filename, pwd=password.encode() if password else None).decode()
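Usage sketch (not part of the package): a concrete config subclassing Web3CliConfig. AppConfig, its fields, and config.toml are hypothetical; the TOML keys are assumed to match the field names.

from pathlib import Path

from mm_web3 import Web3CliConfig

class AppConfig(Web3CliConfig):
    # Assumes config.toml contains, e.g.:
    #   node = "https://rpc.example/"
    #   retries = 3
    node: str
    retries: int = 3

config = AppConfig.read_toml_config_or_exit(Path("config.toml"))
print(config.node)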
mm_web3/log.py ADDED
@@ -0,0 +1,20 @@
+ import sys
+ from pathlib import Path
+
+ from loguru import logger
+
+
+ def init_loguru(debug: bool, debug_file: Path | None, info_file: Path | None) -> None:
+     if debug:
+         level = "DEBUG"
+         format_ = "<green>{time:YYYY-MM-DD HH:mm:ss}</green> <level>{level}</level> {message}"
+     else:
+         level = "INFO"
+         format_ = "{message}"
+
+     logger.remove()
+     logger.add(sys.stderr, format=format_, colorize=True, level=level)
+     if debug_file:
+         logger.add(debug_file.expanduser(), format="{time:YYYY-MM-DD HH:mm:ss} {level} {message}")
+     if info_file:
+         logger.add(info_file.expanduser(), format="{message}", level="INFO")
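Usage sketch (not part of the package): wiring up console plus optional file sinks. The log paths are placeholders.

from pathlib import Path

from loguru import logger

from mm_web3 import init_loguru

init_loguru(debug=True, debug_file=Path("~/logs/debug.log"), info_file=Path("~/logs/info.log"))
logger.info("started")         # goes to stderr and both files
logger.debug("verbose detail")  # goes to stderr and the debug file only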
mm_web3/network.py ADDED
@@ -0,0 +1,191 @@
+ """Network types and utilities for different blockchain networks."""
+
+ from __future__ import annotations
+
+ from enum import StrEnum, unique
+
+
+ @unique
+ class NetworkType(StrEnum):
+     """Base network types (EVM, Solana, etc)."""
+
+     EVM = "evm"
+     SOLANA = "solana"
+     APTOS = "aptos"
+     STARKNET = "starknet"
+
+     def lowercase_address(self) -> bool:
+         """Whether addresses for this network type should be lowercase."""
+         match self:
+             case NetworkType.EVM:
+                 return True
+             case NetworkType.SOLANA:
+                 return False
+             case NetworkType.APTOS:
+                 return True
+             case NetworkType.STARKNET:
+                 return True
+         raise ValueError("no network found")
+
+
+ @unique
+ class Network(StrEnum):
+     """Blockchain networks"""
+
+     APTOS = "aptos"
+     ARBITRUM_ONE = "arbitrum-one"
+     AVAX_C = "avax-c"
+     BASE = "base"
+     BSC = "bsc"
+     CELO = "celo"
+     CORE = "core"
+     ETHEREUM = "ethereum"
+     FANTOM = "fantom"
+     LINEA = "linea"
+     OPBNB = "opbnb"
+     OP_MAINNET = "op-mainnet"
+     POLYGON = "polygon"
+     POLYGON_ZKEVM = "polygon-zkevm"
+     SCROLL = "scroll"
+     SOLANA = "solana"
+     STARKNET = "starknet"
+     ZKSYNC_ERA = "zksync-era"
+     ZORA = "zora"
+
+     @property
+     def network_type(self) -> NetworkType:
+         """Get the base network type (EVM, Solana, etc)."""
+         if self in self.evm_networks():
+             return NetworkType.EVM
+         if self in self.solana_networks():
+             return NetworkType.SOLANA
+         if self in self.aptos_networks():
+             return NetworkType.APTOS
+         if self in self.starknet_networks():
+             return NetworkType.STARKNET
+         raise ValueError("no network found")
+
+     def explorer_token(self, token: str) -> str:
+         """Get explorer URL for a token address."""
+         match self:
+             case Network.ARBITRUM_ONE:
+                 return f"https://arbiscan.io/token/{token}"
+             case Network.AVAX_C:
+                 return f"https://snowtrace.io/token/{token}"
+             case Network.APTOS:
+                 return f"https://explorer.aptoslabs.com/coin/{token}"
+             case Network.BASE:
+                 return f"https://basescan.org/token/{token}"
+             case Network.BSC:
+                 return f"https://bscscan.com/token/{token}"
+             case Network.CELO:
+                 return f"https://celoscan.io/token/{token}"
+             case Network.CORE:
+                 return f"https://scan.coredao.org/token/{token}"
+             case Network.ETHEREUM:
+                 return f"https://etherscan.io/token/{token}"
+             case Network.FANTOM:
+                 return f"https://ftmscan.com/token/{token}"
+             case Network.LINEA:
+                 return f"https://lineascan.build/token/{token}"
+             case Network.OPBNB:
+                 return f"https://opbnbscan.com/token/{token}"
+             case Network.OP_MAINNET:
+                 return f"https://optimistic.etherscan.io/token/{token}"
+             case Network.POLYGON:
+                 return f"https://polygonscan.com/token/{token}"
+             case Network.POLYGON_ZKEVM:
+                 return f"https://zkevm.polygonscan.com/token/{token}"
+             case Network.SCROLL:
+                 return f"https://scrollscan.com/token/{token}"
+             case Network.SOLANA:
+                 return f"https://solscan.io/token/{token}"
+             case Network.STARKNET:
+                 return f"https://voyager.online/token/{token}"
+             case Network.ZKSYNC_ERA:
+                 return f"https://explorer.zksync.io/token/{token}"
+             case Network.ZORA:
+                 return f"https://explorer.zora.energy/tokens/{token}"
+
+         raise ValueError("no network found")
+
+     def explorer_account(self, account: str) -> str:
+         """Get explorer URL for an account address."""
+         match self:
+             case Network.ARBITRUM_ONE:
+                 return f"https://arbiscan.io/address/{account}"
+             case Network.AVAX_C:
+                 return f"https://snowtrace.io/address/{account}"
+             case Network.APTOS:
+                 return f"https://explorer.aptoslabs.com/account/{account}"
+             case Network.BASE:
+                 return f"https://basescan.org/address/{account}"
+             case Network.BSC:
+                 return f"https://bscscan.com/address/{account}"
+             case Network.CELO:
+                 return f"https://celoscan.io/address/{account}"
+             case Network.CORE:
+                 return f"https://scan.coredao.org/address/{account}"
+             case Network.ETHEREUM:
+                 return f"https://etherscan.io/address/{account}"
+             case Network.FANTOM:
+                 return f"https://ftmscan.com/address/{account}"
+             case Network.LINEA:
+                 return f"https://lineascan.build/address/{account}"
+             case Network.OPBNB:
+                 return f"https://opbnbscan.com/address/{account}"
+             case Network.OP_MAINNET:
+                 return f"https://optimistic.etherscan.io/address/{account}"
+             case Network.POLYGON:
+                 return f"https://polygonscan.com/address/{account}"
+             case Network.POLYGON_ZKEVM:
+                 return f"https://zkevm.polygonscan.com/address/{account}"
+             case Network.SCROLL:
+                 return f"https://scrollscan.com/address/{account}"
+             case Network.SOLANA:
+                 return f"https://solscan.io/account/{account}"
+             case Network.ZKSYNC_ERA:
+                 return f"https://explorer.zksync.io/address/{account}"
+             case Network.STARKNET:
+                 return f"https://voyager.online/contract/{account}"
+             case Network.ZORA:
+                 return f"https://explorer.zora.energy/address/{account}"
+
+         raise ValueError("no network found")
+
+     @classmethod
+     def evm_networks(cls) -> list[Network]:
+         """Get list of all EVM-compatible networks."""
+         return [
+             Network.ARBITRUM_ONE,
+             Network.AVAX_C,
+             Network.BASE,
+             Network.BSC,
+             Network.CELO,
+             Network.CORE,
+             Network.ETHEREUM,
+             Network.FANTOM,
+             Network.LINEA,
+             Network.OPBNB,
+             Network.OP_MAINNET,
+             Network.POLYGON,
+             Network.POLYGON_ZKEVM,
+             Network.SCROLL,
+             Network.ZKSYNC_ERA,
+             Network.ZORA,
+         ]
+
+     @classmethod
+     def solana_networks(cls) -> list[Network]:
+         """Get list of all Solana networks."""
+         return [Network.SOLANA]
+
+     @classmethod
+     def aptos_networks(cls) -> list[Network]:
+         """Get list of all Aptos networks."""
+         return [Network.APTOS]
+
+     @classmethod
+     def starknet_networks(cls) -> list[Network]:
+         """Get list of all Starknet networks."""
+         return [Network.STARKNET]
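Usage sketch (not part of the package): resolving a network's base type and explorer URLs. The zero address is used purely as a placeholder.

from mm_web3 import Network

net = Network.ARBITRUM_ONE
print(net.network_type)                      # "evm"
print(net.network_type.lowercase_address())  # True
print(net.explorer_token("0x0000000000000000000000000000000000000000"))
# https://arbiscan.io/token/0x0000000000000000000000000000000000000000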
mm_web3/node.py ADDED
@@ -0,0 +1,38 @@
+ import random
+ from collections.abc import Sequence
+
+ type Nodes = str | Sequence[str]
+ """
+ Type alias for JSON RPC node configuration.
+
+ Can be either:
+ - A single node URL as string
+ - A sequence (list, tuple) of node URLs
+ """
+
+
+ def random_node(nodes: Nodes, remove_slash: bool = True) -> str:
+     """
+     Select a random JSON RPC node from the provided nodes.
+
+     Args:
+         nodes: Single node URL or sequence of node URLs
+         remove_slash: Whether to remove trailing slash from the URL
+
+     Returns:
+         Selected node URL
+
+     Raises:
+         ValueError: When no valid node can be selected
+     """
+     if isinstance(nodes, str):
+         selected = nodes
+     else:
+         if not nodes:
+             raise ValueError("No nodes provided")
+         selected = random.choice(nodes)
+
+     if remove_slash and selected.endswith("/"):
+         selected = selected.removesuffix("/")
+
+     return selected
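Usage sketch (not part of the package): node selection with placeholder URLs.

from mm_web3 import random_node

nodes = ["https://rpc-a.example/", "https://rpc-b.example"]
print(random_node(nodes))                                          # one of the two, trailing "/" stripped
print(random_node("https://rpc-a.example/", remove_slash=False))   # single node returned as-is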
mm_web3/proxy.py ADDED
@@ -0,0 +1,106 @@
+ """Proxy utilities for HTTP requests."""
+
+ import random
+ from collections.abc import Sequence
+ from urllib.parse import urlparse
+
+ from mm_http import http_request, http_request_sync
+ from mm_result import Result
+
+ type Proxies = str | Sequence[str] | None
+ """Proxy configuration: single URL, sequence of URLs, or None for no proxy."""
+
+
+ def random_proxy(proxies: Proxies) -> str | None:
+     """Select a random proxy from the given configuration."""
+     if proxies is None:
+         return None
+
+     if isinstance(proxies, str):
+         return proxies
+
+     # proxies is a Sequence[str] at this point
+     if proxies:
+         return random.choice(proxies)
+
+     return None
+
+
+ async def fetch_proxies(proxies_url: str, timeout: float = 5) -> Result[list[str]]:
+     """Fetch proxies from the given url. Expects content-type: text/plain with one proxy per line. Each proxy must be valid."""
+     res = await http_request(proxies_url, timeout=timeout)
+     if res.is_err():
+         return res.to_result_err()
+
+     proxies = [p.strip() for p in (res.body or "").splitlines() if p.strip()]
+     proxies = list(dict.fromkeys(proxies))
+     for proxy in proxies:
+         if not is_valid_proxy_url(proxy):
+             return res.to_result_err(f"Invalid proxy URL: {proxy}")
+
+     if not proxies:
+         return res.to_result_err("No valid proxies found")
+     return res.to_result_ok(proxies)
+
+
+ def fetch_proxies_sync(proxies_url: str, timeout: float = 5) -> Result[list[str]]:
+     """Synchronous version of fetch_proxies."""
+     res = http_request_sync(proxies_url, timeout=timeout)
+     if res.is_err():
+         return res.to_result_err()
+
+     proxies = [p.strip() for p in (res.body or "").splitlines() if p.strip()]
+     proxies = list(dict.fromkeys(proxies))
+     for proxy in proxies:
+         if not is_valid_proxy_url(proxy):
+             return res.to_result_err(f"Invalid proxy URL: {proxy}")
+
+     if not proxies:
+         return res.to_result_err("No valid proxies found")
+     return res.to_result_ok(proxies)
+
+
+ def is_valid_proxy_url(proxy_url: str) -> bool:
+     """
+     Check if the given URL is a valid proxy URL.
+
+     A valid proxy URL must have:
+     - A scheme in {"http", "https", "socks4", "socks5", "socks5h"}.
+     - A non-empty hostname.
+     - A specified port.
+     - No extra path components (the path must be empty or "/").
+
+     For SOCKS4 URLs, authentication (username/password) is not supported.
+
+     Examples:
+         is_valid_proxy_url("socks5h://user:pass@proxy.example.com:1080") -> True
+         is_valid_proxy_url("http://proxy.example.com:8080") -> True
+         is_valid_proxy_url("socks4://proxy.example.com:1080") -> True
+         is_valid_proxy_url("socks4://user:pass@proxy.example.com:1080") -> False
+         is_valid_proxy_url("ftp://proxy.example.com:21") -> False
+         is_valid_proxy_url("socks4://proxy.example.com:1080/bla-bla-bla") -> False
+     """
+     try:
+         parsed = urlparse(proxy_url)
+     except Exception:
+         return False
+
+     allowed_schemes = {"http", "https", "socks4", "socks5", "socks5h"}
+     if parsed.scheme not in allowed_schemes:
+         return False
+
+     if not parsed.hostname:
+         return False
+
+     if not parsed.port:
+         return False
+
+     # For SOCKS4, authentication is not supported.
+     if parsed.scheme == "socks4" and (parsed.username or parsed.password):
+         return False
+
+     # Ensure that there is no extra path (only allow an empty path or a single "/")
+     if parsed.path and parsed.path not in ("", "/"):  # noqa: SIM103
+         return False
+
+     return True
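Usage sketch (not part of the package): validating and picking proxies. The proxy hosts are placeholders.

from mm_web3 import is_valid_proxy_url, random_proxy

proxies = [
    "socks5h://user:pass@proxy-1.example:1080",
    "http://proxy-2.example:8080",
]
assert all(is_valid_proxy_url(p) for p in proxies)
print(random_proxy(proxies))  # one of the two entries
print(random_proxy(None))     # None: no proxy configured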
mm_web3/py.typed ADDED
(empty marker file, no content to diff)
mm_web3/retry.py ADDED
@@ -0,0 +1,68 @@
+ from collections.abc import Awaitable, Callable
+ from typing import TypeVar
+
+ from mm_result import Result
+
+ from mm_web3.node import Nodes, random_node
+ from mm_web3.proxy import Proxies, random_proxy
+
+ T = TypeVar("T")
+
+ # Function that takes (node, proxy) and returns an Awaitable[Result[T]]
+ FuncWithNodeAndProxy = Callable[[str, str | None], Awaitable[Result[T]]]
+
+ # Function that takes only (proxy) and returns an Awaitable[Result[T]]
+ FuncWithProxy = Callable[[str | None], Awaitable[Result[T]]]
+
+
+ async def retry_with_node_and_proxy(retries: int, nodes: Nodes, proxies: Proxies, func: FuncWithNodeAndProxy[T]) -> Result[T]:
+     """
+     Retry the given function multiple times with random node and proxy on each attempt.
+
+     Args:
+         retries: Number of attempts to make.
+         nodes: Available nodes to randomly choose from.
+         proxies: Available proxies to randomly choose from.
+         func: Async function that accepts (node, proxy) and returns a Result.
+
+     Returns:
+         Result with success on first successful call, or last failure with logs of attempts.
+     """
+     res: Result[T] = Result.err("not_started")
+     logs = []
+
+     for _ in range(retries):
+         node = random_node(nodes)
+         proxy = random_proxy(proxies)
+         res = await func(node, proxy)
+         logs.append({"node": node, "proxy": proxy, "result": res.to_dict()})
+         if res.is_ok():
+             return Result.ok(res.unwrap(), {"retry_logs": logs})
+
+     return Result.err(res.unwrap_err(), {"retry_logs": logs})
+
+
+ async def retry_with_proxy(retries: int, proxies: Proxies, func: FuncWithProxy[T]) -> Result[T]:
+     """
+     Retry the given function multiple times using a random proxy on each attempt.
+
+     Args:
+         retries: Number of attempts to make.
+         proxies: Available proxies to randomly choose from.
+         func: Async function that accepts (proxy) and returns a Result.
+
+     Returns:
+         Result with success on first successful call, or last failure with logs of attempts.
+     """
+     res: Result[T] = Result.err("not_started")
+     logs = []
+
+     for _ in range(retries):
+         proxy = random_proxy(proxies)
+         res = await func(proxy)
+         logs.append({"proxy": proxy, "result": res.to_dict()})
+         if res.is_ok():
+             return Result.ok(res.unwrap(), {"retry_logs": logs})
+
+     return Result.err(res.unwrap_err(), {"retry_logs": logs})
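Usage sketch (not part of the package): retrying an RPC-style call with random node/proxy rotation. The get_block_number coroutine and the RPC URLs are stand-ins; a real implementation would issue the request against `node` through `proxy`.

import asyncio

from mm_result import Result

from mm_web3 import retry_with_node_and_proxy

async def get_block_number(node: str, proxy: str | None) -> Result[int]:
    # Stand-in for a real RPC call made against `node` through `proxy`.
    return Result.ok(12345)

async def main() -> None:
    res = await retry_with_node_and_proxy(
        retries=3,
        nodes=["https://rpc-a.example/", "https://rpc-b.example/"],
        proxies=None,
        func=get_block_number,
    )
    print(res.unwrap())  # 12345, returned on the first successful attempt

asyncio.run(main())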
mm_web3/utils.py ADDED
@@ -0,0 +1,67 @@
+ from collections.abc import Callable
+ from pathlib import Path
+
+
+ def read_items_from_file(path: Path, is_valid: Callable[[str], bool], lowercase: bool = False) -> list[str]:
+     """Read items from a file and validate them.
+
+     Raises:
+         ValueError: if the file cannot be read or any item is invalid.
+     """
+     path = path.expanduser()
+     if not path.is_file():
+         raise ValueError(f"{path} is not a file")
+
+     try:
+         with path.open() as file:
+             items = []
+             for line_num, raw_line in enumerate(file, 1):
+                 item = raw_line.strip()
+                 if not item:  # Skip empty lines
+                     continue
+
+                 if lowercase:
+                     item = item.lower()
+
+                 if not is_valid(item):
+                     raise ValueError(f"Invalid item in {path} at line {line_num}: {item}")
+                 items.append(item)
+
+             return items
+     except OSError as e:
+         raise ValueError(f"Cannot read file {path}: {e}") from e
+
+
+ def read_lines_from_file(source: Path | str, lowercase: bool = False) -> list[str]:
+     """Read non-empty lines from a file.
+
+     Args:
+         source: Path to the file to read from.
+         lowercase: If True, convert all lines to lowercase.
+
+     Returns:
+         List of non-empty lines from the file.
+
+     Raises:
+         ValueError: if the file cannot be read or is not a file.
+     """
+     path = Path(source).expanduser()
+     if not path.is_file():
+         raise ValueError(f"{path} is not a file")
+
+     try:
+         with path.open() as file:
+             lines = []
+             for raw_line in file:
+                 stripped_line = raw_line.strip()
+                 if not stripped_line:  # Skip empty lines
+                     continue
+
+                 if lowercase:
+                     stripped_line = stripped_line.lower()
+
+                 lines.append(stripped_line)
+
+             return lines
+     except OSError as e:
+         raise ValueError(f"Cannot read file {path}: {e}") from e
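Usage sketch (not part of the package): reading and validating items from a file. Assumes a placeholder addresses.txt exists with one entry per line; the validity check is a simplistic illustration.

from pathlib import Path

from mm_web3 import read_items_from_file, read_lines_from_file

addresses = read_items_from_file(Path("addresses.txt"), is_valid=lambda s: s.startswith("0x"), lowercase=True)
lines = read_lines_from_file("addresses.txt")
print(len(addresses), len(lines))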
mm_web3/validators.py ADDED
@@ -0,0 +1,350 @@
+ import os
+ from collections.abc import Callable
+ from pathlib import Path
+
+ from mm_std import parse_lines
+ from pydantic import BaseModel
+
+ from mm_web3.account import PrivateKeyMap
+ from mm_web3.calcs import calc_decimal_expression, calc_expression_with_vars
+ from mm_web3.proxy import fetch_proxies_sync
+ from mm_web3.utils import read_lines_from_file
+
+ type IsAddress = Callable[[str], bool]
+
+
+ class Transfer(BaseModel):
+     from_address: str
+     to_address: str
+     value: str  # can be empty string
+
+     @property
+     def log_prefix(self) -> str:
+         return f"{self.from_address}->{self.to_address}"
+
+
+ class ConfigValidators:
+     """Pydantic field validators for cryptocurrency CLI application configuration.
+
+     Provides static methods that return validator functions for use with Pydantic models
+     in cryptocurrency CLI applications. Each validator handles complex input formats
+     including direct values, file references, and external data sources.
+
+     These validators are designed for CLI configuration files where users need flexible
+     ways to specify cryptocurrency addresses, private keys, network nodes, proxies,
+     and mathematical expressions for transaction amounts.
+     """
+
+     @staticmethod
+     def transfers(is_address: IsAddress, lowercase: bool = False) -> Callable[[str], list[Transfer]]:
+         """Validate and parse cryptocurrency transfers configuration.
+
+         Parses transfer configurations from string or file references. Each transfer
+         requires source and destination addresses, with optional value specification.
+
+         Args:
+             is_address: Function to validate cryptocurrency addresses
+             lowercase: If True, convert addresses to lowercase
+
+         Returns:
+             Validator function that parses string into list of Transfer objects
+
+         Format:
+             - Direct: "from_addr to_addr [value]"
+             - File reference: "file:/path/to/transfers.txt"
+
+         The value field can be:
+             - Empty string: value taken from default config
+             - Decimal expression: "123.45" or "random(1.0, 5.0)"
+             - Expression with variables: "0.5balance + 1eth"
+
+         Raises:
+             ValueError: If addresses are invalid, format is wrong, or no transfers found
+         """
+
+         def validator(v: str) -> list[Transfer]:
+             result = []
+             for line in parse_lines(v, remove_comments=True):  # don't use lowercase here because it can be a file: /To/Path.txt
+                 if line.startswith("file:"):
+                     for file_line in read_lines_from_file(line.removeprefix("file:").strip()):
+                         arr = file_line.split()
+                         if len(arr) < 2 or len(arr) > 3:
+                             raise ValueError(f"illegal file_line: {file_line}")
+                         result.append(Transfer(from_address=arr[0], to_address=arr[1], value=arr[2] if len(arr) > 2 else ""))
+
+                 else:
+                     arr = line.split()
+                     if len(arr) < 2 or len(arr) > 3:
+                         raise ValueError(f"illegal line: {line}")
+                     result.append(Transfer(from_address=arr[0], to_address=arr[1], value=arr[2] if len(arr) > 2 else ""))
+
+             if lowercase:
+                 result = [
+                     Transfer(from_address=r.from_address.lower(), to_address=r.to_address.lower(), value=r.value) for r in result
+                 ]
+
+             for route in result:
+                 if not is_address(route.from_address):
+                     raise ValueError(f"illegal address: {route.from_address}")
+                 if not is_address(route.to_address):
+                     raise ValueError(f"illegal address: {route.to_address}")
+
+             if not result:
+                 raise ValueError("No valid transfers found")
+
+             return result
+
+         return validator
+
+     @staticmethod
+     def proxies() -> Callable[[str], list[str]]:
+         """Validate and parse proxy configuration from multiple sources.
+
+         Supports direct proxy specification, fetching from URLs, environment variables,
+         and local files. Automatically deduplicates results.
+
+         Returns:
+             Validator function that parses string into unique list of proxy addresses
+
+         Format:
+             - Direct: "proxy1:port\nproxy2:port"
+             - URL source: "url:http://example.com/proxies.txt"
+             - Environment URL: "env_url:PROXY_URL_VAR"
+             - File reference: "file:/path/to/proxies.txt"
+
+         Raises:
+             ValueError: If URL fetch fails or environment variable is missing
+         """
+
+         def validator(v: str) -> list[str]:
+             result = []
+             for line in parse_lines(v, deduplicate=True, remove_comments=True):
+                 if line.startswith("url:"):
+                     url = line.removeprefix("url:").strip()
+                     res = fetch_proxies_sync(url)
+                     if res.is_err():
+                         raise ValueError(f"Can't get proxies: {res.unwrap_err()}")
+                     result += res.unwrap()
+                 elif line.startswith("env_url:"):
+                     env_var = line.removeprefix("env_url:").strip()
+                     url = os.getenv(env_var) or ""
+                     if not url:
+                         raise ValueError(f"missing env var: {env_var}")
+                     res = fetch_proxies_sync(url)
+                     if res.is_err():
+                         raise ValueError(f"Can't get proxies: {res.unwrap_err()}")
+                     result += res.unwrap()
+                 elif line.startswith("file:"):
+                     path = line.removeprefix("file:").strip()
+                     result += read_lines_from_file(path)
+                 else:
+                     result.append(line)
+
+             return list(dict.fromkeys(result))
+
+         return validator
+
+     @staticmethod
+     def log_file() -> Callable[[Path], Path]:
+         """Validate and prepare log file path with automatic directory creation.
+
+         Creates parent directories and ensures file is writable. Expands user home directory (~).
+
+         Returns:
+             Validator function that validates Path and ensures write access
+
+         Raises:
+             ValueError: If path is not writable or cannot be created
+         """
+
+         def validator(v: Path) -> Path:
+             log_file = Path(v).expanduser()
+             log_file.parent.mkdir(parents=True, exist_ok=True)
+             log_file.touch(exist_ok=True)
+             if not log_file.is_file() or not os.access(log_file, os.W_OK):
+                 raise ValueError(f"wrong log path: {v}")
+             return log_file
+
+         return validator
+
+     @staticmethod
+     def nodes(allow_empty: bool = False) -> Callable[[str], list[str]]:
+         """Validate blockchain node URLs configuration.
+
+         Parses and deduplicates node URLs from string input.
+
+         Args:
+             allow_empty: If True, allows empty node list
+
+         Returns:
+             Validator function that parses string into list of node URLs
+
+         Raises:
+             ValueError: If node list is empty when allow_empty=False
+         """
+
+         def validator(v: str) -> list[str]:
+             nodes = parse_lines(v, deduplicate=True, remove_comments=True)
+             if not allow_empty and not nodes:
+                 raise ValueError("Node list cannot be empty")
+             return nodes
+
+         return validator
+
+     @staticmethod
+     def address(is_address: IsAddress, lowercase: bool = False) -> Callable[[str], str]:
+         """Validate single cryptocurrency address.
+
+         Args:
+             is_address: Function to validate cryptocurrency addresses
+             lowercase: If True, converts address to lowercase
+
+         Returns:
+             Validator function that validates and optionally lowercases address
+
+         Raises:
+             ValueError: If address is invalid
+         """
+
+         def validator(v: str) -> str:
+             if not is_address(v):
+                 raise ValueError(f"illegal address: {v}")
+             if lowercase:
+                 return v.lower()
+             return v
+
+         return validator
+
+     @staticmethod
+     def addresses(deduplicate: bool, lowercase: bool = False, is_address: IsAddress | None = None) -> Callable[[str], list[str]]:
+         """Validate list of cryptocurrency addresses from string or file references.
+
+         Supports direct address specification and file references. Optionally validates
+         each address and applies transformations.
+
+         Args:
+             deduplicate: If True, deduplicates addresses
+             lowercase: If True, converts addresses to lowercase
+             is_address: Optional function to validate each address
+
+         Returns:
+             Validator function that parses string into list of addresses
+
+         Format:
+             - Direct: "addr1\naddr2\naddr3"
+             - File reference: "file:/path/to/addresses.txt"
+
+         Raises:
+             ValueError: If any address is invalid (when is_address provided)
+         """
+
+         def validator(v: str) -> list[str]:
+             result = []
+             for line in parse_lines(v, deduplicate=deduplicate, remove_comments=True):
+                 if line.startswith("file:"):  # don't use lowercase here because it can be a file: /To/Path.txt
+                     path = line.removeprefix("file:").strip()
+                     result += read_lines_from_file(path)
+                 else:
+                     result.append(line)
+
+             if deduplicate:
+                 result = list(dict.fromkeys(result))
+
+             if lowercase:
+                 result = [r.lower() for r in result]
+
+             if is_address:
+                 for address in result:
+                     if not is_address(address):
+                         raise ValueError(f"illegal address: {address}")
+             return result
+
+         return validator
+
+     @staticmethod
+     def private_keys(address_from_private: Callable[[str], str]) -> Callable[[str], PrivateKeyMap]:
+         """Validate and parse private keys configuration.
+
+         Parses private keys from string or file references and converts them to
+         address-to-private-key mapping using provided conversion function.
+
+         Args:
+             address_from_private: Function to derive address from private key
+
+         Returns:
+             Validator function that parses string into PrivateKeyMap
+
+         Format:
+             - Direct: "key1\nkey2\nkey3"
+             - File reference: "file:/path/to/keys.txt"
+
+         Raises:
+             ValueError: If any private key is invalid or duplicate
+         """
+
+         def validator(v: str) -> PrivateKeyMap:
+             private_keys = []
+             for line in parse_lines(v, deduplicate=True, remove_comments=True):
+                 if line.startswith("file:"):
+                     path = line.removeprefix("file:").strip()
+                     private_keys += read_lines_from_file(path)
+                 else:
+                     private_keys.append(line)
+
+             return PrivateKeyMap.from_list(private_keys, address_from_private)
+
+         return validator
+
+     @staticmethod
+     def expression_with_vars(var_name: str | None = None, unit_decimals: dict[str, int] | None = None) -> Callable[[str], str]:
+         """Validate mathematical expressions with variables and units.
+
+         Validates expressions using calc_expression_with_vars function. Supports variables,
+         unit suffixes, and arithmetic operations for dynamic value calculations.
+
+         Args:
+             var_name: Variable name to include in validation context
+             unit_decimals: Mapping of unit suffixes to decimal places
+
+         Returns:
+             Validator function that validates expression syntax
+
+         Examples:
+             - "0.5balance + 1eth"
+             - "random(1gwei, 10gwei) - 100"
+
+         Raises:
+             ValueError: If expression syntax is invalid
+         """
+
+         def validator(v: str) -> str:
+             # Use arbitrary test value to validate expression syntax without actual calculation
+             variables = {var_name: 123} if var_name else {}
+             calc_expression_with_vars(v, variables, unit_decimals=unit_decimals)
+             return v
+
+         return validator
+
+     @staticmethod
+     def decimal_expression() -> Callable[[str], str]:
+         """Validate decimal expressions and random functions.
+
+         Validates expressions using calc_decimal_expression function. Supports simple
+         decimal values and random function calls.
+
+         Returns:
+             Validator function that validates decimal expression syntax
+
+         Examples:
+             - "123.45"
+             - "random(1.0, 5.0)"
+
+         Raises:
+             ValueError: If expression syntax is invalid
+         """
+
+         def validator(v: str) -> str:
+             calc_decimal_expression(v)
+             return v
+
+         return validator
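Usage sketch (not part of the package): plugging ConfigValidators into a pydantic config model. BotConfig, its fields, and looks_like_evm_address are hypothetical; a real project would substitute its own chain-specific address check.

from pydantic import field_validator

from mm_web3 import ConfigValidators, Transfer, Web3CliConfig

def looks_like_evm_address(value: str) -> bool:
    # Simplistic illustrative check; real code would verify the hex payload.
    return value.startswith("0x") and len(value) == 42

class BotConfig(Web3CliConfig):
    nodes: list[str]
    proxies: list[str]
    transfers: list[Transfer]

    @field_validator("nodes", mode="before")
    @classmethod
    def _nodes(cls, v: str) -> list[str]:
        return ConfigValidators.nodes()(v)

    @field_validator("proxies", mode="before")
    @classmethod
    def _proxies(cls, v: str) -> list[str]:
        return ConfigValidators.proxies()(v)

    @field_validator("transfers", mode="before")
    @classmethod
    def _transfers(cls, v: str) -> list[Transfer]:
        return ConfigValidators.transfers(looks_like_evm_address, lowercase=True)(v)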
mm_web3-0.5.0.dist-info/METADATA ADDED
@@ -0,0 +1,8 @@
+ Metadata-Version: 2.4
+ Name: mm-web3
+ Version: 0.5.0
+ Requires-Python: >=3.13
+ Requires-Dist: loguru>=0.7.3
+ Requires-Dist: mm-http~=0.1.0
+ Requires-Dist: mm-print~=0.1.1
+ Requires-Dist: mm-std~=0.5.3
mm_web3-0.5.0.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
+ mm_web3/__init__.py,sha256=uGPiC3UGbashWlRiIr27CjqGp1xUTHA6goDBO5cGBBU,1296
+ mm_web3/account.py,sha256=CNV2I6BRxMNwh2lldOKo9Gpv30zXaL2UzurztUDR_bY,3623
+ mm_web3/calcs.py,sha256=b5mWCDE8UdQjxkdVbWPMRNaMbG6XteSlqG0M2CYDQe4,7860
+ mm_web3/config.py,sha256=A_xHvWQgJlAZPI7iJ0q-53gFA1WWOIKQl3uMTlQm6qM,5733
+ mm_web3/log.py,sha256=rvrObh-Jo9nO0OEIsLD5-f98mxeuA1GQlkYUY7xLb0Q,653
+ mm_web3/network.py,sha256=99Qv59rGAvf5akp8Sbow_PXkKQk6B3Yh8oy2kH6JhFw,6902
+ mm_web3/node.py,sha256=vXO9PsKZ_yfKsLKc5R_HL62CAKTDJbOg4BHijqw4IbM,902
+ mm_web3/proxy.py,sha256=dfFeb4cUWfPwxmK7EUZ5WBu_XPkytzDMIYvXYrz8gUk,3523
+ mm_web3/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ mm_web3/retry.py,sha256=GPwESXK-C9EI4r5TcWK46sP2fQa68gnp34D8Lhwc6Cc,2384
+ mm_web3/utils.py,sha256=k-x8R8bLRZKBN3xqeepukD9Tzwt97qiTUETSrK7lIHM,2009
+ mm_web3/validators.py,sha256=nt1AXiLfz1ek5PWsqVMtgvENSsCky0-B0NtrXuf0afI,12894
+ mm_web3-0.5.0.dist-info/METADATA,sha256=TSwqQfLPOS4KgHyPehqVGwei0j7mPY1puF33WKTy1EI,194
+ mm_web3-0.5.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ mm_web3-0.5.0.dist-info/RECORD,,
mm_web3-0.5.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any