api-chain-runner 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- api_chain_runner/__init__.py +18 -0
- api_chain_runner/__main__.py +138 -0
- api_chain_runner/executor.py +285 -0
- api_chain_runner/generator.py +126 -0
- api_chain_runner/logger.py +89 -0
- api_chain_runner/models.py +149 -0
- api_chain_runner/pause.py +104 -0
- api_chain_runner/resolver.py +118 -0
- api_chain_runner/runner.py +416 -0
- api_chain_runner/store.py +80 -0
- api_chain_runner-1.0.0.dist-info/METADATA +12 -0
- api_chain_runner-1.0.0.dist-info/RECORD +15 -0
- api_chain_runner-1.0.0.dist-info/WHEEL +5 -0
- api_chain_runner-1.0.0.dist-info/entry_points.txt +2 -0
- api_chain_runner-1.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
"""API Chain Runner — execute chained API calls with dynamic reference resolution."""
|
|
2
|
+
|
|
3
|
+
__version__ = "1.0.0"
|
|
4
|
+
|
|
5
|
+
from api_chain_runner.runner import ChainRunner
|
|
6
|
+
from api_chain_runner.models import (
|
|
7
|
+
ChainResult,
|
|
8
|
+
ConfigurationError,
|
|
9
|
+
StepResult,
|
|
10
|
+
)
|
|
11
|
+
|
|
12
|
+
__all__ = [
|
|
13
|
+
"__version__",
|
|
14
|
+
"ChainRunner",
|
|
15
|
+
"ChainResult",
|
|
16
|
+
"ConfigurationError",
|
|
17
|
+
"StepResult",
|
|
18
|
+
]
|
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
"""CLI entry point for API Chain Runner.
|
|
2
|
+
|
|
3
|
+
Usage::
|
|
4
|
+
|
|
5
|
+
python -m api_chain_runner example_chain.yaml
|
|
6
|
+
python -m api_chain_runner example_chain.yaml -o results.csv
|
|
7
|
+
python -m api_chain_runner example_chain.yaml -o results.xlsx -f xlsx
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
import argparse
|
|
12
|
+
import os
|
|
13
|
+
import re
|
|
14
|
+
import yaml
|
|
15
|
+
from api_chain_runner import __version__
|
|
16
|
+
from api_chain_runner.logger import ResultLogger
|
|
17
|
+
from api_chain_runner.runner import ChainRunner
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _substitute_env_vars(obj):
|
|
21
|
+
"""Recursively substitute ``${ENV:VAR_NAME}`` placeholders with
|
|
22
|
+
the corresponding environment variable values.
|
|
23
|
+
|
|
24
|
+
If the environment variable is not set the placeholder is left as-is
|
|
25
|
+
so the user gets a clear signal that something is missing.
|
|
26
|
+
"""
|
|
27
|
+
if isinstance(obj, dict):
|
|
28
|
+
return {k: _substitute_env_vars(v) for k, v in obj.items()}
|
|
29
|
+
if isinstance(obj, list):
|
|
30
|
+
return [_substitute_env_vars(item) for item in obj]
|
|
31
|
+
if isinstance(obj, str):
|
|
32
|
+
def _replace(match):
|
|
33
|
+
var_name = match.group(1)
|
|
34
|
+
return os.environ.get(var_name, match.group(0))
|
|
35
|
+
return re.sub(r"\$\{ENV:([^}]+)\}", _replace, obj)
|
|
36
|
+
return obj
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def _preprocess_config(config_path: str) -> str:
    """Load a YAML config, substitute ``${ENV:...}`` placeholders, and
    persist the result to a temporary file for :class:`ChainRunner`.

    Returns the original path when no substitution changed anything,
    otherwise the path of a freshly written temporary YAML file (the
    caller is responsible for deleting it).
    """
    with open(config_path, "r", encoding="utf-8") as fh:
        original = yaml.safe_load(fh)

    rewritten = _substitute_env_vars(original)

    # No placeholder resolved to anything new — reuse the file on disk.
    if rewritten == original:
        return config_path

    import tempfile

    # delete=False: the file must outlive this function so ChainRunner
    # can open it; main() removes it after the run.
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".yaml", delete=False, encoding="utf-8"
    ) as tmp:
        yaml.dump(rewritten, tmp, default_flow_style=False)
    return tmp.name
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def build_parser() -> argparse.ArgumentParser:
    """Construct the command-line argument parser for the CLI."""
    parser = argparse.ArgumentParser(
        prog="api_chain_runner",
        description="Execute a chain of API calls defined in a YAML config file.",
    )
    parser.add_argument(
        "--version", action="version", version=f"%(prog)s {__version__}"
    )
    parser.add_argument(
        "config", help="Path to the YAML chain configuration file."
    )
    parser.add_argument(
        "-o", "--output", default=None,
        help="Output file path for results (default: <config_stem>_results.csv).",
    )
    parser.add_argument(
        "-f", "--format", choices=["csv", "xlsx"], default="csv",
        help="Output format: csv (default) or xlsx.",
    )
    return parser
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def main(argv: list[str] | None = None) -> None:
    """Parse arguments, run the chain, and print a per-step summary.

    Args:
        argv: Argument list, primarily for testing; defaults to
            ``sys.argv[1:]`` via :meth:`argparse.ArgumentParser.parse_args`.
    """
    parser = build_parser()
    args = parser.parse_args(argv)

    # Pre-process config to resolve ${ENV:...} placeholders.  This may
    # produce a temporary file that must be cleaned up afterwards.
    processed_config = _preprocess_config(args.config)

    try:
        runner = ChainRunner(processed_config)

        # Override output path / format if the user specified them.
        if args.output or args.format != "csv":
            # NOTE(review): reaches into the logger's private attribute to
            # keep the runner's default output path when only --format was
            # given — confirm ResultLogger is meant to expose this.
            output_path = args.output or runner.logger._output_path
            runner.logger = ResultLogger(str(output_path), fmt=args.format)
            # Re-wire the executor's logger reference so results land in
            # the replacement logger, not the discarded default one.
            runner.executor.logger = runner.logger

        result = runner.run()
    finally:
        # Clean up the temp file if one was created.  ``os`` is already
        # imported at module level — the original re-imported it here as
        # ``_os``, which was redundant.  Cleanup is best-effort.
        if processed_config != args.config:
            try:
                os.unlink(processed_config)
            except OSError:
                pass

    # Print summary to stdout
    print(
        f"Executed {result.total_steps} steps: "
        f"{result.passed} passed, {result.failed} failed"
    )
    for step_result in result.results:
        status = "\u2713" if step_result.success else "\u2717"
        print(
            f" {status} {step_result.step_name} \u2014 "
            f"HTTP {step_result.status_code} ({step_result.duration_ms:.0f}ms)"
        )
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
if __name__ == "__main__":
    # Support direct execution: ``python -m api_chain_runner`` and
    # ``python __main__.py`` both land here.
    main()
|
|
@@ -0,0 +1,285 @@
|
|
|
1
|
+
"""StepExecutor — executes a single API step with reference resolution and logging."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import mimetypes
|
|
7
|
+
import time
|
|
8
|
+
from datetime import datetime, timezone, timedelta
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
import requests
|
|
11
|
+
from api_chain_runner.generator import UniqueDataGenerator
|
|
12
|
+
from api_chain_runner.logger import ResultLogger
|
|
13
|
+
from api_chain_runner.models import LogEntry, StepDefinition, StepResult
|
|
14
|
+
from api_chain_runner.pause import PauseController
|
|
15
|
+
from api_chain_runner.resolver import ReferenceResolver
|
|
16
|
+
from api_chain_runner.store import ResponseStore
|
|
17
|
+
|
|
18
|
+
# Indian Standard Time (UTC+05:30) — log-entry timestamps are stamped in this zone.
IST = timezone(timedelta(hours=5, minutes=30))
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class StepExecutor:
    """Executes a single API step: resolve references, generate unique data,
    make the HTTP call, store the response, and log the result.

    Parameters
    ----------
    resolver:
        Resolves ``${step.key}`` references against stored responses.
    generator:
        Generates unique values for marked fields.
    store:
        In-memory response store for cross-step data sharing.
    logger:
        Logs every request/response to CSV or Excel.
    pause_controller:
        Optional controller for pause/resume during polling.
    """

    def __init__(
        self,
        resolver: ReferenceResolver,
        generator: UniqueDataGenerator,
        store: ResponseStore,
        logger: ResultLogger,
        pause_controller: PauseController | None = None,
    ) -> None:
        self.resolver = resolver
        self.generator = generator
        self.store = store
        self.logger = logger
        self.pause_controller = pause_controller

    def execute(self, step: StepDefinition) -> StepResult:
        """Execute a step, with optional polling until expected value is found.

        If the step has a ``polling`` config, the step is re-executed at the
        configured intervals until the response contains the expected value
        at the specified key path, or the max timeout is exceeded.

        Only the final polling result is logged to CSV — intermediate attempts
        are printed to console but not written to the log file.
        """
        # Fast path: steps without polling execute (and log) exactly once.
        if not step.polling:
            return self._execute_once(step)

        polling = step.polling
        # monotonic() is immune to wall-clock adjustments, so the timeout
        # cannot be skewed by NTP or manual clock changes.
        start_time = time.monotonic()
        attempt = 0

        while True:
            # Check for pause before each poll attempt
            if self.pause_controller:
                self.pause_controller.wait_if_paused()

            # Intermediate attempts are not logged; only the final outcome is.
            result = self._execute_once(step, log_to_csv=False)
            attempt += 1

            # Subtract paused time from elapsed so timeout doesn't tick while paused
            paused_time = self.pause_controller.total_paused if self.pause_controller else 0.0
            elapsed = time.monotonic() - start_time - paused_time

            # Check if the response value matches any of the expected values.
            # Matching is string-based: the extracted value is compared via
            # str(), so numbers/booleans match their textual representation.
            if result.success and isinstance(result.response_body, dict):
                actual = self._get_nested(result.response_body, polling.key_path)
                if str(actual) in polling.expected_values:
                    print(
                        f" [polling] '{step.name}' got expected "
                        f"'{polling.key_path}={actual}' "
                        f"after {attempt} attempt(s) ({elapsed:.1f}s)"
                    )
                    self._log_result(step, result)
                    return result
                else:
                    print(
                        f" [polling] '{step.name}' attempt {attempt}: "
                        f"'{polling.key_path}' = '{actual}' "
                        f"(waiting for {polling.expected_values})"
                    )

            # Check timeout before sleeping for next attempt
            if elapsed >= polling.max_timeout:
                actual_display = self._get_nested(
                    result.response_body, polling.key_path
                ) if isinstance(result.response_body, dict) else "N/A"
                error_msg = (
                    f"Polling timed out after {elapsed:.1f}s ({attempt} attempts). "
                    f"Expected '{polling.key_path}' in {polling.expected_values}, "
                    f"last value='{actual_display}'."
                )
                print(f" [polling] '{step.name}' TIMEOUT: {error_msg}")
                # Synthesize a failed result that still carries the last
                # response, so the log shows what the server returned.
                timeout_result = StepResult(
                    step_name=step.name,
                    status_code=result.status_code,
                    response_body=result.response_body,
                    duration_ms=(time.monotonic() - start_time - paused_time) * 1000,
                    success=False,
                    error=error_msg,
                )
                self._log_result(step, timeout_result)
                return timeout_result

            # Don't sleep past the timeout
            remaining = polling.max_timeout - elapsed
            actual_wait = min(polling.interval, remaining)
            if actual_wait <= 0:
                continue
            print(
                f" [polling] '{step.name}' attempt {attempt}: "
                f"waiting {actual_wait:.0f}s before retry..."
            )
            self._interruptible_sleep(actual_wait)

    @staticmethod
    def _get_nested(data, key_path: str):
        """Traverse a dict/list by dot-separated key path.

        Supports array indexing via numeric segments, e.g.
        ``"applications.0.status"`` resolves to ``data["applications"][0]["status"]``.
        Negative indices are supported, e.g. ``"applications.-1.status"``
        resolves to the last element.

        Returns None if any segment is not found.
        """
        current = data
        for key in key_path.split("."):
            if isinstance(current, dict) and key in current:
                current = current[key]
            elif isinstance(current, list) and (key.isdigit() or (key.startswith("-") and key[1:].isdigit())):
                # Plain and negative integer segments index into lists.
                idx = int(key)
                # Bounds check mirrors Python indexing: -len .. len-1 is valid.
                if -len(current) <= idx < len(current):
                    current = current[idx]
                else:
                    return None
            else:
                return None
        return current

    def _execute_once(self, step: StepDefinition, log_to_csv: bool = True) -> StepResult:
        """Execute a single API step.

        Phases:
            1. Resolve ``${step.key}`` references in headers and payload.
            2. Apply unique field generation if ``unique_fields`` is set.
            3. Execute the HTTP request with a 30 s timeout.
            4. Store dict responses in :class:`ResponseStore`.
            5. Log the request/response as a :class:`LogEntry`.

        Args:
            step: The step definition to execute.
            log_to_csv: When False (polling attempts), skip phase 5 so only
                the final attempt appears in the log file.

        Returns:
            A :class:`StepResult` capturing status code, body, timing, and errors.
        """
        # Phase 1: Resolve references in url, headers, and payload
        resolved_url = self.resolver.resolve(step.url)
        resolved_headers = self.resolver.resolve(step.headers)
        resolved_payload = self.resolver.resolve(step.payload) if step.payload else None

        # Phase 2: Generate unique fields if specified
        if resolved_payload and step.unique_fields:
            resolved_payload = self.generator.apply(resolved_payload, step.unique_fields)

        # Phase 3: Execute HTTP request
        start = time.monotonic()
        opened_files: list = []
        try:
            # Build request kwargs based on whether this is a file upload
            request_kwargs: dict = {
                "method": step.method,
                "url": resolved_url,
                "headers": resolved_headers,
                "timeout": 30,
            }

            # NOTE(review): redundant re-initialization — opened_files was
            # already set to [] just above the try block.
            opened_files = []
            if step.files:
                # Multipart file upload — open files and attach as multipart/form-data
                files_dict = {}
                for field_name, file_path in step.files.items():
                    p = Path(file_path)
                    mime_type = mimetypes.guess_type(p.name)[0] or "application/octet-stream"
                    # Handles are tracked so the finally block can close them
                    # even if the request itself raises.
                    fh = open(p, "rb")
                    opened_files.append(fh)
                    files_dict[field_name] = (p.name, fh, mime_type)
                request_kwargs["files"] = files_dict
                # Send any extra payload fields as form data alongside the file
                if resolved_payload:
                    request_kwargs["data"] = resolved_payload
            else:
                # json=None sends no body; requests only serializes non-None.
                request_kwargs["json"] = resolved_payload

            response = requests.request(**request_kwargs)
            duration_ms = (time.monotonic() - start) * 1000

            # Prefer a parsed JSON body; fall back to raw text on parse failure.
            try:
                body = response.json()
            except ValueError:
                body = response.text

            # Phase 4: Store response for downstream steps
            if isinstance(body, dict):
                self.store.save(step.name, body)

            result = StepResult(
                step_name=step.name,
                status_code=response.status_code,
                response_body=body,
                duration_ms=duration_ms,
                success=200 <= response.status_code < 300,
            )

        except requests.RequestException as e:
            # Network-level failure (timeout, DNS, connection refused, ...):
            # record it as a failed result with a sentinel status code of -1.
            duration_ms = (time.monotonic() - start) * 1000
            result = StepResult(
                step_name=step.name,
                status_code=-1,
                response_body="",
                duration_ms=duration_ms,
                success=False,
                error=str(e),
            )
        finally:
            # Always release upload file handles, success or failure.
            for fh in opened_files:
                fh.close()

        # Phase 5: Log regardless of outcome (unless suppressed for polling)
        if log_to_csv:
            self._log_result(step, result)

        return result

    def _log_result(self, step: StepDefinition, result: StepResult) -> None:
        """Write a single result entry to the logger."""
        # NOTE(review): references are re-resolved here rather than reusing
        # what _execute_once sent, so generated unique-field values (and any
        # references that resolve differently after this step's response was
        # stored) may not match the actual request — confirm this is intended.
        resolved_url = self.resolver.resolve(step.url)
        resolved_headers = self.resolver.resolve(step.headers)
        resolved_payload = self.resolver.resolve(step.payload) if step.payload else None

        self.logger.log(
            LogEntry(
                timestamp=datetime.now(IST).isoformat(),
                step_name=result.step_name,
                method=step.method,
                url=resolved_url,
                request_headers=json.dumps(resolved_headers),
                request_body=json.dumps(resolved_payload) if resolved_payload else "",
                status_code=result.status_code,
                response_body=(
                    json.dumps(result.response_body)
                    if isinstance(result.response_body, dict)
                    else str(result.response_body)
                ),
                duration_ms=result.duration_ms,
                error=result.error,
            )
        )

    def _interruptible_sleep(self, seconds: float) -> None:
        """Sleep for the given duration, checking for pause every 0.5s."""
        # Sleeping in small chunks keeps the pause controller responsive:
        # a pause request takes effect within ~0.5s instead of after the
        # whole polling interval.
        remaining = seconds
        while remaining > 0:
            if self.pause_controller:
                self.pause_controller.wait_if_paused()
            chunk = min(0.5, remaining)
            time.sleep(chunk)
            remaining -= chunk
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
"""Unique data generator for fields that must differ per run."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import copy
|
|
6
|
+
import random
|
|
7
|
+
import string
|
|
8
|
+
import time
|
|
9
|
+
import uuid
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class UniqueDataGenerator:
    """Generates unique values for email, PAN, mobile, and UDYAM fields.

    Each call produces a distinct value with high probability via
    timestamp/UUID suffixes (email) or random generation (PAN, mobile,
    UDYAM).
    """

    # Fourth character of a PAN encodes the entity type.
    _PAN_FOURTH_CHARS = "PCHFAT"

    def generate_email(self, base: str = "user") -> str:
        """Generate a unique RFC-valid email address.

        Uses a combination of timestamp and a short UUID fragment to
        guarantee uniqueness across runs.

        Args:
            base: Local-part prefix (default ``"user"``).

        Returns:
            An email string like ``"user_1718901234_a1b2c3@test.com"``.
        """
        ts = int(time.time())
        uid = uuid.uuid4().hex[:6]
        return f"{base}_{ts}_{uid}@test.com"

    def generate_pan(self) -> str:
        """Generate a valid-format Indian PAN number.

        Format: ``[A-Z]{3}[PCHFAT][A-Z][0-9]{4}[A-Z]``

        The fourth character is one of P (individual), C (company),
        H (HUF), F (firm), A (AOP), or T (trust).

        Returns:
            A 10-character PAN string.
        """
        first_three = "".join(random.choices(string.ascii_uppercase, k=3))
        # TODO: switch to random.choice(self._PAN_FOURTH_CHARS) once
        # customer types other than individuals must be supported.
        fourth = "P"  # current customer base is individuals only
        fifth = random.choice(string.ascii_uppercase)
        digits = "".join(random.choices(string.digits, k=4))
        last = random.choice(string.ascii_uppercase)
        return f"{first_three}{fourth}{fifth}{digits}{last}"

    def generate_mobile(self) -> str:
        """Generate a 10-digit Indian mobile number.

        Indian mobile numbers start with a digit in the range 6-9,
        followed by 9 random digits.

        Returns:
            A 10-digit numeric string.
        """
        first = str(random.randint(6, 9))
        rest = "".join(random.choices(string.digits, k=9))
        return f"{first}{rest}"

    def generate_udyam(self) -> str:
        """Generate a valid-format UDYAM registration number.

        Format: ``UDYAM-XX-99-9999999`` where X is an uppercase letter
        and 9 is a random digit.

        Returns:
            A UDYAM string like ``"UDYAM-KA-23-1234567"``.
        """
        letters = string.ascii_uppercase
        return (
            f"UDYAM-{random.choice(letters)}{random.choice(letters)}"
            f"-{random.randint(10, 99)}-{random.randint(1000000, 9999999)}"
        )

    def apply(self, payload: dict, unique_fields: dict[str, str]) -> dict:
        """Apply generated unique values to specified payload paths.

        Creates a deep copy of *payload*, then for each entry in
        *unique_fields* generates a value and sets it at the
        dot-notation path.

        Args:
            payload: The original request body dict.
            unique_fields: Mapping of ``"dotted.path"`` to generator
                type (``"email"``, ``"pan"``, ``"mobile"``, or ``"udyam"``).

        Returns:
            A new dict with unique values injected. The original
            *payload* is never mutated.

        Raises:
            KeyError: If a generator type is unknown, or an intermediate
                key of a dotted path is missing from *payload*.
        """
        result = copy.deepcopy(payload)

        # Dispatch table: generator type name -> bound factory method.
        generators = {
            "email": self.generate_email,
            "pan": self.generate_pan,
            "mobile": self.generate_mobile,
            "udyam": self.generate_udyam,
        }

        for field_path, gen_type in unique_fields.items():
            value = generators[gen_type]()
            keys = field_path.split(".")
            target = result

            # Walk to the parent container of the final key.
            for key in keys[:-1]:
                target = target[key]

            target[keys[-1]] = value

        return result
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
"""ResultLogger — logs every API call to CSV or Excel."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import csv
|
|
6
|
+
import dataclasses
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import TYPE_CHECKING
|
|
9
|
+
|
|
10
|
+
if TYPE_CHECKING:
|
|
11
|
+
from api_chain_runner.models import LogEntry
|
|
12
|
+
|
|
13
|
+
# Column order shared by the CSV and Excel writers.
COLUMNS = [
    "timestamp",
    "step_name",
    "method",
    "url",
    "request_headers",
    "request_body",
    "status_code",
    "response_body",
    "duration_ms",
    "error",
]


class ResultLogger:
    """Accumulates one entry per API call in memory and writes them all
    to a CSV or Excel file when :meth:`finalize` is called.

    Parameters
    ----------
    output_path:
        Destination file path (e.g. ``"results.csv"``).
    fmt:
        Output format — ``"csv"`` or ``"xlsx"``.
    """

    def __init__(self, output_path: str, fmt: str = "csv") -> None:
        if fmt not in ("csv", "xlsx"):
            raise ValueError(f"Unsupported format '{fmt}'. Must be 'csv' or 'xlsx'.")
        self._output_path = Path(output_path)
        self._fmt = fmt
        self._entries: list[LogEntry] = []

    def log(self, entry: LogEntry) -> None:
        """Append a log entry for a single API call."""
        self._entries.append(entry)

    def finalize(self) -> None:
        """Flush all accumulated entries to the output file."""
        writer = self._write_csv if self._fmt == "csv" else self._write_xlsx
        writer()

    # ------------------------------------------------------------------
    # Private helpers
    # ------------------------------------------------------------------

    def _entry_to_row(self, entry: LogEntry) -> list[str]:
        """Render *entry* as strings in COLUMNS order (None becomes "")."""
        values = dataclasses.asdict(entry)
        return ["" if values[name] is None else str(values[name]) for name in COLUMNS]

    def _write_csv(self) -> None:
        self._output_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self._output_path, "w", newline="", encoding="utf-8") as fh:
            writer = csv.writer(fh)
            writer.writerow(COLUMNS)
            writer.writerows(self._entry_to_row(entry) for entry in self._entries)

    def _write_xlsx(self) -> None:
        # openpyxl is an optional dependency, only needed for xlsx output.
        try:
            from openpyxl import Workbook
        except ImportError as exc:
            raise ImportError(
                "openpyxl is required for Excel output. Install it with: pip install openpyxl"
            ) from exc

        workbook = Workbook()
        sheet = workbook.active
        sheet.title = "API Results"
        sheet.append(COLUMNS)
        for entry in self._entries:
            sheet.append(self._entry_to_row(entry))

        self._output_path.parent.mkdir(parents=True, exist_ok=True)
        workbook.save(str(self._output_path))
|