amsdal_cli 0.5.1__py3-none-any.whl → 0.5.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- amsdal_cli/__about__.py +1 -1
- amsdal_cli/commands/api_check/__init__.py +0 -0
- amsdal_cli/commands/api_check/command.py +118 -0
- amsdal_cli/commands/api_check/config.py +192 -0
- amsdal_cli/commands/api_check/data_classes.py +13 -0
- amsdal_cli/commands/api_check/operation_log.py +78 -0
- amsdal_cli/commands/api_check/services/__init__.py +0 -0
- amsdal_cli/commands/api_check/services/comparison.py +47 -0
- amsdal_cli/commands/api_check/services/data_factory.py +158 -0
- amsdal_cli/commands/api_check/services/loader.py +11 -0
- amsdal_cli/commands/api_check/services/runner.py +499 -0
- amsdal_cli/commands/api_check/services/storage.py +12 -0
- amsdal_cli/commands/build/services/mixin.py +2 -2
- amsdal_cli/commands/callbacks.py +14 -1
- amsdal_cli/commands/cloud/dependency/sub_commands/dependency_delete.py +0 -2
- amsdal_cli/commands/generate/utils/tests/type_utils.py +3 -3
- amsdal_cli/commands/migrations/sub_commands/apply.py +6 -44
- amsdal_cli/commands/migrations/sub_commands/list.py +26 -25
- amsdal_cli/commands/migrations/utils.py +86 -0
- amsdal_cli/commands/restore/command.py +1 -1
- amsdal_cli/commands/serve/utils.py +1 -1
- amsdal_cli/commands/worker/sub_commands/run.py +2 -2
- amsdal_cli/config/main.py +1 -0
- {amsdal_cli-0.5.1.dist-info → amsdal_cli-0.5.3.dist-info}/METADATA +2 -1
- {amsdal_cli-0.5.1.dist-info → amsdal_cli-0.5.3.dist-info}/RECORD +28 -16
- {amsdal_cli-0.5.1.dist-info → amsdal_cli-0.5.3.dist-info}/WHEEL +0 -0
- {amsdal_cli-0.5.1.dist-info → amsdal_cli-0.5.3.dist-info}/entry_points.txt +0 -0
- {amsdal_cli-0.5.1.dist-info → amsdal_cli-0.5.3.dist-info}/licenses/LICENSE.txt +0 -0
amsdal_cli/__about__.py
CHANGED
|
File without changes
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
from typing import Optional
|
|
3
|
+
|
|
4
|
+
import typer
|
|
5
|
+
|
|
6
|
+
from amsdal_cli.app import app
|
|
7
|
+
from amsdal_cli.commands.api_check.config import ApiCheckConfig
|
|
8
|
+
from amsdal_cli.commands.api_check.services.comparison import check
|
|
9
|
+
from amsdal_cli.commands.api_check.services.loader import load_operation_logs
|
|
10
|
+
from amsdal_cli.commands.api_check.services.runner import ApiRunner
|
|
11
|
+
from amsdal_cli.commands.api_check.services.storage import save
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@app.command(name='api-check')
def api_check(
    target_url: str = typer.Argument(  # noqa: B008
        ...,
        help='Target API base URL to test',
    ),
    config_file: Path = typer.Option(  # noqa: B008
        Path('api-check-config.json'),
        '--config',
        '-c',
        help='Path to configuration file (YAML or JSON) with test cases and auth settings',
        exists=True,
    ),
    compare_url: Optional[str] = typer.Option(  # noqa: B008
        None,
        '--compare-url',
        help='Second API base URL to compare against',
    ),
    compare_logs: Optional[Path] = typer.Option(  # noqa: B008
        None,
        '--compare-logs',
        help='Path to existing logs file to compare against',
        exists=True,
    ),
    output_logs: Optional[Path] = typer.Option(  # noqa: B008
        None,
        '--output',
        '-o',
        help='Path to save output logs',
    ),
) -> None:
    """
    Run API tests against target URL and compare results with another endpoint or saved logs.

    The command requires a configuration file (in YAML or JSON format) that defines:
    - Test cases with inputs and expected outputs
    - Authentication settings
    - Endpoints to test

    Authentication can be configured using environment variables:
    - AMSDAL_API_CHECK_AUTHORIZATION: Authorization token
    - AMSDAL_API_CHECK_EMAIL: Email for authentication
    - AMSDAL_API_CHECK_PASSWORD: Password for authentication

    If the token is invalid and no credentials are provided, the command will exit with an error.

    Examples:

    1. Save logs to a file without comparison:
    ```
    amsdal api-check https://api.example.com --compare-url https://api.example.com --output logs.json
    ```
    This uses the same URL for both target and comparison, which skips the comparison
    but still runs the API checks and saves the logs.

    2. Compare a URL with previously saved logs:
    ```
    amsdal api-check https://api.example.com --compare-logs previous-logs.json
    ```
    This runs API checks against the target URL and compares the results with
    the logs stored in previous-logs.json.

    3. Compare two different URLs:
    ```
    amsdal api-check https://api-prod.example.com --compare-url https://api-staging.example.com
    ```
    This runs API checks against both URLs and compares the results.

    You can combine these options as needed:
    ```
    amsdal api-check https://api-prod.example.com --compare-url https://api-staging.example.com \
    --output comparison.json
    ```
    This compares the two URLs and also saves the logs from the target URL.
    """
    # --compare-url and --compare-logs are mutually exclusive, but exactly one
    # of them is required: there is nothing to compare against otherwise.
    if not compare_url and not compare_logs:
        msg = 'Either --compare-url or --compare-logs must be provided'
        raise typer.BadParameter(msg)

    if compare_url and compare_logs:
        msg = 'Cannot use both --compare-url and --compare-logs simultaneously'
        raise typer.BadParameter(msg)

    # Load configuration using the class method
    config = ApiCheckConfig.load_from_file(config_file)

    # Create runner with the loaded config and always exercise the target URL,
    # even when the comparison below is skipped (so --output still works).
    target_runner = ApiRunner(target_url, config)
    target_logs_data = target_runner.run()
    has_errors = False

    # Comparing a URL against itself is a documented way to only record logs;
    # in that case the comparison step is skipped entirely. When --compare-logs
    # was given, compare_url is None and this condition is always true.
    if compare_url != target_url:
        if compare_url:
            compare_runner = ApiRunner(compare_url, config)
            compare_logs_data = compare_runner.run()
        else:
            compare_logs_data = load_operation_logs(compare_logs)  # type: ignore[arg-type]

        has_errors = check(target_logs_data, compare_logs_data)

    if output_logs:
        save(target_logs_data, destination=output_logs)

    # Non-zero exit code so CI pipelines can fail on detected differences.
    if has_errors:
        raise typer.Exit(code=1)
|
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
import os
|
|
4
|
+
from collections.abc import Callable
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Any
|
|
7
|
+
from typing import ClassVar
|
|
8
|
+
from typing import Literal
|
|
9
|
+
from typing import Optional
|
|
10
|
+
|
|
11
|
+
import yaml
|
|
12
|
+
from pydantic import BaseModel
|
|
13
|
+
from pydantic import Field
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class TransactionData(BaseModel):
    """Test data for a single transaction: its name, input payload, and the
    response expected back from the API (``None`` means "don't check")."""

    # Name of the transaction endpoint to execute.
    transaction_name: str
    # Keyword arguments passed as the transaction's input.
    input_params: dict[str, Any] = Field(default_factory=dict)
    # Expected response payload; left as Any since responses are schema-free here.
    expected_response: Any = None
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class ApiCheckConfig(BaseModel):
    """Configuration for the ``api-check`` command, loaded from a YAML or JSON
    file via :meth:`load_from_file`.

    Note the class/object/transaction fields are declared after the methods;
    pydantic collects them the same as if they were declared first.
    """

    # Class variable to store the config file path the config was loaded from,
    # so save() can write back to the same location by default.
    _config_file: ClassVar[Optional[Path]] = None

    # Sizes used when sampling list endpoints — exact meaning of the three
    # values is defined by the runner; TODO confirm.
    items_per_list: tuple[int, int, int] = (5, 5, 5)
    # Extra headers sent with every request.
    headers: dict[str, str] = Field(default_factory=dict)
    # Headers carrying credentials (masked when logs are recorded).
    auth_headers: dict[str, str] = Field(default_factory=dict)
    # Per-request timeout, in seconds.
    request_timeout: int = 600
    extend_output: bool = True

    # Authentication
    email: Optional[str] = None
    password: Optional[str] = None

    @property
    def token(self) -> Optional[str]:
        """
        Get the token from auth_headers.

        Returns:
            Optional[str]: The token or None if not set
        """
        if not self.auth_headers or 'Authorization' not in self.auth_headers:
            return None

        return self.auth_headers['Authorization']

    @token.setter
    def token(self, value: Optional[str]) -> None:
        """
        Set the token in auth_headers.

        Args:
            value: The token value; None removes the Authorization header
        """
        if value is None:
            if 'Authorization' in self.auth_headers:
                del self.auth_headers['Authorization']
        else:
            self.auth_headers['Authorization'] = value

    @property
    def token_expiry(self) -> Optional[int]:
        """
        Get the token expiry from the token.

        Returns:
            Optional[int]: The token expiry timestamp, or None if not available
        """
        if not self.token:
            return None

        try:
            # Imported lazily so PyJWT is only required when expiry is queried.
            import jwt

            # Signature verification is intentionally skipped: we only need the
            # 'exp' claim for expiry bookkeeping, not a trust decision.
            decoded = jwt.decode(self.token, options={'verify_signature': False})
            return decoded.get('exp')
        except Exception as e:
            logger.warning(f'Failed to decode token: {e}')
            return None

    @property
    def env_authorization(self) -> Optional[str]:
        """Get authorization token from environment variable."""
        return os.environ.get('AMSDAL_API_CHECK_AUTHORIZATION')

    @property
    def env_email(self) -> Optional[str]:
        """Get email from environment variable."""
        return os.environ.get('AMSDAL_API_CHECK_EMAIL')

    @property
    def env_password(self) -> Optional[str]:
        """Get password from environment variable."""
        return os.environ.get('AMSDAL_API_CHECK_PASSWORD')

    @classmethod
    def load_from_file(cls, config_file: str | Path) -> 'ApiCheckConfig':
        """
        Load configuration from a file.

        Args:
            config_file: Path to the configuration file (YAML or JSON)

        Returns:
            ApiCheckConfig: The loaded configuration

        Raises:
            Exception: Re-raises any read/parse/validation error after logging it.
        """
        config_path = Path(config_file)

        try:
            with open(config_path) as f:
                # Determine file format based on extension
                if config_path.suffix.lower() in ['.yaml', '.yml']:
                    config_raw = yaml.safe_load(f)
                elif config_path.suffix.lower() == '.json':
                    config_raw = json.load(f)
                else:
                    # Default to YAML if extension is not recognized
                    # (YAML is a superset of JSON, so JSON content still parses).
                    config_raw = yaml.safe_load(f)

            # Create config instance
            config = cls(**config_raw)
            # Store the config file path on the class so save() can reuse it.
            cls._config_file = config_path

            logger.info(f'Configuration loaded from {config_path}')
            return config
        except Exception as e:
            logger.error(f'Failed to load configuration from {config_path}: {e}')
            raise

    def save(self, config_file: Optional[str | Path] = None) -> None:
        """
        Save the current configuration to a file.

        Email and password values provided through environment variables
        (AMSDAL_API_CHECK_EMAIL, AMSDAL_API_CHECK_PASSWORD) will not be saved to the file.
        Only the token and other configuration values will be persisted.

        Args:
            config_file: Path to save the configuration to. If not provided,
                uses the path from which the config was loaded.
        """
        # Use provided path or fall back to the stored path
        save_path = Path(config_file) if config_file else self.__class__._config_file

        if not save_path:
            logger.warning('No config file specified, cannot save configuration')
            return

        try:
            # Convert config to dict
            config_dict = self.model_dump()

            # Don't save email and password if they were provided through environment variables
            if self.env_email and self.email == self.env_email:
                config_dict.pop('email', None)

            if self.env_password and self.password == self.env_password:
                config_dict.pop('password', None)

            # No need to handle token and token_expiry as they are now properties
            # and not included in the model_dump()

            # Write to file (always JSON, regardless of the load format)
            with open(save_path, 'w') as f:
                json.dump(config_dict, f, indent=2)

            logger.info(f'Configuration saved to {save_path}')
        except Exception as e:
            # Best-effort save: failures are logged, not raised.
            logger.error(f'Failed to save configuration: {e}')

    # classes
    exclude_classes: list[str] | Literal['ALL'] = Field(default_factory=list)

    # objects
    exclude_objects_for_classes: list[str] = Field(default_factory=list)
    objects_list_params_options: list[dict[str, Any]] = Field(default_factory=list)
    object_detail_params_options: list[dict[str, Any]] = Field(default_factory=list)
    object_write_operations_enabled: bool = Field(default=False)
    exclude_object_write_operations_for_classes: list[str] = Field(default_factory=list)
    pre_object_create_hook: Callable[[dict[str, Any]], dict[str, Any]] | dict[str, Any] = Field(default_factory=dict)
    pre_object_update_hook: Callable[[dict[str, Any]], dict[str, Any]] | dict[str, Any] = Field(default_factory=dict)

    # transactions
    ignore_transaction_execution_errors: bool = Field(default=True)
    exclude_transactions: list[str] | Literal['ALL'] = Field(default_factory=list)
    exclude_execute_transactions: list[str] | Literal['ALL'] = Field(default_factory=list)
    transactions_data: list[TransactionData] = Field(default_factory=list)
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
import base64
|
|
2
|
+
import json
|
|
3
|
+
import re
|
|
4
|
+
from json import JSONEncoder
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from pydantic import BaseModel
|
|
8
|
+
from pydantic import Field
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class BytesJSONEncoder(JSONEncoder):
    """JSON encoder that serializes ``bytes`` values as base64 text."""

    def default(self, o: Any) -> Any:
        """Encode bytes as UTF-8 base64; defer everything else to JSONEncoder."""
        if not isinstance(o, bytes):
            return super().default(o)
        return base64.b64encode(o).decode('utf-8')
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class OperationLog(BaseModel):
    """A single recorded HTTP request/response pair used for API comparison."""

    # Request side.
    method: str
    uri: str
    headers: dict[str, str] = Field(default_factory=dict)
    params: dict[str, str] | None = None
    data: Any
    # Response side.
    status_code: int
    response_data: Any
    response_headers: dict[str, str] = Field(default_factory=dict)

    @property
    def id(self) -> str:
        """Identity of the request: method, path, query params, and body.

        Two logs with the same id are considered the "same operation" when
        comparing runs; the response is deliberately excluded.
        """
        _params = json.dumps(self.params, cls=BytesJSONEncoder) if self.params else None
        _data = json.dumps(self.data, cls=BytesJSONEncoder) if self.data else None
        return f'{self.method} {self.uri} {_params} {_data}'

    def __str__(self) -> str:
        # Include the response body in the rendered form. String payloads are
        # emitted verbatim: previously they were replaced by the literal
        # "None", and since _process_response_data returns text, nearly every
        # response body vanished from the line-diff output in check().
        _response = (
            json.dumps(self.response_data, cls=BytesJSONEncoder)
            if not isinstance(self.response_data, str)
            else self.response_data
        )

        return f'{self.id} {self.status_code} {_response}'

    @classmethod
    def from_response(
        cls,
        response: Any,
        auth_headers: dict[str, Any] | None = None,
        *,
        ignore_object_version: bool = False,
        ignore_class_version: bool = False,
    ) -> 'OperationLog':
        """Build a log entry from an HTTP response object.

        Args:
            response: Response object exposing ``request``, ``status_code``,
                ``text`` and ``headers`` (httpx-style — TODO confirm client).
            auth_headers: Header names whose values must be masked in the log.
            ignore_object_version: Replace concrete object versions with
                a placeholder so runs with different versions still match.
            ignore_class_version: Same for class versions.
        """
        # Header names are lower-cased so masking and comparison are
        # case-insensitive.
        request_headers = {key.lower(): value for key, value in response.request.headers.items()}

        if auth_headers:
            # Mask credential values; keep the keys so their presence is logged.
            request_headers.update({key.lower(): '*****' for key in auth_headers})

        return cls(
            method=response.request.method,
            uri=response.request.url.path,
            headers=request_headers,
            params=dict(response.request.url.params.items()),
            data=response.request.content,
            status_code=response.status_code,
            response_data=cls._process_response_data(
                response.text,
                ignore_object_version=ignore_object_version,
                ignore_class_version=ignore_class_version,
            ),
            response_headers=response.headers,
        )

    @classmethod
    def _process_response_data(cls, text: str, *, ignore_object_version: bool, ignore_class_version: bool) -> Any:
        """Normalize version identifiers in the raw response text.

        Concrete versions (anything other than the ALL/LATEST sentinels) are
        replaced with the literal "ignore" so differing versions between two
        runs do not register as differences.
        """
        if ignore_object_version:
            text = re.sub(r'"object_version": "(?!(?:ALL|LATEST)\b).*?"', '"object_version": "ignore"', text)

        if ignore_class_version:
            text = re.sub(r'"class_version": "(?!(?:ALL|LATEST)\b).*?"', '"class_version": "ignore"', text)

        return text
|
|
File without changes
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import difflib
|
|
2
|
+
|
|
3
|
+
from rich import print as rprint
|
|
4
|
+
|
|
5
|
+
from amsdal_cli.commands.api_check.operation_log import OperationLog
|
|
6
|
+
from amsdal_cli.utils.text import rich_error
|
|
7
|
+
from amsdal_cli.utils.text import rich_success
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def compare(
    target_logs: list[OperationLog],
    compare_logs: list[OperationLog],
) -> tuple[list[tuple[OperationLog, OperationLog]], bool]:
    """Walk both log lists positionally and collect mismatching pairs.

    Returns:
        A list of (target, baseline) pairs whose contents differ, and a flag
        that is True when a structural error (missing baseline entries or an
        id mismatch) aborted the walk early.
    """
    mismatched_pairs: list[tuple[OperationLog, OperationLog]] = []
    aborted = False

    for position, entry in enumerate(target_logs):
        # Baseline ran out of entries: report how many target logs are left.
        if position >= len(compare_logs):
            rprint(rich_error(f'Not found: {len(target_logs[position:])}, rest logs: {target_logs[position:]}'))
            aborted = True
            break

        baseline = compare_logs[position]

        # Logs are matched by position, so diverging ids mean the two runs
        # executed different operations — no point comparing further.
        if entry.id != baseline.id:
            rprint(rich_error(f'Log {entry.id} != {baseline.id}'))
            aborted = True
            break

        if entry != baseline:
            mismatched_pairs.append((entry, baseline))

    return mismatched_pairs, aborted
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def check(target_logs: list[OperationLog], compare_logs: list[OperationLog]) -> bool:
    """Compare two log collections and print a line diff for every mismatch.

    Returns:
        True when any difference or structural error was found, False otherwise.
    """
    mismatches, had_error = compare(target_logs, compare_logs)

    if not mismatches and not had_error:
        rprint(rich_success('No differences found!'))
        return False

    line_differ = difflib.Differ()

    rprint(rich_error('Differences found:'))
    for target_entry, baseline_entry in mismatches:
        target_lines = str(target_entry).splitlines()
        baseline_lines = str(baseline_entry).splitlines()
        rprint(rich_error('\n'.join(line_differ.compare(target_lines, baseline_lines))))

    return True
|
|
@@ -0,0 +1,158 @@
|
|
|
1
|
+
import random
|
|
2
|
+
import string
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
from faker import Faker
|
|
6
|
+
|
|
7
|
+
from amsdal_cli.commands.api_check.data_classes import ClassItem
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class DataFactory:
    """Generates random sample payloads for classes from their JSON-schema-like
    property definitions, used to exercise create/update endpoints."""

    # Shared Faker instance; all generated values come from it or random/string.
    _faker = Faker()

    @classmethod
    def build_data(cls, class_item: ClassItem) -> dict[str, Any]:
        """
        Build sample data for a class based on its properties.

        Args:
            class_item: The class item containing property definitions

        Returns:
            A dictionary with generated data for each property
        """
        result = {'_type': class_item.class_name}

        for prop in class_item.properties:
            # NOTE(review): build_update_data reads the property name from
            # 'key' while this method reads 'title' — confirm which field the
            # schema actually provides; one of the two is likely wrong.
            prop_name = prop.get('title', '')
            prop_type = prop.get('type', 'string')

            # Skip if no name
            if not prop_name:
                continue

            # Generate value based on type
            result[prop_name] = cls.generate_value_for_type(prop_type, prop)

        return result

    @classmethod
    def build_update_data(cls, class_item: ClassItem, data: dict[str, Any]) -> dict[str, Any]:
        """
        Build updated data for an existing object.

        Args:
            class_item: The class item containing property definitions
            data: The existing data to update

        Returns:
            A dictionary with updated data
        """
        result = {'_type': class_item.class_name}

        # Copy metadata if present
        if '_metadata' in data:
            result['_metadata'] = data['_metadata']

        for prop in class_item.properties:
            # NOTE(review): name is read from 'key' here but from 'title' in
            # build_data — confirm against the actual property schema.
            prop_name = prop.get('key', '')
            prop_type = prop.get('type', 'string')

            # Skip if no name
            if not prop_name:
                continue

            # For update, we'll modify some values but keep others
            threshold = 0.5
            if prop_name in data and random.random() < threshold:  # noqa: S311
                # Keep 50% of values the same
                result[prop_name] = data[prop_name]
            else:
                # Generate new values for the rest
                result[prop_name] = cls.generate_value_for_type(prop_type, prop)

        return result

    @classmethod
    def generate_value_for_type(
        cls,
        type_name: str,
        prop_info: dict[str, Any],
    ) -> Any:
        """
        Generate a random value for a given type.

        Args:
            type_name: The type of the property
            prop_info: Additional property information

        Returns:
            A randomly generated value appropriate for the type,
            or None for unrecognized (reference) types
        """
        # Check if there's a default value — a declared default always wins
        # over random generation.
        if 'default' in prop_info:
            return prop_info['default']

        # Generate based on type
        type_lower = type_name.lower()

        if type_lower == 'string':
            return cls._faker.text(max_nb_chars=50)

        elif type_lower == 'integer':
            return cls._faker.random_int(min=1, max=1000)

        elif type_lower == 'number':
            return cls._faker.pyfloat(positive=True, right_digits=2, max_value=1000)

        elif type_lower == 'boolean':
            return cls._faker.boolean()

        elif type_lower == 'date':
            return cls._faker.date_object().isoformat()

        elif type_lower == 'datetime':
            return cls._faker.date_time().isoformat()

        elif type_lower == 'array':
            # Check if items type is specified
            if 'items' in prop_info and prop_info['items'] is not None:
                items_info = prop_info['items']
                item_type = items_info.get('type', 'string')
                # Generate array with proper item types (1-3 elements)
                return [
                    cls.generate_value_for_type(item_type, items_info)
                    for _ in range(random.randint(1, 3))  # noqa: S311
                ]
            else:
                # Fallback to array of strings if no items info
                return [cls._faker.word() for _ in range(random.randint(1, 3))]  # noqa: S311

        elif type_lower in {'dictionary', 'object'}:
            # Check if key and value types are specified
            if 'items' in prop_info and prop_info['items'] is not None:
                items_info = prop_info['items']
                key_info = items_info.get('key', {'type': 'string'})
                value_info = items_info.get('value', {'type': 'string'})

                # Generate dictionary with proper key and value types
                result = {}
                for _ in range(random.randint(1, 3)):  # noqa: S311
                    # For keys, we need to ensure they're strings or can be converted to strings
                    key = cls.generate_value_for_type(key_info.get('type', 'string'), key_info)
                    if not isinstance(key, str):
                        key = str(key)
                    value = cls.generate_value_for_type(value_info.get('type', 'string'), value_info)
                    result[key] = value
                return result
            else:
                # Fallback to dictionary of strings if no items info
                return {cls._faker.word(): cls._faker.word() for _ in range(random.randint(1, 3))}  # noqa: S311

        elif type_lower == 'binary':
            # Generate a small binary string
            return ''.join(random.choice(string.ascii_letters) for _ in range(10))  # noqa: S311

        # Any other type is likely a reference to another class; no sample
        # value is generated for it and the field is populated with None.
        return None
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
from amsdal_cli.commands.api_check.operation_log import OperationLog
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def load_operation_logs(logs_path: Path) -> list[OperationLog]:
    """Deserialize a JSON logs file into a list of OperationLog models."""
    raw_entries = json.loads(logs_path.read_text())
    return [OperationLog(**entry) for entry in raw_entries]
|