smartpush 1.2.8__tar.gz → 1.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: smartpush
3
- Version: 1.2.8
3
+ Version: 1.3.0
4
4
  Summary: 用于smartpush自动化测试工具包
5
5
  Author: 卢泽彬、邵宇飞、周彦龙
@@ -0,0 +1,44 @@
1
+ # SmartPush_AutoTest
2
+
3
+
4
+
5
+ ## Getting started
6
+
7
+ ## 打包/上传的依赖
8
+ ```
9
+ pip install wheel
10
+ pip install twine
11
+ ```
12
+
13
+
14
+ ## 打包-打包前记得修改版本号
15
+ ```
16
+ python setup.py sdist bdist_wheel
17
+ ```
18
+
19
+
20
+ ## 上传到PyPI的命令
21
+ ```
22
+ twine upload dist/*
23
+ ```
24
+
25
+ # 平台调用demo
26
+ ```
27
+ import json # import 请置于行首
28
+ from smartpush.export.basic import ExcelExportChecker
29
+ from smartpush.export.basic import GetOssUrl
30
+ oss=GetOssUrl.get_oss_address_with_retry(vars['queryOssId'], "${em_host}", json.loads(requestHeaders))
31
+ result = ExcelExportChecker.check_excel_all(expected_oss=oss,actual_oss=vars['exportedOss'],ignore_sort =True)
32
+ assert result
33
+ ```
34
+ ## check_excel_all() 支持拓展参数
35
+ 1、check_type = "including" 如果需要预期结果包含可传 eg.联系人导出场景可用,flow导出场景配合使用
36
+ 2、ignore_sort = 0 如果需要忽略内部的行排序问题可传,eg.email热点点击数据导出无排序可用,传指定第几列,0是第一列
37
+ 3、ignore_sort_sheet_name = "url点击" 搭配ignore_sort使用,指定哪个sheet忽略排序,不传默认所有都排序,参数大小写不敏感(url点击-URL点击)
38
+ 4、skiprows = 1 传1可忽略第一行, eg.如flow的导出可用,动态表头不固定时可以跳过读取第一行
39
+
40
+ ## get_oss_address_with_retry(target_id, url, requestHeader, requestParam=None, is_import=False, **kwargs)
41
+ 1、is_import 导入校验是否成功传True,否则默认都是导出
42
+ 2、**kwargs 参数支持重试次数
43
+ tries = 30 # 重试次数
44
+ delay = 2 # 延迟时间,单位s
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
2
2
 
3
3
  setup(
4
4
  name='smartpush',
5
- version='1.2.8',
5
+ version='1.3.0',
6
6
  description='用于smartpush自动化测试工具包',
7
7
  author='卢泽彬、邵宇飞、周彦龙',
8
8
  packages=find_packages(),
@@ -2,6 +2,7 @@ import copy
2
2
  import json
3
3
  from urllib.parse import unquote
4
4
  from smartpush.export.basic.ReadExcel import *
5
+ from datetime import datetime
5
6
  from smartpush.utils import DataTypeUtils
6
7
 
7
8
  """
@@ -96,25 +97,59 @@ def check_excel_content_form_dict(actual, expected, **kwargs):
96
97
  return check_excel_content(actual, expected)
97
98
 
98
99
 
99
- def check_excel_content_including_expected(actual, expected, **kwargs):
100
+ def check_excel_content_including_expected(actual, expected, expected_oss, **kwargs):
100
101
  """
101
102
  通过 OSS URL 比较 Excel 内容,期望是包含的结果,actual传的是生成的oss
102
103
  """
103
104
  actual, expected = read_excel_and_write_to_dict(actual, **kwargs), read_excel_and_write_to_dict(
104
105
  expected, **kwargs)
105
106
  # 判断是否存在差异
106
- missing_items = find_missing_elements(expected.values(), actual.values())
107
+ if kwargs.get("export_type") == "flow":
108
+ missing_items = assert_flow(expected, actual, expected_oss)
109
+ else:
110
+ missing_items = find_missing_elements(expected.values(), actual.values())
107
111
  return (False, {"与期望结果存在差异": missing_items}) if missing_items else (True, "校验期望结果包含校验通过")
108
112
 
109
113
 
110
114
  def find_missing_elements(list1, list2):
111
115
  missing = []
116
+ sorted_list1 = sorted(list1)
117
+ sorted_list2 = sorted(list2)
118
+ if sorted_list1 == sorted_list2:
119
+ return missing
112
120
  for element in list1:
113
121
  if element not in list2:
114
122
  missing.append(element)
115
123
  return missing
116
124
 
117
125
 
126
+ def assert_flow(expected, actual, expected_oss):
127
+ # 判断预期数据in实际导出的数据
128
+ ex_sheet0 = expected.get("sheet0", [])
129
+ ac_sheet0 = actual.get("sheet0", [])
130
+ ex_sheet1 = expected.get("Flow node data by sending time", [])
131
+ ac_sheet1 = actual.get("Flow node data by sending time", [])
132
+ differences = []
133
+ res = []
134
+ ex_sheet1.append(ex_sheet0)
135
+ ac_sheet1.append(ac_sheet0)
136
+ for i in ac_sheet1:
137
+ if i not in ex_sheet1:
138
+ differences.append(i)
139
+ # 判断对应的列数据
140
+ for diff in differences:
141
+ if len(diff) != 24:
142
+ res.append("列预期不正确")
143
+ # 判断多出的行,获取今天的日期,与预期日期对比
144
+ ex_data = expected_oss.split("/")[4].split("-")
145
+ today = datetime.today()
146
+ target_date = datetime(int(ex_data[0]), int(ex_data[1]), int(ex_data[2]))
147
+ diff_days = (today - target_date).days
148
+ if len(differences) != diff_days:
149
+ res.append("日期预期不正确")
150
+ return res
151
+
152
+
118
153
  def check_excel_content_form_list(actual, expected):
119
154
  """
120
155
  通过 内容 比较 Excel 内容,不包含表头
@@ -135,7 +170,8 @@ def check_excel_all(actual_oss, expected_oss, check_type=None, **kwargs):
135
170
  flag1, name_result = check_excel_name(actual_oss, expected_oss)
136
171
  flag3, header_result = check_excel_header(actual_data_copy, expected_data_copy, type=file_type, **kwargs)
137
172
  if check_type == "including":
138
- flag2, content_result = check_excel_content_including_expected(actual, expected, type=file_type, **kwargs)
173
+ flag2, content_result = check_excel_content_including_expected(actual, expected, expected_oss, type=file_type,
174
+ **kwargs)
139
175
  else:
140
176
  flag2, content_result = check_excel_content_form_dict(actual, expected, type=file_type, **kwargs)
141
177
  print(json.dumps(
@@ -144,7 +180,7 @@ def check_excel_all(actual_oss, expected_oss, check_type=None, **kwargs):
144
180
  return all([flag1, flag2, flag3])
145
181
 
146
182
 
147
- def check_and_get_file_suffix_name(actual_oss, expected_oss) -> str:
183
+ def check_and_get_file_suffix_name(actual_oss, expected_oss, **kwargs) -> str:
148
184
  """
149
185
  校验并获取oss的后缀类型
150
186
  @param actual_oss:
@@ -153,6 +189,8 @@ def check_and_get_file_suffix_name(actual_oss, expected_oss) -> str:
153
189
  """
154
190
  actual_file_suffix_name = get_file_suffix_name(actual_oss)
155
191
  expected_file_suffix_name = get_file_suffix_name(expected_oss)
192
+ if actual_oss == expected_oss and kwargs.get("test", False):
193
+ raise Exception("oss链接不允许相同,请检查oss链接是否为相同链接,调试需要请传参数test=True")
156
194
  try:
157
195
  assert actual_file_suffix_name == expected_file_suffix_name
158
196
  return actual_file_suffix_name
@@ -238,11 +276,6 @@ def check_excel_header(actual, expected, **kwargs):
238
276
  @return:
239
277
  """
240
278
  try:
241
- # if all([isinstance(actual, str), isinstance(expected, str)]):
242
- # actual1, expected1 = read_excel_header(read_excel_from_oss(actual), **kwargs), read_excel_header(
243
- # read_excel_from_oss(
244
- # expected), **kwargs)
245
- # else:
246
279
  actual1, expected1 = read_excel_header(actual, return_type='dict', **kwargs), read_excel_header(expected,
247
280
  return_type='dict',
248
281
  **kwargs)
@@ -332,12 +365,11 @@ def compare_lists(actual_dict_list, expected_dict_list):
332
365
  def check_field_format(actual_oss, **kwargs):
333
366
  """
334
367
  逐个校验字段类型
335
- **kwargs: fileds为需检查字段,结构为dict,如{"0": {"1": "email", "2": "time"}},
368
+ **kwargs: fileds为需检查字段,结构为dict,如{0: {0: "email", 1: "time"}},
336
369
  即校验第一个sheet第二个字段需符合email格式,第二个字段需符合time格式
337
370
  """
338
371
  # 获取oss内容并存入dict
339
372
  actual = read_excel_from_oss(actual_oss)
340
- actual_data_copy = copy.deepcopy(actual)
341
373
  actual_dict = read_excel_and_write_to_dict(actual, **kwargs)
342
374
  # 解析参数并校验字段类型
343
375
  errors = []
@@ -357,5 +389,5 @@ def check_field_format(actual_oss, **kwargs):
357
389
  errors.append(
358
390
  f"{actual_dict_key[key]} 表, 第{num}行{filed_key}列{kwargs['fileds'][key][filed_key]}格式不符合规范, 值为:{row[filed_key]}")
359
391
  num += 1
360
- print(errors)
392
+ print(errors if len(errors) > 0 else "都校验成功")
361
393
  return False if len(errors) > 0 else True
@@ -0,0 +1,154 @@
1
+ import json
2
+ import urllib
3
+
4
+ import requests
5
+ from requests import HTTPError
6
+ from tenacity import retry, stop_after_attempt, wait_fixed, RetryError, stop_any
7
+ from smartpush.utils.StringUtils import StringUtils
8
+
9
+ export_requestParam = {
10
+ "page": 1,
11
+ "pageSize": 20,
12
+ "type": "EXPORT",
13
+ "status": None,
14
+ "startTime": None,
15
+ "endTime": None
16
+ }
17
+
18
+ import_requestParam = {
19
+ "page": 1,
20
+ "pageSize": 20,
21
+ "type": "IMPORT",
22
+ "status": None,
23
+ "startTime": None,
24
+ "endTime": None
25
+ }
26
+
27
+ manually_stop = False # 手动停止标识
28
+
29
+
30
+ # 用于记录第几次重试,无需修改
31
+ def log_attempt(retry_state):
32
+ """
33
+ 回调函数,在每次重试时记录并打印重试次数
34
+ """
35
+ attempt_number = retry_state.attempt_number
36
+ print(f"当前重试次数: {attempt_number}")
37
+
38
+
39
+ # 自定义停止条件函数
40
+ def should_stop(retry_state):
41
+ if manually_stop:
42
+ print("数据导入/导出状态是失败,立即停止重试")
43
+ return manually_stop
44
+
45
+
46
+ def get_oss_address_with_retry(target_id, url, requestHeader, requestParam=None, is_import=False, **kwargs) -> str:
47
+ """
48
+ 创建带有动态重试配置的获取 OSS 地址
49
+ **kwargs 可传参:tries=10, delay=2, backoff=1
50
+ :param is_import: 如果是导入的则传True
51
+ :param requestParam:
52
+ :param url:
53
+ :param target_id:
54
+ :param requestHeader:
55
+ :return: 带有重试配置的获取 OSS 地址的
56
+ """
57
+ if requestParam is None:
58
+ requestParam = import_requestParam if is_import else export_requestParam
59
+ tries = kwargs.get('tries', 30) # 重试次数
60
+ delay = kwargs.get('delay', 2)
61
+ _url = url + '/bulkOps/query'
62
+ if StringUtils.is_empty(target_id):
63
+ raise ValueError("缺少target_id参数")
64
+
65
+ def bulkOps_query(_url, _requestHeader, _requestParam):
66
+ response = requests.request(url=_url, headers=_requestHeader, data=json.dumps(_requestParam),
67
+ method="post")
68
+ response.raise_for_status()
69
+ result = response.json()
70
+ if result['code'] != 1:
71
+ raise HTTPError(f"{result}")
72
+ return result
73
+
74
+ @retry(stop=stop_after_attempt(tries) | stop_any(should_stop), wait=wait_fixed(delay), after=log_attempt)
75
+ def get_oss_address():
76
+ try:
77
+ result = bulkOps_query(_url, requestHeader, requestParam)
78
+ id_url_dict = {item["id"]: item["url"] for item in result["resultData"]["datas"]}
79
+ id_status_dict = {item["id"]: [item["status"], item["reason"]] for item in result["resultData"]["datas"]}
80
+ if target_id in id_url_dict:
81
+ if id_status_dict[target_id][0] == "FAIL":
82
+ reason = id_status_dict[target_id][1]
83
+ print(f"导出id {target_id}失败,原因是 [{reason}]")
84
+ global manually_stop
85
+ manually_stop = True
86
+ if len(id_url_dict[target_id]) == 1:
87
+ target_url = urllib.parse.unquote(id_url_dict[target_id][0])
88
+ print(f"target_id [{target_id}] 的oss链接为: {target_url}")
89
+ return target_url
90
+ else:
91
+ raise ValueError(f"存在多条 id 为 {target_id} 的记录,记录为:{id_url_dict[target_id]}")
92
+ else:
93
+ raise ValueError(f"未找到 导出id 为 {target_id} 的记录,未包含有效的 OSS 地址")
94
+ except (KeyError, json.JSONDecodeError) as e:
95
+ raise ValueError(f"响应数据格式错误,响应结果: {result},异常: {e}")
96
+ except requests.RequestException as e:
97
+ print(f"请求发生异常: {e},正在重试...")
98
+ raise
99
+
100
+ @retry(stop=stop_after_attempt(tries) | stop_any(should_stop), wait=wait_fixed(delay), after=log_attempt)
101
+ def get_import_success():
102
+ target_id_list = []
103
+ try:
104
+ result = bulkOps_query(_url, requestHeader, requestParam)
105
+ for item in result["resultData"]["datas"]:
106
+ if item.get("id") == int(target_id):
107
+ status = item.get("status")
108
+ reason = item.get("reason")
109
+ if status == "FAIL":
110
+ print(f"导入id {target_id}失败,原因是 [{reason}]")
111
+ global manually_stop
112
+ manually_stop = True
113
+ assert status == "SUCCESS"
114
+ return f"导入id {target_id} 导入成功"
115
+ else:
116
+ target_id_list.append(item.get("id"))
117
+ if target_id not in target_id_list:
118
+ raise ValueError(f"未找到 导入id 为 {target_id} 的记录,请检查是否发起导入")
119
+ except AssertionError:
120
+ raise AssertionError(f"导入id 为 {target_id} 的记录非SUCCESS,状态为:{status}")
121
+ except (KeyError, json.JSONDecodeError) as e:
122
+ raise ValueError(f"响应数据格式错误,响应结果: {result},异常: {e}")
123
+ except requests.RequestException as e:
124
+ print(f"请求发生异常: {e},正在重试...")
125
+ raise
126
+ except Exception:
127
+ raise
128
+
129
+ def cancel_export_file(_target_id):
130
+ """
131
+ 用于失败后取消导出/导入
132
+ :param _target_id:
133
+ :return:
134
+ """
135
+ cancel_url = url + '/bulkOps/cancel'
136
+ response = requests.request(url=cancel_url, headers=requestHeader, params={'id': _target_id}, method="get")
137
+ response.raise_for_status()
138
+ result = response.json()
139
+ if is_import:
140
+ print(f"导入文件失败,取消 {_target_id} 的导入记录,响应:{result}")
141
+ else:
142
+ print(f"获取Oss Url失败,取消 {_target_id} 的导出记录,响应:{result}")
143
+ return result
144
+
145
+ try:
146
+ if is_import:
147
+ return get_import_success()
148
+ else:
149
+ return get_oss_address()
150
+ except Exception as e:
151
+ # print(f"最终失败,错误信息: {e}")
152
+ if isinstance(e, RetryError):
153
+ cancel_export_file(target_id)
154
+ return f"执行失败,错误信息: {e}"
@@ -0,0 +1,41 @@
1
+ # -*- codeing = utf-8 -*-
2
+ # @Time :2025/2/20 00:27
3
+ # @Author :luzebin
4
+ import pandas as pd
5
+
6
+ from smartpush.export.basic.ExcelExportChecker import check_excel_all, read_excel_and_write_to_list, \
7
+ read_excel_from_oss, read_excel_csv_data, check_excel
8
+ from smartpush.export.basic.ReadExcel import read_excel_from_oss
9
+ from smartpush.export.basic.ReadExcel import read_excel_and_write_to_dict
10
+ from smartpush.export.basic.GetOssUrl import get_oss_address_with_retry, export_requestParam, import_requestParam
11
+
12
+ if __name__ == '__main__':
13
+ oss1 = "https://cdn.smartpushedm.com/material_ec2/2025-02-25/58c4a3a885884741b22380c360ac2894/【自动化导出】营销活动URL点击与热图.xlsx"
14
+ oss2 = "https://cdn.smartpushedm.com/material_ec2/2025-02-27/a5e18e3b3a83432daca871953cb8471b/【自动化导出】营销活动URL点击与热图.xlsx"
15
+ # # print(check_excel_all(oss1, oss1))
16
+ oss3 = "https://cdn.smartpushedm.com/material_ec2/2025-02-25/58c4a3a885884741b22380c360ac2894/【自动化导出】营销活动URL点击与热图.xlsx"
17
+ oss4 = "https://cdn.smartpushedm.com/material_ec2/2025-02-26/58cee630b4c84eec9572b867af4ce692/%E3%80%90%E8%87%AA%E5%8A%A8%E5%8C%96%E5%AF%BC%E5%87%BA%E3%80%91%E8%90%A5%E9%94%80%E6%B4%BB%E5%8A%A8URL%E7%82%B9%E5%87%BB%E4%B8%8E%E7%83%AD%E5%9B%BE.xlsx"
18
+ expected_oss = "https://cdn.smartpushedm.com/material_ec2/2025-02-26/757df7e77ce544e193257c0da35a4983/%E3%80%90%E8%87%AA%E5%8A%A8%E5%8C%96%E5%AF%BC%E5%87%BA%E3%80%91%E8%90%A5%E9%94%80%E6%B4%BB%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
19
+ actual_oss = "https://cdn.smartpushedm.com/material_ec2/2025-02-26/757df7e77ce544e193257c0da35a4983/%E3%80%90%E8%87%AA%E5%8A%A8%E5%8C%96%E5%AF%BC%E5%87%BA%E3%80%91%E8%90%A5%E9%94%80%E6%B4%BB%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
20
+
21
+ e_person_oss1 = "https://cdn.smartpushedm.com/material_ec2/2025-02-27/b48f34b3e88045d189631ec1f0f23d51/%E5%AF%BC%E5%87%BA%E5%85%A8%E9%83%A8%E5%AE%A2%E6%88%B7.csv"
22
+ a_person_oss2 = "https://cdn.smartpushedm.com/material_ec2/2025-02-27/c50519d803c04e3b9b52d9f625fed413/%E5%AF%BC%E5%87%BA%E5%85%A8%E9%83%A8%E5%AE%A2%E6%88%B7.csv"
23
+ host = "https://test.smartpushedm.com/api-em-ec2"
24
+ reqHeaders = {
25
+ 'cookie': 'osudb_appid=SMARTPUSH;osudb_oar=#01#SID0000123BL6ciRHRKpvOm/vWT9OS9brpfhSErcOdgeXJc0RJFopg83z0N3RzDE4w2DE5cQj6ALkLP8vG6Rhs0sR7NfToZvCLWXdQtYk6DJoKe4tqdo4kNcIc9F5obzLuyRmwGy9CZKcg/bMlmNyDZwBI1SIO;osudb_subappid=1;osudb_uid=4213785247;ecom_http_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3NDM4NDI1NTIsImp0aSI6IjEwNGQwOTVjLTA3MDItNDI5MC1iZjQzLWQ4YTVhNjdmNDM2NSIsInVzZXJJbmZvIjp7ImlkIjowLCJ1c2VySWQiOiI0MjEzNzg1MjQ3IiwidXNlcm5hbWUiOiIiLCJlbWFpbCI6ImZlbGl4LnNoYW9Ac2hvcGxpbmVhcHAuY29tIiwidXNlclJvbGUiOiJvd25lciIsInBsYXRmb3JtVHlwZSI6Nywic3ViUGxhdGZvcm0iOjEsInBob25lIjoiIiwibGFuZ3VhZ2UiOiJ6aC1oYW5zLWNuIiwiYXV0aFR5cGUiOiIiLCJhdHRyaWJ1dGVzIjp7ImNvdW50cnlDb2RlIjoiQ04iLCJjdXJyZW5jeSI6IkpQWSIsImN1cnJlbmN5U3ltYm9sIjoiSlDCpSIsImRvbWFpbiI6InNtYXJ0cHVzaDQubXlzaG9wbGluZXN0Zy5jb20iLCJsYW5ndWFnZSI6ImVuIiwibWVyY2hhbnRFbWFpbCI6ImZlbGl4LnNoYW9Ac2hvcGxpbmUuY29tIiwibWVyY2hhbnROYW1lIjoiU21hcnRQdXNoNF9lYzJf6Ieq5Yqo5YyW5bqX6ZO6IiwicGhvbmUiOiIiLCJzY29wZUNoYW5nZWQiOmZhbHNlLCJzdGFmZkxhbmd1YWdlIjoiemgtaGFucy1jbiIsInN0YXR1cyI6MCwidGltZXpvbmUiOiJBc2lhL01hY2FvIn0sInN0b3JlSWQiOiIxNjQ0Mzk1OTIwNDQ0IiwiaGFuZGxlIjoic21hcnRwdXNoNCIsImVudiI6IkNOIiwic3RlIjoiIiwidmVyaWZ5IjoiIn0sImxvZ2luVGltZSI6MTc0MTI1MDU1MjI4Miwic2NvcGUiOlsiZW1haWwtbWFya2V0IiwiY29va2llIiwic2wtZWNvbS1lbWFpbC1tYXJrZXQtbmV3LXRlc3QiLCJlbWFpbC1tYXJrZXQtbmV3LWRldi1mcyIsImFwaS11Yy1lYzIiLCJhcGktc3UtZWMyIiwiYXBpLWVtLWVjMiIsImZsb3ctcGx1Z2luIl0sImNsaWVudF9pZCI6ImVtYWlsLW1hcmtldCJ9.SjeTCLaZqbEFEFNeKe_EjrwmR0LdEYO9697ymVNzf5Q;',
26
+ 'Content-Type': 'application/json'}
27
+ actual_oss = get_oss_address_with_retry(11911, host, reqHeaders, import_requestParam, is_import=True)
28
+ actual_oss = get_oss_address_with_retry(11896, host, reqHeaders, export_requestParam)
29
+ # # res=read_excel_and_write_to_dict(read_excel_from_oss(actual_oss))
30
+ # # print(res)
31
+ # # print(read_excel_and_write_to_dict(read_excel_from_oss(oss1), type=".xlsx"))
32
+ # print(check_excel(check_type="all", actual_oss=actual_oss, expected_oss=expected_oss))
33
+ # print(check_excel_all(actual_oss=oss1, expected_oss=oss2,skiprows =1))
34
+ # print(check_excel_all(actual_oss=oss1, expected_oss=oss2, ignore_sort=0))
35
+ # print(check_excel_all(actual_oss=a_person_oss2, expected_oss=e_person_oss1, check_type="including"))
36
+ # print(check_excel_all(actual_oss=e_person_oss1, expected_oss=a_person_oss2, check_type="person"))
37
+ # read_excel_csv_data(type=)
38
+
39
+ # flow_ex="https://cdn.smartpushedm.com/material_ec2/2025-02-20/ad9e1534b8134dd098e96813f17d4b4d/%E6%B5%8B%E8%AF%95flow%E6%95%B0%E6%8D%AE%E6%8A%A5%E5%91%8A%E5%AF%BC%E5%87%BA%E5%8B%BF%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
40
+ # flow_ac="https://cdn.smartpushedm.com/material_ec2/2025-03-04/0c8f919f28d4455f9908f905aada7efb/测试flow数据报告导出勿动数据概览.xlsx"
41
+ # print(check_excel_all(actual_oss=flow_ac, expected_oss=flow_ex, check_type="including",export_type="flow",skiprows=1))
@@ -1,5 +1,5 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: smartpush
3
- Version: 1.2.8
3
+ Version: 1.3.0
4
4
  Summary: 用于smartpush自动化测试工具包
5
5
  Author: 卢泽彬、邵宇飞、周彦龙
smartpush-1.2.8/README.md DELETED
@@ -1,38 +0,0 @@
1
- # SmartPush_AutoTest
2
-
3
-
4
-
5
- ## Getting started
6
-
7
- ## 打包/上传的依赖
8
- ```
9
- pip install wheel
10
- pip install twine
11
- ```
12
-
13
-
14
- ## 打包-打包前记得修改版本号
15
- ```
16
- python setup.py sdist bdist_wheel
17
- ```
18
-
19
-
20
- ## 上传到pipy的命令
21
- ```
22
- twine upload dist/*
23
- ```
24
-
25
- # 平台调用demo
26
- ```
27
- import json # import 请置于行首
28
- from smartpush.export.basic import ExcelExportChecker
29
- from smartpush.export.basic import GetOssUrl
30
- oss=GetOssUrl.get_oss_address_with_retry(vars['queryOssId'], "${em_host}", json.loads(requestHeaders))
31
- result = ExcelExportChecker.check_excel_all(expected_oss=oss,actual_oss=vars['exportedOss'],ignore_sort =True)
32
- assert result
33
- ```
34
- ## check_excel_all() 支持拓展参数
35
- ### check_type = "including" 如果需要预期结果包含可传 eg.联系人导出场景可用
36
- ### ignore_sort = 0 如果需要忽略内部的行排序问题可传,eg.email热点点击数据导出无排序可用,传指定第几列,0是第一列
37
- ### ignore_sort_sheet_name = "url点击" 搭配ignore_sort使用,指定哪个sheet忽略排序,不传默认所有都排序,参数大小写不敏感(url点击-URL点击)
38
- ### skiprows = 1 传1可忽略第一行, eg.如flow的导出可用,动态表头不固定时可以跳过读取第一行
@@ -1,88 +0,0 @@
1
- import json
2
- import urllib
3
-
4
- import requests
5
- from tenacity import retry, stop_after_attempt, wait_fixed, RetryError
6
- from smartpush.utils.StringUtils import StringUtils
7
-
8
- _requestParam = {
9
- "page": 1,
10
- "pageSize": 20,
11
- "type": "EXPORT",
12
- "status": None,
13
- "startTime": None,
14
- "endTime": None
15
- }
16
-
17
-
18
- # 用于技术第几次重试,无需修改
19
- def log_attempt(retry_state):
20
- """
21
- 回调函数,在每次重试时记录并打印重试次数
22
- """
23
- attempt_number = retry_state.attempt_number
24
- print(f"当前重试次数: {attempt_number}")
25
-
26
-
27
- def get_oss_address_with_retry(target_id, url, requestHeader, requestParam=None, **kwargs) -> str:
28
- """
29
- 创建带有动态重试配置的获取 OSS 地址
30
- **kwargs 可传参:tries=10, delay=2, backoff=1
31
- :param requestParam:
32
- :param url:
33
- :param target_id:
34
- :param requestHeader:
35
- :return: 带有重试配置的获取 OSS 地址的
36
- """
37
- if requestParam is None:
38
- requestParam = _requestParam
39
- tries = kwargs.get('tries', 30) # 重试次数
40
- delay = kwargs.get('delay', 2)
41
-
42
- @retry(stop=stop_after_attempt(tries), wait=wait_fixed(delay), after=log_attempt)
43
- def get_oss_address():
44
- _url = url + '/bulkOps/query'
45
- result = None
46
- if StringUtils.is_empty(target_id):
47
- raise ValueError("缺少target_id参数")
48
- try:
49
- response = requests.request(url=_url, headers=requestHeader, data=json.dumps(requestParam),
50
- method="post")
51
- response.raise_for_status()
52
- result = response.json()
53
- id_url_dict = {item["id"]: item["url"] for item in result["resultData"]["datas"]}
54
- if target_id in id_url_dict:
55
- if len(id_url_dict[target_id]) == 1:
56
- target_url = urllib.parse.unquote(id_url_dict[target_id][0])
57
- print(f"target_id [{target_id}] 的oss链接为: {target_url}")
58
- return target_url
59
- else:
60
- raise ValueError(f"存在多条 id 为 {target_id} 的记录,记录为:{id_url_dict[target_id]}")
61
- else:
62
- raise ValueError(f"未找到 id 为 {target_id} 的记录,未包含有效的 OSS 地址,")
63
- except (KeyError, json.JSONDecodeError) as e:
64
- raise ValueError(f"响应数据格式错误,响应结果: {result},异常: {e}")
65
- except requests.RequestException as e:
66
- print(f"请求发生异常: {e},正在重试...")
67
- raise
68
-
69
- def cancel_export_file(_target_id):
70
- """
71
- 用于失败后取消导出
72
- :param _target_id:
73
- :return:
74
- """
75
- cancel_url = url + '/bulkOps/cancel'
76
- response = requests.request(url=cancel_url, headers=requestHeader, params={'id': _target_id}, method="get")
77
- response.raise_for_status()
78
- result = response.json()
79
- print(f"获取Oss Url失败,取消 {_target_id} 的导出记录,响应:{result}")
80
- return result
81
-
82
- try:
83
- return get_oss_address()
84
- except Exception as e:
85
- # print(f"最终失败,错误信息: {e}")
86
- if isinstance(e, RetryError):
87
- cancel_export_file(target_id)
88
- return None
@@ -1,36 +0,0 @@
1
- # -*- codeing = utf-8 -*-
2
- # @Time :2025/2/20 00:27
3
- # @Author :luzebin
4
- import pandas as pd
5
-
6
- from smartpush.export.basic import ExcelExportChecker
7
- from smartpush.export.basic.ReadExcel import read_excel_from_oss
8
- from smartpush.export.basic.ReadExcel import read_excel_and_write_to_dict
9
- from smartpush.export.basic.GetOssUrl import get_oss_address_with_retry
10
- from smartpush.utils.DataTypeUtils import DataTypeUtils
11
-
12
- if __name__ == '__main__':
13
- oss1 = "https://cdn.smartpushedm.com/material_ec2/2025-02-26/31c1a577af244c65ab9f9a984c64f3d9/ab%E5%BC%B9%E7%AA%97%E6%B5%8B%E8%AF%952.10%E5%88%9B%E5%BB%BA-%E6%9C%89%E5%85%A8%E9%83%A8%E6%95%B0%E6%8D%AE%E9%94%80%E5%94%AE%E9%A2%9D%E6%98%8E%E7%BB%86%E6%95%B0%E6%8D%AE.xlsx"
14
- oss2 = "https://cdn.smartpushedm.com/material_ec2/2025-02-26/31c1a577af244c65ab9f9a984c64f3d9/ab%E5%BC%B9%E7%AA%97%E6%B5%8B%E8%AF%952.10%E5%88%9B%E5%BB%BA-%E6%9C%89%E5%85%A8%E9%83%A8%E6%95%B0%E6%8D%AE%E9%94%80%E5%94%AE%E9%A2%9D%E6%98%8E%E7%BB%86%E6%95%B0%E6%8D%AE.xlsx"
15
- # # print(check_excel_all(oss1, oss1))
16
- oss3 = "https://cdn.smartpushedm.com/material_ec2/2025-02-27/a5e18e3b3a83432daca871953cb8471b/【自动化导出】营销活动URL点击与热图.xlsx"
17
- oss4 = "https://cdn.smartpushedm.com/material_ec2/2025-02-25/58c4a3a885884741b22380c360ac2894/【自动化导出】营销活动URL点击与热图.xlsx"
18
- expected_oss = "https://cdn.smartpushedm.com/material_ec2/2025-02-26/757df7e77ce544e193257c0da35a4983/%E3%80%90%E8%87%AA%E5%8A%A8%E5%8C%96%E5%AF%BC%E5%87%BA%E3%80%91%E8%90%A5%E9%94%80%E6%B4%BB%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
19
- # actual_oss = "https://cdn.smartpushedm.com/material_ec2/2025-02-26/757df7e77ce544e193257c0da35a4983/%E3%80%90%E8%87%AA%E5%8A%A8%E5%8C%96%E5%AF%BC%E5%87%BA%E3%80%91%E8%90%A5%E9%94%80%E6%B4%BB%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
20
-
21
- # e_person_oss1 = "https://cdn.smartpushedm.com/material_ec2/2025-02-27/b48f34b3e88045d189631ec1f0f23d51/%E5%AF%BC%E5%87%BA%E5%85%A8%E9%83%A8%E5%AE%A2%E6%88%B7.csv"
22
- # a_person_oss2 = "https://cdn.smartpushedm.com/material_ec2/2025-02-27/c50519d803c04e3b9b52d9f625fed413/%E5%AF%BC%E5%87%BA%E5%85%A8%E9%83%A8%E5%AE%A2%E6%88%B7.csv"
23
-
24
- # # #actual_oss= get_oss_address_with_retry("23161","https://cdn.smartpushedm.com/material_ec2_prod/2025-02-20/dae941ec20964ca5b106407858676f89/%E7%BE%A4%E7%BB%84%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx","",'{"page":1,"pageSize":10,"type":null,"status":null,"startTime":null,"endTime":null}')
25
- # # res=read_excel_and_write_to_dict(read_excel_from_oss(actual_oss))
26
- # # print(res)
27
- # # print(read_excel_and_write_to_dict(read_excel_from_oss(oss1), type=".xlsx"))
28
- # print(check_excel(check_type="all", actual_oss=actual_oss, expected_oss=expected_oss))
29
- # print(check_excel_all(actual_oss=oss1, expected_oss=oss2,skiprows =1))
30
- # print(check_excel_all(actual_oss=oss1, expected_oss=oss2,ignore_sort=True))
31
- # print(check_excel_all(actual_oss=a_person_oss2, expected_oss=e_person_oss1, check_type="including"))
32
- # print(check_excel_all(actual_oss=oss1, expected_oss=oss2, ignore_sort=True))
33
- # read_excel_csv_data(type=)
34
- # print(DataTypeUtils().check_email_format())
35
- errors = ExcelExportChecker.check_field_format(actual_oss=oss1, fileds={0: {2: "email", 5: "time"}}, skiprows=2)
36
- print(errors)
File without changes