smartpush 1.1.6__py3-none-any.whl → 1.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
smartpush/export/basic/ExcelExportChecker.py CHANGED
@@ -1,7 +1,6 @@
 import copy
 import json
 from urllib.parse import unquote
-
 from smartpush.export.basic.ReadExcel import *
 
 """
@@ -104,19 +103,23 @@ def check_excel_content_form_list(actual, expected):
     return check_excel_content(actual=actual, expected=expected)
 
 
-def check_excel_all(actual_oss, expected_oss):
+def check_excel_all(actual_oss, expected_oss, **kwargs):
     """
     校验所有内容
+    **kwargs: skiprows->int 用于跳过读取行数,如果第一行是动态变化的,建议单独过滤,第一行传1
     """
-    file_type = check_and_get_file_suffix_name(expected_oss, actual_oss)
+    file_type = check_and_get_file_suffix_name(actual_oss, expected_oss)
     actual, expected = read_excel_from_oss(actual_oss), read_excel_from_oss(expected_oss)
     actual_data_copy = copy.deepcopy(actual)
     expected_data_copy = copy.deepcopy(expected)
-    flag1, content_result = check_excel_content_form_dict(actual, expected, type=file_type)
-    flag2, name_result = check_excel_name(actual_oss, expected_oss)
-    flag3, header_result = check_excel_header(actual_data_copy,expected_data_copy, type=file_type)
-    return all([flag1, flag2]), json.dumps({f"文件名称-{flag1}": name_result, f"导出内容-{flag2}": content_result,f"表头校验-{flag3}": header_result},
-                                           ensure_ascii=False)
+
+    flag1, name_result = check_excel_name(actual_oss, expected_oss)
+    flag2, content_result = check_excel_content_form_dict(actual, expected, type=file_type, **kwargs)
+    flag3, header_result = check_excel_header(actual_data_copy, expected_data_copy, type=file_type, **kwargs)
+    print(json.dumps(
+        {f"文件名称-{flag1}": name_result, f"导出内容-{flag2}": content_result, f"表头校验-{flag3}": header_result},
+        ensure_ascii=False))
+    return all([flag1, flag2, flag3])
 
 
 def check_and_get_file_suffix_name(actual_oss, expected_oss) -> str:
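A minimal call sketch for the reworked check_excel_all (the two OSS URLs below are placeholders, not values from this diff): per the new docstring, skiprows=1 skips a dynamic first row before the content and header checks, the per-check JSON report is printed, and only a single boolean is returned.

    from smartpush.export.basic.ExcelExportChecker import check_excel_all

    actual_oss = "https://cdn.example.com/exports/actual.xlsx"      # placeholder URL
    expected_oss = "https://cdn.example.com/exports/expected.xlsx"  # placeholder URL

    # skiprows=1: drop the dynamic first row before comparing content and headers
    ok = check_excel_all(actual_oss=actual_oss, expected_oss=expected_oss, skiprows=1)
    print(ok)  # True only if the name, content and header checks all pass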
@@ -170,17 +173,31 @@ def check_excel_content(actual, expected):
     # 断言2:对比具体行
     if isinstance(actual, list) and isinstance(expected, list):
         # 第一层提取sheet
-        for i in range(min(len(expected), len(actual))):
-            if actual[i] == expected[i]:
+        for i in range(max(len(expected), len(actual))):
+            if len(expected) <= i:
+                errors.append(f"预期结果不存在第{i + 1}个sheet")
+                continue
+            elif len(actual) <= i:
+                errors.append(f"预期结果不存在第{i + 1}个sheet")
                 continue
             else:
-                for row in range(min(len(expected[i]), len(actual[i]))):
-                    if actual[i][row] == expected[i][row]:
-                        continue
-                    else:
-                        errors.append(
-                            f"{i+1}个sheet的内容-第" + str(i + 1) + "行不匹配,预期为:" + str(expected[i]) + ", 实际为: " + str(
-                                actual[i]))
+                if actual[i] == expected[i]:
+                    continue
+                else:
+                    for row in range(max(len(expected[i]), len(actual[i]))):
+                        if len(expected[i]) <= row:
+                            errors.append(f"预期结果不存在第{row + 1}个行")
+                            continue
+                        elif len(actual[i]) <= row:
+                            errors.append(f"实际内容不存在第{row + 1}个行")
+                            continue
+                        else:
+                            if actual[i][row] == expected[i][row]:
+                                continue
+                            else:
+                                errors.append(
+                                    f"第{i+1}个sheet的内容-第" + str(i + 1) + "行不匹配,预期为:" + str(expected[i]) + ", 实际为: " + str(
+                                        actual[i]))
         return False, errors
     else:
         return False, compare_dicts(actual, expected)
@@ -198,17 +215,20 @@ def check_excel_header(actual, expected, **kwargs):
     @return:
     """
     try:
-        if all([isinstance(actual, str), isinstance(expected, str)]):
-            actual1, expected1 = read_excel_header(read_excel_from_oss(actual), **kwargs), read_excel_header(
-                read_excel_from_oss(
-                    expected), **kwargs)
-        else:
-            actual1, expected1 = read_excel_header(actual, **kwargs), read_excel_header(expected, **kwargs)
+        # if all([isinstance(actual, str), isinstance(expected, str)]):
+        #     actual1, expected1 = read_excel_header(read_excel_from_oss(actual), **kwargs), read_excel_header(
+        #         read_excel_from_oss(
+        #             expected), **kwargs)
+        # else:
+        actual1, expected1 = read_excel_header(actual, return_type='dict', **kwargs), read_excel_header(expected,
+                                                                                                        return_type='dict',
+                                                                                                        **kwargs)
         try:
-            assert actual1 == expected1
+            result = check_excel_content(actual1, expected1)
+            assert result[0]
             return True, "表头校验值与顺序一致"
         except Exception as e:
-            return False, f"表头校验值与顺序失败 {e}"
+            return False, f"表头校验值与顺序失败 {result[1]}"
     except Exception as e:
         return False, f"表头校验异常 {e}"
 
@@ -225,32 +245,62 @@ def del_temp_file(file_name=""):
         print(f"删除文件 {file_path} 时出错:{e}")
 
 
-def compare_dicts(dict1, dict2):
+def compare_dicts(actual_dict, expected_dict):
     diff = {}
     # 找出只在 dict1 中存在的键
-    only_in_dict1 = set(dict1.keys()) - set(dict2.keys())
+    only_in_dict1 = set(actual_dict.keys()) - set(expected_dict.keys())
     if only_in_dict1:
-        diff['only_in_dict1'] = {key: dict1[key] for key in only_in_dict1}
+        diff['only_in_dict1'] = {key: actual_dict[key] for key in only_in_dict1}
     # 找出只在 dict2 中存在的键
-    only_in_dict2 = set(dict2.keys()) - set(dict1.keys())
+    only_in_dict2 = set(expected_dict.keys()) - set(actual_dict.keys())
     if only_in_dict2:
-        diff['only_in_dict2'] = {key: dict2[key] for key in only_in_dict2}
+        diff['only_in_dict2'] = {key: expected_dict[key] for key in only_in_dict2}
     # 处理两个字典都有的键
-    common_keys = set(dict1.keys()) & set(dict2.keys())
+    common_keys = set(actual_dict.keys()) & set(expected_dict.keys())
     for key in common_keys:
-        value1 = dict1[key]
-        value2 = dict2[key]
+        value1 = actual_dict[key]
+        value2 = expected_dict[key]
         if isinstance(value1, dict) and isinstance(value2, dict):
             # 如果值是字典,递归比较
             sub_diff = compare_dicts(value1, value2)
             if sub_diff:
-                diff[f'different_sub_dicts_at_{key}'] = sub_diff
+                diff[f'不同的字典_at_{key}'] = sub_diff
         elif isinstance(value1, list) and isinstance(value2, list):
-            # 如果值是列表,比较列表元素
-            if value1 != value2:
-                diff[f'different_lists_at_{key}'] = (value1, value2)
+            # 如果值是列表,递归比较列表元素
+            list_diff = compare_lists(value1, value2)
+            if list_diff:
+                diff[f'sheet【{key}】中存在差异'] = list_diff
         else:
             # 其他情况,直接比较值
             if value1 != value2:
-                diff[f'different_values_at_{key}'] = (value1, value2)
+                diff[f'不同的值_at_{key}'] = (value1, value2)
+    return diff
+
+
+def compare_lists(actual_dict_list, expected_dict_list):
+    diff = []
+    max_len = max(len(actual_dict_list), len(expected_dict_list))
+    for i in range(max_len):
+        if i >= len(actual_dict_list):
+            # list2 更长
+            diff.append(('只存在expected_dict_list的中', expected_dict_list[i]))
+        elif i >= len(expected_dict_list):
+            # list1 更长
+            diff.append(('只存在actual_dict_list中', actual_dict_list[i]))
+        else:
+            item1 = actual_dict_list[i]
+            item2 = expected_dict_list[i]
+            if isinstance(item1, dict) and isinstance(item2, dict):
+                # 如果元素是字典,递归比较
+                sub_diff = compare_dicts(item1, item2)
+                if sub_diff:
+                    diff.append(('列表索引中存在不同的字典', i, sub_diff))
+            elif isinstance(item1, list) and isinstance(item2, list):
+                # 如果元素是列表,递归比较
+                sub_list_diff = compare_lists(item1, item2)
+                if sub_list_diff:
+                    diff.append(('列表索引的存在不同的子列表', i, sub_list_diff))
+            else:
+                if item1 != item2:
+                    diff.append(('列表索引的不同值', i, (item1, item2)))
     return diff
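The comparison helpers now recurse into lists as well as dicts, so row-level differences inside a sheet surface in the report instead of a blunt tuple of both lists. A self-contained sketch of the diff shape they produce (sheet name and rows are invented for illustration; it only assumes compare_dicts is importable from the module shown in this diff):

    from smartpush.export.basic.ExcelExportChecker import compare_dicts

    actual = {"Sheet1": [{"日期": "2025-02-20", "点击": 3}, {"日期": "2025-02-21", "点击": 5}]}
    expected = {"Sheet1": [{"日期": "2025-02-20", "点击": 3}], "Sheet2": []}

    # "Sheet2" exists only in expected, so it is reported under 'only_in_dict2';
    # the extra row in actual["Sheet1"] is reported by compare_lists
    # under the key "sheet【Sheet1】中存在差异".
    print(compare_dicts(actual, expected))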
smartpush/export/basic/GetOssUrl.py CHANGED
@@ -1,9 +1,8 @@
-# from retry import retry
 import json
+import urllib
 
 import requests
-from tenacity import retry, stop_after_attempt, wait_fixed,RetryError
-
+from tenacity import retry, stop_after_attempt, wait_fixed, RetryError
 from smartpush.utils.StringUtils import StringUtils
 
 
@@ -26,10 +25,9 @@ def get_oss_address_with_retry(target_id, url, requestHeader, requestParam, **kw
     :param requestHeader:
     :return: 带有重试配置的获取 OSS 地址的
     """
-    tries = kwargs.get('tries', 10) # 重试次数
+    tries = kwargs.get('tries', 20) # 重试次数
     delay = kwargs.get('delay', 2)
 
-
     @retry(stop=stop_after_attempt(tries), wait=wait_fixed(delay), after=log_attempt)
     def get_oss_address():
         _url = url + '/bulkOps/query'
@@ -45,8 +43,9 @@ def get_oss_address_with_retry(target_id, url, requestHeader, requestParam, **kw
             id_url_dict = {item["id"]: item["url"] for item in result["resultData"]["datas"]}
             if target_id in id_url_dict:
                 if len(id_url_dict[target_id]) == 1:
-                    print(f"{target_id} oss链接为:{id_url_dict[target_id][0]}")
-                    return id_url_dict[target_id][0]
+                    target_url = urllib.parse.unquote(id_url_dict[target_id][0])
+                    print(f"target_id [{target_id}] 的oss链接为: {target_url}")
+                    return target_url
                 else:
                     raise ValueError(f"存在多条 id 为 {target_id} 的记录,记录为:{id_url_dict[target_id]}")
             else:
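The returned OSS link is now percent-decoded before being logged and returned. For reference, this is standard urllib.parse.unquote behaviour (the URL below is illustrative, not taken from this diff):

    import urllib.parse

    encoded = "https://cdn.example.com/2025-02-26/%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"  # illustrative URL
    print(urllib.parse.unquote(encoded))
    # https://cdn.example.com/2025-02-26/数据概览.xlsx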
@@ -77,21 +76,3 @@ def get_oss_address_with_retry(target_id, url, requestHeader, requestParam, **kw
         if isinstance(e, RetryError):
             cancel_export_file(target_id)
         return None
-
-
-if __name__ == '__main__':
-    url = "https://test.smartpushedm.com/api-em-ec2"
-    requestHeader = {
-        "cookie": "osudb_appid=SMARTPUSH;osudb_oar=#01#SID0000122BBLon+0gwvStide+qtdJAK57ZSK1ty+iW8b7tv/Uwl6Zo4gDfUg6B83n+jgqTVjoZ5qRGyRsuLaXc9woDN2WRh3mu1yn7anglBmaFoemhCy/ttS8nqv/y0kj8khbu6mtBmQrseNfnO/Mir8PQP+S;osudb_subappid=1;osudb_uid=4213785247;ecom_http_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3NDI1Mjk2NTQsImp0aSI6ImM2MTA4MGJkLTU4MGUtNDJiNi05NzU5LTU0ZTNmZDExZDA4OSIsInVzZXJJbmZvIjp7ImlkIjowLCJ1c2VySWQiOiI0MjEzNzg1MjQ3IiwidXNlcm5hbWUiOiIiLCJlbWFpbCI6ImZlbGl4LnNoYW9Ac2hvcGxpbmVhcHAuY29tIiwidXNlclJvbGUiOiJvd25lciIsInBsYXRmb3JtVHlwZSI6Nywic3ViUGxhdGZvcm0iOjEsInBob25lIjoiIiwibGFuZ3VhZ2UiOiJ6aC1oYW5zLWNuIiwiYXV0aFR5cGUiOiIiLCJhdHRyaWJ1dGVzIjp7ImNvdW50cnlDb2RlIjoiQ04iLCJjdXJyZW5jeSI6IkpQWSIsImN1cnJlbmN5U3ltYm9sIjoiSlDCpSIsImRvbWFpbiI6InNtYXJ0cHVzaDQubXlzaG9wbGluZXN0Zy5jb20iLCJsYW5ndWFnZSI6ImVuIiwibWVyY2hhbnRFbWFpbCI6ImZlbGl4LnNoYW9Ac2hvcGxpbmUuY29tIiwibWVyY2hhbnROYW1lIjoiU21hcnRQdXNoNF9lYzJf6Ieq5Yqo5YyW5bqX6ZO6IiwicGhvbmUiOiIiLCJzY29wZUNoYW5nZWQiOmZhbHNlLCJzdGFmZkxhbmd1YWdlIjoiemgtaGFucy1jbiIsInN0YXR1cyI6MCwidGltZXpvbmUiOiJBc2lhL01hY2FvIn0sInN0b3JlSWQiOiIxNjQ0Mzk1OTIwNDQ0IiwiaGFuZGxlIjoic21hcnRwdXNoNCIsImVudiI6IkNOIiwic3RlIjoiIiwidmVyaWZ5IjoiIn0sImxvZ2luVGltZSI6MTczOTkzNzY1NDc2Mywic2NvcGUiOlsiZW1haWwtbWFya2V0IiwiY29va2llIiwic2wtZWNvbS1lbWFpbC1tYXJrZXQtbmV3LXRlc3QiLCJlbWFpbC1tYXJrZXQtbmV3LWRldi1mcyIsImFwaS11Yy1lYzIiLCJhcGktc3UtZWMyIiwiYXBpLWVtLWVjMiIsImZsb3ctcGx1Z2luIl0sImNsaWVudF9pZCI6ImVtYWlsLW1hcmtldCJ9.X2Birt-jiWILAvEjjwknUchil2ys8Y11omeRYgZ3K0I;",
-        "Content-Type": "application/json"
-    }
-    requestParam = {
-        "page": 1,
-        "pageSize": 20,
-        "type": "EXPORT",
-        "status": None,
-        "startTime": 1740033265288,
-        "endTime": 1740044065288
-    }
-    id = "2334659"
-    get_oss_address_with_retry(2334659, url, requestHeader, requestParam)
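With the in-module demo removed, a hedged call sketch of the remaining entry point (the endpoint, headers and parameters below are placeholders, not real credentials; the target id reuses the one from the deleted demo): the retry defaults are now 20 attempts with a 2-second fixed wait, and both can still be overridden through kwargs.

    from smartpush.export.basic.GetOssUrl import get_oss_address_with_retry

    url = "https://example.smartpushedm.com/api-em-ec2"  # placeholder endpoint
    requestHeader = {"cookie": "<session cookie>", "Content-Type": "application/json"}  # placeholder
    requestParam = {"page": 1, "pageSize": 20, "type": "EXPORT", "status": None,
                    "startTime": None, "endTime": None}

    # tries/delay feed tenacity's stop_after_attempt and wait_fixed; defaults are 20 and 2
    oss_url = get_oss_address_with_retry(2334659, url, requestHeader, requestParam, tries=5, delay=1)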
smartpush/export/basic/ReadExcel.py CHANGED
@@ -2,6 +2,7 @@ import io
 import os
 import re
 import warnings
+import zipfile
 from io import BytesIO
 import pandas as pd
 from requests import request
@@ -22,19 +23,30 @@ def read_excel_from_oss(url="", method="get"):
         print(f"读取oss报错 {url} 时出错:{e}")
 
 
-def read_excel_header(excel_data, **kwargs) -> list:
+def read_excel_header(excel_data, return_type='list', **kwargs) -> list | dict:
     """
     1、读出excel的头列 list
     """
     try:
-        dfs = read_excel_csv_data(excel_data,**kwargs)
         result = []
-        if kwargs['type'] in excel_extensions:
+        result_dict = {}
+        skip_rows = kwargs.pop('skiprows', 0) - 1 if 'skiprows' in kwargs else 0
+        dfs = read_excel_csv_data(excel_data, **kwargs)
+        if kwargs.get('type', None) in excel_extensions:
             for sheet_name, df in dfs.items():
-                result.append(df.keys().values.tolist())
+                # result.append(df.keys().values.tolist())
+                headers = df.iloc[skip_rows].tolist()
+                result.append(headers)
+                result_dict[sheet_name] = headers
+            if return_type == 'list':
+                return result
+            else:
+                return result_dict
         else:
-            result = dfs.keys().values.tolist()
-            return result
+            # csv的头
+            # result = dfs.keys().values.tolist()
+            result = dfs.columns.tolist()
+            return result
     except Exception as e:
         print(f"excel生成header-list出错:{e}")
         raise
@@ -43,9 +55,11 @@ def read_excel_header(excel_data, **kwargs) -> list:
 def read_excel_csv_data(excel_data, **kwargs):
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=UserWarning, module=re.escape('openpyxl.styles.stylesheet'))
-        if kwargs['type'] in excel_extensions:
-            dfs = pd.read_excel(excel_data, sheet_name=None, na_filter=False, engine="openpyxl") if isinstance(excel_data,
-                                                                                                               io.BytesIO) \
+        if kwargs.get('type', None) in excel_extensions:
+            dfs = pd.read_excel(excel_data, sheet_name=None, na_filter=False, engine='openpyxl',
+                                skiprows=kwargs.get('skiprows', None), header=kwargs.get('header', None)) if isinstance(
+                excel_data,
+                io.BytesIO) \
                 else excel_data
         else:
             dfs = pd.read_csv(excel_data, na_filter=False)
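The reader now forwards skiprows and header straight to pandas. A standalone sketch of what those two options do, using a throwaway in-memory workbook (illustrative data only; it relies solely on documented pandas behaviour, not on this package):

    import io
    import pandas as pd

    buf = io.BytesIO()
    pd.DataFrame([["导出时间: 2025-02-26", ""],   # dynamic first row
                  ["日期", "点击"],               # real header row
                  ["2025-02-20", 3]]).to_excel(buf, index=False, header=False)
    buf.seek(0)

    # sheet_name=None -> dict of DataFrames; header=None -> keep every row as data;
    # skiprows=1 -> drop the dynamic first row before reading.
    dfs = pd.read_excel(buf, sheet_name=None, na_filter=False, engine="openpyxl",
                        skiprows=1, header=None)
    print(dfs["Sheet1"].iloc[0].tolist())  # ['日期', '点击']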
@@ -57,20 +71,29 @@ def read_excel_and_write_to_dict(excel_data=None, file_name=None, **kwargs):
     :param excel_data:excel的io对象, 参数和file_name互斥
     :file_name: excel文件名称,目前读取check_file目录下文件,参数和excel_data互斥
     """
+    dfs = None
     try:
         if excel_data is not None and file_name is not None:
             pass
         elif file_name is not None:
             excel_data = os.path.join(os.path.dirname(os.getcwd()) + "/check_file/" + file_name)
         dfs = read_excel_csv_data(excel_data, **kwargs)
-        if kwargs['type'] in excel_extensions:
+        if kwargs.get('type', None) in excel_extensions:
             # 将DataFrame转换为字典,以行为单位存储数据
             row_dict = {}  # 创建一个空字典来存储按行转换的数据
             for sheet_name, row in dfs.items():
-                row_dict[sheet_name] = row.to_dict(orient='records')
+                row = row.to_dict(orient='records')
+                if kwargs.get("ignore_sort", False):
+                    sorted_data_asc = sorted(row[0:], key=lambda x: x[0], reverse=True)  # 内部排序
+                    row_dict[sheet_name] = sorted_data_asc
+                else:
+                    row_dict[sheet_name] = row
         else:
             row_dict = dfs.to_dict()
         return row_dict
+    except zipfile.BadZipFile:
+        print(f"文件读取错误,请检查文件是否为无效文件:{dfs}")
+        raise
     except Exception as e:
         print(f"excel写入dict时出错:{e}")
 
smartpush/test.py CHANGED
@@ -1,22 +1,27 @@
 # -*- codeing = utf-8 -*-
 # @Time :2025/2/20 00:27
 # @Author :luzebin
-from smartpush.export.basic.ExcelExportChecker import check_excel_all,read_excel_and_write_to_list,read_excel_from_oss,read_excel_csv_data,check_excel
+import pandas as pd
+
+from smartpush.export.basic.ExcelExportChecker import check_excel_all, read_excel_and_write_to_list, \
+    read_excel_from_oss, read_excel_csv_data, check_excel
 from smartpush.export.basic.ReadExcel import read_excel_from_oss
 from smartpush.export.basic.ReadExcel import read_excel_and_write_to_dict
 from smartpush.export.basic.GetOssUrl import get_oss_address_with_retry
 
 if __name__ == '__main__':
-    oss1 = "https://cdn.smartpushedm.com/material_ec2/2025-02-19/4d98418295524ab1b52340c2ed2afa4a/AutoTest-%E5%9B%BA%E5%AE%9AB-2025-02-14%20%E5%88%9B%E5%BB%BA%E7%9A%84Email33%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
-    oss2 = "https://cdn.smartpushedm.com/material_ec2/2025-02-19/ddbe9965d83840199e678a66dc414518/%E8%90%A5%E9%94%80%E4%BB%BB%E5%8A%A1%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
-    oss3 = "https://cdn.smartpushedm.com/material_ec2/2025-02-19/4d98418295524ab1b52340c2ed2afa4a/AutoTest-%E5%9B%BA%E5%AE%9AB-2025-02-14%20%E5%88%9B%E5%BB%BA%E7%9A%84Email33%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
-    # print(check_excel_all(oss1, oss1))
-
-    # expected_oss ="https://cdn.smartpushedm.com/material_ec2_prod/2025-02-20/dae941ec20964ca5b106407858676f89/%E7%BE%A4%E7%BB%84%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
-    # actual_oss = "https://cdn.smartpushedm.com/material_ec2_prod/2025-02-20/dae941ec20964ca5b106407858676f89/%E7%BE%A4%E7%BB%84%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
-    # #actual_oss= get_oss_address_with_retry("23161","https://cdn.smartpushedm.com/material_ec2_prod/2025-02-20/dae941ec20964ca5b106407858676f89/%E7%BE%A4%E7%BB%84%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx","",'{"page":1,"pageSize":10,"type":null,"status":null,"startTime":null,"endTime":null}')
-    # res=read_excel_and_write_to_dict(read_excel_from_oss(actual_oss))
-    # print(res)
-    # print(read_excel_and_write_to_dict(read_excel_from_oss(oss1), type=".xlsx"))
-    print(check_excel(check_type="all", actual_oss=oss1, expected_oss=oss3))
-    # read_excel_csv_data(type=)
+    oss1 = "https://cdn.smartpushedm.com/material_ec2/2025-02-20/ad9e1534b8134dd098e96813f17d4b4d/%E6%B5%8B%E8%AF%95flow%E6%95%B0%E6%8D%AE%E6%8A%A5%E5%91%8A%E5%AF%BC%E5%87%BA%E5%8B%BF%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
+    oss2 = "https://cdn.smartpushedm.com/material_ec2/2025-02-21/bbe660950493411d88b4a75ed0893ec8/%E6%B5%8B%E8%AF%95flow%E6%95%B0%E6%8D%AE%E6%8A%A5%E5%91%8A%E5%AF%BC%E5%87%BA%E5%8B%BF%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
+    # # print(check_excel_all(oss1, oss1))
+    # oss3 = "https://cdn.smartpushedm.com/material_ec2/2025-02-25/58c4a3a885884741b22380c360ac2894/【自动化导出】营销活动URL点击与热图.xlsx"
+    # oss4 = "https://cdn.smartpushedm.com/material_ec2/2025-02-26/58cee630b4c84eec9572b867af4ce692/%E3%80%90%E8%87%AA%E5%8A%A8%E5%8C%96%E5%AF%BC%E5%87%BA%E3%80%91%E8%90%A5%E9%94%80%E6%B4%BB%E5%8A%A8URL%E7%82%B9%E5%87%BB%E4%B8%8E%E7%83%AD%E5%9B%BE.xlsx"
+    expected_oss ="https://cdn.smartpushedm.com/material_ec2/2025-02-26/757df7e77ce544e193257c0da35a4983/%E3%80%90%E8%87%AA%E5%8A%A8%E5%8C%96%E5%AF%BC%E5%87%BA%E3%80%91%E8%90%A5%E9%94%80%E6%B4%BB%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
+    actual_oss = "https://cdn.smartpushedm.com/material_ec2/2025-02-26/757df7e77ce544e193257c0da35a4983/%E3%80%90%E8%87%AA%E5%8A%A8%E5%8C%96%E5%AF%BC%E5%87%BA%E3%80%91%E8%90%A5%E9%94%80%E6%B4%BB%E5%8A%A8%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx"
+    # # #actual_oss= get_oss_address_with_retry("23161","https://cdn.smartpushedm.com/material_ec2_prod/2025-02-20/dae941ec20964ca5b106407858676f89/%E7%BE%A4%E7%BB%84%E6%95%B0%E6%8D%AE%E6%A6%82%E8%A7%88.xlsx","",'{"page":1,"pageSize":10,"type":null,"status":null,"startTime":null,"endTime":null}')
+    # # res=read_excel_and_write_to_dict(read_excel_from_oss(actual_oss))
+    # # print(res)
+    # # print(read_excel_and_write_to_dict(read_excel_from_oss(oss1), type=".xlsx"))
+    print(check_excel(check_type="all", actual_oss=actual_oss, expected_oss=expected_oss))
+    # print(check_excel_all(actual_oss=oss1, expected_oss=oss2,skiprows=1))
+    # print(check_excel_all(actual_oss=oss1, expected_oss=oss2,ignore_sort =True))
+    # read_excel_csv_data(type=)
{smartpush-1.1.6.dist-info → smartpush-1.1.8.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: smartpush
-Version: 1.1.6
+Version: 1.1.8
 Summary: 用于smartpush自动化测试工具包
 Author: 卢泽彬、邵宇飞、周彦龙
 
smartpush-1.1.8.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
+smartpush/__init__.py,sha256=XJrl1vhGATHSeSVqKmPXxYqxyseriUpvY5tLIXir3EE,24
+smartpush/get_jira_info.py,sha256=dmCwkKa94xwyE2hegE1KBI3cV_LbrJ67P9osORUGPt4,2633
+smartpush/test.py,sha256=34f8LgTj6bGO6LFukTo5yeiGq65sXReXGG7a9hgbACc,2668
+smartpush/export/__init__.py,sha256=D9GbWcmwnetEndFDty5XbVienFK1WjqV2yYcQp3CM84,99
+smartpush/export/basic/ExcelExportChecker.py,sha256=Ku-UoBKqTE7VwAWA644DNGUmXmXnnjmY6ifLV4rznUI,13593
+smartpush/export/basic/GetOssUrl.py,sha256=oCbPRGa5SqdPWzzeQ8sG10uZJByhrLAzUtwZi_IZgrg,3062
+smartpush/export/basic/ReadExcel.py,sha256=4UbM0MB--fQmjlBSTAALBlip0aFEyieo6zUlMqxUrcs,6334
+smartpush/export/basic/__init__.py,sha256=6tcrS-2NSlsJo-UwEsnGUmwCf7jgOsh_UEbM0FD-gYE,70
+smartpush/utils/StringUtils.py,sha256=NXomJ4qmyBRAFnGj5hrFRWwQnRQMTcPzy20fk1dunSw,3980
+smartpush/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+smartpush-1.1.8.dist-info/METADATA,sha256=fm6qLk2nBEPnLwnqLkuy1ahF5WphfsYs2EQb48-9n5U,145
+smartpush-1.1.8.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+smartpush-1.1.8.dist-info/top_level.txt,sha256=5_CXqu08EfbPaKLjuSAOAqCmGU6shiatwDU_ViBGCmg,10
+smartpush-1.1.8.dist-info/RECORD,,
{smartpush-1.1.6.dist-info → smartpush-1.1.8.dist-info}/WHEEL RENAMED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.38.4)
+Generator: bdist_wheel (0.45.1)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
smartpush-1.1.6.dist-info/RECORD DELETED
@@ -1,14 +0,0 @@
-smartpush/__init__.py,sha256=XJrl1vhGATHSeSVqKmPXxYqxyseriUpvY5tLIXir3EE,24
-smartpush/get_jira_info.py,sha256=dmCwkKa94xwyE2hegE1KBI3cV_LbrJ67P9osORUGPt4,2633
-smartpush/test.py,sha256=Pkk6y3snpYnJBT-O51LVujutuT2RSm-nGu-rtB1H-f4,2035
-smartpush/export/__init__.py,sha256=D9GbWcmwnetEndFDty5XbVienFK1WjqV2yYcQp3CM84,99
-smartpush/export/basic/ExcelExportChecker.py,sha256=pG6yccw44AIynCOkh_Sv0svLwUMzInMwD-C01qfAtxs,10840
-smartpush/export/basic/GetOssUrl.py,sha256=IYt-C-SBY4WU_y8dm9aH3uKj1d7M7sSNtitDrz4EfHU,4932
-smartpush/export/basic/ReadExcel.py,sha256=rL1D29VIHhp2-CN79ROsbEdnnGWAjKNOe6SbPlO_5hA,5368
-smartpush/export/basic/__init__.py,sha256=6tcrS-2NSlsJo-UwEsnGUmwCf7jgOsh_UEbM0FD-gYE,70
-smartpush/utils/StringUtils.py,sha256=NXomJ4qmyBRAFnGj5hrFRWwQnRQMTcPzy20fk1dunSw,3980
-smartpush/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-smartpush-1.1.6.dist-info/METADATA,sha256=6zoN_MFGdEjUOJFeG3644DSlCHgqO67QaKJN41rVBsc,145
-smartpush-1.1.6.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-smartpush-1.1.6.dist-info/top_level.txt,sha256=5_CXqu08EfbPaKLjuSAOAqCmGU6shiatwDU_ViBGCmg,10
-smartpush-1.1.6.dist-info/RECORD,,