mdbq 1.7.9__tar.gz → 1.8.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mdbq-1.7.9 → mdbq-1.8.1}/PKG-INFO +1 -1
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/company/copysh.py +1 -1
- mdbq-1.8.1/mdbq/other/sku_picture.py +652 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/pbix/refresh_all.py +1 -1
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq.egg-info/PKG-INFO +1 -1
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq.egg-info/SOURCES.txt +1 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/setup.py +1 -1
- {mdbq-1.7.9 → mdbq-1.8.1}/README.txt +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/__version__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/aggregation/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/aggregation/aggregation.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/aggregation/df_types.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/aggregation/mysql_types.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/aggregation/optimize_data.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/aggregation/query_data.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/bdup/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/bdup/bdup.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/clean/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/clean/data_clean.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/company/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/config/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/config/get_myconf.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/config/products.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/config/set_support.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/config/update_conf.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/dataframe/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/dataframe/converter.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/log/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/log/mylogger.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/mongo/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/mongo/mongo.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/mysql/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/mysql/mysql.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/mysql/s_query.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/mysql/year_month_day.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/other/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/other/porxy.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/other/pov_city.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/other/ua_sj.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/pbix/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/pbix/pbix_refresh.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq/spider/__init__.py +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq.egg-info/dependency_links.txt +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/mdbq.egg-info/top_level.txt +0 -0
- {mdbq-1.7.9 → mdbq-1.8.1}/setup.cfg +0 -0
mdbq-1.8.1/mdbq/other/sku_picture.py (new file)
@@ -0,0 +1,652 @@
+# -*- coding:utf-8 -*-
+import datetime
+import getpass
+import json
+import os
+import platform
+import random
+from dateutil.relativedelta import relativedelta
+import re
+import time
+import warnings
+import pandas as pd
+from lxml import etree
+from selenium import webdriver
+from selenium.webdriver.support.wait import WebDriverWait
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.chrome.service import Service
+from mdbq.config import set_support
+from mdbq.config import get_myconf
+from mdbq.mysql import mysql
+from mdbq.mysql import s_query
+from mdbq.other import ua_sj
+import requests
+from openpyxl import load_workbook
+# from openpyxl.drawing.image import Image
+import openpyxl
+from PIL import Image as PILImage
+from io import BytesIO
+from openpyxl.utils import get_column_letter
+
+warnings.filterwarnings('ignore')
+
+if platform.system() == 'Windows':
+    Share_Path = os.path.join(r'\\192.168.1.198\时尚事业部\01.运营部\天猫报表')  # root of the shared folder
+elif platform.system() == 'Darwin':
+    Share_Path = os.path.join('/Volumes/时尚事业部/01.运营部/天猫报表')  # root of the shared folder
+else:
+    Share_Path = ''
+
+
+class LoadAccount:
+    """ To capture cookies, comment out headless mode """
+
+    def __init__(self):
+        self.url = 'https://login.taobao.com/'  # log in to Taobao by default
+        self.cookie_path = os.path.join(set_support.SetSupport(dirname='support').dirname, 'cookies')
+
+    def __call__(self, *args, **kwargs):
+        self.check_cookie()  # check cookie validity without blocking the task
+
+    def load_account(self, shop_name):
+        option = webdriver.ChromeOptions()
+        # option.add_argument("--headless")  # headless mode
+        # tune the Chrome startup options
+        option.add_argument("--disable-gpu")
+        option.add_argument("--no-sandbox")
+        option.add_argument("--disable-dev-shm-usage")
+        option.add_experimental_option("excludeSwitches", ["enable-automation"])
+        option.add_experimental_option('excludeSwitches', ['enable-logging'])  # suppress logging to reduce console noise
+        option.add_experimental_option("useAutomationExtension", False)
+        option.add_argument('--ignore-ssl-error')  # ignore SSL errors
+        prefs = {
+            'profile.default_content_settings.popups': 0,  # block all popups
+            "browser.download.manager.showAlertOnComplete": False,  # no prompt when a download finishes
+            "profile.default_content_setting_values.automatic_downloads": 1,  # allow automatic download of multiple files
+        }
+
+        option.add_experimental_option('perfLoggingPrefs', {
+            'enableNetwork': True,
+            'enablePage': False,
+        })
+        option.set_capability("goog:loggingPrefs", {
+            'browser': 'ALL',
+            'performance': 'ALL',
+        })
+        option.set_capability("goog:perfLoggingPrefs", {
+            'enableNetwork': True,
+            'enablePage': False,
+            'enableTimeline': False
+        })
+
+        option.add_experimental_option('prefs', prefs)
+        option.add_experimental_option('excludeSwitches', ['enable-automation'])  # experimental flag, hides the automation banner
+        if platform.system() == 'Windows':
+            service = Service(os.path.join(f'C:\\Users\\{getpass.getuser()}\\chromedriver.exe'))
+        else:
+            service = Service('/usr/local/bin/chromedriver')
+        _driver = webdriver.Chrome(options=option, service=service)  # create the Chrome driver instance
+        _driver.maximize_window()  # maximize the window so pages load fully
+
+        if 'jd' in shop_name:  # switch to JD
+            self.url = 'https://shop.jd.com/jdm/home/'
+        # log in
+        _driver.get(self.url)
+        _driver.delete_all_cookies()  # clear any cookies the browser already holds
+        name_lists = os.listdir(self.cookie_path)  # cookies live in the cookies folder under the main directory
+        for name in name_lists:
+            if shop_name in name and name.endswith('.txt') and '~' not in name and '.DS' not in name:
+                with open(os.path.join(self.cookie_path, name), 'r') as f:
+                    cookies_list = json.load(f)  # reading a file object, so json.load rather than loads
+                    for cookie in cookies_list:
+                        _driver.add_cookie(cookie)  # add the cookie
+                break
+        # everything from get(url) above must run immediately; a time.sleep here makes the login fail
+        if 'jd' in shop_name:
+            return _driver
+        else:
+            _driver.refresh()
+        time.sleep(random.uniform(5, 8))
+        html = etree.HTML(_driver.page_source)
+        user_name = html.xpath('//div[@class="site-nav-user"]/a/text()')
+        if user_name:  # e.g. 1877西门吹风
+            print(f'当前账号:{user_name} 登录成功')
+            return _driver
+
+        elements = _driver.find_elements(
+            By.XPATH, '//*[@id="login-error"]/div')
+        if elements:  # "already logged in, sub-account cannot access..." -- in fact the session is logged in
+            if self.other(_driver):
+                return _driver
+        elements = _driver.find_elements(
+            By.XPATH, '//div[@class="captcha-tips"]/div[@class="warnning-text"]')
+        if elements:  # slider captcha, but the session is actually already logged in
+            if self.other(_driver):
+                return _driver
+        wait = WebDriverWait(_driver, timeout=15)
+        try:
+            button = wait.until(
+                EC.element_to_be_clickable(
+                    (By.XPATH, '//button[@class="fm-button fm-submit " and @type="submit"]')
+                )
+            )  # the quick-enter button
+            _driver.execute_script("arguments[0].click();", button)  # click to log in
+            time.sleep(3)
+        except:
+            # shop account
+            try:
+                wait.until(
+                    EC.presence_of_element_located(
+                        (By.XPATH, '//*[@id="icestark-container"]/div[1]/div/div[1]/img')))
+                html = etree.HTML(_driver.page_source)
+                user_name = html.xpath('//div[@class="UserArea--shopName--3Z5NVbD"]/text()')
+                print(f'当前账号:{user_name} 登录成功')
+                return _driver
+            except:
+                print(f'{shop_name} -> {self.url} 尝试跨页登录1')
+                # self.other(_driver)
+
+        # shop account: refreshing cookies sometimes logs in automatically, so check once more
+        try:
+            wait.until(
+                EC.presence_of_element_located((By.XPATH, '//*[@id="icestark-container"]/div[1]/div/div[1]/img')))
+            html = etree.HTML(_driver.page_source)
+            user_name = html.xpath('//div[@class="UserArea--shopName--3Z5NVbD"]/text()')
+            print(f'当前账号:{user_name} 登录成功')
+        except:
+            print(f'{shop_name} -> {self.url} 尝试跨页登录2')
+            self.other(_driver)
+        return _driver
+
+    @staticmethod
+    def other(_driver):
+        """ For some reason Taobao accounts do not redirect after the cookies are refreshed """
+        _driver.get('https://myseller.taobao.com')
+        time.sleep(3)
+        try:
+            wait = WebDriverWait(_driver, timeout=15)
+            wait.until(EC.presence_of_element_located((By.XPATH, '//div[contains(@class, "UserArea--shopName")]')))
+            print('登录成功')
+            return True
+        except Exception as e:
+            print(e)
+            print('登录失败')
+            _driver.quit()
+            return False
+
+    def d_new_cookies(self, _driver, _shopname):
+        """ Check and refresh the cookies file """
+        try:
+            _file = os.path.join(self.cookie_path, f'cookie_{_shopname}.txt')
+            _c = os.stat(_file).st_mtime  # file metadata >>> modification time
+            _c_time = datetime.datetime.fromtimestamp(_c)  # format the modification time
+            _today = datetime.datetime.today()
+            if (_today - _c_time).total_seconds() > 170000:
+                with open(_file, 'w') as f:
+                    # save the cookies as JSON
+                    cookies_list = _driver.get_cookies()
+                    for cookie in cookies_list:
+                        # this field causes problems, so just drop it
+                        if 'expiry' in cookie:
+                            del cookie['expiry']
+                        if 'domain' in cookie and '万里马官方' in _shopname:  # only Tmall/Taobao need this change; do not change it for JD
+                            cookie['domain'] = '.taobao.com'
+                    cookies_list = json.dumps(cookies_list)
+                    f.write(cookies_list)
+                    # print(f'cookie已保存: {_file}')
+        except Exception as e:
+            print(e)
+
+    def check_cookie(self):
+        """
+        Check the cookies and ask for new ones if they have expired.
+        still_get: when set, refresh the cookie immediately regardless of expiry
+        """
+        if not os.path.exists(self.cookie_path):
+            print(f'没有找到cookies文件: {self.cookie_path}')
+            return False
+        files = os.listdir(self.cookie_path)
+        cook = []
+        for file in files:
+            if file.endswith('txt') and 'cookie_' in file:
+                cook.append(file)
+                c_ = os.stat(os.path.join(self.cookie_path, file)).st_mtime  # file metadata >>> modification time
+                c_time_ = datetime.datetime.fromtimestamp(c_)  # format the modification time
+                today = datetime.datetime.today()
+                if (today - c_time_).total_seconds() > 864000:
+                    # too old, the cookies need to be fetched again
+                    print(f' {file}cookie已过期,请重新获取cookies')
+        return None
+
+    def tb_cookie(self, _url='https://login.taobao.com/'):
+        """
+        Call with care; using the wrong account will mix up the cookies.
+        Scan the QR code to obtain cookies and save them into the cookies folder.
+        is_wlm_cookie: create a separate wlm cookie in the parent directory for daily data downloads; other Taobao crawlers must not use it
+        c_account: True checks a shop account, False checks a non-shop account
+        """
+        option = webdriver.ChromeOptions()  # browser launch options
+        option.headless = True  # True runs without a browser window
+        # tune the Chrome startup options
+        option.add_argument("--disable-gpu")
+        option.add_argument("--no-sandbox")
+        option.add_argument("--disable-dev-shm-usage")
+        option.add_experimental_option("excludeSwitches", ["enable-automation"])
+        option.add_experimental_option("useAutomationExtension", False)
+        if platform.system() == 'Windows':
+            service = Service(os.path.join(f'C:\\Users\\{getpass.getuser()}\\chromedriver.exe'))
+        else:
+            service = Service('/usr/local/bin/chromedriver')
+        _driver = webdriver.Chrome(service=service, options=option)  # create the Chrome driver instance
+        # log in
+        _driver.get(_url)
+        time.sleep(1)
+        _driver.maximize_window()  # maximize the window so pages load fully
+        wait = WebDriverWait(_driver, timeout=120)  # wait for the login QR code
+        wait.until(EC.element_to_be_clickable(
+            (By.XPATH, '//div[@class="qrcode-login"]/div/div[@class="qrcode-img"]')))
+
+        user_name = None
+        for i in range(10):
+            d_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+            print(f'{d_time} 当前验证:等待非店账号扫码,请尽快扫码...')
+            wait = WebDriverWait(_driver, timeout=10)  # wait for the post-login page: an image loads in the top-left corner
+            try:  # non-shop account
+                wait.until(
+                    EC.presence_of_element_located((By.XPATH, '//*[@id="J_SiteNavLogin"]/div[1]/div/a')))
+                html = etree.HTML(_driver.page_source)
+                user_name = html.xpath('//*[@id="J_SiteNavLogin"]/div[1]/div/a/text()')
+                break
+            except:
+                d_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+                print(f'{d_time} 当前验证:等待店铺账号扫码...')
+            wait = WebDriverWait(_driver, timeout=15)
+            try:  # wait for the Qianniu icon in the top-left corner
+                wait.until(
+                    EC.presence_of_element_located(
+                        (By.XPATH, '//*[@id="icestark-container"]/div[1]/div/div[1]/img')))
+                html = etree.HTML(_driver.page_source)  # shop name after login
+                user_name = html.xpath('//div[contains(@class, "UserArea--shopName")]/text()')
+                break
+            except:
+                d_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+                print(f'{d_time} {_url} 第 {i + 1}/10 次等待登录超时,正在重试')
+            if i > 8:
+                return None
+        d_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+        print(f'{d_time} 登录成功,正在获取cookie...')
+        time.sleep(1)
+        sp_id = ['649844025963', '732863024183', '640779963378', '677330842517']
+        sp_id = random.choice(sp_id)
+        _driver.get(f'https://detail.tmall.com/item.htm?id={sp_id}')
+        time.sleep(3)
+        if user_name:
+            user_name = user_name[0]
+            user_name = re.sub(':', '_', user_name)  # replace colons in the user name
+        else:
+            user_name = ''
+
+        if not os.path.exists(self.cookie_path):
+            os.makedirs(self.cookie_path)
+        _file = os.path.join(self.cookie_path, f'cookie_{user_name}.txt')
+        with open(_file, 'w') as f:
+            # save the cookies as JSON
+            cookies_list = _driver.get_cookies()
+            for cookie in cookies_list:
+                # this field causes problems, so just drop it
+                if 'expiry' in cookie:
+                    del cookie['expiry']
+                if 'domain' in cookie:
+                    cookie['domain'] = '.taobao.com'
+            cookies_list = json.dumps(cookies_list)
+            f.write(cookies_list)
+            print(f'cookie已保存: {_file}')
+        _driver.quit()
+
+    def jd_cookie(self, _url='https://shop.jd.com/jdm/home/'):
+        option = webdriver.ChromeOptions()  # browser launch options
+        option.headless = True  # True runs without a browser window
+        if platform.system() == 'Windows':
+            service = Service(os.path.join(f'C:\\Users\\{getpass.getuser()}\\chromedriver.exe'))
+        else:
+            service = Service('/usr/local/bin/chromedriver')
+        _driver = webdriver.Chrome(service=service, options=option)  # create the Chrome driver instance
+        # log in
+        _driver.get(_url)
+        time.sleep(1)
+        _driver.maximize_window()  # maximize the window so pages load fully
+        print('等待登录京东商家后台...')
+        wait = WebDriverWait(_driver, timeout=300)
+        try:
+            wait.until(
+                EC.presence_of_element_located((By.XPATH, '//span[text()="京准通"]')))
+        except:
+            print('等待京东登录超时!')
+        d_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+        print(f'{d_time} 登录成功,正在获取cookie...')
+        time.sleep(3)
+        # d_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+
+        if not os.path.exists(self.cookie_path):
+            os.makedirs(self.cookie_path)
+        _file = os.path.join(self.cookie_path, 'cookie_jd.txt')
+        with open(_file, 'w') as f:
+            # save the cookies as JSON
+            cookies_list = _driver.get_cookies()
+            for cookie in cookies_list:
+                # this field causes problems, so just drop it
+                if 'expiry' in cookie:
+                    del cookie['expiry']
+            cookies_list = json.dumps(cookies_list)
+            f.write(cookies_list)
+            print(f'cookie已保存: {_file}')
+        time.sleep(1)
+        _driver.quit()
+
+
+class SkuPicture:
+    def __init__(self, driver):
+        self.driver = driver
+        self.path = os.path.join(Share_Path, '其他文件')
+        self.filename = '商品id编码表.xlsx'
+        self.urls = []
+        self.datas = []  # data scraped from each item page, kept as an intermediate store
+        self.df = pd.DataFrame()
+
+    def each_page(self):
+        wait = WebDriverWait(self.driver, timeout=15)
+        num = len(self.urls)
+        i = 0
+        for data in self.urls:
+            url = f'https://sell.publish.tmall.com/tmall/publish.htm?id={data["商品id"]}'
+            print(f'当前任务: {i}/{num} {url}')
+            try:
+                self.driver.get(url)
+                time.sleep(3)
+                # elements = self.driver.find_elements(
+                #     By.XPATH, '//h2[text()="很抱歉,您查看的商品找不到了!"]')
+                # if len(elements) > 0:
+                #     continue
+                wait.until(EC.presence_of_element_located((By.XPATH, '//tr[@class="sku-table-row"]')))
+                html = etree.HTML(self.driver.page_source)
+                imgs = html.xpath('//img[contains(@class, "img-block")]/@src')
+                imgs = [f'https:{item}' for item in imgs if 'http' not in item]
+                titles = html.xpath('//img[contains(@class, "img-block")]/../span/@title')
+                # img = html.xpath('//tr[@class="sku-table-row"]/td/div/div/div/img[@class="img-block"]/@src')
+                sku_price = html.xpath(
+                    '//tr[@class="sku-table-row"]/td[contains(@class, "sell-sku-cell-money")]//input/@value')
+                desc = html.xpath(
+                    '//tr[@class="sku-table-row"]/td[contains(@class, "sell-sku-cell-skuIndividualCom")]//em/@title')
+                sales = html.xpath(
+                    '//tr[@class="sku-table-row"]/td[contains(@class, "sell-sku-cell-number")]//input/@value')
+                sku_spbm = html.xpath(
+                    '//tr[@class="sku-table-row"]/td[contains(@class, "sell-sku-cell-input") and contains(@id, "skuOuterId")]//input/@value')
+                leimu = html.xpath(
+                    '//h2[@id="text-catpath"]/div/text()')
+                sp_titles = html.xpath(
+                    '//div[@class="tm-title normal"]/span/span/input/@value')
+
+                if sp_titles:
+                    sp_titles = sp_titles[0]
+                else:
+                    sp_titles = ''
+                if leimu:
+                    leimu = re.sub('>>', '_', leimu[0])
+                    leimu = re.sub('当前类目:', '', leimu)
+                else:
+                    leimu = ''
+                if not titles:
+                    titles = ''
+                if not imgs:
+                    imgs = ''
+                if not sales:
+                    sales = ''
+                if not sku_price:
+                    sku_price = ''
+                if not sku_spbm:
+                    sku_spbm = ''
+                if not desc:
+                    desc = ''
+
+                # print(sp_titles)
+                # print(titles)
+                # print(imgs)
+                # print(sales)
+                # print(sku_price)
+                # print(sku_spbm)
+                # print(desc)
+                # print(leimu)
+                self.datas.append(
+                    {
+                        '日期': datetime.date.today(),
+                        '商品id': data['商品id'],
+                        '商品标题': sp_titles,
+                        '商品链接': f'https://detail.tmall.com/item.htm?id={data["商品id"]}',
+                        'sku名称': titles,
+                        'sku图片链接': imgs,
+                        '库存数量': sales,
+                        '价格': sku_price,
+                        'sku编码': sku_spbm,
+                        '商家编码': data['商家编码'],
+                        '推荐卖点': desc,
+                        '是否新增': data['是否新增'],
+                        '类目': leimu,
+                    }
+                )
+            except Exception as e:
+                # print(e)
+                pass
+            i += 1
+            # if i > 3:
+            #     break
+            time.sleep(1)
+
+        results = []
+        for data in self.datas:
+            try:
+                df = pd.DataFrame.from_dict(data, orient='columns')
+                results.append(df)
+            except:
+                pass
+
+        if results:
+            self.df = pd.concat(results)
+            self.df = self.df[self.df['sku图片链接'] != '0']
+
+    def read_df(self):
+        path = os.path.join(self.path, self.filename)
+        df = pd.read_excel(path, header=0)
+        df = df[['商品id', '商家编码', '是否新增']]
+        df['是否新增'].fillna(0, inplace=True)
+        df = df.astype({'是否新增': int})
+        df = df[df['是否新增'] == 1]
+        self.urls = df.to_dict('records')
+
+
+class DownloadPicture():
+    """
+    Download data from the database
+    """
+    def __init__(self, service_name):
+        # target_service: which server to download from
+        self.months = 0  # how many months of data to download; 0 = current month, 1 = from the 1st of last month
+        # instantiate a query/download helper
+        username, password, host, port = get_myconf.select_config_values(target_service=service_name,
+                                                                         database='mysql')
+        self.download = s_query.QueryDatas(username=username, password=password, host=host, port=port)
+        self.df = pd.DataFrame()
+        self.headers = {'User-Agent': ua_sj.get_ua()}
+        self.save_path = '/Users/xigua/Downloads/sku图片链接'
+        self.filename = ''
+        if not os.path.exists(self.save_path):
+            os.mkdir(self.save_path)
+
+    def get_df_from_service(self):
+        start_date, end_date = self.months_data(num=self.months)
+        projection = {
+            '商品id': 1,
+            '商家编码': 1,
+            'sku编码': 1,
+            'sku名称': 1,
+            'sku图片链接': 1
+        }
+        self.df = self.download.data_to_df(
+            db_name='属性设置2',
+            table_name='天猫商品sku信息',
+            start_date=start_date,
+            end_date=end_date,
+            projection=projection,
+        )
+
+    def download_data(self):
+        dict_data = self.df.to_dict('records')
+        num = len(dict_data)
+        i = 0
+        for data in dict_data:
+            url = data['sku图片链接']
+            sku_name = re.sub('/', '_', data['sku名称'])
+            self.filename = f'{data["商品id"]}_{data["商家编码"]}_{data["sku编码"]}_{sku_name}.jpg'
+            if os.path.isfile(os.path.join(self.save_path, self.filename)):
+                i += 1
+                continue
+            if 'https' not in url:
+                i += 1
+                continue
+
+            print(f'正在下载: {i}/{num}, {data["sku编码"]}')
+            self.headers.update({'User-Agent': ua_sj.get_ua()})
+            res = requests.get(url, headers=self.headers)  # download the image into memory
+            # save the image to the local folder
+            with open(os.path.join(self.save_path, self.filename), 'wb') as f:
+                f.write(res.content)
+            i += 1
+            time.sleep(0.5)
+
+    @staticmethod
+    def months_data(num=0, end_date=None):
+        """ Read the last num months of data; 0 means the current month """
+        if not end_date:
+            end_date = datetime.datetime.now()
+        start_date = end_date - relativedelta(months=num)  # today, n months ago
+        start_date = f'{start_date.year}-{start_date.month}-01'  # replace with the first day of that month
+        return pd.to_datetime(start_date), pd.to_datetime(end_date)
+
+
+def main(service_name, database):
+    if not os.path.exists(Share_Path):
+        print(f'当前系统环境不支持')
+        return
+
+    _driver = LoadAccount()  # different account domains need a fresh instance
+    # tb_driver2 = 1
+    tb_driver2 = _driver.load_account(shop_name='万里马官方旗舰店')
+    if tb_driver2:
+        s = SkuPicture(driver=tb_driver2)
+        s.read_df()  # read item ids from the local file and update urls
+        s.each_page()  # fetch the data for every item in urls and build df
+        tb_driver2.quit()
+
+        # s.df.to_csv('/Users/xigua/Downloads/test.csv', encoding='utf-8_sig', index=False, header=True)
+        username, password, host, port = get_myconf.select_config_values(target_service=service_name, database=database)
+        m = mysql.MysqlUpload(username=username, password=password, host=host, port=port)
+        m.df_to_mysql(
+            df=s.df,
+            db_name='属性设置2',
+            table_name='天猫商品sku信息',
+            move_insert=True,  # delete first, then insert
+            # df_sql=True,
+            # drop_duplicates=False,
+            # icm_update=unique_key_list,
+            service_database={service_name: database},
+        )  # 3. write back to the database
+
+
+def main2(service_name, database):
+    d = DownloadPicture(service_name=service_name)
+    d.get_df_from_service()
+    d.download_data()
+
+
+class InsertPicture():
+    def __init__(self):
+        self.file = '/Users/xigua/Downloads/test.xlsx'
+        self.path = '/Users/xigua/Downloads/sku图片链接'
+        self.pic_datas = []
+        self.header = 0  # header row index of the sku sheet
+
+    def insert_data(self):
+        self.get_filename()
+        # sku_in_files = [item['sku'] for item in self.pic_datas]
+        # print(len(sku_in_files))
+
+        # df = pd.read_excel(self.file, header=self.header)
+        # cols = df.columns.tolist()
+        # print(cols)
+
+        workbook = load_workbook(self.file)
+        sheet = workbook.active
+        rows = sheet.max_row  # total rows
+        columns = sheet.max_column  # total columns
+        # print(columns)
+        # print(rows)
+        sheet.insert_cols(0, 1)  # insert one blank column at the beginning
+        sheet['A1'] = '商品图片'
+
+        for col in range(1, columns + 1):
+            for row in range(1, rows + 1):
+                # print(f'第{col}列, 第{row}行...')
+                value = sheet.cell(row=row, column=col).value
+                if value:
+                    for data in self.pic_datas:
+                        if str(value) == data['sku']:
+                            print(value, data['文件名称'])
+                            image_path = os.path.join(data['文件路径'], data['文件名称'])
+                            img = PILImage.open(image_path)
+                            output = BytesIO()
+                            img.save(output, format='JPEG')
+                            image_data = output.getvalue()
+
+                            # resize the image
+                            img_resized = img.resize((128, 128))  # resize to 128x128
+                            output_resized = BytesIO()
+                            img_resized.save(output_resized, format='JPEG')
+                            image_data_resized = output_resized.getvalue()
+                            col_letter = 'A'
+                            sheet.add_image(openpyxl.drawing.image.Image(output_resized), f'{col_letter}{row}')
+                            break
+
+        # workbook.save(self.file)
+        # print(filenames)
+        # sheet.insert_cols(0, 1)  # insert one blank column at the beginning
+        # sheet['A1'] = '商品图片'
+
+        # col_letter = get_column_letter(col)  # convert the numeric index to a column label A, B, C, D...
+        # sheet.column_dimensions[col_letter].width = 10
+        # sheet.row_dimensions[row].height = 80
+        # # sheet.cell(row=row, column=col).value = ""  # clear the original content
+        # now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ")
+        # print(f'{now}正在转换: 第{col}列, 第{row}行..')
+
+    def get_filename(self):
+        for root, dirs, files in os.walk(self.path, topdown=False):
+            for name in files:
+                if name.endswith('.jpg'):
+                    sku_id = re.findall(r'\d+_\d+_(\d+)_|\d+_\d+_(\d+-\d+)_|\d+_\d+_([A-Za-z]+\d+)_', name)
+                    sku_id = [item for item in sku_id[0] if item != '']
+                    self.pic_datas.append({'文件路径': root, '文件名称': name, 'sku': sku_id[0]})
+
+
+def main3():
+    p = InsertPicture()
+    p.header = 1
+    p.insert_data()
+
+
+if __name__ == '__main__':
+    # main(service_name='company', database='mysql')
+    # main2(service_name='company', database='mysql')
+    main3()
{mdbq-1.7.9 → mdbq-1.8.1}/mdbq/pbix/refresh_all.py
@@ -63,7 +63,7 @@ class RefreshAll:
             if filename.endswith('.xlsx'):
                 try:
                     print(f'正在刷新 >>>{filename}')
-                    path = os.path.join(self.run_py_path, filename)  # join the file path
+                    path = os.path.join(top_path, self.run_py_path, filename)  # join the file path
                     xlapp = win32com.client.Dispatch('Excel.Application')  # create the Excel application instance
                     xlapp.Visible = False  # whether the window is visible
                     xlapp.DisplayAlerts = False  # whether to show alert dialogs