scraper2-hj3415 0.0.3__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. scraper2_hj3415/cli.py +35 -11
  2. scraper2_hj3415/miscrapy/mi/__init__.py +0 -0
  3. scraper2_hj3415/miscrapy/mi/items.py +7 -0
  4. scraper2_hj3415/miscrapy/mi/middlewares.py +103 -0
  5. scraper2_hj3415/miscrapy/mi/pipelines.py +39 -0
  6. scraper2_hj3415/miscrapy/mi/settings.py +103 -0
  7. scraper2_hj3415/miscrapy/mi/spiders/__init__.py +4 -0
  8. scraper2_hj3415/miscrapy/mi/spiders/aud.py +23 -0
  9. scraper2_hj3415/miscrapy/mi/spiders/chf.py +25 -0
  10. scraper2_hj3415/miscrapy/mi/spiders/gbond3y.py +24 -0
  11. scraper2_hj3415/miscrapy/mi/spiders/gold.py +25 -0
  12. scraper2_hj3415/miscrapy/mi/spiders/kosdaq.py +23 -0
  13. scraper2_hj3415/miscrapy/mi/spiders/kospi.py +23 -0
  14. scraper2_hj3415/miscrapy/mi/spiders/mihistory.py +333 -0
  15. scraper2_hj3415/miscrapy/mi/spiders/silver.py +25 -0
  16. scraper2_hj3415/miscrapy/mi/spiders/sp500.py +24 -0
  17. scraper2_hj3415/miscrapy/mi/spiders/usdidx.py +30 -0
  18. scraper2_hj3415/miscrapy/mi/spiders/usdkrw.py +24 -0
  19. scraper2_hj3415/miscrapy/mi/spiders/wti.py +25 -0
  20. scraper2_hj3415/miscrapy/run.py +156 -0
  21. scraper2_hj3415/miscrapy/scrapy.cfg +11 -0
  22. scraper2_hj3415/nfscrapy/nfs/items.py +24 -0
  23. scraper2_hj3415/nfscrapy/nfs/pipelines.py +94 -16
  24. scraper2_hj3415/nfscrapy/nfs/settings.py +2 -1
  25. scraper2_hj3415/nfscrapy/nfs/spiders/_c108.py +95 -0
  26. scraper2_hj3415/nfscrapy/nfs/spiders/c101.py +3 -11
  27. scraper2_hj3415/nfscrapy/nfs/spiders/c103.py +174 -0
  28. scraper2_hj3415/nfscrapy/nfs/spiders/c104.py +228 -0
  29. scraper2_hj3415/nfscrapy/nfs/spiders/c106.py +92 -0
  30. scraper2_hj3415/nfscrapy/nfs/spiders/common.py +60 -0
  31. scraper2_hj3415/nfscrapy/run.py +76 -26
  32. {scraper2_hj3415-0.0.3.dist-info → scraper2_hj3415-0.1.1.dist-info}/METADATA +1 -1
  33. scraper2_hj3415-0.1.1.dist-info/RECORD +42 -0
  34. scraper2_hj3415-0.1.1.dist-info/entry_points.txt +2 -0
  35. scraper2_hj3415-0.0.3.dist-info/RECORD +0 -17
  36. scraper2_hj3415-0.0.3.dist-info/entry_points.txt +0 -2
  37. {scraper2_hj3415-0.0.3.dist-info → scraper2_hj3415-0.1.1.dist-info}/WHEEL +0 -0
  38. {scraper2_hj3415-0.0.3.dist-info → scraper2_hj3415-0.1.1.dist-info}/licenses/LICENSE +0 -0
scraper2_hj3415/nfscrapy/nfs/spiders/c106.py
@@ -0,0 +1,92 @@
+ import time
+ import scrapy
+ import pandas as pd
+ from util_hj3415 import utils
+ from scrapy.selector import Selector
+
+ from nfs import items
+ from nfs.spiders import common
+
+ # Scrapes two pages: quarterly and yearly.
+
+
+ class C106Spider(scrapy.Spider):
+     name = 'c106'
+     allowed_domains = ['navercomp.wisereport.co.kr']
+     WAIT = 2
+
+     def __init__(self, code, mongo_client, *args, **kwargs):
+         super(C106Spider, self).__init__(*args, **kwargs)
+         self.codes = common.adjust_arg_type(code)
+         self.mongo_client = mongo_client
+         self.driver = utils.get_driver()
+
+     def start_requests(self):
+         total_count = len(self.codes)
+         print(f'Start scraping {self.name}, {total_count} items...')
+         self.logger.info(f'entire codes list - {self.codes}')
+         for i, one_code in enumerate(self.codes):
+             print(f'{i + 1}/{total_count}. Parsing {self.name}...{one_code}')
+             # URL used to obtain the C106 column names
+             yield scrapy.Request(url=f'https://navercomp.wisereport.co.kr/v2/company/c1060001.aspx?cmp_cd={one_code}',
+                                  callback=self.parse_c106_col,
+                                  cb_kwargs=dict(code=one_code)
+                                  )
+
+     def parse_c106_col(self, response, code):
+         self.driver.get(response.url)
+         # From experience, a 1-second wait risked data collection errors.
+         time.sleep(self.WAIT)
+         html = Selector(text=self.driver.page_source)
+
+         # Collect the column names and carry them along in the next request.
+         cols = []
+         for i in range(1, 7):
+             pretitle = html.xpath(f'//*[@id="cTB611_h"]/thead/tr/th[{i}]/text()[1]').getall()[0].strip().replace('.','')
+             # An empty column name causes an error when saving to the database, hence this guard.
+             if pretitle == '':
+                 pretitle = 'Unnamed'
+             cols.append(pretitle)
+         self.logger.info(f'Parsing column names - {code} >>>> {cols}')
+
+         titles = ['y', 'q']  # becomes the table name in the pipeline
+         for title in titles:
+             # iframe URLs inside C106; there are two, quarterly and yearly
+             # reference from https://docs.scrapy.org/en/latest/topics/request-response.html (chaining requests)
+             yield scrapy.Request(
+                 url=f'https://navercomp.wisereport.co.kr/company/cF6002.aspx?cmp_cd={code}'
+                     f'&finGubun=MAIN&cmp_cd1=&cmp_cd2=&cmp_cd3=&cmp_cd4=&sec_cd=G453010&frq={title.upper()}',
+                 callback=self.parse_c106,
+                 cb_kwargs=dict(code=code, cols=cols, title=title)
+             )
+
+     def parse_c106(self, response, code, cols, title):
+         df = C106Spider.get_df_from_html(response.text, cols)
+         df['항목'] = (df['항목'].str.replace('\(억\)', '', regex=True).str.replace('\(원\)', '', regex=True)
+                     .str.replace('\(억원\)', '', regex=True).str.replace('\(%\)', '', regex=True))
+         self.logger.info(df)
+         # make item to yield
+         item = items.C106items()
+         item['코드'] = code
+         item['title'] = title
+         item['df'] = df
+         yield item
+
+     @staticmethod
+     def get_df_from_html(html, cols):
+         # Extract only the table part of the full html source and convert it to a DataFrame.
+         df = pd.read_html(html)[0]
+         # Set the column names
+         df.columns = cols
+         df.dropna(how='all', inplace=True)
+         return df
+
+     def __str__(self):
+         return 'C106 Spider'
+
+     def __del__(self):
+         if self.driver is not None:
+             print('Retrieve chrome driver...')
+             self.driver.quit()
+
+
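
The spider above is driven programmatically rather than through the scrapy CLI. A minimal sketch of that call, mirroring the process.crawl(spider, code=..., mongo_client=...) pattern added to nfscrapy/run.py (the stock code is a placeholder, and mongo_client=None follows the branch in run.py that skips MongoDB when no address is given):

    # Hypothetical driver script, run from inside the nfscrapy project directory.
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    process = CrawlerProcess(get_project_settings())
    # keyword arguments are forwarded to C106Spider.__init__
    process.crawl('c106', code=['005930'], mongo_client=None)
    process.start()

Note that get_project_settings() only resolves when the working directory contains the project's scrapy.cfg, which is exactly what the @chcwd decorator in run.py arranges.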
scraper2_hj3415/nfscrapy/nfs/spiders/common.py
@@ -0,0 +1,60 @@
+ import time
+ import pandas as pd
+ from typing import Tuple, List
+ from selenium.webdriver.chrome.webdriver import WebDriver
+ from selenium.webdriver.common.by import By
+
+ import logging
+ logger = logging.getLogger(__name__)
+ formatter = logging.Formatter('%(levelname)s: [%(name)s] %(message)s')
+ ch = logging.StreamHandler()
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+ logger.setLevel(logging.INFO)
+
+
+ def adjust_arg_type(code) -> list:
+     """
+     Converts the argument to a list, because it arrives as a str when the spider is run from the command line.
+     """
+     if type(code) is str:
+         return [code, ]
+     elif type(code) is list:
+         return code
+     else:
+         raise TypeError
+
+
+ def click_buttons(driver: WebDriver, url: str, buttons: List[Tuple[str, str]], wait: float):
+     # Takes the buttons list from a subclass and actually clicks each button.
+     logger.debug(f'*** Setting {url} page by clicking buttons ***')
+     driver.get(url)
+     for name, xpath in buttons:
+         logger.debug(f'- Click the {name} button')
+         driver.find_element(By.XPATH, xpath).click()
+         time.sleep(wait)
+     logger.debug('*** Buttons click done ***')
+
+
+ def get_df_from_html(selector, xpath, table_num):
+     """
+     Used by C103 and C104.
+     Converts the items and values of the unexpanded Naver table into a pandas DataFrame.
+     reference from http://hleecaster.com/python-pandas-selecting-data/ (selecting pandas rows/columns)
+     reference from https://blog.naver.com/wideeyed/221603778414 (pandas string handling)
+     reference from https://riptutorial.com/ko/pandas/example/5745/dataframe-%EC%97%B4-%EC%9D%B4%EB%A6%84-%EB%82%98%EC%97%B4 (listing pandas column names)
+     """
+     # Extract only the table part of the full html source and convert it to a DataFrame.
+     tables_list = selector.xpath(xpath).getall()
+     # print(tables_list[table_num])
+     df = pd.read_html(tables_list[table_num])[0]
+     # Remove the '펼치기' (expand) string from the 항목 column
+     df['항목'] = df['항목'].str.replace('펼치기', '').str.strip()
+     # reference from https://stackoverflow.com/questions/3446170/escape-string-for-use-in-javascript-regex (escaping regex special characters)
+     # Remove unnecessary strings from the header row
+     df.columns = (df.columns.str.replace('연간컨센서스보기', '', regex=False).str.replace('연간컨센서스닫기', '', regex=False)
+                   .str.replace('\(IFRS연결\)', '', regex=True).str.replace('\(IFRS별도\)', '', regex=True)
+                   .str.replace('\(GAAP개별\)', '', regex=True).str.replace('\(YoY\)', '', regex=True)
+                   .str.replace('\(QoQ\)', '', regex=True).str.replace('\(E\)', '', regex=True)
+                   .str.replace('.', '', regex=False).str.strip())
+     return df
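
For reference, adjust_arg_type simply normalises the spider's code argument; the stock codes below are placeholders:

    adjust_arg_type('005930')              # -> ['005930']
    adjust_arg_type(['005930', '000660'])  # -> ['005930', '000660'] (returned unchanged)
    adjust_arg_type(5930)                  # -> raises TypeError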
scraper2_hj3415/nfscrapy/run.py
@@ -1,14 +1,15 @@
  import os
+ import sys
  import time
- import pymongo

  from scrapy.crawler import CrawlerProcess
  from multiprocessing import Process, cpu_count
  from scrapy.utils.project import get_project_settings

- from util_hj3415 import utils
+ from util_hj3415 import utils, mongo

  import logging
+
  logger = logging.getLogger(__name__)
  formatter = logging.Formatter('%(levelname)s: [%(name)s] %(message)s')
  ch = logging.StreamHandler()
@@ -23,6 +24,7 @@ def chcwd(func):
      :param func:
      :return:
      """
+
      def wrapper(*args, **kwargs):
          before_cwd = os.getcwd()
          logger.info(f'current path : {before_cwd}')
@@ -32,6 +34,7 @@ def chcwd(func):
          func(*args, **kwargs)
          logger.info(f'restore path to {before_cwd}')
          os.chdir(before_cwd)
+
      return wrapper


@@ -46,10 +49,15 @@ def _run_scrapy(spider: str, codes: list, mongo_addr: str):
      if mongo_addr == "":
          mongo_client = None
      else:
-         mongo_client = connect_mongo(mongo_addr)
+         try:
+             mongo_client = mongo.connect_mongo(mongo_addr)
+         except mongo.UnableConnectServerException:
+             conn_str = f"Unable to connect to the server.(MY IP : {utils.get_ip_addr()})"
+             print(f"{conn_str} Server Addr : {mongo_addr}", file=sys.stderr)
+             return

      process = CrawlerProcess(get_project_settings())
-     process.crawl(spider, codes=codes, mongo_client=mongo_client)
+     process.crawl(spider, code=codes, mongo_client=mongo_client)
      process.start()

      if mongo_client is not None:
@@ -63,6 +71,7 @@ def _code_divider(entire_codes: list) -> tuple:
      :param entire_codes:
      :return:
      """
+
      def _split_list(alist, wanted_parts=1):
          """
          Splits the list into the number of parts used for multiprocessing.
@@ -120,32 +129,34 @@ def _mp_c10168(page: str, codes: list, mongo_addr: str):
      print(f'Total spent time : {round(time.time() - start_time, 2)} sec.')


- def connect_mongo(addr: str, timeout=5) -> pymongo.MongoClient:
+ def _mp_c1034(page: str, codes: list, mongo_addr: str):
      """
-     Creates a mongo client.
-     Split out as a separate function because handling the database with one client is more efficient than creating a client every time it is needed.
-     resolve conn error - https://stackoverflow.com/questions/54484890/ssl-handshake-issue-with-pymongo-on-python3
-     :param addr:
-     :param timeout:
+     Runs multiprocessing according to the page count: 6 for c103, 8 for c104.
+     :param page:
+     :param codes:
      :return:
      """
-     import certifi
-     ca = certifi.where()
-     if addr.startswith('mongodb://'):
-         # set a some-second connection timeout
-         client = pymongo.MongoClient(addr, serverSelectionTimeoutMS=timeout * 1000)
-     elif addr.startswith('mongodb+srv://'):
-         client = pymongo.MongoClient(addr, serverSelectionTimeoutMS=timeout * 1000, tlsCAFile=ca)
+     if page == 'c103':
+         spiders = ('c103_iy', 'c103_by', 'c103_cy', 'c103_iq', 'c103_bq', 'c103_cq')
+     elif page == 'c104':
+         spiders = ('c104_aq', 'c104_bq', 'c104_cq', 'c104_dq', 'c104_ay', 'c104_by', 'c104_cy', 'c104_dy')
      else:
-         raise Exception(f"Invalid address: {addr}")
-     try:
-         srv_info = client.server_info()
-         conn_str = f"Connect to Mongo Atlas v{srv_info['version']}..."
-         print(conn_str, f"Server Addr : {addr}")
-         return client
-     except Exception:
-         conn_str = f"Unable to connect to the server.(MY IP : {utils.get_ip_addr()})"
-         raise Exception(f"{conn_str} Server Addr : {addr}")
+         raise NameError
+     title = spiders[0].split('_')[0]
+     print('*' * 25, f"Scrape multiprocess {title}", '*' * 25)
+     print(f'Total {len(codes)} items..')
+     logger.info(codes)
+
+     start_time = time.time()
+     ths = []
+     error = False
+     for spider in spiders:
+         ths.append(Process(target=_run_scrapy, args=(spider, codes, mongo_addr)))
+     for i in range(len(ths)):
+         ths[i].start()
+     for i in range(len(ths)):
+         ths[i].join()
+     print(f'Total spent time : {round(time.time() - start_time, 2)} sec.')


  @chcwd
@@ -158,3 +169,42 @@ def c101(codes: list, mongo_addr: str = ""):
      """
      _mp_c10168('c101', codes=codes, mongo_addr=mongo_addr)

+
+ @chcwd
+ def c103(codes: list, mongo_addr: str = ""):
+     """
+     Function that allows c103 to be run externally.
+     :param codes: list of stock codes
+     :param mongo_addr: MongoDB URI - mongodb://...
+     :return:
+     """
+     _mp_c1034('c103', codes=codes, mongo_addr=mongo_addr)
+
+
+ @chcwd
+ def c104(codes: list, mongo_addr: str = ""):
+     """
+     Function that allows c104 to be run externally.
+     :param codes: list of stock codes
+     :param mongo_addr: MongoDB URI - mongodb://...
+     :return:
+     """
+     _mp_c1034('c104', codes=codes, mongo_addr=mongo_addr)
+
+
+ @chcwd
+ def c106(codes: list, mongo_addr: str = ""):
+     """
+     Function that allows c106 to be run externally.
+     :param codes: list of stock codes
+     :param mongo_addr: MongoDB URI - mongodb://...
+     :return:
+     """
+     _mp_c10168('c106', codes=codes, mongo_addr=mongo_addr)
+
+ """
+ @chcwd
+ def c108(codes: list, mongo_addr: str = ""):
+     _mp_c10168('c108', codes=codes, mongo_addr=mongo_addr)
+ """
+
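
Taken together, the new run.py functions expose each Naver Finance page scraper as a plain Python call. A hedged usage sketch (the import path follows the package layout in RECORD; the stock codes and Mongo URI are placeholders):

    from scraper2_hj3415.nfscrapy import run

    codes = ['005930', '000660']
    run.c101(codes)                                          # no MongoDB write when mongo_addr is ""
    run.c103(codes, mongo_addr='mongodb://localhost:27017')  # fans out 6 spider processes
    run.c104(codes, mongo_addr='mongodb://localhost:27017')  # fans out 8 spider processes
    run.c106(codes)

If the MongoDB server is unreachable, _run_scrapy now prints the error to stderr and returns instead of raising, so a batch run can continue.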
{scraper2_hj3415-0.0.3.dist-info → scraper2_hj3415-0.1.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: scraper2_hj3415
- Version: 0.0.3
+ Version: 0.1.1
  Summary: Gathering the stock data
  Project-URL: Homepage, https://pypi.org/project/scraper2_hj3415/
  Project-URL: Bug Tracker, https://pypi.org/project/scraper2_hj3415/
scraper2_hj3415-0.1.1.dist-info/RECORD
@@ -0,0 +1,42 @@
+ scraper2_hj3415/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ scraper2_hj3415/cli.py,sha256=bPM5XAqT7VcVON_fclHPoBhq6jjJXvN0Ywf0KRoOdM0,2194
+ scraper2_hj3415/miscrapy/run.py,sha256=ZBnHJHyUdPVcKdBK7gTyPFY9-9VIJYzhflscrowUE3k,5323
+ scraper2_hj3415/miscrapy/scrapy.cfg,sha256=KvMPqIfrkMIydPYMPb0fN9m_IBpr61N7ctwCwudcF2s,247
+ scraper2_hj3415/miscrapy/mi/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ scraper2_hj3415/miscrapy/mi/items.py,sha256=g6qALzFirk5Y1N3XGF3SaeLkbzzTebW782uqLKc3Z70,124
+ scraper2_hj3415/miscrapy/mi/middlewares.py,sha256=li2RPzDzqukbOU1J24eNLI9cDSSFcUCoyf5PPo1cAIA,3640
+ scraper2_hj3415/miscrapy/mi/pipelines.py,sha256=Hj7DEalndBMYcLiTUOXWWYY8FgkSj-iFUtH-fftUUKU,1105
+ scraper2_hj3415/miscrapy/mi/settings.py,sha256=aaTp6B6j1FpbBTMGYQgZKAsvs5HWZ91OHndLmnx7M1o,3512
+ scraper2_hj3415/miscrapy/mi/spiders/__init__.py,sha256=ULwecZkx3_NTphkz7y_qiazBeUoHFnCCWnKSjoDCZj0,161
+ scraper2_hj3415/miscrapy/mi/spiders/aud.py,sha256=dIebapFUYnzINj6sCYmeI8d6X2_NoY_DWqWhtnjlOYs,1029
+ scraper2_hj3415/miscrapy/mi/spiders/chf.py,sha256=tyBZxZjsksf7ot1ilKingepJ2ET4Ledp36RgwaLGdTU,1061
+ scraper2_hj3415/miscrapy/mi/spiders/gbond3y.py,sha256=m9TnWCQ6LNPogF6yzYRKY5ZzBfpxeD9UFweZwIf66NY,1009
+ scraper2_hj3415/miscrapy/mi/spiders/gold.py,sha256=olte0zpFuhvFWrc4rI88db6iWnnicr5GkfV7998x94M,1062
+ scraper2_hj3415/miscrapy/mi/spiders/kosdaq.py,sha256=B0cdS6sTd_wX1LGzDN7181ewpIb02ufC6JaqTRA9IJs,898
+ scraper2_hj3415/miscrapy/mi/spiders/kospi.py,sha256=J4KAdbRwKeaRSh7DhQLsIJfazLzYWOQ7A3SNbmunop4,894
+ scraper2_hj3415/miscrapy/mi/spiders/mihistory.py,sha256=-SGKKCrjcEY2mYe2c4ztlx1T4nBm7HhIOTgMGo_BMS4,15637
+ scraper2_hj3415/miscrapy/mi/spiders/silver.py,sha256=0DP3hR_oXJIEqRQaxJdfjJtiGKoc8dzlYIWsQ3jVekg,1068
+ scraper2_hj3415/miscrapy/mi/spiders/sp500.py,sha256=oOaBdGhnggg-laQ8aJp5sTvbYwjCQAbuv_ZduawgIdo,935
+ scraper2_hj3415/miscrapy/mi/spiders/usdidx.py,sha256=TQ0zSxg7xpFiudhsgyjiJ38G3DMCkVAVaDYEnIU_H3k,1074
+ scraper2_hj3415/miscrapy/mi/spiders/usdkrw.py,sha256=BmLmOEK4HogPFZ1XOFXB_RhDyAO2vjRVC6a1bNnLbxQ,963
+ scraper2_hj3415/miscrapy/mi/spiders/wti.py,sha256=Yy49k-uzpUBpAu-zXhFLCWp_ZEqWXiIPXtJK3PvQUGQ,1058
+ scraper2_hj3415/nfscrapy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ scraper2_hj3415/nfscrapy/run.py,sha256=Esa1yKB4ZEZ_H6Cq7v7lVC0LuzDnCvrkSEzL89n3fKg,6849
+ scraper2_hj3415/nfscrapy/scrapy.cfg,sha256=yCkEgpzAwc9NWjYGaEUelGdLg3mUuuQF1Zl0k5vITH8,260
+ scraper2_hj3415/nfscrapy/nfs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ scraper2_hj3415/nfscrapy/nfs/items.py,sha256=n9xDKoGez8oRmsRLSDjjPhLCXgeJGUsKydjWc_gzgbk,1479
+ scraper2_hj3415/nfscrapy/nfs/middlewares.py,sha256=F3kL0OJMsUhiw2mPIxQLGvY3wdMxIsQl1BG2uwo_ZHo,3745
+ scraper2_hj3415/nfscrapy/nfs/pipelines.py,sha256=I_co-PfJdUUsvw461s5Kroc1YGCPlEuPOMjptYldzBQ,5264
+ scraper2_hj3415/nfscrapy/nfs/settings.py,sha256=BkOy913N9K8Ypwuj0GiZ0zAI_WnBcjJzwjgnZOS_jqw,3472
+ scraper2_hj3415/nfscrapy/nfs/spiders/__init__.py,sha256=ULwecZkx3_NTphkz7y_qiazBeUoHFnCCWnKSjoDCZj0,161
+ scraper2_hj3415/nfscrapy/nfs/spiders/_c108.py,sha256=gOgWQ7qTOOBEZQTY79K5MWzjmz-ZpFg5wrqBhzLoXjI,4776
+ scraper2_hj3415/nfscrapy/nfs/spiders/c101.py,sha256=-b_bC0jS_MgDP9h9n2MFHRq-wDx1CGWsgWj6-KQKnpc,3931
+ scraper2_hj3415/nfscrapy/nfs/spiders/c103.py,sha256=5E7jZX5UIASCtqGa0eikJVcwlKQsveT-4nyTN6TXGkQ,5614
+ scraper2_hj3415/nfscrapy/nfs/spiders/c104.py,sha256=usz29mBeXiQrv_e7uDf2ZihbENVsafEUHUJw8aHRwlw,7645
+ scraper2_hj3415/nfscrapy/nfs/spiders/c106.py,sha256=cMbpSLqG4EanKzh6rjpFLAnECt_zLR34MTZo34xu6Go,3783
+ scraper2_hj3415/nfscrapy/nfs/spiders/common.py,sha256=QTVWFF386cvoN2czFJQpTeE5jRgnlSE_Z-7y13tPyi8,2876
+ scraper2_hj3415-0.1.1.dist-info/METADATA,sha256=zKqwM0tDyqMhXFJmUII9lifvGPyzs5MaMxbi4alN1g8,1154
+ scraper2_hj3415-0.1.1.dist-info/WHEEL,sha256=Fd6mP6ydyRguakwUJ05oBE7fh2IPxgtDN9IwHJ9OqJQ,87
+ scraper2_hj3415-0.1.1.dist-info/entry_points.txt,sha256=1bGP38AtuY6n2FcP_fLmpqGtFmFf8NLJWsCSWoTjF-0,60
+ scraper2_hj3415-0.1.1.dist-info/licenses/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
+ scraper2_hj3415-0.1.1.dist-info/RECORD,,
scraper2_hj3415-0.1.1.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+ [console_scripts]
+ nfscraper = scraper2_hj3415.cli:nfscraper
scraper2_hj3415-0.0.3.dist-info/RECORD
@@ -1,17 +0,0 @@
- scraper2_hj3415/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- scraper2_hj3415/cli.py,sha256=I4Ha38SqYmaL_oJB0VLq_-lP5I45bAWmU71hgwNzRDY,472
- scraper2_hj3415/nfscrapy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- scraper2_hj3415/nfscrapy/run.py,sha256=G4UVhG0LYEdljiC7hbKCsiFVHSixmU3cOQEI0pM6Xso,5768
- scraper2_hj3415/nfscrapy/scrapy.cfg,sha256=yCkEgpzAwc9NWjYGaEUelGdLg3mUuuQF1Zl0k5vITH8,260
- scraper2_hj3415/nfscrapy/nfs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- scraper2_hj3415/nfscrapy/nfs/items.py,sha256=zKTQWz-IrA_MLY86oVExFhbFq-bgBEHMFlmfZL_CwGA,1017
- scraper2_hj3415/nfscrapy/nfs/middlewares.py,sha256=F3kL0OJMsUhiw2mPIxQLGvY3wdMxIsQl1BG2uwo_ZHo,3745
- scraper2_hj3415/nfscrapy/nfs/pipelines.py,sha256=gMM_Jfm7dbAfaTux1YvfbJ34vF-BBoIq6QZqX9wN0Uc,989
- scraper2_hj3415/nfscrapy/nfs/settings.py,sha256=UsmKxrDTNboO10CzeFi3IhNiTpiKg-gvNySSIlNiogQ,3426
- scraper2_hj3415/nfscrapy/nfs/spiders/__init__.py,sha256=ULwecZkx3_NTphkz7y_qiazBeUoHFnCCWnKSjoDCZj0,161
- scraper2_hj3415/nfscrapy/nfs/spiders/c101.py,sha256=WKqLQ5EzJTnH1uozyhO7j8Invv4kBy0JCFy4nemd4PI,4233
- scraper2_hj3415-0.0.3.dist-info/METADATA,sha256=grsQ7NqmBIlANIOOqfUxVZiDX66-QiVS1TW0Dy5J818,1154
- scraper2_hj3415-0.0.3.dist-info/WHEEL,sha256=Fd6mP6ydyRguakwUJ05oBE7fh2IPxgtDN9IwHJ9OqJQ,87
- scraper2_hj3415-0.0.3.dist-info/entry_points.txt,sha256=fXVauW_bDwCLucr5jdvKf4lKk4w74zYns9U_9f9LSyU,57
- scraper2_hj3415-0.0.3.dist-info/licenses/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
- scraper2_hj3415-0.0.3.dist-info/RECORD,,
scraper2_hj3415-0.0.3.dist-info/entry_points.txt
@@ -1,2 +0,0 @@
- [console_scripts]
- cli-manager = scraper2_hj3415.cli:main