neurostats-API 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neurostats_API/__init__.py +13 -1
- neurostats_API/fetchers/__init__.py +2 -0
- neurostats_API/fetchers/base.py +102 -4
- neurostats_API/fetchers/institution.py +19 -13
- neurostats_API/fetchers/profit_lose.py +7 -7
- neurostats_API/fetchers/tech.py +34 -7
- neurostats_API/fetchers/tej_finance_report.py +313 -0
- neurostats_API/tools/balance_sheet.yaml +1 -0
- neurostats_API/tools/profit_lose.yaml +25 -3
- neurostats_API/utils/__init__.py +2 -1
- neurostats_API/utils/calculate_value.py +26 -0
- neurostats_API/utils/data_process.py +90 -8
- {neurostats_API-0.0.13.dist-info → neurostats_API-0.0.15.dist-info}/METADATA +72 -2
- {neurostats_API-0.0.13.dist-info → neurostats_API-0.0.15.dist-info}/RECORD +16 -14
- {neurostats_API-0.0.13.dist-info → neurostats_API-0.0.15.dist-info}/WHEEL +0 -0
- {neurostats_API-0.0.13.dist-info → neurostats_API-0.0.15.dist-info}/top_level.txt +0 -0
neurostats_API/__init__.py
CHANGED
@@ -1 +1,13 @@
-__version__='0.0.13'
+__version__='0.0.15'
+
+from .fetchers import (
+    BalanceSheetFetcher,
+    CashFlowFetcher,
+    FinanceOverviewFetcher,
+    FinanceReportFetcher,
+    InstitutionFetcher,
+    MarginTradingFetcher,
+    MonthRevenueFetcher,
+    TechFetcher,
+    ProfitLoseFetcher
+)
neurostats_API/fetchers/__init__.py
CHANGED
@@ -2,6 +2,8 @@ from .base import StatsDateTime, StatsFetcher
 from .balance_sheet import BalanceSheetFetcher
 from .cash_flow import CashFlowFetcher
 from .finance_overview import FinanceOverviewFetcher
+from .tej_finance_report import FinanceReportFetcher
+from .tech import TechFetcher
 from .institution import InstitutionFetcher
 from .margin_trading import MarginTradingFetcher
 from .month_revenue import MonthRevenueFetcher
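With both `__init__.py` changes applied, the new fetchers become importable directly from the package root. A one-line sketch:

```Python
from neurostats_API import FinanceReportFetcher, TechFetcher
```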
neurostats_API/fetchers/base.py
CHANGED
@@ -1,16 +1,16 @@
+import abc
 from pymongo import MongoClient
 import pandas as pd
 import json
 import pytz
 from datetime import datetime, timedelta, date
-from ..utils import StatsDateTime, StatsProcessor
+from ..utils import StatsDateTime, StatsProcessor, YoY_Calculator
 import yaml
 
 class StatsFetcher:
     def __init__(self, ticker, db_client):
         self.ticker = ticker
-        self.db = db_client[
-            "company"] # Replace with your database name
+        self.db = db_client["company"] # Replace with your database name
         self.collection = self.db["twse_stats"]
 
         self.timezone = pytz.timezone("Asia/Taipei")
@@ -37,7 +37,7 @@ class StatsFetcher:
         ]
 
     def collect_data(self, start_date, end_date):
-        pipeline = self.prepare_query(
+        pipeline = self.prepare_query()
 
         fetched_data = list(self.collection.aggregate(pipeline))
 
@@ -52,3 +52,101 @@ class StatsFetcher:
         season = (month - 1) // 3 + 1
 
         return StatsDateTime(date, year, month, day, season)
+
+class BaseTEJFetcher(abc.ABC):
+    def __init__(self):
+        self.client = None
+        self.db = None
+        self.collection = None
+
+    @abc.abstractmethod
+    def get(self):
+        pass
+
+    def get_latest_data_time(self, ticker):
+        latest_data = self.collection.find_one(
+            {"ticker": ticker},
+            {"last_update": 1, "_id": 0}
+        )
+
+        try:
+            latest_date = latest_data['last_update']["latest_data_date"]
+        except Exception as e:
+            latest_date = None
+
+        return latest_date
+
+    def cal_YoY(self, data_dict: dict, start_year: int, end_year: int):
+        year_shifts = [1, 3, 5, 10]
+        return_dict = {}
+        for year in range(start_year, end_year + 1):
+            year_data = data_dict[str(year)]
+            year_keys = list(year_data.keys())
+            for key in year_keys:
+                if (key in 'season'):
+                    continue
+
+                if (isinstance(year_data[key], (int, float))):
+                    temp_dict = {"value": year_data[key]}
+
+                    for shift in year_shifts:
+                        this_value = year_data[key]
+                        try:
+                            past_year = str(year - shift)
+                            last_value = data_dict[past_year][key]['value']
+                            temp_dict[f"YoY_{shift}"] = YoY_Calculator.cal_growth(
+                                this_value, last_value, delta=shift
+                            )
+                        except Exception as e:
+                            temp_dict[f"YoY_{shift}"] = None
+
+                    year_data[key] = temp_dict
+
+                else:
+                    year_data.pop(key)
+
+            return_dict[year] = year_data
+
+
+        return return_dict
+
+    def cal_QoQ(self, data_dict):
+        return_dict = {}
+        for i, time_index in enumerate(data_dict.keys()):
+            year, season = time_index.split("Q")
+            year = int(year)
+            season = int(season)
+            if (season == 1):
+                last_year = year - 1
+                last_season = 4
+            else:
+                last_year = year
+                last_season = season - 1
+
+            this_data = data_dict[time_index]
+            this_keys = list(this_data.keys())
+            for key in this_keys:
+                if (key in 'season'):
+                    continue
+
+                this_value = this_data[key]
+
+                if (isinstance(this_value, (int, float))):
+                    temp_dict = {"value": this_value}
+
+                    try:
+                        last_value = data_dict[f"{last_year}Q{last_season}"][key]['value']
+
+                        temp_dict['growth'] = YoY_Calculator.cal_growth(
+                            this_value, last_value, delta=1
+                        )
+                    except Exception as e:
+                        temp_dict['growth'] = None
+
+                    this_data[key] = temp_dict
+
+                else:
+                    this_data.pop(key)
+            return_dict[time_index] = this_data
+        return return_dict
+
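The two helpers added above rewrite the per-period dictionaries in place: every numeric field becomes a `{"value": ..., "growth": ...}` (QoQ) or `{"value": ..., "YoY_1": ..., "YoY_3": ...}` (YoY) sub-dict, with the growth figures supplied by `YoY_Calculator.cal_growth`. A minimal sketch of the QoQ case, using made-up numbers:

```Python
# Input shaped like the output of StatsProcessor.list_of_dict_to_dict (illustrative values)
data_dict = {
    "2023Q4": {"eps": 2.0},
    "2024Q1": {"eps": 2.2},
}
# After cal_QoQ(data_dict):
# {
#     "2023Q4": {"eps": {"value": 2.0, "growth": None}},  # no 2023Q3 entry -> growth is None
#     "2024Q1": {"eps": {"value": 2.2, "growth": 0.1}},   # (2.2 - 2.0) / 2.0
# }
```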
neurostats_API/fetchers/institution.py
CHANGED
@@ -164,25 +164,26 @@ class InstitutionFetcher(StatsFetcher):
                 latest_daily_data['volume'])
             }
             # 一年內法人
-            annual_dates = [
+            annual_dates = [
+                data['date'].strftime("%Y-%m-%d") for data in daily_datas
+            ]
             annual_closes = {
-                data['date'].strftime("%Y-%m-%d")
+                data['date'].strftime("%Y-%m-%d"): data['close']
                 for data in daily_datas
                 if (data['date'].strftime("%Y-%m-%d") in annual_dates)
             }
             annual_volumes = {
-                data['date'].strftime("%Y-%m-%d")
+                data['date'].strftime("%Y-%m-%d"): data['volume']
                 for data in daily_datas
                 if (data['date'].strftime("%Y-%m-%d") in annual_dates)
             }
             annual_trading = {
-                data['date'].strftime("%Y-%m-%d")
+                data['date'].strftime("%Y-%m-%d"): data
                 for data in institution_tradings
-            }
+            }
 
             annual_trading = {
                 date: {
-
                     "close": annual_closes[date],
                     "volume": annual_volumes[date],
                     **annual_trading[date]
@@ -217,13 +218,18 @@ class InstitutionFetcher(StatsFetcher):
                     self.target_institution(latest_trading,
                                             latest_table['institutional_investor'],
                                             key, volume)
-
-
-
-
-
-
-
+            # 計算合計
+            for unit in ['stock', 'percentage']:
+                # 買進總和
+                latest_table['institutional_investor']['buy'][unit] = (
+                    latest_table['foreign']['buy'][unit] +
+                    latest_table['prop']['buy'][unit] +
+                    latest_table['mutual']['buy'][unit])
+                # 賣出總和
+                latest_table['institutional_investor']['sell'][unit] = (
+                    latest_table['foreign']['sell'][unit] +
+                    latest_table['prop']['sell'][unit] +
+                    latest_table['mutual']['sell'][unit])
 
         frames = []
         for category, trades in latest_table.items():
neurostats_API/fetchers/profit_lose.py
CHANGED
@@ -21,12 +21,6 @@ class ProfitLoseFetcher(StatsFetcher):
     def prepare_query(self, target_season):
         pipeline = super().prepare_query()
 
-        target_query = {
-            "year": "$$target_season_data.year",
-            "season": "$$target_season_data.season",
-            "balance_sheet": "$$$$target_season_data.balance_sheet"
-        }
-
         pipeline.append({
             "$project": {
                 "_id": 0,
@@ -121,6 +115,9 @@ class ProfitLoseFetcher(StatsFetcher):
             elif ('YoY' in item_name):
                 if (isinstance(item, (float, int))):
                     item = StatsProcessor.cal_percentage(item)
+            elif ('每股盈餘' in index_name):
+                if (isinstance(item, (float, int))):
+                    item = StatsProcessor.cal_non_percentage(item, postfix="元")
             else:
                 if (isinstance(item, (float, int))):
                     item = StatsProcessor.cal_non_percentage(item, postfix="千元")
@@ -153,6 +150,9 @@ class ProfitLoseFetcher(StatsFetcher):
                     target_index=target_index)
                 break
             except Exception as e:
-
+                return_dict[name] = StatsProcessor.slice_multi_col_table(
+                    total_table=total_table,
+                    mode=setting['mode'],
+                    target_index=target_index)
 
         return return_dict
neurostats_API/fetchers/tech.py
CHANGED
@@ -1,9 +1,16 @@
 from .base import StatsFetcher
 import pandas as pd
+import yfinance as yf
 
 class TechFetcher(StatsFetcher):
 
     def __init__(self, ticker:str, db_client):
+
+        """
+        The Capitalization-Weighted Index includes the following tickers:
+        ['GSPC', 'IXIC', 'DJI', 'TWII']
+        """
+
         super().__init__(ticker, db_client)
         self.full_ohlcv = self._get_ohlcv()
         self.basic_indexes = ['SMA5', 'SMA20', 'SMA60', 'EMA5', 'EMA20',
@@ -40,16 +47,36 @@ class TechFetcher(StatsFetcher):
         )
 
     def _get_ohlcv(self):
-
-
+
+        if self.ticker in ['GSPC', 'IXIC', 'DJI', 'TWII']:
+
+            full_tick = f'^{self.ticker}'
+            yf_ticker = yf.Ticker(full_tick)
+            origin_df = yf_ticker.history(period="10y")
+            origin_df = origin_df.reset_index()
+            origin_df["Date"] = pd.to_datetime(origin_df["Date"]).dt.date
+            df = origin_df.rename(
+                columns={
+                    "Date": "date",
+                    "Open": "open",
+                    "High": "high",
+                    "Low": "low",
+                    "Close": "close",
+                    "Volume": "volume"
+                }
+            )
+        else:
+
+            query = {'ticker': self.ticker}
+            ticker_full = list(self.collection.find(query))
 
-
-
+            if not ticker_full:
+                raise ValueError(f"No data found for ticker: {self.ticker}")
 
-
-
+            if 'daily_data' not in ticker_full[0] or ticker_full[0]['daily_data'] is None:
+                raise KeyError("Missing 'daily_data' in the retrieved data")
 
-
+            df = pd.DataFrame(ticker_full[0]['daily_data'])
 
         selected_cols = ['date','open','high','low','close','volume']
 
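A short usage sketch of the new branch (the MongoDB URI is a placeholder): the four index tickers are routed to yfinance and fetched as `^<ticker>` with ten years of history, while ordinary tickers are still read from the `twse_stats` collection.

```Python
from pymongo import MongoClient
from neurostats_API import TechFetcher

db_client = MongoClient("<MongoDB URI>")  # placeholder connection
twii = TechFetcher('TWII', db_client)     # pulled from yfinance as '^TWII'
tsmc = TechFetcher('2330', db_client)     # pulled from the twse_stats collection
print(twii.full_ohlcv[['date', 'close']].tail())
```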
neurostats_API/fetchers/tej_finance_report.py
ADDED
@@ -0,0 +1,313 @@
+from .base import BaseTEJFetcher
+from datetime import datetime
+from enum import Enum
+import pandas as pd
+from pymongo import MongoClient
+from ..utils import StatsProcessor, YoY_Calculator
+import warnings
+
+
+class FinanceReportFetcher(BaseTEJFetcher):
+    class FetchMode(Enum):
+        YOY = 1
+        QOQ = 2
+        YOY_NOCAL = 3
+        QOQ_NOCAL = 4
+
+    def __init__(self, mongo_uri, db_name = "company", collection_name = "TWN/AINVFQ1"):
+        self.client = MongoClient(mongo_uri)
+        self.db = self.client[db_name]
+        self.collection = self.db[collection_name]
+
+        self.check_index = {
+            'coid', 'mdate', 'key3', 'no',
+            'sem', 'merg', 'curr', 'annd',
+            'fin_ind', 'bp11', 'bp21', 'bp22',
+            'bp31', 'bp41', 'bp51', 'bp53',
+            'bp61', 'bp62', 'bp63', 'bp64',
+            'bp65', 'bf11', 'bf12', 'bf21',
+            'bf22', 'bf41', 'bf42', 'bf43',
+            'bf44', 'bf45', 'bf99', 'bsca',
+            'bsnca', 'bsta', 'bscl', 'bsncl',
+            'bstl', 'bsse', 'bslse', 'debt',
+            'quick', 'ppe', 'ar', 'ip12',
+            'ip22', 'ip31', 'ip51', 'iv41',
+            'if11', 'isibt', 'isni', 'isnip',
+            'eps', 'ispsd', 'gm', 'opi',
+            'nri', 'ri', 'nopi', 'ebit',
+            'cip31', 'cscfo', 'cscfi', 'cscff',
+            'person', 'shares', 'wavg', 'taxrate',
+            'r104', 'r115', 'r105', 'r106',
+            'r107', 'r108', 'r201', 'r112',
+            'r401', 'r402', 'r403', 'r404',
+            'r405', 'r408', 'r409', 'r410',
+            'r502', 'r501', 'r205', 'r505',
+            'r517', 'r512', 'r509', 'r608',
+            'r616', 'r610', 'r607', 'r613',
+            'r612', 'r609', 'r614', 'r611',
+            'r307', 'r304', 'r305', 'r306',
+            'r316', 'r834'
+        }
+
+    def get(
+        self,
+        ticker,
+        fetch_mode: FetchMode = FetchMode.QOQ,
+        start_date: str = None,
+        end_date: str = None,
+        report_type: str = "Q",
+        indexes: list = []
+    ):
+        """
+        基礎的query function
+        ticker(str): 股票代碼
+        start_date(str): 開頭日期範圍
+        end_date(str): 結束日期範圍
+        report_type(str): 報告型態 {"A", "Q", "TTM"}
+        fetch_mode(class FetchMode):
+            YoY : 起始日期到結束日期範圍內,特定該季的資料
+            QoQ : 起始日期到結束日期內,每季的資料(與上一季成長率)
+        indexes(List): 指定的index
+        """
+        # 確認indexes中是否有錯誤的index,有的話回傳warning
+        if (indexes and self.check_index):
+            indexes = set(indexes)
+            difference = indexes - self.check_index
+            if (difference):
+                warnings.warn(
+                    f"{list(difference)} 沒有出現在資料表中,請確認column名稱是否正確",
+                    UserWarning
+                )
+
+
+        if (fetch_mode in {
+                self.FetchMode.QOQ,
+                self.FetchMode.QOQ_NOCAL
+            }
+        ):
+            if (not start_date):
+                warnings.warn("No start_date specified, use default date = \"2005-01-01\"", UserWarning)
+                start_date = datetime.strptime("2005-01-01", "%Y-%m-%d")
+            if (not end_date):
+                warnings.warn("No end_date specified, use default date = today", UserWarning)
+                end_date = datetime.today()
+
+            assert (start_date <= end_date)
+            start_date = datetime.strptime(start_date, "%Y-%m-%d")
+            end_date = datetime.strptime(end_date, "%Y-%m-%d")
+
+            start_year = start_date.year
+            start_season = (start_date.month - 1) // 4 + 1
+            end_year = end_date.year
+            end_season = (end_date.month - 1) // 4 + 1
+
+            if (fetch_mode == self.FetchMode.QOQ):
+                use_cal = True
+            else:
+                use_cal = False
+
+            data_df = self.get_QoQ_data(
+                ticker=ticker,
+                start_year=start_year,
+                start_season=start_season,
+                end_year=end_year,
+                end_season=end_season,
+                report_type=report_type,
+                indexes=indexes,
+                use_cal=use_cal
+            )
+
+            return data_df
+
+        elif (fetch_mode in {
+                self.FetchMode.YOY,
+                self.FetchMode.YOY_NOCAL
+            }
+        ):
+            start_year = 2005
+            end_date = self.get_latest_data_time(ticker)
+            if (not end_date):
+                end_date = datetime.today()
+
+            end_year = end_date.year
+            season = (end_date.month - 1) // 4 + 1
+
+            if (fetch_mode == self.FetchMode.YOY):
+                use_cal = True
+            else:
+                use_cal = False
+
+            data_df = self.get_YoY_data(
+                ticker = ticker,
+                start_year = start_year,
+                end_year = end_year,
+                season = season,
+                report_type = report_type,
+                indexes = indexes
+            )
+
+            return data_df
+
+    def get_QoQ_data(
+        self,
+        ticker,
+        start_year,
+        start_season,
+        end_year,
+        end_season,
+        report_type = "Q",
+        indexes = [],
+        use_cal = False
+    ):
+        """
+        取得時間範圍內每季資料
+        """
+        if (not indexes):  # 沒有指定 -> 取全部
+            pipeline = [
+                { "$match": { "ticker": ticker } },
+                { "$unwind": "$data" },
+                { "$match": {
+                    "$or": [
+                        { "data.year": { "$gt": start_year, "$lt": end_year } },
+                        { "data.year": start_year, "data.season": { "$gte": start_season } },
+                        { "data.year": end_year, "data.season": { "$lte": end_season } }
+                    ]
+                }},
+                { "$project": {
+                    "data.year": 1,
+                    "data.season": 1,
+                    f"data.{report_type}": 1,
+                    "_id": 0
+                    }
+                }
+            ]
+
+
+        else:  # 取指定index
+            project_stage = {
+                "data.year": 1,
+                "data.season": 1
+            }
+            for index in indexes:
+                project_stage[f"data.{report_type}.{index}"] = 1
+
+            pipeline = [
+                { "$match": { "ticker": ticker } },
+                { "$unwind": "$data" },
+                { "$match": {
+                    "$or": [
+                        { "data.year": { "$gt": start_year, "$lt": end_year } },
+                        { "data.year": start_year, "data.season": { "$gte": start_season } },
+                        { "data.year": end_year, "data.season": { "$lte": end_season } }
+                    ]
+                }},
+                { "$project": project_stage }
+            ]
+
+
+        fetched_data = self.collection.aggregate(pipeline).to_list()
+
+        data_dict = StatsProcessor.list_of_dict_to_dict(
+            fetched_data,
+            keys = ["year", "season"],
+            delimeter = "Q",
+            data_key=report_type
+        )
+        if (use_cal):
+            data_with_QoQ = self.cal_QoQ(data_dict)
+            data_df = pd.DataFrame.from_dict(data_with_QoQ)
+        else:
+            data_df = pd.DataFrame.from_dict(data_dict)
+        return data_df
+
+    def get_YoY_data(
+        self,
+        ticker,
+        start_year,
+        end_year,
+        season,
+        report_type = "Q",
+        indexes = [],
+        use_cal = False
+    ):
+        """
+        取得某季歷年資料
+        """
+        if (use_cal):
+            select_year = set()
+
+            for year in range(start_year, end_year + 1):
+                year_shifts = {
+                    year,
+                    year - 1,
+                    year - 3,
+                    year - 5,
+                    year - 10
+                }
+
+                select_year = select_year.union(year_shifts)
+
+            select_year = sorted(list(select_year), reverse=True)
+        else:
+            select_year = [year for year in range(start_year, end_year + 1)]
+
+        if (not indexes):  # 沒有指定 -> 取全部
+            pipeline = [
+                { "$match": { "ticker": ticker } },
+                { "$unwind": "$data" },
+                { "$match": {
+                    "$or": [
+                        {
+                            "$and": [
+                                { "data.year": {"$in": select_year }},
+                                { "data.season": {"$eq": season}}
+                            ]
+                        },
+                    ]
+                }},
+                { "$project": {
+                    "data.year": 1,
+                    "data.season": 1,
+                    f"data.{report_type}": 1,
+                    "_id": 0
+                    }
+                }
+            ]
+
+
+        else:  # 取指定index
+            project_stage = {
+                "data.year": 1,
+                "data.season": 1
+            }
+            for index in indexes:
+                project_stage[f"data.{report_type}.{index}"] = 1
+
+            pipeline = [
+                { "$match": { "ticker": ticker } },
+                { "$unwind": "$data" },
+                { "$match": {
+                    "$and": [
+                        { "data.year": {"$in": select_year}},
+                        { "data.season": {"$eq": season}}
+                    ]
+                }},
+                { "$project": project_stage }
+            ]
+
+        fetched_data = self.collection.aggregate(pipeline).to_list()
+
+        # 處理計算YoY
+        data_dict = StatsProcessor.list_of_dict_to_dict(
+            fetched_data,
+            keys = ['year', 'season'],
+            data_key=report_type,
+            delimeter='Q'
+        )
+        if (use_cal):
+            data_with_YoY = self.cal_YoY(data_dict, start_year, end_year)
+            result_df = pd.DataFrame.from_dict(data_with_YoY)
+        else:
+            result_df = pd.DataFrame.from_dict(data_dict)
+
+        return result_df
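A minimal usage sketch of the fetcher's QoQ path (the MongoDB URI is a placeholder; `eps` and `r104` are TEJ field codes taken from `check_index` above):

```Python
from neurostats_API import FinanceReportFetcher

fetcher = FinanceReportFetcher(mongo_uri="<MongoDB URI>")  # db "company", collection "TWN/AINVFQ1" by default
df = fetcher.get(
    ticker="2330",
    fetch_mode=FinanceReportFetcher.FetchMode.QOQ_NOCAL,  # per-season values, no growth columns
    start_date="2023-01-01",
    end_date="2024-12-31",
    report_type="Q",
    indexes=["eps", "r104"]
)
# Columns look like "2023Q1" ... "2024Q4"; rows are the requested TEJ fields.
```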
neurostats_API/tools/profit_lose.yaml
CHANGED
@@ -3,38 +3,45 @@ profit_lose: # 總營收表
 
 grand_total_profit_lose:
   mode: grand_total_values
-
+
 revenue:
   mode: growth
   target_index:
     - 營業收入合計
+    - 收入合計
     - 利息收入
 
 grand_total_revenue:
   mode: grand_total_growth
   target_index:
     - 營業收入合計
+    - 收入合計
     - 利息收入
 
 gross_profit:
   mode: growth
   target_index:
     - 營業毛利(毛損)淨額
+    - 利息淨收益
 
 grand_total_gross_profit:
   mode: grand_total_growth
   target_index:
     - 營業毛利(毛損)淨額
-
+    - 利息淨收益
+
 gross_profit_percentage:
   mode: percentage
   target_index:
     - 營業毛利(毛損)淨額
+    - 利息淨收益
 
 grand_total_gross_profit_percentage:
   mode: grand_total_percentage
   target_index:
     - 營業毛利(毛損)淨額
+    - 利息淨收益
+
 # 營利
 operating_income:
   mode: growth
@@ -55,46 +62,61 @@ grand_total_operating_income_percentage:
   mode: grand_total_percentage
   target_index:
     - 營業利益(損失)
+
 # 稅前淨利
 net_income_before_tax:
   mode: growth
   target_index:
     - 稅前淨利(淨損)
+    - 繼續營業單位稅前損益
+    - 繼續營業單位稅前淨利(淨損)
 
 grand_total_net_income_before_tax:
   mode: grand_total_growth
   target_index:
     - 稅前淨利(淨損)
+    - 繼續營業單位稅前損益
+    - 繼續營業單位稅前淨利(淨損)
 
 net_income_before_tax_percentage:
   mode: percentage
   target_index:
     - 稅前淨利(淨損)
+    - 繼續營業單位稅前損益
+    - 繼續營業單位稅前淨利(淨損)
 
 grand_total_net_income_before_tax_percentage:
   mode: grand_total_percentage
   target_index:
     - 稅前淨利(淨損)
+    - 繼續營業單位稅前損益
+    - 繼續營業單位稅前淨利(淨損)
+
 # 本期淨利
 net_income:
   mode: growth
   target_index:
     - 本期淨利(淨損)
+    - 本期稅後淨利(淨損)
 
 grand_total_net_income:
   mode: grand_total_growth
   target_index:
     - 本期淨利(淨損)
+    - 本期稅後淨利(淨損)
 
 net_income_percentage:
   mode: percentage
   target_index:
     - 本期淨利(淨損)
+    - 本期稅後淨利(淨損)
 
 grand_total_income_percentage:
   mode: grand_total_percentage
   target_index:
     - 本期淨利(淨損)
+    - 本期稅後淨利(淨損)
+
 # EPS
 EPS:
   mode: value
@@ -118,4 +140,4 @@ grand_total_EPS_growth:
   mode: grand_total_growth
   target_index:
     - 基本每股盈餘
-    - 基本每股盈餘合計
+    - 基本每股盈餘合計
neurostats_API/utils/__init__.py
CHANGED
neurostats_API/utils/calculate_value.py
ADDED
@@ -0,0 +1,26 @@
+class YoY_Calculator:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def cal_growth(cls, target_value: float, past_value: float, delta: int):
+        """
+        計算成長率以及年化成長率
+        target_value: float,這個時間的數值
+        past_value: float,過去的這個時間數值
+        delta: int,代表隔了幾年/季 delta > 1 時改以年化成長率計算
+        """
+        try:
+            if (delta > 1):
+                YoY = ((target_value / past_value)**(1 / delta)) - 1
+
+            else:
+                YoY = ((target_value - past_value) / past_value)
+
+        except Exception as e:
+            return None
+
+        if (isinstance(YoY, complex)):  # 年化成長率有複數問題
+            return None
+
+        return YoY
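A worked example of the two branches (illustrative numbers; `YoY_Calculator` is imported from `neurostats_API.utils`, as the import in `fetchers/base.py` above shows):

```Python
from neurostats_API.utils import YoY_Calculator

YoY_Calculator.cal_growth(110.0, 100.0, delta=1)  # (110 - 100) / 100 = 0.10
YoY_Calculator.cal_growth(150.0, 100.0, delta=5)  # (150 / 100) ** (1 / 5) - 1 ≈ 0.0845, annualized
YoY_Calculator.cal_growth(120.0, -80.0, delta=3)  # negative ratio with fractional exponent -> complex -> None
YoY_Calculator.cal_growth(120.0, 0.0, delta=1)    # ZeroDivisionError is caught -> None
```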
neurostats_API/utils/data_process.py
CHANGED
@@ -20,7 +20,6 @@ class StatsProcessor:
     """
     1. 讀檔: txt / yaml
     2. 將巢狀dictionary / DataFrame扁平化
-
     """
 
     @classmethod
@@ -97,20 +96,47 @@ class StatsProcessor:
         """
         對Multicolumn的dataframe切出目標的index
         """
-        times = total_table.columns.get_level_values(0).unique()
         try:
             target_metrics = target_metric_dict[mode]
         except KeyError as e:
             return f"mode Error: Get mode should be {list(target_metric_dict.keys())} but get {mode}"
 
+        times = total_table.columns.get_level_values(0).unique()
         desired_order = [(time, value_name) for time in times
                          for value_name in target_metrics]
 
         if (target_index):
             target_index = target_index.split()
-
-
-
+            try:
+                sliced_table = total_table.loc[
+                    target_index,
+                    pd.IndexSlice[:, target_metrics]][desired_order].T
+
+            except Exception as e: # 沒辦法完整取得表格
+                # 先設立空表格
+                empty_index = pd.Index(desired_order)
+                empty_columns = pd.Index(target_index)
+                sliced_table = pd.DataFrame(index=empty_index,
+                                            columns=empty_columns)
+
+                try:
+                    # 提取有效的部分資料
+                    partial_table = total_table.loc[
+                        total_table.index.intersection(target_index),
+                        pd.IndexSlice[:, target_metrics]
+                    ]
+
+                    # 遍歷 partial_table 的索引和值,手動填入 sliced_table
+                    for row_index in partial_table.index:
+                        for col_index in partial_table.columns:
+                            if col_index in desired_order and row_index in target_index:
+                                sliced_table.loc[col_index, row_index] = partial_table.loc[row_index, col_index]
+
+                    # 確保 `sliced_table` 的排序符合 `desired_order`
+                    sliced_table = sliced_table.reindex(index=desired_order, columns=target_index)
+                except Exception as sub_e:
+                    pass
+
         if (mode == 'value_and_percentage'): # 因應balance_sheet 頁面的格式
             return_table = sliced_table.T
             return_table.columns = [
@@ -146,7 +172,7 @@ class StatsProcessor:
     @classmethod
     def cal_percentage(cls, value, postfix="%"):
         if (isinstance(value, (float, int))):
-            value = np.round(value * 100
+            value = np.round(value * 100, 2).item()
             value = f"{value:.2f}{postfix}"
 
         return value
@@ -157,7 +183,7 @@ class StatsProcessor:
     @classmethod
     def cal_non_percentage(cls, value, to_str=False, postfix="元"):
         if (isinstance(value, (float, int))):
-
+
             value = np.round(value, 2).item()
             if (postfix == "千元"):
                 value *= 1000
@@ -178,7 +204,7 @@ class StatsProcessor:
 
         else:
             return value
-
+
     @classmethod
     def cal_round_int(cls, value):
         """
@@ -188,3 +214,59 @@ class StatsProcessor:
             return int(np.round(value).item())
         else:
             return value
+
+    @classmethod
+    def list_of_dict_to_dict(
+        cls,
+        data_list: list,
+        key: str = "",
+        keys: list = [],
+        delimeter: str = "_",
+        data_key: str = "Q"
+    ):
+        """
+        TEJ DB 用
+        List[Dict] -> Dict[Dict]
+        input:
+            data_list(List):
+                [
+                    { "data":
+                        {
+                            "year": 2021...
+                            "season": 1,
+                            "Q": {}...
+
+                        }
+                    }
+                ]
+
+            key(str): 選擇哪一個key作為轉化後的index
+            delimeter(str): 多個key時要用甚麼分隔
+        return:
+            {
+                "2021" : {# Q下的資料} ...
+            }
+
+            or (keys = ['year', 'season'])
+            {
+                "2021Q2" : {}
+            }
+        """
+        assert (key or keys), "func list_of_dict_to_dict must have argument \"key\" or \"keys\""
+
+        return_dict = {}
+        if (key):
+            keys = [key]
+        for data in data_list:
+            data = data['data']
+
+            pop_keys = []
+
+            for key in keys:
+                assert (key in data.keys())
+                pop_keys.append(str(data.pop(key)))
+
+            pop_key = delimeter.join(pop_keys)
+            return_dict[pop_key] = data[data_key]
+
+        return return_dict
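A small sketch of what `list_of_dict_to_dict` produces for the TEJ documents (made-up numbers; `StatsProcessor` is importable from `neurostats_API.utils`):

```Python
from neurostats_API.utils import StatsProcessor

docs = [
    {"data": {"year": 2024, "season": 1, "Q": {"eps": 2.2}}},
    {"data": {"year": 2024, "season": 2, "Q": {"eps": 2.5}}},
]
StatsProcessor.list_of_dict_to_dict(docs, keys=["year", "season"], delimeter="Q", data_key="Q")
# expected: {"2024Q1": {"eps": 2.2}, "2024Q2": {"eps": 2.5}}
```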
{neurostats_API-0.0.13.dist-info → neurostats_API-0.0.15.dist-info}/METADATA
CHANGED
@@ -1,12 +1,18 @@
 Metadata-Version: 2.1
 Name: neurostats-API
-Version: 0.0.13
+Version: 0.0.15
 Summary: The service of NeuroStats website
 Home-page: https://github.com/NeurowattStats/NeuroStats_API.git
 Author: JasonWang@Neurowatt
 Author-email: jason@neurowatt.ai
 Requires-Python: >=3.6
 Description-Content-Type: text/markdown
+Requires-Dist: numpy>=2.1.0
+Requires-Dist: pandas>=2.2.0
+Requires-Dist: pymongo
+Requires-Dist: pytz
+Requires-Dist: python-dotenv
+Requires-Dist: yfinance
 
 # neurostats_API
 
@@ -83,7 +89,7 @@ pip install neurostats-API
 ```Python
 >>> import neurostats_API
 >>> print(neurostats_API.__version__)
-0.0.13
+0.0.15
 ```
 
 ### Get the latest valuation data and historical valuations
@@ -667,7 +673,71 @@ fetcher.query()
 
 Note that `range`, `last_range`, and `52week_range` are returned as strings; the remaining fields are floats.
 
+
+## TEJ-related
+### Auditor-certified financial statements (會計師簽證財務資料)
+```Python
+from neurostats_API import FinanceReportFetcher
+
+mongo_uri = <MongoDB URI>
+db_name = 'company' # name of the DB to connect to
+collection_name = "TWN/AINVFQ1" # target collection
+
+fetcher = FinanceReportFetcher(
+    mongo_uri = mongo_uri,
+    db_name = db_name,
+    collection_name = collection_name
+)
+
+data = fetcher.get(
+    ticker = "2330" # any stock ticker
+    fetch_mode = fetcher.YOY_NOCAL # fetch mode
+    start_date = "2005-01-01",
+    end_date = "2024-12-31",
+    report_type = "Q",
+    indexes = []
+)
+```
+- `ticker`: the stock ticker
+
+- `fetch_mode`: fetch mode, either `fetcher.YOY_NOCAL` or `fetcher.QOQ_NOCAL`
+   - `YOY_NOCAL`: data for the same season as `end_date` across the years, with the range bounded by `start_date`
+   > e.g. `start_date = "2020-07-01"` and `end_date = "2024-01-01"` return the Q1 data of 2020 through 2024
+
+   - `QOQ_NOCAL`: data for every season within the range
+
+- `start_date`: start date; defaults to `2005-01-01` when not set
+
+- `end_date`: end date; defaults to the date of the latest data in the database when not set
+
+- `report_type`: which report to use; defaults to `Q`
+   - `A`: year-to-date cumulative values
+   - `Q`: single-season values
+   - `TTM`: trailing four seasons (the current season plus the previous three)
+
+- `indexes`: the columns to select, using the field names provided by TEJ; omitting this or passing `[]` returns every column
+   - example input: `['bp41', 'bp51']`
+
+[TEJ dataset link](https://tquant.tejwin.com/%E8%B3%87%E6%96%99%E9%9B%86/)
+See the `會計師簽證財務資料` dataset there.
+
+#### Returned data
+With `fetch_mode` set to `YOY_NOCAL` or `QOQ_NOCAL`,
+a pd.DataFrame is returned whose column names are <year>Q<season> and whose row names are the requested report items.
+
 ## Version history
+## 0.0.15
+- Added support for index tickers in TechFetcher
+
+- Added tej_fetcher for fetching TEJ-related data
+
+- Added package dependencies so the required packages are installed automatically
+
+## 0.0.14
+- Fixed some financial-report values being incorrectly multiplied by 1000
+
+- Added exception handling: when some fields of a season are missing from the database, the data is still collected and presented as completely as possible
+
 ### 0.0.13
 - value_fetcher: added support for fetching a series of valuations
 
{neurostats_API-0.0.13.dist-info → neurostats_API-0.0.15.dist-info}/RECORD
CHANGED
@@ -1,28 +1,30 @@
-neurostats_API/__init__.py,sha256=
+neurostats_API/__init__.py,sha256=oEkbIWbrC6-8sBPEJQXg0QYoz3TNZtYXhSTEO6d0JcU,261
 neurostats_API/cli.py,sha256=UJSWLIw03P24p-gkBb6JSEI5dW5U12UvLf1L8HjQD-o,873
 neurostats_API/main.py,sha256=QcsfmWivg2Dnqw3MTJWiI0QvEiRs0VuH-BjwQHFCv00,677
-neurostats_API/fetchers/__init__.py,sha256=
+neurostats_API/fetchers/__init__.py,sha256=B4aBwVzf_X-YieEf3fZteU0qmBPVIB9VjrmkyWhLK18,489
 neurostats_API/fetchers/balance_sheet.py,sha256=sQv4Gk5uoKURLEdh57YknOQWiyVwaXJ2Mw75jxNqUS0,5804
-neurostats_API/fetchers/base.py,sha256=
+neurostats_API/fetchers/base.py,sha256=4YS8MJR3u9Sg6dKX7QoCYuqNeQaoYHIlvPm5x8VQ72U,4882
 neurostats_API/fetchers/cash_flow.py,sha256=TY7VAWVXkj5-mzH5Iu0sIE-oV8MvGmmDy0URNotNV1E,7614
 neurostats_API/fetchers/finance_overview.py,sha256=PxUdWY0x030olYMLcCHDBn068JLmCE2RTOce1dxs5vM,27753
-neurostats_API/fetchers/institution.py,sha256=
+neurostats_API/fetchers/institution.py,sha256=UrcBc6t7u7CnEwUsf6YmLbbJ8VncdWpq8bCz17q2dgs,11168
 neurostats_API/fetchers/margin_trading.py,sha256=lQImtNdvaBoSlKhJvQ3DkH3HjSSgKRJz4ZZpyR5-Z4I,10433
 neurostats_API/fetchers/month_revenue.py,sha256=nixX2llzjCFr2m2YVjxrSfkBusnZPrPb2dRDq1XLGhw,4251
-neurostats_API/fetchers/profit_lose.py,sha256=
-neurostats_API/fetchers/tech.py,sha256=
+neurostats_API/fetchers/profit_lose.py,sha256=EN9Y0iamcAaHMZdjHXO6b_2buLnORssf8ZS7A0hi74s,5896
+neurostats_API/fetchers/tech.py,sha256=Hol1bcwJ_ERcnoTXNWlqqaWOuzdl7MeiAjCvzQMZDTg,12269
+neurostats_API/fetchers/tej_finance_report.py,sha256=VDP0Lx2ErCgIBBz7nbquC1ugkcnj6p7ehM2JtFInjsQ,10218
 neurostats_API/fetchers/value_invest.py,sha256=_eQxuEnIYvksb06QHixGK29Gnwr_3xmI6Tu7dv4J__E,5769
-neurostats_API/tools/balance_sheet.yaml,sha256=
+neurostats_API/tools/balance_sheet.yaml,sha256=6XygNG_Ybb1Xkk1e39LMLKr7ATvaCP3xxuwFbgNl6dA,673
 neurostats_API/tools/cash_flow_percentage.yaml,sha256=fk2Z4eb1JjGFvP134eJatHacB7BgTkBenhDJr83w8RE,1345
 neurostats_API/tools/finance_overview_dict.yaml,sha256=B9nV75StXkrF3yv2-eezzitlJ38eEK86RD_VY6588gQ,2884
-neurostats_API/tools/profit_lose.yaml,sha256=
+neurostats_API/tools/profit_lose.yaml,sha256=iyp9asYJ04vAxk_HBUDse_IBy5oVvYHpwsyACg5YEeg,3029
 neurostats_API/tools/seasonal_data_field_dict.txt,sha256=X8yc_el6p8BH_3FikTqBVFGsvWdXT6MHXLfKfi44334,8491
-neurostats_API/utils/__init__.py,sha256=
-neurostats_API/utils/
+neurostats_API/utils/__init__.py,sha256=0tJCRmlJq2aDwcNNW-oEaA9H0OxTJMFvjpVYtG4AvZU,186
+neurostats_API/utils/calculate_value.py,sha256=lUKSsWU76XRmDUcmi4eDjoQxjb3vWpAAKInF9w49VNI,782
+neurostats_API/utils/data_process.py,sha256=A--dzOsu42jRxqqCD41gTtjE5rhEBYmhB6y-AnCvo5U,8986
 neurostats_API/utils/datetime.py,sha256=XJya4G8b_-ZOaBbMXgQjWh2MC4wc-o6goQ7EQJQMWrQ,773
 neurostats_API/utils/db_client.py,sha256=OYe6yazcR4Aa6jYmy47JrryUeh2NnKGqY2K_lSZe6i8,455
 neurostats_API/utils/fetcher.py,sha256=VbrUhjA-GG5AyjPX2SHtFIbZM4dm3jo0RgZzuCbb_Io,40927
-neurostats_API-0.0.
-neurostats_API-0.0.
-neurostats_API-0.0.
-neurostats_API-0.0.
+neurostats_API-0.0.15.dist-info/METADATA,sha256=btfdGRam5QpUHFFiA_UPWYeZuAqAMYkEJ0Ufod399T4,27959
+neurostats_API-0.0.15.dist-info/WHEEL,sha256=bFJAMchF8aTQGUgMZzHJyDDMPTO3ToJ7x23SLJa1SVo,92
+neurostats_API-0.0.15.dist-info/top_level.txt,sha256=nSlQPMG0VtXivJyedp4Bkf86EOy2TpW10VGxolXrqnU,15
+neurostats_API-0.0.15.dist-info/RECORD,,
{neurostats_API-0.0.13.dist-info → neurostats_API-0.0.15.dist-info}/WHEEL
File without changes
{neurostats_API-0.0.13.dist-info → neurostats_API-0.0.15.dist-info}/top_level.txt
File without changes