wbportfolio 1.50.15__py2.py3-none-any.whl → 1.51.1__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of wbportfolio might be problematic.
- wbportfolio/analysis/claims.py +9 -8
- wbportfolio/import_export/handlers/dividend.py +15 -5
- wbportfolio/import_export/handlers/fees.py +11 -4
- wbportfolio/import_export/handlers/trade.py +21 -3
- wbportfolio/import_export/parsers/jpmorgan/customer_trade.py +3 -4
- wbportfolio/import_export/parsers/jpmorgan/fees.py +3 -4
- wbportfolio/import_export/parsers/jpmorgan/valuation.py +1 -3
- wbportfolio/import_export/parsers/leonteq/equity.py +1 -4
- wbportfolio/import_export/parsers/leonteq/fees.py +2 -4
- wbportfolio/import_export/parsers/leonteq/valuation.py +1 -4
- wbportfolio/import_export/parsers/natixis/customer_trade.py +11 -15
- wbportfolio/import_export/parsers/natixis/d1_customer_trade.py +9 -10
- wbportfolio/import_export/parsers/natixis/d1_equity.py +4 -12
- wbportfolio/import_export/parsers/natixis/d1_fees.py +3 -5
- wbportfolio/import_export/parsers/natixis/d1_trade.py +4 -13
- wbportfolio/import_export/parsers/natixis/d1_valuation.py +2 -5
- wbportfolio/import_export/parsers/natixis/dividend.py +5 -5
- wbportfolio/import_export/parsers/natixis/equity.py +4 -4
- wbportfolio/import_export/parsers/natixis/fees.py +3 -6
- wbportfolio/import_export/parsers/natixis/trade.py +4 -4
- wbportfolio/import_export/parsers/natixis/utils.py +5 -9
- wbportfolio/import_export/parsers/natixis/valuation.py +4 -5
- wbportfolio/import_export/parsers/sg_lux/fees.py +6 -6
- wbportfolio/import_export/parsers/sg_lux/perf_fees.py +3 -6
- wbportfolio/import_export/parsers/sg_lux/registers.py +6 -2
- wbportfolio/import_export/parsers/sg_lux/valuation.py +3 -4
- wbportfolio/import_export/parsers/societe_generale/customer_trade.py +13 -12
- wbportfolio/import_export/parsers/societe_generale/valuation.py +2 -3
- wbportfolio/import_export/parsers/tellco/customer_trade.py +4 -6
- wbportfolio/import_export/parsers/tellco/equity.py +2 -3
- wbportfolio/import_export/parsers/tellco/valuation.py +2 -4
- wbportfolio/import_export/parsers/ubs/api/fees.py +6 -9
- wbportfolio/import_export/parsers/ubs/customer_trade.py +14 -20
- wbportfolio/import_export/parsers/ubs/equity.py +3 -6
- wbportfolio/import_export/parsers/ubs/historical_customer_trade.py +19 -38
- wbportfolio/import_export/parsers/ubs/valuation.py +2 -3
- wbportfolio/import_export/parsers/vontobel/instrument.py +2 -2
- wbportfolio/import_export/parsers/vontobel/management_fees.py +3 -5
- wbportfolio/import_export/parsers/vontobel/performance_fees.py +2 -3
- wbportfolio/import_export/parsers/vontobel/trade.py +1 -3
- wbportfolio/import_export/parsers/vontobel/utils.py +0 -12
- wbportfolio/locale/de/LC_MESSAGES/django.po +197 -0
- wbportfolio/locale/fr/LC_MESSAGES/django.po +197 -0
- wbportfolio/models/portfolio.py +8 -0
- wbportfolio/models/transactions/trade_proposals.py +22 -15
- wbportfolio/models/transactions/trades.py +8 -2
- wbportfolio/serializers/signals.py +1 -1
- wbportfolio/serializers/transactions/trades.py +2 -0
- wbportfolio/viewsets/configs/display/trades.py +12 -0
- wbportfolio/viewsets/configs/menu/trades.py +0 -1
- wbportfolio/viewsets/transactions/claim.py +1 -1
- wbportfolio/viewsets/transactions/trade_proposals.py +1 -1
- {wbportfolio-1.50.15.dist-info → wbportfolio-1.51.1.dist-info}/METADATA +1 -1
- {wbportfolio-1.50.15.dist-info → wbportfolio-1.51.1.dist-info}/RECORD +56 -54
- {wbportfolio-1.50.15.dist-info → wbportfolio-1.51.1.dist-info}/WHEEL +0 -0
- {wbportfolio-1.50.15.dist-info → wbportfolio-1.51.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,7 +1,7 @@
 import numpy as np
 import pandas as pd
 
-from .utils import _get_underlying_instrument,
+from .utils import _get_underlying_instrument, file_name_parse_isin
 
 FIELD_MAP = {
     "Close": "initial_price",
@@ -28,11 +28,11 @@ def _apply_adjusting_factor(row):
 
 def parse(import_source):
     # Parse the Parts of the filename into the different parts
-    parts =
+    parts = file_name_parse_isin(import_source.file.name)
 
     # Get the valuation date and investment from the parts list
     valuation_date = parts["valuation_date"]
-
+    product_data = parts["product"]
 
     # Load file into a CSV DictReader
     df = pd.read_csv(import_source.file, encoding="utf-16", delimiter=";")
@@ -50,7 +50,7 @@ def parse(import_source):
     df = df.drop(columns=df.columns.difference(FIELD_MAP.values()))
 
     df["portfolio__instrument_type"] = "product"
-    df["
+    df["portfolio__isin"] = product_data["isin"]
     df["is_estimated"] = False
     df["date"] = valuation_date.strftime("%Y-%m-%d")
     df["asset_valuation_date"] = pd.to_datetime(df["asset_valuation_date"], dayfirst=True).dt.strftime("%Y-%m-%d")
@@ -1,7 +1,7 @@
 import pandas as pd
 from pandas.tseries.offsets import BDay
 
-from .utils import
+from .utils import file_name_parse_isin
 
 FIELD_MAP = {
     "Date": "transaction_date",
@@ -14,9 +14,8 @@ FIELD_MAP = {
 
 def parse(import_source):
     # Parse the Parts of the filename into the different parts
-    parts =
+    parts = file_name_parse_isin(import_source.file.name)
     # Get the valuation date and investment from the parts list
-    parts["valuation_date"]
     product = parts["product"]
 
     df = pd.read_csv(import_source.file, encoding="utf-8", delimiter=";")
@@ -42,12 +41,10 @@ def parse(import_source):
     )
     df = df[df["total_value"] != 0]
 
-    df["
-    df["linked_product"] = product.id
+    df["linked_product"] = [product] * df.shape[0]
     df["transaction_date"] = df["transaction_date"].dt.strftime("%Y-%m-%d")
     df["calculated"] = False
     df["total_value_gross"] = df["total_value"]
     df["total_value_fx_portfolio"] = df["total_value"]
     df["total_value_gross_fx_portfolio"] = df["total_value"]
-
     return {"data": df.to_dict("records")}
@@ -2,7 +2,7 @@ import pandas as pd
 
 from wbportfolio.models import Trade
 
-from .utils import _get_underlying_instrument,
+from .utils import _get_underlying_instrument, file_name_parse_isin
 
 
 def _parse_trade_type(type):
@@ -22,10 +22,10 @@ def parse(import_source):
     df = pd.read_csv(import_source.file, sep=";")
     if not df.empty:
         # Parse the Parts of the filename into the different parts
-        parts =
+        parts = file_name_parse_isin(import_source.file.name)
 
         # Get the valuation date and investment from the parts list
-
+        product_data = parts["product"]
 
         # Iterate through the CSV File and parse the data into a list
         df["underlying_instrument"] = df[["BLOOMBERG CODE", "NAME", "QUOTED_CRNCY"]].apply(
@@ -58,7 +58,7 @@ def parse(import_source):
         df.transaction_date = df.transaction_date.apply(lambda x: x.strftime("%Y-%m-%d"))
         df["transaction_subtype"] = df["transaction_subtype"].apply(lambda x: _parse_trade_type(x))
         df = df.drop(columns=df.columns.difference(columns_map.values()))
-        df["portfolio"] = product.
+        df["portfolio"] = [{"instrument_type": "product", **product_data}] * df.shape[0]
        df["bank"] = "Natixis Cash Transfer"
         data = df.to_dict("records")
         return {"data": data}
@@ -1,11 +1,9 @@
 import datetime
 import re
 
-from django.db.models import Q
-
 from wbportfolio.models import Product
 
-INSTRUMENT_MAP_NAME = {"EDA23_AtonRa Z class":
+INSTRUMENT_MAP_NAME = {"EDA23_AtonRa Z class": "LU2170995018"}
 
 
 def _get_exchange_from_ticker(ticker):
@@ -21,8 +19,8 @@ def _get_ticker(ticker):
 
 
 def _get_underlying_instrument(bbg_code, name, currency, instrument_type="equity", isin=None, cash_position=False):
-    if
-        return {"
+    if isin := INSTRUMENT_MAP_NAME.get(bbg_code, None):
+        return {"isin": isin}
 
     exchange = _get_exchange_from_ticker(bbg_code)
     ticker = _get_ticker(bbg_code)
@@ -54,7 +52,7 @@ def _get_underlying_instrument(bbg_code, name, currency, instrument_type="equity
     return underlying_quote
 
 
-def
+def file_name_parse_isin(file_name):
     dates = re.findall(r"_([0-9]{4}-?[0-9]{2}-?[0-9]{2})", file_name)
     identifier = re.findall(r"([A-Z]{2}(?![A-Z]{10}\b)[A-Z0-9]{10})_", file_name)
     assert len(dates) == 2, "Not 2 dates found in the filename"
@@ -68,10 +66,8 @@ def file_name_parse(file_name):
     except ValueError:
         generation_date = datetime.datetime.strptime(dates[1], "%Y%m%d").date()
 
-    product = Product.objects.get(Q(ticker=identifier[0]) | Q(isin=identifier[0]))
-
     return {
-        "product":
+        "product": {"isin": identifier[0]},
         "valuation_date": valuation_date,
         "generation_date": generation_date,
     }
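The new file_name_parse_isin helper above only extracts identifiers from the filename instead of resolving a Product (the old file_name_parse did a Product.objects.get on ticker or ISIN). A minimal sketch of what its two regexes capture, using an invented filename for illustration:

import re

file_name = "LU2170995018_export_2024-01-15_20240116.csv"  # hypothetical filename

dates = re.findall(r"_([0-9]{4}-?[0-9]{2}-?[0-9]{2})", file_name)
identifier = re.findall(r"([A-Z]{2}(?![A-Z]{10}\b)[A-Z0-9]{10})_", file_name)

assert dates == ["2024-01-15", "20240116"]
assert identifier == ["LU2170995018"]
# parts["product"] is now just {"isin": identifier[0]}; the database lookup is deferred.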
@@ -4,7 +4,7 @@ import datetime
 
 from wbportfolio.import_export.utils import convert_string_to_number
 
-from .utils import
+from .utils import file_name_parse_isin
 
 
 def parse(import_source):
@@ -20,11 +20,10 @@ def parse(import_source):
     csv_reader = csv.DictReader(csv_file, delimiter=";")
 
     # Parse the Parts of the filename into the different parts
-    parts =
+    parts = file_name_parse_isin(import_source.file.name)
 
     # Get the valuation date and investment from the parts list
-    parts["
-    product = parts["product"]
+    product_data = parts["product"]
 
     # Iterate through the CSV File and parse the data into a list
     data = list()
@@ -34,7 +33,7 @@ def parse(import_source):
         if date.weekday() not in [5, 6]:
             data.append(
                 {
-                    "instrument": {"instrument_type": "product",
+                    "instrument": {"instrument_type": "product", **product_data},
                     "date": date.strftime("%Y-%m-%d"),
                     "net_value": round(convert_string_to_number(valuation["Index Value in%"]), 6),
                     "gross_value": round(convert_string_to_number(valuation["Index gross in%"]), 6),
@@ -4,7 +4,7 @@ import datetime
 import logging
 
 from wbportfolio.import_export.utils import convert_string_to_number
-from wbportfolio.models import Fees
+from wbportfolio.models import Fees
 
 logger = logging.getLogger("importers.parsers.sglux.fee")
 # Shares class mapping between ticker to identifier
@@ -37,14 +37,14 @@ def parse(import_source):
     for fee_data in fee_reader:
         if (fee_description := fee_data["Fees description"]) and fee_description.startswith("Investment Manager fees"):
             share_class = fee_data["Class"] # .split("\\")[1][0]
-            product = Product.objects.get(
-                parent__identifier=fee_data["Code"], currency__key=fee_data["Local ccy"], identifier=share_class
-            )
             date = datetime.datetime.strptime(fee_data["NAV Date"], "%Y/%m/%d")
             data.append(
                 {
-                    "
-
+                    "linked_product": {
+                        "parent__identifier": fee_data["Code"],
+                        "currency__key": fee_data["Local ccy"],
+                        "identifier": share_class,
+                    },
                     "transaction_date": date.strftime("%Y-%m-%d"),
                     "calculated": False,
                     "transaction_subtype": Fees.Type.MANAGEMENT,
@@ -5,7 +5,7 @@ import logging
 import re
 
 from wbportfolio.import_export.utils import convert_string_to_number
-from wbportfolio.models import Fees
+from wbportfolio.models import Fees
 
 logger = logging.getLogger("importers.parsers.sglux.perf_fees")
 
@@ -18,7 +18,6 @@ def parse(import_source):
     # fee_reader = csv.DictReader(fee_file, delimiter=',')
     fee_reader = csv.reader(fee_file)
     isin = re.findall("([A-Z]{2}[A-Z0-9]{9}[0-9]{1})", import_source.file.name)[0]
-    product = Product.objects.get(isin=isin)
 
     # Iterate through the CSV File and parse the data into a list
     data = list()
@@ -27,8 +26,7 @@ def parse(import_source):
         date = datetime.datetime.strptime(fee_data[0], "%m/%d/%Y")
         data.append(
             {
-                "
-                "linked_product": product.id,
+                "linked_product": {"isin": isin},
                 "transaction_date": date.strftime("%Y-%m-%d"),
                 "calculated": False,
                 "transaction_subtype": Fees.Type.PERFORMANCE,
@@ -37,8 +35,7 @@ def parse(import_source):
         )
         data.append(
             {
-                "
-                "linked_product": product.id,
+                "linked_product": {"isin": isin},
                 "transaction_date": date.strftime("%Y-%m-%d"),
                 "calculated": False,
                 "transaction_subtype": Fees.Type.PERFORMANCE_CRYSTALIZED,
@@ -21,6 +21,10 @@ def convert_string_to_number(string):
 def parse(import_source):
     data = list()
 
+    country_title_mapping_exception = import_source.source.import_parameters.get(
+        "country_title_mapping_exception", {"great-britain": "GB", "united kingdom": "GB", "man (isle of)": "IM"}
+    )
+
     sylk_handler = SYLK()
     for line in [_line.decode("cp1252") for _line in import_source.file.open("rb").readlines()]:
         sylk_handler.parseline(line)
@@ -156,8 +160,8 @@ def parse(import_source):
     country_title_mapping = {
         country["name"].lower(): country["id"] for country in Geography.countries.all().values("id", "name")
     }
-
-
+    for title, code in country_title_mapping_exception.items():
+        country_title_mapping[title] = Geography.countries.get(code_2=code).id
 
     df["CITIZENSHIP"] = df["CITIZENSHIP"].apply(lambda x: country_code_mapping[x])
     df["RESIDENCE"] = df["RESIDENCE"].apply(lambda x: country_code_mapping[x])
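The special-case country names are now configurable through the source's import parameters, with the defaults shown above. A sketch of the override, assuming a hypothetical import_parameters payload:

# Hypothetical import_parameters value; the fallback default matches the diff above.
import_parameters = {"country_title_mapping_exception": {"great-britain": "GB", "jersey": "JE"}}

country_title_mapping_exception = import_parameters.get(
    "country_title_mapping_exception",
    {"great-britain": "GB", "united kingdom": "GB", "man (isle of)": "IM"},
)
# Each (title, code) pair is then resolved to a Geography id, as in the hunk above:
#     country_title_mapping[title] = Geography.countries.get(code_2=code).id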
@@ -4,7 +4,6 @@ import datetime
 import re
 
 from wbportfolio.import_export.utils import convert_string_to_number
-from wbportfolio.models import Product
 
 
 def file_name_parse(file_name):
@@ -38,11 +37,11 @@ def parse(import_source):
 
     for valuation in csv_reader:
         valuation_date = datetime.datetime.strptime(valuation["NAV Date"], "%Y/%m/%d").date()
-
-        if valuation_date.weekday() not in [5, 6] and
+        isin = valuation["ISIN Code"]
+        if valuation_date.weekday() not in [5, 6] and valuation["ShareCurrency"] == valuation["Ccy"]:
             data.append(
                 {
-                    "instrument": {"instrument_type": "product", "
+                    "instrument": {"instrument_type": "product", "isin": isin},
                     "date": valuation_date.strftime("%Y-%m-%d"),
                     "net_value": round(convert_string_to_number(valuation["NAV per share"]), 6),
                     "calculated": False,
@@ -11,14 +11,14 @@ def parse(import_source):
     df_dict = pd.read_excel(BytesIO(import_source.file.read()), engine="openpyxl", sheet_name=None)
 
     data = list()
-
+    product_isins = set()
     max_date = datetime.date(1900, 1, 1)
 
     for sheet_name, df in df_dict.items():
         if "prices" not in sheet_name:
             isin = re.findall("([A-Z]{2}[A-Z0-9]{9}[0-9]{1})", sheet_name)
-
-
+            isin = isin[0]
+            product_isins.add(isin)
             df = df.rename(
                 columns={
                     "Trade Date": "transaction_date",
@@ -34,22 +34,23 @@ def parse(import_source):
             for trade in df.to_dict("records"):
                 max_date = max(trade["transaction_date"].date(), max_date)
 
-
-                shares = shares / product.share_price
-                portfolio = product.primary_portfolio
+                nominal = trade["nominal"] if trade["way"] == "S" else trade["nominal"] * -1
                 data.append(
                     {
-                        "underlying_instrument": {"
-                        "
-                        "portfolio": portfolio.id,
+                        "underlying_instrument": {"isin": isin, "instrument_type": "product"},
+                        "portfolio": {"isin": isin, "instrument_type": "product"},
                         "transaction_date": trade["transaction_date"].strftime("%Y-%m-%d"),
-                        "
-                        "transaction_subtype": Trade.Type.REDEMPTION if
+                        "nominal": nominal,
+                        "transaction_subtype": Trade.Type.REDEMPTION if nominal < 0 else Trade.Type.SUBSCRIPTION,
                         "bank": trade["bank"],
                         "price": round(trade["price"] / 10, 6),
                     }
                 )
+    underlying_instruments = Product.objects.filter(isin__in=product_isins).values_list("id", flat=True)
     return {
         "data": data,
-        "history": {
+        "history": {
+            "underlying_instruments": list(underlying_instruments),
+            "transaction_date": max_date.strftime("%Y-%m-%d"),
+        },
     }
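Across these parsers the pattern is the same: records no longer carry database ids resolved at parse time; portfolio, underlying_instrument and linked_product become lookup dicts keyed by ISIN (or identifier) that are presumably resolved later by the import handlers listed at the top of this diff. A sketch of the record shape emitted above, with hypothetical values:

nominal = -100_000  # hypothetical; rows whose "way" is not "S" are negated above

record = {
    "underlying_instrument": {"isin": "LU2170995018", "instrument_type": "product"},
    "portfolio": {"isin": "LU2170995018", "instrument_type": "product"},
    "transaction_date": "2024-01-15",
    "nominal": nominal,
    # stands in for Trade.Type.REDEMPTION (nominal < 0) vs Trade.Type.SUBSCRIPTION
    "transaction_subtype": "REDEMPTION",
    "bank": "Example Bank",
    "price": 9.876543,
}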
@@ -4,7 +4,6 @@ import numpy as np
 import pandas as pd
 
 from wbportfolio.import_export.utils import get_file_extension
-from wbportfolio.models import Product
 
 
 def parse(import_source):
@@ -25,10 +24,10 @@ def parse(import_source):
 
     data = list()
     for valuation in df.to_dict("records"):
-
+        isin = valuation["ISIN Code"]
         data.append(
             {
-                "instrument": {"instrument_type": "product", "
+                "instrument": {"instrument_type": "product", "isin": isin},
                 "date": valuation["Valuation Date"].strftime("%Y-%m-%d"),
                 "net_value": round(valuation["Bid"], 6),
                 "calculated": False,
@@ -4,7 +4,7 @@ import datetime
 import re
 
 from wbportfolio.import_export.utils import convert_string_to_number
-from wbportfolio.models import
+from wbportfolio.models import Trade
 
 product_mapping = {
     "2304": {
@@ -42,16 +42,14 @@ def parse(import_source):
     # Iterate through the CSV File and parse the data into a list
     data = list()
     for customer_trade in csv_reader:
-
+        isin = product_mapping[identifier][customer_trade["Anteilklasse"].strip()]
 
         transaction_date = datetime.datetime.strptime(customer_trade["Datum"], "%Y%m%d").date()
         shares = round(convert_string_to_number(customer_trade["Saldo - Anzahl"]), 4)
-        portfolio = product.primary_portfolio
         data.append(
             {
-                "underlying_instrument": {"
-                "portfolio":
-                "currency__key": product.currency.key,
+                "underlying_instrument": {"isin": isin, "instrument_type": "product"},
+                "portfolio": {"isin": isin, "instrument_type": "product"},
                 "transaction_date": transaction_date.strftime("%Y-%m-%d"),
                 "value_date": transaction_date.strftime("%Y-%m-%d"),
                 "shares": shares,
@@ -6,7 +6,6 @@ import pandas as pd
 from wbcore.contrib.currency.models import Currency
 
 from wbportfolio.import_export.utils import convert_string_to_number
-from wbportfolio.models import ProductGroup
 
 
 def parse(import_source):
@@ -24,7 +23,7 @@ def parse(import_source):
             basket["Assetart"] == "TRES" and "Sichtguthaben" not in basket["Assetbezeichnung"]
         ):
             continue
-
+        identifier = basket["Fonds-Nr."].strip()
         valuation_date = datetime.datetime.strptime(basket["Datum"], "%Y%m%d")
 
         currency_key = basket["Währung"]
@@ -67,7 +66,7 @@ def parse(import_source):
                 "exchange": underlying_quote.get("exchange", None),
                 "portfolio": {
                     "instrument_type": "product_group",
-                    "
+                    "identifier": identifier,
                 },
                 "is_estimated": False,
                 "currency__key": currency_key,
@@ -3,8 +3,6 @@ import csv
 import datetime
 import re
 
-from wbportfolio.models import Product
-
 
 def file_name_parse(file_name):
     dates = re.findall(r"([0-9]{4}-[0-9]{2}-[0-9]{2})", file_name)
@@ -37,11 +35,11 @@ def parse(import_source):
 
     for valuation in csv_reader:
         valuation_date = datetime.datetime.strptime(valuation["Bewertungsdatum"], "%Y%m%d").date()
-
+        isin = valuation["ISIN"]
         if valuation_date.weekday() not in [5, 6]:
             data.append(
                 {
-                    "instrument": {"instrument_type": "product", "
+                    "instrument": {"instrument_type": "product", "isin": isin},
                     "date": valuation_date.strftime("%Y-%m-%d"),
                     "net_value": float(valuation["Nettoinventarwert"]),
                     "calculated": False,
@@ -2,30 +2,27 @@ import json
 
 import pandas as pd
 
-from wbportfolio.models import Fees
+from wbportfolio.models import Fees
 
 BASE_MAPPING = {"managementFee": "total_value", "performanceFee": "total_value", "date": "transaction_date"}
 
 
 def parse(import_source):
-    def _process_df(df,
+    def _process_df(df, product_isin):
         df = df.rename(columns=BASE_MAPPING).dropna(how="all", axis=1)
         df = df.drop(columns=df.columns.difference(BASE_MAPPING.values()))
-        df["
-        df["linked_product"] = product.id
-        df["underlying_instrument"] = product.id
-        df["portfolio"] = product.primary_portfolio.id
+        df["linked_product"] = [{"isin": product_isin}] * df.shape[0]
         return df
 
     content = json.load(import_source.file)
     data = []
-    if
+    if isin := content.get("isin", None):
         if mngt_data := content.get("management_fees", None):
-            df = _process_df(pd.DataFrame(mngt_data),
+            df = _process_df(pd.DataFrame(mngt_data), isin)
             df["transaction_subtype"] = Fees.Type.MANAGEMENT.value
             data.extend(df.to_dict("records"))
         if perf_data := content.get("performance_fees", None):
-            df = _process_df(pd.DataFrame(perf_data),
+            df = _process_df(pd.DataFrame(perf_data), isin)
             df["transaction_subtype"] = Fees.Type.PERFORMANCE.value
             data.extend(df.to_dict("records"))
 
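The _process_df change above attaches the same lookup dict to every fee row by broadcasting a list across the frame. A runnable sketch with made-up data (ISIN and values are hypothetical):

import pandas as pd

df = pd.DataFrame({"total_value": [12.5, 13.1], "transaction_date": ["2024-01-31", "2024-02-29"]})
df["linked_product"] = [{"isin": "LU2170995018"}] * df.shape[0]  # one dict object per row

print(df.to_dict("records")[0])
# {'total_value': 12.5, 'transaction_date': '2024-01-31', 'linked_product': {'isin': 'LU2170995018'}}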
@@ -5,29 +5,23 @@ import numpy as np
 import pandas as pd
 from wbcore.contrib.io.models import ImportSource
 
-from wbportfolio.models import
+from wbportfolio.models import Trade
 
 
 def parse_row(obj: Dict, import_source: ImportSource) -> Dict:
-
-
-
-        shares =
-
-
-        portfolio
-
-
-
-
-
-
-            "bank": obj["custodian"],
-            "transaction_subtype": Trade.Type.REDEMPTION if shares < 0 else Trade.Type.SUBSCRIPTION,
-            "price": round(obj["price"], 6),
-        }
-    except Product.DoesNotExist:
-        import_source.log += f"Product with ISIN {isin} does not exists."
+    isin = obj["underlying_instrument__isin"]
+    shares = obj["shares"]
+    if import_source.source.import_parameters.get("negate_shares", False):
+        shares = -1 * shares
+    return {
+        "underlying_instrument": {"isin": isin, "instrument_type": "product"},
+        "portfolio": {"isin": isin, "instrument_type": "product"},
+        "transaction_date": obj["transaction_date"].strftime("%Y-%m-%d"),
+        "shares": shares,
+        "bank": obj["custodian"],
+        "transaction_subtype": Trade.Type.REDEMPTION if shares < 0 else Trade.Type.SUBSCRIPTION,
+        "price": round(obj["price"], 6),
+    }
 
 
 def parse(import_source):
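The rewritten parse_row above no longer looks up a Product and instead flips the share sign only when the source's new negate_shares import parameter is set. A small standalone sketch of that switch (the parameter name comes from the diff; the helper name and sample values are invented for illustration):

def apply_negate_shares(shares: float, import_parameters: dict) -> float:
    # Hypothetical helper mirroring the guard in parse_row: flip the sign only when the source opts in.
    if import_parameters.get("negate_shares", False):
        shares = -1 * shares
    return shares

assert apply_negate_shares(10.0, {"negate_shares": True}) == -10.0
assert apply_negate_shares(10.0, {}) == 10.0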
@@ -3,8 +3,6 @@ import re
 
 import xlrd
 
-from wbportfolio.models import Product
-
 
 def file_name_parse(file_name):
     isin = re.findall("([A-Z]{2}[A-Z0-9]{9}[0-9]{1})", file_name)
@@ -18,8 +16,7 @@ def parse(import_source):
     book = xlrd.open_workbook(file_contents=import_source.file.read())
 
     parts = file_name_parse(import_source.file.name)
-
-    product = Product.objects.get(isin=parts["isin"])
+    isin = parts["isin"]
 
     equity_sheet = book.sheet_by_name("Equity")
     cash_sheet = book.sheet_by_name("Cash")
@@ -62,7 +59,7 @@ def parse(import_source):
                     "currency__key": equity_sheet.cell_value(row, 10),
                     "refinitiv_identifier_code": ric,
                 },
-                "portfolio": {"instrument_type": "product", "
+                "portfolio": {"instrument_type": "product", "isin": isin},
                 "is_estimated": False,
                 "exchange": exchange_data,
                 "initial_shares": initial_shares,
@@ -82,7 +79,7 @@ def parse(import_source):
                     "instrument_type": "cash",
                     "currency__key": cash_sheet.cell_value(row_index + 1, 1),
                 },
-                "portfolio": {"instrument_type": "product", "
+                "portfolio": {"instrument_type": "product", "isin": isin},
                 "is_estimated": False,
                 "name": cash_sheet.cell_value(row_index + 1, 1),
                 "initial_shares": round(cash_sheet.cell_value(row_index + 1, 2), 4),
@@ -1,39 +1,11 @@
 from io import BytesIO
-from typing import Dict
 
 import pandas as pd
-from wbcore.contrib.io.models import ImportSource
 
 from wbportfolio.models import Product, Trade
 
 
-def parse_row(obj: Dict, import_source: ImportSource) -> Dict:
-    if _id := obj["underlying_instrument_id"]:
-        product = Product.objects.get(id=_id)
-        shares = -1 * obj["shares"]
-        portfolio = product.primary_portfolio
-        return {
-            "underlying_instrument": {"id": product.id, "instrument_type": "product"},
-            "currency__key": product.currency.key,
-            "portfolio": portfolio.id,
-            "transaction_date": obj["transaction_date"].strftime("%Y-%m-%d"),
-            "shares": shares,
-            "bank": obj["custodian"] if obj["custodian"] else "NA",
-            "transaction_subtype": Trade.Type.REDEMPTION if shares < 0 else Trade.Type.SUBSCRIPTION,
-            "price": round(obj["price"], 6),
-        }
-
-
 def parse(import_source):
-    def _get_underlying_instrument_id(isin):
-        try:
-            product = Product.objects.get(isin=isin)
-            return product.id
-        except Product.DoesNotExist:
-            import_source.log += f"Product with ISIN {isin} does not exists."
-
-        return None
-
     df_dict = pd.read_excel(BytesIO(import_source.file.read()), engine="openpyxl")
     df_dict = df_dict.rename(
         columns={
@@ -47,22 +19,31 @@ def parse(import_source):
     )
     df_dict = df_dict.where(pd.notnull(df_dict), None)
     df_dict["transaction_date"] = pd.to_datetime(df_dict["transaction_date"])
-
-    df_dict["underlying_instrument_id"] = df_dict["underlying_instrument__isin"].apply(
-        lambda x: _get_underlying_instrument_id(x)
-    )
+    product_isins = set()
 
     data = list()
-    for
-
-
-
-
+    for obj in df_dict.to_dict("records"):
+        shares = -1 * obj["shares"]
+        isin = obj["underlying_instrument__isin"]
+        data.append(
+            {
+                "underlying_instrument": {"isin": isin, "instrument_type": "product"},
+                "portfolio": {"isin": isin, "instrument_type": "product"},
+                "transaction_date": obj["transaction_date"].strftime("%Y-%m-%d"),
+                "shares": shares,
+                "bank": obj["custodian"] if obj["custodian"] else "NA",
+                "transaction_subtype": Trade.Type.REDEMPTION if shares < 0 else Trade.Type.SUBSCRIPTION,
+                "price": round(obj["price"], 6),
+            }
+        )
+        product_isins.add(isin)
+
+    underlying_instruments = Product.objects.filter(isin__in=product_isins).values_list("id", flat=True)
 
     return {
         "data": data,
         "history": {
-            "underlying_instruments": underlying_instruments,
+            "underlying_instruments": list(underlying_instruments),
             "transaction_date": df_dict.transaction_date.max().strftime("%Y-%m-%d"),
         },
     }