finbourne-sdk-utils 0.0.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- features/__init__.py +0 -0
- features/main.py +11 -0
- finbourne_sdk_utils/__init__.py +8 -0
- finbourne_sdk_utils/cocoon/__init__.py +34 -0
- finbourne_sdk_utils/cocoon/async_tools.py +94 -0
- finbourne_sdk_utils/cocoon/cocoon.py +1862 -0
- finbourne_sdk_utils/cocoon/cocoon_printer.py +455 -0
- finbourne_sdk_utils/cocoon/config/domain_settings.json +125 -0
- finbourne_sdk_utils/cocoon/config/seed_sample_data.json +36 -0
- finbourne_sdk_utils/cocoon/dateorcutlabel.py +198 -0
- finbourne_sdk_utils/cocoon/instruments.py +482 -0
- finbourne_sdk_utils/cocoon/properties.py +442 -0
- finbourne_sdk_utils/cocoon/seed_sample_data.py +137 -0
- finbourne_sdk_utils/cocoon/systemConfiguration.py +92 -0
- finbourne_sdk_utils/cocoon/transaction_type_upload.py +136 -0
- finbourne_sdk_utils/cocoon/utilities.py +1877 -0
- finbourne_sdk_utils/cocoon/validator.py +243 -0
- finbourne_sdk_utils/extract/__init__.py +1 -0
- finbourne_sdk_utils/extract/group_holdings.py +400 -0
- finbourne_sdk_utils/iam/__init__.py +1 -0
- finbourne_sdk_utils/iam/roles.py +74 -0
- finbourne_sdk_utils/jupyter_tools/__init__.py +2 -0
- finbourne_sdk_utils/jupyter_tools/hide_code_button.py +23 -0
- finbourne_sdk_utils/jupyter_tools/stop_execution.py +14 -0
- finbourne_sdk_utils/logger/LusidLogger.py +41 -0
- finbourne_sdk_utils/logger/__init__.py +1 -0
- finbourne_sdk_utils/lpt/__init__.py +0 -0
- finbourne_sdk_utils/lpt/back_compat.py +20 -0
- finbourne_sdk_utils/lpt/cash_ladder.py +191 -0
- finbourne_sdk_utils/lpt/connect_lusid.py +64 -0
- finbourne_sdk_utils/lpt/connect_none.py +5 -0
- finbourne_sdk_utils/lpt/connect_token.py +9 -0
- finbourne_sdk_utils/lpt/dfq.py +321 -0
- finbourne_sdk_utils/lpt/either.py +65 -0
- finbourne_sdk_utils/lpt/get_instruments.py +101 -0
- finbourne_sdk_utils/lpt/lpt.py +374 -0
- finbourne_sdk_utils/lpt/lse.py +188 -0
- finbourne_sdk_utils/lpt/map_instruments.py +164 -0
- finbourne_sdk_utils/lpt/pager.py +32 -0
- finbourne_sdk_utils/lpt/record.py +13 -0
- finbourne_sdk_utils/lpt/refreshing_token.py +43 -0
- finbourne_sdk_utils/lpt/search_instruments.py +48 -0
- finbourne_sdk_utils/lpt/stdargs.py +154 -0
- finbourne_sdk_utils/lpt/txn_config.py +128 -0
- finbourne_sdk_utils/lpt/txn_config_yaml.py +493 -0
- finbourne_sdk_utils/pandas_utils/__init__.py +0 -0
- finbourne_sdk_utils/pandas_utils/lusid_pandas.py +128 -0
- finbourne_sdk_utils-0.0.24.dist-info/LICENSE +21 -0
- finbourne_sdk_utils-0.0.24.dist-info/METADATA +25 -0
- finbourne_sdk_utils-0.0.24.dist-info/RECORD +52 -0
- finbourne_sdk_utils-0.0.24.dist-info/WHEEL +5 -0
- finbourne_sdk_utils-0.0.24.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,321 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import re
|
|
3
|
+
import pandas as pd
|
|
4
|
+
import numpy as np
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def parse(with_inputs=True, args=None):
    """Build and run the command-line parser for the DataFrame query tool.

    :param with_inputs: when True, require one or more positional input paths.
    :param args: explicit argument list for testing; defaults to sys.argv.
    :return: the parsed argparse.Namespace.
    """
    p = argparse.ArgumentParser(
        description="DataFrame Query Tool", fromfile_prefix_chars="@"
    )

    # Positional inputs are optional so a caller can supply a frame directly.
    if with_inputs:
        p.add_argument("input", nargs="+", help="source csv file")

    # Query shaping options
    p.add_argument("-c", "--columns", action="store_true", help="display columns")
    p.add_argument(
        "-s", "--select", nargs="*", metavar="column", help="fields to select"
    )
    p.add_argument(
        "-w",
        "--where",
        nargs="*",
        metavar="column=value",
        help="filtering eg. -w 'cost>100' 'cost>-100' or -w "
        "'strat=Tech,Pharma' 'region!=UK'",
    )
    p.add_argument(
        "-o", "--order", nargs="*", metavar="column", help="fields to sort by"
    )
    p.add_argument(
        "-u", "--unique", action="store_true", help="unique values only"
    )
    p.add_argument(
        "-g", "--groupby", nargs="*", metavar="column", help="fields to groupby"
    )

    # Output options
    p.add_argument("--filename", metavar="filename", help="save output to file")
    p.add_argument(
        "-f", "--first", type=int, default=0, help='show first "n" records'
    )
    p.add_argument(
        "--strings", action="store_true", help="interpret all fields as strings"
    )
    p.add_argument(
        "-l", "--last", type=int, default=0, help='show last "n" records'
    )
    p.add_argument(
        "-j", "--join", nargs="+", help="join to other frame. path, criterion"
    )
    p.add_argument("--dp", type=int, default=2)
    p.add_argument("-t", "--transpose", action="store_true")
    p.add_argument("-m", action="store_true")
    p.add_argument("-i", "--index", action="store_true")
    p.add_argument("-x", "--xls", action="store_true")

    # Input-reading options
    p.add_argument("--glob", action="store_true")
    p.add_argument("--identify", action="store_true")
    p.add_argument("--separator", help="separator from text files")
    p.add_argument(
        "--tab", action="store_true", help="tab separator for text files"
    )
    p.add_argument(
        "--latin", action="store_true", help="read files with latin-1 encoding"
    )
    p.add_argument(
        "--markdown", action="store_true", help="give output in markdown format"
    )
    p.add_argument("--count", action="store_true", help="count records in groups")
    p.add_argument(
        "--single", nargs="+", help="Single column uniqueness constraint."
    )

    return p.parse_args(args)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def apply_args(args, given_df):
    """Load the input frame(s) and apply the query options in *args*.

    :param args: parsed namespace from parse()
    :param given_df: an already-loaded DataFrame, or None to read args.input
    :return: the resulting (possibly joined/filtered/grouped/sorted) DataFrame
    """
    # Only push a row limit down into the reader when it cannot change the
    # result: a single input file, no tail request, and no filtering,
    # grouping or sorting that would need the full data set.
    nrows = (
        args.first
        if args.first > 0
        and args.last == 0
        and given_df is None
        and args.groupby is None
        and args.where is None
        and args.order is None
        and len(args.input) == 1
        else None
    )

    if args.glob:
        import glob

        # Expand the (single) input pattern into the matching file list
        args.input = glob.glob(args.input[0])

    if args.columns and given_df is None:
        # Column listing only needs a couple of rows from the first file
        args.input = args.input[0:1]
        nrows = 2

    reader_args = {"nrows": nrows}
    if args.strings:
        reader_args["dtype"] = str

    if args.tab:
        reader_args["sep"] = "\t"
    elif args.separator:
        reader_args["sep"] = args.separator

    if args.latin:
        reader_args["encoding"] = "latin-1"

    def load_frame(path):
        # Dispatch on extension: pickle, Excel ("book.xlsx:Sheet" selects a
        # sheet), otherwise csv/text via read_csv with the options above.
        if path.endswith(".pk"):
            return pd.read_pickle(path)
        elif ".xls" in path.lower():
            s = path.split(":")
            if len(s) == 2:
                return pd.read_excel(
                    s[0], engine="openpyxl", **reader_args, sheet_name=s[1]
                )
            return pd.read_excel(path, engine="openpyxl", **reader_args)
        else:
            return pd.read_csv(path, **reader_args)

    if given_df is not None:
        dfs = [("Given", given_df)]
    else:
        dfs = [(fn, load_frame(fn)) for fn in args.input]

    if args.identify:
        # Tag every row with its source file name and original row index
        for fn, df in dfs:
            df["FILE-NAME"] = fn
            df["FILE-INDEX"] = df.index

    if len(dfs) == 1:
        df = dfs[0][1]
    else:
        df = pd.concat([d[1] for d in dfs], ignore_index=True, sort=False)
    del dfs

    if args.columns:
        # Caller only wants the column names; skip all further processing
        return df

    if args.join:
        # args.join is [path, "left_col=right_col", ...]; a bare "col"
        # (no '=') joins on the same name on both sides.
        cols = [c.split("=") for c in args.join[1:]]
        df = df.merge(
            load_frame(args.join[0]),
            how="left",
            left_on=[c[0] for c in cols],
            right_on=[c[-1] for c in cols],
            indicator=True,
        )

    if args.where:
        for c in args.where:

            # Get keys, values and operations.
            # NOTE: ',' inside the character classes is a literal comma, so a
            # comma also separates values (e.g. 'strat=Tech,Pharma').
            kv = re.findall(r"[^>,<,=]+", c)
            ops = re.findall(r"[>,<,=]", c)

            if len(kv) < 2:
                raise ValueError(f"No keys or values found in clause: '{c}'")

            # define behaviour: bit flags so '>=' combines to GE, '<=' to LE
            EQ = 1
            GT = 2
            LT = 4
            GE = EQ + GT  # 3
            LE = EQ + LT  # 5

            op = EQ if "=" in ops else 0
            op += GT if ">" in ops else 0
            op += LT if "<" in ops else 0

            col = kv[0]
            invert = col.endswith("!")  # trailing '!' negates the clause

            if invert:
                col = col[:-1]

            if kv[1].startswith("IN:") and kv[1].endswith(".csv"):
                # Membership test against the first column of another csv
                s = set(pd.read_csv(kv[1][3:]).iloc[:, 0].astype(str))
            else:
                s = kv[1:]

            if len(s) == 1:
                if "*" in kv[1]:
                    # Wildcard value: translate '*' into a regex '.*' match
                    crit = (
                        df[col]
                        .astype(str)
                        .fillna("")
                        .str.match("({})".format(kv[1].replace("*", ".*")))
                    )
                else:
                    # Coerce the comparison value to match the column dtype
                    dflt = ""
                    v = kv[1]
                    if v.endswith(" as int"):
                        v = int(v[:-7])
                        dflt = 0
                    elif df[col].dtype == int:
                        v = int(v)
                        dflt = 0
                    elif df[col].dtype == np.int64:
                        v = int(v)
                        dflt = 0
                    elif df[col].dtype == float:
                        v = float(v)
                        dflt = 0.0

                    # apply appropriate boolean operator to get filter mask
                    crit = {
                        EQ: lambda f, v: f == v,
                        GT: lambda f, v: f > v,
                        LT: lambda f, v: f < v,
                        GE: lambda f, v: f >= v,
                        LE: lambda f, v: f <= v,
                    }.get(op, lambda f, v: print("Invalid operation!"))(
                        df[col].fillna(dflt), v
                    )

            else:
                # Multiple values: simple membership test
                crit = df[col].isin(s)

            # apply filter mask
            if invert:
                df = df[~crit]
            else:
                df = df[crit]

    if args.groupby:
        if args.count:
            df = df.groupby(args.groupby, as_index=False).size().reset_index()
        else:
            df = df.groupby(args.groupby, as_index=False).sum()

    if args.select:
        # 'file:columns.txt' reads the column list (one name per line)
        if len(args.select) == 1 and args.select[0].startswith("file:"):
            args.select = [
                col.replace("\n", "")
                for col in open(args.select[0][5:], "r").readlines()
            ]
        df = df[args.select]

    if args.unique:
        df = df.drop_duplicates()

    if args.single:
        df = df.drop_duplicates(args.single)

    if args.order:
        df = df.sort_values(args.order)

    return df
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
def dfq(args, given_df=None):
    """Execute the query described by *args* and display or save the result.

    :param args: parsed namespace from parse()
    :param given_df: optional DataFrame to query instead of reading files.
        Any non-DataFrame value is printed verbatim and the process exits.
    """
    if (given_df is not None) and (not isinstance(given_df, pd.DataFrame)):
        # e.g. an upstream error message - show it and stop
        print(given_df)
        exit(0)

    df = apply_args(args, given_df)

    if args.columns:
        print("\n".join(df.columns.values))
        exit()

    # Display a dataframe with no cropping
    def display_df(df, decimals=2):
        if args.xls:
            import xlwings as xw

            wb = xw.Book()
            wb.sheets[0].range("A1").options(index=args.index).value = df
        else:
            fmt = "{:,." + str(decimals) + "f}"
            pd.options.display.float_format = fmt.format
            pd.set_option("display.max_colwidth", 200)

            try:
                if args.transpose:
                    df = df.T
                    args.index = True
                with pd.option_context("display.width", None, "display.max_rows", 1000):
                    if args.markdown:
                        print(
                            df.fillna("").to_markdown(
                                index=args.index, floatfmt=f".{decimals}f"
                            )
                        )
                    else:
                        print(df.fillna("").to_string(index=args.index))
            except Exception:
                # fillna()/to_markdown can fail on exotic dtypes; fall back to
                # a plain rendering. FIX: was a bare 'except:', which also
                # trapped SystemExit and KeyboardInterrupt.
                print(df.to_string(index=args.index))

    # Write to args.filename if given, otherwise print to stdout.
    def display(df, subset=None, total=0):
        if args.filename:
            if subset:
                # Embed the subset name and row count into the filename
                filename = args.filename.replace(".", "-{}-{}.".format(subset, len(df)))
            else:
                filename = args.filename
            if filename.lower().endswith(".xlsx"):
                df.to_excel(filename, index=False, freeze_panes=(1, 0))
            elif filename.lower().endswith(".pk"):
                df.to_pickle(filename)
            else:
                df.to_csv(filename, index=False)
        else:
            if subset:
                print("{} {}".format(subset, len(df)))
            display_df(df, args.dp)

    if args.first > 0:
        display(df[: args.first], "First")

    if args.last > 0:
        display(df[-args.last :], "Last")

    if args.first == 0 and args.last == 0:
        display(df)
|
|
314
|
+
|
|
315
|
+
|
|
316
|
+
def main():
    """Command-line entry point: parse sys.argv and run the query."""
    arguments = parse()
    dfq(arguments)
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
# Script entry point when the module is executed directly.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
class Either:
    """Minimal Either/Result container holding a 'left' (failure) and/or
    'right' (success) value.

    Construction is deliberately permissive:
      * two positional args       -> (left, right)
      * one Either                -> copied
      * one dict-like             -> 'left'/'right' keys via .get()
      * one object w/ attributes  -> .left / .right
      * any other single value    -> treated as a 'right'
      * no positional args        -> 'left='/'right=' keyword arguments
    """

    def __init__(self, *args, **kwargs):

        # at least 2 arguments. Assume first two are Left and Right
        if len(args) >= 2:
            self.left = args[0]
            self.right = args[1]
        # 1 argument. May be 'dict like'
        elif len(args) == 1:
            if isinstance(args[0], Either):
                self.left = args[0].left
                self.right = args[0].right
            else:
                try:
                    self.right = args[0].get("right")
                    self.left = args[0].get("left")
                except (AttributeError, TypeError):
                    # Not dict-like: try plain attribute access instead.
                    # FIX: was a bare 'except:' - narrowed so unrelated
                    # errors (including KeyboardInterrupt) surface.
                    try:
                        self.right = args[0].right
                        self.left = args[0].left
                    except AttributeError:
                        pass

                if self.left is None and self.right is None:
                    # only 1 value, Just assume it's a 'right'
                    self.right = args[0]

        # No positional arguments - could be provided via kwargs
        else:
            self.left = kwargs.get("left", None)
            self.right = kwargs.get("right", None)

    # Call the left function if left is available
    # Otherwise call the right function
    def match(self, left, right):
        if self.left is not None:
            return left(self.left)

        return right(self.right)

    def is_left(self):
        return self.left is not None

    def if_left(self, left):
        # Returns None when there is no left value
        if self.left is not None:
            return left(self.left)

    def is_right(self):
        return self.right is not None

    def if_right(self, right):
        # Returns None when there is no right value
        if self.right is not None:
            return right(self.right)

    def __getattr__(self, name):
        # Attributes left unset by the permissive __init__ read as None
        return self.__dict__.get(name)

    def bind(self, fn):
        # Monadic bind: apply fn to the success value, propagate failures
        return Either(fn(self.right)) if self.left is None else self
|
|
60
|
+
|
|
61
|
+
def Left(v):
    """Wrap *v* as the failure ('left') side of an Either."""
    return Either(left=v)
|
|
63
|
+
|
|
64
|
+
def Right(v):
    """Wrap *v* as the success ('right') side of an Either."""
    return Either(right=v)
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
|
|
3
|
+
from finbourne_sdk_utils.lpt import lpt
|
|
4
|
+
from finbourne_sdk_utils.lpt import lse
|
|
5
|
+
from finbourne_sdk_utils.lpt import stdargs
|
|
6
|
+
|
|
7
|
+
# Tool identifier and short description (presumably consumed by the hosting
# lpt tool framework - verify against the tool registry).
TOOLNAME = "instr"
TOOLTIP = "Display specified Instruments"
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def parse(extend=None, args=None):
    """Build the stdargs parser for the instrument tool and parse *args*.

    :param extend: optional callback to add extra arguments.
    :param args: explicit argument list; defaults to the process arguments.
    """
    builder = stdargs.Parser("Get Instruments", ["filename", "limit", "asat"])
    builder = builder.add("instrument", nargs="*")
    builder = builder.add(
        "--type", "-t", default="ClientInternal", help="Instrument type"
    )
    builder = builder.add("--identifiers", nargs="*", help="identifiers to display")
    builder = builder.add(
        "--properties", nargs="*", help="properties required for display"
    )
    builder = builder.add(
        "--from",
        dest="from_file",
        nargs=2,
        metavar=("filename.csv", "column"),
        help="load values from file",
    )
    builder = builder.add("--effective_at")
    return builder.extend(extend).parse(args)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def process_args(api, args):
    """Fetch the requested instruments and return them as a DataFrame.

    :param api: connected API wrapper; api.call.get_instruments(...) returns
        an Either-like result supporting .bind() (see lpt/either.py).
    :param args: parsed namespace from parse().
    :return: the result of the Either chain - a DataFrame on success.
    """
    # Property keys are requested in batches of at most MAX_PROPS per call.
    MAX_PROPS = 50

    def success(result):
        # Convert an API result page into a DataFrame, renaming the
        # 'identifiers.KEY:<id>' columns back to the plain identifier names.
        idents = [f"identifiers.KEY:{v}" for v in args.identifiers]
        columns = ["lusid_instrument_id", "name"]
        columns.extend(idents)
        if args.properties:
            columns.extend(["P:" + v for v in args.properties])

        df = lpt.to_df(result.content.values.values(), columns)
        return df.rename(columns=dict(zip(idents, args.identifiers)))

    if args.from_file:
        # Augment the instrument list with unique values taken from one
        # column of a csv file: --from filename.csv column
        df = pd.read_csv(args.from_file[0])[args.from_file[1:]].drop_duplicates()
        args.instrument.extend(df[args.from_file[1]].values)

    def step3(result, props_remaining, final_dataframe):
        # Merge each batch of property columns onto the accumulated frame,
        # then request the next batch (if any) via the Either chain.
        df = success(result)

        if final_dataframe is None:
            final_dataframe = df
        else:
            props = [c for c in df.columns.values if c.startswith("P:")]
            final_dataframe = final_dataframe.merge(
                df[["lusid_instrument_id"] + props],
                how="left",
                on="lusid_instrument_id",
            )

        if len(props_remaining) > 0:
            args.properties = props_remaining[:MAX_PROPS]
            remaining = props_remaining[MAX_PROPS:]
            return api.call.get_instruments(
                args.type, request_body=args.instrument, property_keys=args.properties
            ).bind(lambda r: step3(r, remaining, final_dataframe))
        return final_dataframe

    def step2(result=None):
        # Issue the main query. 'result', when present, holds the full set
        # of Instrument property keys from the 'all' lookup below.
        next_step = success
        if result is not None:
            # Contains full set of instrument properties
            l = list(
                result.apply(lambda r: f"Instrument/{r['scope']}/{r['code']}", axis=1)
            )
            args.properties = l[:MAX_PROPS]  # limitation
            if len(l) > MAX_PROPS:
                # More than one batch needed - continue via step3
                next_step = lambda r: step3(r, l[MAX_PROPS:], None)

        return api.call.get_instruments(
            args.type,
            request_body=args.instrument,
            property_keys=args.properties,
            effective_at=lpt.to_date(args.effective_at),
        ).bind(next_step)

    if args.identifiers is None:
        # Default to displaying the identifier type that was queried
        args.identifiers = [args.type]

    if args.properties:
        if args.properties[0] == "all":
            # '--properties all' first queries the complete Instrument
            # property list, then feeds it into step2.
            from finbourne_sdk_utils.lpt import qry_properties as qp

            return qp.process_args(api, qp.parse(args=["-d", "Instrument"])).bind(step2)

    return step2()
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
# Standalone tool
|
|
100
|
+
def main(parse=parse, display_df=lpt.display_df):
    """Standalone entry point: run the standard parse/connect/process/display
    flow. Both the parser and the display function can be overridden by the
    caller (e.g. to embed this tool elsewhere).
    """
    return lpt.standard_flow(parse, lse.connect, process_args, display_df)
|