finbourne-sdk-utils 0.0.24__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (52)
  1. features/__init__.py +0 -0
  2. features/main.py +11 -0
  3. finbourne_sdk_utils/__init__.py +8 -0
  4. finbourne_sdk_utils/cocoon/__init__.py +34 -0
  5. finbourne_sdk_utils/cocoon/async_tools.py +94 -0
  6. finbourne_sdk_utils/cocoon/cocoon.py +1862 -0
  7. finbourne_sdk_utils/cocoon/cocoon_printer.py +455 -0
  8. finbourne_sdk_utils/cocoon/config/domain_settings.json +125 -0
  9. finbourne_sdk_utils/cocoon/config/seed_sample_data.json +36 -0
  10. finbourne_sdk_utils/cocoon/dateorcutlabel.py +198 -0
  11. finbourne_sdk_utils/cocoon/instruments.py +482 -0
  12. finbourne_sdk_utils/cocoon/properties.py +442 -0
  13. finbourne_sdk_utils/cocoon/seed_sample_data.py +137 -0
  14. finbourne_sdk_utils/cocoon/systemConfiguration.py +92 -0
  15. finbourne_sdk_utils/cocoon/transaction_type_upload.py +136 -0
  16. finbourne_sdk_utils/cocoon/utilities.py +1877 -0
  17. finbourne_sdk_utils/cocoon/validator.py +243 -0
  18. finbourne_sdk_utils/extract/__init__.py +1 -0
  19. finbourne_sdk_utils/extract/group_holdings.py +400 -0
  20. finbourne_sdk_utils/iam/__init__.py +1 -0
  21. finbourne_sdk_utils/iam/roles.py +74 -0
  22. finbourne_sdk_utils/jupyter_tools/__init__.py +2 -0
  23. finbourne_sdk_utils/jupyter_tools/hide_code_button.py +23 -0
  24. finbourne_sdk_utils/jupyter_tools/stop_execution.py +14 -0
  25. finbourne_sdk_utils/logger/LusidLogger.py +41 -0
  26. finbourne_sdk_utils/logger/__init__.py +1 -0
  27. finbourne_sdk_utils/lpt/__init__.py +0 -0
  28. finbourne_sdk_utils/lpt/back_compat.py +20 -0
  29. finbourne_sdk_utils/lpt/cash_ladder.py +191 -0
  30. finbourne_sdk_utils/lpt/connect_lusid.py +64 -0
  31. finbourne_sdk_utils/lpt/connect_none.py +5 -0
  32. finbourne_sdk_utils/lpt/connect_token.py +9 -0
  33. finbourne_sdk_utils/lpt/dfq.py +321 -0
  34. finbourne_sdk_utils/lpt/either.py +65 -0
  35. finbourne_sdk_utils/lpt/get_instruments.py +101 -0
  36. finbourne_sdk_utils/lpt/lpt.py +374 -0
  37. finbourne_sdk_utils/lpt/lse.py +188 -0
  38. finbourne_sdk_utils/lpt/map_instruments.py +164 -0
  39. finbourne_sdk_utils/lpt/pager.py +32 -0
  40. finbourne_sdk_utils/lpt/record.py +13 -0
  41. finbourne_sdk_utils/lpt/refreshing_token.py +43 -0
  42. finbourne_sdk_utils/lpt/search_instruments.py +48 -0
  43. finbourne_sdk_utils/lpt/stdargs.py +154 -0
  44. finbourne_sdk_utils/lpt/txn_config.py +128 -0
  45. finbourne_sdk_utils/lpt/txn_config_yaml.py +493 -0
  46. finbourne_sdk_utils/pandas_utils/__init__.py +0 -0
  47. finbourne_sdk_utils/pandas_utils/lusid_pandas.py +128 -0
  48. finbourne_sdk_utils-0.0.24.dist-info/LICENSE +21 -0
  49. finbourne_sdk_utils-0.0.24.dist-info/METADATA +25 -0
  50. finbourne_sdk_utils-0.0.24.dist-info/RECORD +52 -0
  51. finbourne_sdk_utils-0.0.24.dist-info/WHEEL +5 -0
  52. finbourne_sdk_utils-0.0.24.dist-info/top_level.txt +2 -0
@@ -0,0 +1,164 @@ finbourne_sdk_utils/lpt/map_instruments.py
+ import os
+
+ import pandas as pd
+
+ from finbourne_sdk_utils.lpt import lpt
+ from finbourne_sdk_utils.lpt import lse
+ from finbourne_sdk_utils.lpt import stdargs
+ from finbourne_sdk_utils.lpt.either import Either
+
+ mapping_prefixes = {"Figi": "FIGI", "ClientInternal": "INT", "QuotePermId": "QPI"}
+
+ mapping_table = {}
+
+
+ def parse(extend=None, args=None):
+     return (
+         stdargs.Parser("Map Instruments", ["filename"])
+         .add("--folder", help="include all 'txn' files in the folder")
+         .add("input", nargs="*", metavar="file", help="file(s) containing instruments")
+         .add(
+             "--column",
+             metavar="input-column",
+             default="instrument_uid",
+             help="column name for instrument column",
+         )
+         .extend(extend)
+         .parse(args)
+     )
+
+
+ def process_args(api, args):
+     if args.folder:
+         args.input.extend(
+             [
+                 os.path.join(args.folder, f)
+                 for f in os.listdir(args.folder)
+                 if "-txn-" in f and f.endswith(".csv")
+             ]
+         )
+
+     df = (
+         pd.concat(
+             [lpt.read_csv(f)[[args.column]].drop_duplicates() for f in args.input],
+             ignore_index=True,
+             sort=True,
+         )
+         .drop_duplicates()
+         .reset_index(drop=True)
+     )
+
+     df.columns = ["FROM"]
+     df["TO"] = df["FROM"]
+
+     return map_instruments(api, df, "TO")
+
+
+ def main():
+     lpt.standard_flow(parse, lse.connect, process_args)
+
+
+ def map_instruments(api, df, column):
+     WORKING = "__:::__"  # temporary column name
+
+     # Apply any known mappings first, to avoid unnecessary I/O
+
+     if len(mapping_table) > 0:
+         srs = df[column].map(mapping_table)
+         srs = srs[srs.notnull()]
+         if len(srs) > 0:
+             df.loc[srs.index, column] = srs
+
+     # Updates the mapping table
+     def update_mappings(src, prefix):
+         mapping_table.update(
+             {prefix + k: v.lusid_instrument_id for k, v in src.items()}
+         )
+
+     def batch_query(instr_type, prefix, outstanding):
+         if len(outstanding) > 0:
+             batch = outstanding[:500]  # records to process now
+             remainder = outstanding[500:]  # remaining records
+
+             # Called if get_instruments() succeeds
+             def get_success(result):
+                 get_found = result.content.values
+                 get_failed = result.content.failed
+
+                 # Update successfully found items
+                 update_mappings(get_found, prefix)
+
+                 if len(get_failed) > 0:
+                     if instr_type == "ClientInternal":
+                         # For un-mapped internal codes, we will try to add (upsert)
+
+                         # Called if upsert_instruments() succeeds
+                         def add_success(result):
+                             add_worked = result.content.values
+                             add_failed = result.content.failed
+
+                             if len(add_failed) > 0:
+                                 return Either.Left("Failed to add internal instruments")
+
+                             # Update successfully added items
+                             update_mappings(add_worked, prefix)
+
+                             # Kick off the next batch
+                             return batch_query(instr_type, prefix, remainder)
+
+                         # Create the upsert request from the failed items
+                         request = {
+                             k: api.models.InstrumentDefinition(
+                                 name=v.id, identifiers={"ClientInternal": v.id}
+                             )
+                             for k, v in get_failed.items()
+                         }
+
+                         return api.call.upsert_instruments(request).bind(add_success)
+                     else:
+                         # Instruments are not mapped. Nothing we can do.
+                         return Either.Left(
+                             "Failed to locate instruments of type {}".format(instr_type)
+                         )
+                 else:
+                     # No failures, kick off the next batch
+                     return batch_query(instr_type, prefix, remainder)
+
+             return api.call.get_instruments(
+                 instr_type, list(batch[WORKING].values)
+             ).bind(get_success)
+         else:
+             # No records remaining. Return the now-enriched dataframe
+             return Either.Right(df)
+
+     def map_type(key, instr_type):
+         prefix = key + ":"
+         subset = df[df[column].str.startswith(prefix)]
+
+         # See if there are any entries of this type
+         if len(subset) > 0:
+             width = len(prefix)
+             uniques = subset[[column]].drop_duplicates(column)
+             uniques[WORKING] = uniques[column].str.slice(width)
+
+             def map_success(v):
+                 df.loc[subset.index, column] = subset[column].map(mapping_table)
+                 return Either.Right(df)
+
+             return batch_query(instr_type, prefix, uniques).bind(map_success)
+         else:
+             # Nothing to be done, pass the full result back
+             return Either.Right(df)
+
+     return (
+         map_type("FIGI", "Figi")
+         .bind(lambda r: map_type("INT", "ClientInternal"))
+         .bind(lambda r: map_type("QPI", "QuotePermId"))
+     )
+
+
+ def include_mappings(path):
+     if path:
+         df = lpt.read_csv(path)
+
+         mapping_table.update(df.set_index("FROM")["TO"].to_dict())
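For orientation, a minimal sketch of how these helpers combine outside the CLI flow. The FIGI value and the CSV name are illustrative placeholders, and api stands for the connected handle that lpt.standard_flow obtains via lse.connect; none of those specifics are shown in this diff.

import pandas as pd

from finbourne_sdk_utils.lpt import map_instruments as mi

def enrich(api):
    # Optionally pre-seed the module-level mapping_table from a CSV
    # with FROM/TO columns (hypothetical file name)
    mi.include_mappings("known_mappings.csv")

    # Identifier values carry the prefixes declared in mapping_prefixes
    df = pd.DataFrame({"TO": ["FIGI:BBG000C05BD1", "INT:MY-INTERNAL-1"]})

    # Returns Either.Right(df) with LUSID instrument ids substituted in,
    # or Either.Left(message) if a lookup/upsert fails
    return mi.map_instruments(api, df, "TO")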
@@ -0,0 +1,32 @@ finbourne_sdk_utils/lpt/pager.py
+ import pandas as pd
+ import re
+ import urllib.parse
+
+ from finbourne_sdk_utils.lpt.either import Either
+
+ rexp = re.compile(r".*page=([^=']{10,}).*")
+
+
+ def page_all_results(fetch_page, page_handler):
+     results = []
+
+     def got_page(result):
+         results.append(page_handler(result))
+
+         links = [l for l in result.content.links if l.relation == "NextPage"]
+
+         if len(links) > 0:
+             match = rexp.match(links[0].href)
+             if match:
+                 return urllib.parse.unquote(match.group(1))
+         return None
+
+     page = Either(None)
+     while True:
+         page = fetch_page(page.right).bind(got_page)
+         if page.is_left():
+             return page
+         if page.right is None:
+             break
+
+     return pd.concat(results, ignore_index=True, sort=False)
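A sketch of the calling convention page_all_results expects: fetch_page receives the previous page token (None on the first call) and returns an Either-wrapped API result, while page_handler turns each page into a DataFrame. The list_portfolios call and its page parameter below are stand-ins for any paged endpoint, not names confirmed by this diff.

from finbourne_sdk_utils.lpt import lpt
from finbourne_sdk_utils.lpt.pager import page_all_results

def all_portfolios(api, scope):
    def fetch_page(token):
        # Stand-in for any paged endpoint that accepts a page token
        return api.call.list_portfolios(scope, page=token)

    def handler(result):
        return lpt.to_df(result.content.values, ["id.code", "display_name"])

    # Concatenates the per-page frames, or returns the Left on failure
    return page_all_results(fetch_page, handler)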
@@ -0,0 +1,13 @@ finbourne_sdk_utils/lpt/record.py
+ # A simple key/value class with dot-notation access
+ class Rec:
+     def __init__(self, **kwargs):
+         self.__dict__.update(kwargs)
+
+     def __str__(self):
+         return str(self.__dict__)
+
+     def __iter__(self):
+         return self.__dict__.__iter__()
+
+     def to_dict(self):
+         return self.__dict__
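Usage is straightforward; a quick illustration of the dot access, iteration, and dict round-trip the class provides:

from finbourne_sdk_utils.lpt.record import Rec

r = Rec(code="PF-1", currency="GBP")
print(r.code)       # dot-notation access -> PF-1
print(list(r))      # iterates keys -> ['code', 'currency']
print(r.to_dict())  # -> {'code': 'PF-1', 'currency': 'GBP'}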
@@ -0,0 +1,43 @@ finbourne_sdk_utils/lpt/refreshing_token.py
+ import requests
+ from datetime import datetime
+ from datetime import timedelta
+ from collections import UserString
+
+ # Behaves like a string, but refreshes the
+ # OKTA credentials should they expire
+
+
+ class RefreshingToken(UserString):
+     def __init__(self, token_url, token_request_body, headers):
+
+         token_data = {"expires": datetime.now(), "credentials": ""}
+
+         def get_token():
+             if token_data["expires"] <= datetime.now():
+
+                 okta_response = requests.post(
+                     token_url, data=token_request_body, headers=headers
+                 )
+                 if okta_response.status_code != 200:
+                     print("OKTA authentication failed")
+                     print(okta_response.text)
+                     exit()
+
+                 d = dict(okta_response.json())
+                 token_data["expires"] = datetime.now() + timedelta(
+                     seconds=d.get("expires_in", 3600) - 60
+                 )
+                 token_data["credentials"] = d["access_token"]
+
+             return token_data["credentials"]
+
+         self.token_fn = get_token
+
+     # Call the token function to get the credentials
+     # (refreshing them if necessary)
+     # and then return the attribute for the resulting token
+     def __getattribute__(self, name):
+         token = object.__getattribute__(self, "token_fn")()
+         if name == "data":
+             return token
+         return token.__getattribute__(name)
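A usage sketch, assuming an OKTA password-grant flow; the URL and request body below are placeholders, not values taken from this package:

from finbourne_sdk_utils.lpt.refreshing_token import RefreshingToken

token = RefreshingToken(
    token_url="https://example.okta.com/oauth2/default/v1/token",  # placeholder
    token_request_body={"grant_type": "password", "username": "...", "password": "..."},
    headers={"Content-Type": "application/x-www-form-urlencoded"},
)

# Every read goes through __getattribute__, so the bearer value is
# re-fetched automatically once the cached token is within 60s of expiry
auth_header = {"Authorization": f"Bearer {token}"}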
@@ -0,0 +1,48 @@ finbourne_sdk_utils/lpt/search_instruments.py
+ import pandas as pd
+ import dateutil
+ from finbourne_sdk_utils.lpt import lpt
+ from finbourne_sdk_utils.lpt import lse
+ from finbourne_sdk_utils.lpt import stdargs
+
+ TOOLNAME = "instr_search"
+ TOOLTIP = "Search for Instruments"
+
+
+ def parse(extend=None, args=None):
+     return (
+         stdargs.Parser("Search Instruments", ["filename", "limit"])
+         .add("--properties", nargs="*", help="properties to search")
+         .add("--date")
+         .extend(extend)
+         .parse(args)
+     )
+
+
+ def process_args(api, args):
+     def success(result):
+         flat = [i for r in result.content for i in r.mastered_instruments]
+         if len(flat) > 0:
+             identifiers = sorted(set.union(*[set(i.identifiers.keys()) for i in flat]))
+             df = lpt.to_df(
+                 flat,
+                 ["name"] + ["identifiers.KEY:" + i + ".value" for i in identifiers],
+             )
+             df.columns = ["Name"] + identifiers
+             return df
+         else:
+             return "No Matches"
+
+     request = [
+         api.models.InstrumentSearchProperty(s[0], s[1])
+         for s in [p.split("=") for p in args.properties]
+     ]
+     return api.call.instruments_search(
+         instrument_search_property=request,
+         mastered_only=True,
+         mastered_effective_at=lpt.to_date(args.date),
+     ).bind(success)
+
+
+ # Standalone tool
+ def main(parse=parse, display_df=lpt.display_df):
+     lpt.standard_flow(parse, lse.connect, process_args, display_df)
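Since parse accepts an explicit argument list, the tool can be driven programmatically as well as from the shell. The property key below follows LUSID's domain/scope/code convention and is illustrative only:

from finbourne_sdk_utils.lpt import search_instruments

args = search_instruments.parse(
    args=["--properties", "Instrument/default/Name=BP", "--date", "2024-01-01"]
)
# search_instruments.main() runs the same flow against sys.argv,
# connecting first and then displaying the resulting DataFrame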
@@ -0,0 +1,154 @@ finbourne_sdk_utils/lpt/stdargs.py
+ import argparse
+
+ # Ensure standardisation of commonly used arguments
+
+
+ class Parser:
+
+     # Create a parser and add in the standard arguments
+     def __init__(self, description, sections=[]):
+         self.parser = argparse.ArgumentParser(
+             description=description, fromfile_prefix_chars="@"
+         )
+         self.post_processors = []
+         self.arguments = []
+
+         if "scope" in sections:
+             self.add("scope", help="Scope")
+
+         if "portfolio" in sections:
+             self.add("portfolio", help="Portfolio id")
+
+         if "date" in sections:
+             self.add("date", help="date YYYY-MM-DD")
+
+         if "input" in sections:
+             self.add("input", help="input filename")
+
+         if "properties" in sections:
+             self.add("--properties", nargs="+", help="List of property keys", default=[])
+
+         if "filename" in sections:
+             self.add(
+                 "-f", "--filename", metavar="filename.csv", help="write to this file"
+             )
+
+         if "limit" in sections:
+             self.add(
+                 "-l",
+                 "--limit",
+                 type=int,
+                 default=0,
+                 metavar="n",
+                 help="limit the number of results",
+             )
+
+         if "date_range" in sections:
+             self.add("-s", "--start_date", dest="start_date", metavar="YYYY-MM-DD")
+             self.add("-e", "--end_date", dest="end_date", metavar="YYYY-MM-DD")
+
+         if "quiet" in sections:
+             self.add(
+                 "-q",
+                 "--quiet",
+                 action="store_true",
+                 help="Quiet mode. Doesn't show the progress bar",
+             )
+
+         if "group" in sections:
+             self.add(
+                 "-g",
+                 "--group",
+                 action="store_true",
+                 help="Indicates use of Portfolio Groups",
+             )
+
+         if "optional_portfolio" in sections:
+             self.add(
+                 "-p",
+                 "--portfolio",
+                 dest="portfolio",
+                 help="Optional Portfolio id",
+             )
+
+         if "flush_scope" in sections:
+             self.add(
+                 "--flush_scope",
+                 dest="flush_scope",
+                 action="store_true",
+                 help="Flush all transactions in scope",
+             )
+
+         self.add(
+             "--secrets-file",
+             dest="secrets",
+             default="secrets.json",
+             help="path to secrets file",
+         )
+
+         self.add(
+             "--environment",
+             dest="env",
+             default=["lusid"],
+             nargs="+",
+             help="choose a special LUSID environment. E.g. 'ipsum_lorem'",
+         )
+
+         self.add(
+             "--stats",
+             dest="stats",
+             metavar="stats-file.csv",
+             const="-",
+             nargs="?",
+             help="Write statistics to file. [Leave blank for stdout]",
+         )
+
+         self.add("-d", "--debug", help=r"print debug messages, expected input: 'debug'")
+
+         if "asat" in sections:
+             self.add(
+                 "--asat",
+                 dest="asat",
+                 metavar="YYYY-MM-DDTHH:MM:SS.000",
+                 help="as-at time",
+             )
+
+         if "test" in sections:
+             self.add("--test", action="store_true", help="Run in test mode")
+
+         # If a filename parameter is given then pass output via dfq
+         # Can be suppressed by the NODFQ option
+         if "filename" in sections and "NODFQ" not in sections:
+             self.add(
+                 "--dfq",
+                 nargs=argparse.REMAINDER,
+                 help="pass the output through to 'dfq' - see `finbourne_sdk_utils dfq --help` for options",
+             )
+
+     def add(self, *args, **kwargs):
+         # Add arguments to a list, so they can be removed
+         self.arguments.append((args[0], args, kwargs))
+         # self.parser.add_argument(*args, **kwargs)
+         return self
+
+     def remove(self, key):
+         self.arguments = [tpl for tpl in self.arguments if tpl[0] != key]
+
+     def post_process(self, fn):
+         self.post_processors.append(fn)
+         return self
+
+     def parse(self, args=None):
+         for arg in self.arguments:
+             self.parser.add_argument(*arg[1], **arg[2])
+
+         args = self.parser.parse_args(args)
+         for fn in self.post_processors:
+             fn(args)
+
+         return args
+
+     def extend(self, fn):
+         if fn:
+             fn(self)
+         return self
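A small, self-contained example of the builder pattern above: pick standard sections, add a tool-specific flag, and parse an explicit argument list:

from finbourne_sdk_utils.lpt import stdargs

args = (
    stdargs.Parser("Demo tool", ["scope", "filename", "NODFQ"])
    .add("--verbose", action="store_true", help="chatty output")
    .parse(["my-scope", "--verbose", "-f", "out.csv"])
)
print(args.scope, args.verbose, args.filename)  # my-scope True out.csv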
@@ -0,0 +1,128 @@ finbourne_sdk_utils/lpt/txn_config.py
+ from finbourne_sdk_utils.lpt import lpt
+ from finbourne_sdk_utils.lpt import lse
+ from finbourne_sdk_utils.lpt import stdargs
+ from finbourne_sdk_utils.lpt import txn_config_yaml as tcy
+
+ TOOLNAME = "txn_cfg"
+ TOOLTIP = "Get/Set the transaction configuration"
+
+
+ def parse(extend=None, args=None):
+     return (
+         stdargs.Parser(
+             "Get/Set transaction configuration", ["filename", "limit", "NODFQ", "asat"]
+         )
+         .add(
+             "action",
+             choices=("get", "set", "try"),
+             help="get or set the config. 'try' can be used to validate a custom encoding",
+         )
+         .add("--raw", action="store_true", help="use raw (non-custom) encoding")
+         .add("--json", action="store_true", help="display the json to be sent")
+         .add("--group", action="store_true", help="set a single group")
+         .add(
+             "--force",
+             action="store_true",
+             help="set a single group, remove existing aliases for the group",
+         )
+         .extend(extend)
+         .parse(args)
+     )
+
+
+ def validate_group(txn_types, group):
+     for txn in txn_types:
+         for alias in txn.aliases:
+             assert alias.transaction_group == group, "More than one group in the list"
+
+
+ def rem_groups(txn_types_old, group, arg):
+     def still_valid(tt):
+         for cand in tt.aliases:
+             if cand.transaction_group != group:
+                 return True
+         if arg is not True:
+             raise AssertionError(
+                 "Existing group detected, use '--force' to remove them"
+             )
+         return False
+
+     def clear_out_group(tt):
+         check = len(tt.aliases)
+         tt.aliases = [cand for cand in tt.aliases if cand.transaction_group != group]
+
+         if len(tt.aliases) != check and arg is not True:
+             raise AssertionError(
+                 "Existing group detected, use '--force' to remove them"
+             )
+         return tt
+
+     return [clear_out_group(t) for t in txn_types_old if still_valid(t)]
+
+
+ def merge_sets(txn_types_old, txn_types, arg):
+     group = txn_types[0].aliases[0].transaction_group
+
+     validate_group(txn_types, group)
+     txn_types_clean = rem_groups(txn_types_old, group, arg)
+
+     txn_types += txn_types_clean
+     return txn_types
+
+
+ def process_args(api, args):
+     y = tcy.TxnConfigYaml(api.models)
+
+     if args.action == "get":
+
+         def get_success(result):
+             y.dump(
+                 y.TransactionSetConfigurationDataNoLinks(
+                     result.content.transaction_configs, result.content.side_definitions
+                 ),
+                 args.filename,
+                 args.raw,
+             )
+             return None
+
+         return api.call.list_configuration_transaction_types().bind(get_success)
+
+     if args.action == "try":
+         ffs = y.load(args.filename)
+         y.dump(ffs, "{}-try".format(args.filename))
+
+     if args.action == "set":
+
+         def set_success(result):
+             print(y.get_yaml(result.content))
+             return None
+
+         if args.group:
+             txn_types = y.load(args.filename)
+
+             result = api.call.list_configuration_transaction_types()
+
+             if result.right is not None:
+                 txn_types_old = result.right.content
+             else:
+                 raise ValueError("Api call did not return correct result")
+
+             txn_types = y.load_update_str(
+                 y.get_yaml(merge_sets(txn_types_old, txn_types, args.force))
+             )
+         else:
+             txn_types = y.load_update(args.filename)
+
+         # y.dump(ffs, "{}-set".format(args.filename), True)
+         if args.json:
+             print(txn_types)
+             return None
+         else:
+             return api.call.set_configuration_transaction_types(
+                 transaction_set_configuration_data_request=txn_types
+             ).bind(set_success)
+
+
+ # Standalone tool
+ def main(parse=parse):
+     lpt.standard_flow(parse, lse.connect, process_args)
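As with the other lpt tools, parse can be fed an explicit argument list for programmatic use; the YAML filename below is a placeholder:

from finbourne_sdk_utils.lpt import txn_config

# Build the argument namespace for a 'set' that replaces one group's
# aliases, forcing removal of any existing ones
args = txn_config.parse(args=["set", "-f", "txn_types.yaml", "--group", "--force"])

# txn_config.main() wires parse, lse.connect and process_args together
# via lpt.standard_flow when run as a standalone tool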