macrotap 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- macrotap-0.1.0/.gitignore +13 -0
- macrotap-0.1.0/LICENSE +21 -0
- macrotap-0.1.0/PKG-INFO +23 -0
- macrotap-0.1.0/README.md +4 -0
- macrotap-0.1.0/pyproject.toml +30 -0
- macrotap-0.1.0/src/macrotap/__init__.py +15 -0
- macrotap-0.1.0/src/macrotap/fred/__init__.py +5 -0
- macrotap-0.1.0/src/macrotap/fred/main.py +154 -0
- macrotap-0.1.0/src/macrotap/imf/__init__.py +4 -0
- macrotap-0.1.0/src/macrotap/imf/_imf_requests.py +178 -0
- macrotap-0.1.0/src/macrotap/imf/_imf_response.py +102 -0
- macrotap-0.1.0/src/macrotap/imf/main.py +63 -0
macrotap-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Iris Solutions Team
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
macrotap-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: macrotap
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Quick and easy access to macroeconomic data from public providers
|
|
5
|
+
Project-URL: Homepage, https://github.com/iris-solutions-team/macrotap
|
|
6
|
+
Project-URL: Documentation, https://iris-solutions-team.github.io/macrotap-pages
|
|
7
|
+
Project-URL: Bug Tracker, https://github.com/iris-solutions-team/macrotap/issues
|
|
8
|
+
Author-email: Jaromir Benes <jaromir.benes@gmail.com>
|
|
9
|
+
License: MIT
|
|
10
|
+
License-File: LICENSE
|
|
11
|
+
Keywords: API,data,finance,macroeconomics,public
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Operating System :: OS Independent
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Requires-Python: >=3.11
|
|
16
|
+
Requires-Dist: datapie>=0.3.0
|
|
17
|
+
Requires-Dist: requests>=2.32.5
|
|
18
|
+
Description-Content-Type: text/markdown
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# MacroTap
|
|
22
|
+
|
|
23
|
+
|
macrotap-0.1.0/pyproject.toml
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = [ "hatchling",]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "macrotap"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Quick and easy access to macroeconomic data from public providers"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.11"
|
|
11
|
+
classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent",]
|
|
12
|
+
keywords = [ "macroeconomics", "finance", "data", "API", "public",]
|
|
13
|
+
dependencies = [ "datapie >= 0.3.0", "requests >= 2.32.5",]
|
|
14
|
+
[[project.authors]]
|
|
15
|
+
name = "Jaromir Benes"
|
|
16
|
+
email = "jaromir.benes@gmail.com"
|
|
17
|
+
|
|
18
|
+
[project.license]
|
|
19
|
+
text = "MIT"
|
|
20
|
+
|
|
21
|
+
[project.urls]
|
|
22
|
+
Homepage = "https://github.com/iris-solutions-team/macrotap"
|
|
23
|
+
Documentation = "https://iris-solutions-team.github.io/macrotap-pages"
|
|
24
|
+
"Bug Tracker" = "https://github.com/iris-solutions-team/macrotap/issues"
|
|
25
|
+
|
|
26
|
+
[tool.hatch.build.targets.wheel]
|
|
27
|
+
packages = [ "src/macrotap",]
|
|
28
|
+
|
|
29
|
+
[tool.hatch.build.targets.sdist]
|
|
30
|
+
include = [ "src/macrotap", "README.md", "LICENSE",]
|
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
"""
|
|
2
|
+
"""
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
#[
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
# Typing imports
|
|
10
|
+
from typing import Iterable, Any
|
|
11
|
+
|
|
12
|
+
# Third-party imports
|
|
13
|
+
import requests as _rq
|
|
14
|
+
import datapie as _ap
|
|
15
|
+
|
|
16
|
+
#]
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
# Map FRED "frequency_short" letters (compared case-insensitively) to
# datapie Frequency values.
_FRED_FREQ_MAP = {
    "A".casefold(): _ap.Frequency.YEARLY,
    "Q".casefold(): _ap.Frequency.QUARTERLY,
    "M".casefold(): _ap.Frequency.MONTHLY,
    "D".casefold(): _ap.Frequency.DAILY,
}


# FRED API endpoints: series metadata and series observations.
_BASE_URL = r"https://api.stlouisfed.org/fred/series"
_OBSERVATIONS_URL = _BASE_URL + r"/observations"
# NOTE(review): the API key is hard-coded and shipped with the package;
# consider reading it from an environment variable instead.
_API_KEY = r"951f01181da86ccb9045ce8716f82f43"
# Query string shared by both endpoints.
_PARAMETERS = r"?series_id={series_id}&api_key={api_key}&file_type=json"
# FRED encodes a missing observation as a single period character.
_MISSING_VALUE = r"."
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def from_fred(
    request_strings: str | Iterable[str],
    return_info: bool = False,
) -> _ap.Databox:
    r"""
................................................................................

==Download time series from FRED (St Louis Fed Database)==

This function downloads time series data from the FRED database using the
FRED API. The API key is stored in the module-level `_API_KEY` constant.
The data for the requested series IDs is returned as a `Databox` object.

    db = from_fred(
        request_strings,
        return_info=False,
    )


### Input arguments ###

???+ input "request_strings"
    A single series ID, or an iterable of series IDs, to download from
    FRED. Each series ID is also used as the name of the corresponding
    time series in the output `Databox`.

???+ input "return_info"
    If `True`, the function additionally returns a list with one info dict
    per downloaded series (request URLs and raw API responses).


### Returns ###

???+ returns "db"
    A `Databox` object containing the downloaded time series data.

???+ returns "info"
    Only when `return_info=True`: a list of dicts with the request URLs
    and the raw JSON responses for each downloaded series.

................................................................................
    """
    #[
    if isinstance(request_strings, str):
        request_strings = (request_strings, )
    #
    output_db = _ap.Databox()
    info = []
    for i in request_strings:
        # _get_series returns (Series, info_dict); info is kept only on request
        output_db[i], current_info, = _get_series(i, )
        if return_info:
            info.append(current_info)
    #
    if return_info:
        return output_db, info,
    #
    return output_db
    #]
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def _request_from_series_ids(series_ids: Iterable[str], ):
|
|
89
|
+
"""
|
|
90
|
+
"""
|
|
91
|
+
#[
|
|
92
|
+
return {
|
|
93
|
+
series_id.strip(): series_id.strip()
|
|
94
|
+
for series_id in series_ids
|
|
95
|
+
}
|
|
96
|
+
#]
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def _get_series(series_id: str, ) -> tuple[_ap.Series, dict[str, Any]]:
    """
    Download one FRED series (metadata and observations) and build a
    datapie Series from it. Returns the Series together with an info dict
    holding the request URLs and the raw JSON responses.
    """
    #[
    urls = _get_series_urls(series_id, )
    meta_json = _rq.get(urls["meta_url"], ).json()
    data_json = _rq.get(urls["data_url"], ).json()
    #
    response_ok = "seriess" in meta_json and "observations" in data_json
    if not response_ok:
        raise ValueError(f"Invalid response from FRED API for {series_id}", )
    #
    info = {
        "meta_url": urls["meta_url"],
        "data_url": urls["data_url"],
        "meta_response": meta_json,
        "data_response": data_json,
    }
    #
    iso_dates, str_values = _get_dates_and_values_from_data_response(data_json, )
    periods = _ap.periods_from_iso_strings(
        iso_dates,
        frequency=_get_freq_from_meta_response(meta_json, ),
    )
    # FRED marks missing observations with _MISSING_VALUE; map those to None
    numeric_values = tuple(
        None if x == _MISSING_VALUE else float(x)
        for x in str_values
    )
    #
    return _ap.Series(periods=periods, values=numeric_values, ), info,
    #]
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def _get_freq_from_meta_response(meta_response: dict, ):
    """
    Translate the "frequency_short" letter of the first series record in
    the metadata response into a datapie Frequency via _FRED_FREQ_MAP.
    """
    #[
    first_record = meta_response["seriess"][0]
    return _FRED_FREQ_MAP[first_record["frequency_short"].casefold()]
    #]
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def _get_dates_and_values_from_data_response(data_response: dict, ):
|
|
134
|
+
"""
|
|
135
|
+
"""
|
|
136
|
+
#[
|
|
137
|
+
date_value_pairs = (
|
|
138
|
+
(obs["date"], obs["value"], )
|
|
139
|
+
for obs in data_response["observations"]
|
|
140
|
+
)
|
|
141
|
+
return zip(*date_value_pairs, )
|
|
142
|
+
#]
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def _get_series_urls(series_id: str, ):
    """
    Build the metadata and observations request URLs for one series ID.
    """
    #[
    query = _PARAMETERS.format(series_id=series_id, api_key=_API_KEY, )
    return {
        "meta_url": _BASE_URL + query,
        "data_url": _OBSERVATIONS_URL + query,
    }
    #]
|
|
154
|
+
|
|
@@ -0,0 +1,178 @@
|
|
|
1
|
+
|
|
2
|
+
#[
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
# Typing imports
|
|
7
|
+
from typing import Any, Callable
|
|
8
|
+
|
|
9
|
+
# Standard library imports
|
|
10
|
+
import itertools as _it
|
|
11
|
+
import dataclasses as _dc
|
|
12
|
+
import time as _ti
|
|
13
|
+
|
|
14
|
+
# Typing imports
|
|
15
|
+
from collections.abc import Iterable
|
|
16
|
+
|
|
17
|
+
#]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
# Separator between the dataflow ID and the series key in a request string.
DATA_FLOW_SEPARATOR = "/"

# Separator between individual dimensions inside a series key.
DIM_SEPARATOR = "."

# IMF SDMX 2.1 data endpoint template.
# NOTE(review): "?TIME_PERIOD&dataonly" presumably requests observations
# keyed by time period without attributes — confirm against IMF SDMX docs.
URL_TEMPLATE = (
    r"https://api.imf.org/external/sdmx/2.1/data/"
    r"{data_flow}"
    r"{data_flow_separator}"
    r"{series_key}"
    r"?TIME_PERIOD&dataonly"
)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def build_request_urls(
    request_strings: Iterable[str],
    **kwargs,
) -> tuple[str, ...]:
    r"""
    Build one request URL per request string (no grouping).

    Extra keyword arguments (e.g. group_by) are accepted and ignored so the
    function is call-compatible with build_grouped_request_urls.
    """
    parsed_requests = _get_requests_from_strings(request_strings, )
    urls = []
    for request in parsed_requests:
        urls.append(request.url)
    return tuple(urls)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def build_grouped_request_urls(
    request_strings: Iterable[str],
    group_by: Callable | None = None,
) -> tuple[str, ...]:
    r"""
    Build one request URL per group of requests. Requests are grouped by
    their data flow (or by a custom group_by key), and each group's
    series-key dimensions are combined into a single URL.
    """
    parsed_requests = _get_requests_from_strings(request_strings, )
    groups = _group_requests(parsed_requests, group_by=group_by, )
    return tuple(map(_build_url_for_grouped_requests, groups))
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _get_requests_from_strings(
    request_strings: str | Iterable[str],
) -> tuple[_Request, ...]:
    r"""
    Parse request strings into _Request objects; a single string is treated
    as a one-element collection.
    """
    #[
    if isinstance(request_strings, str):
        request_strings = (request_strings, )
    return tuple(map(_Request.from_request_string, request_strings))
    #]
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def _group_requests(
|
|
80
|
+
requests: tuple[_Request],
|
|
81
|
+
group_by: Callable | None = None,
|
|
82
|
+
) -> tuple[tuple[_Request, ...], ...]:
|
|
83
|
+
|
|
84
|
+
if group_by is None:
|
|
85
|
+
group_by = _group_by
|
|
86
|
+
|
|
87
|
+
invalid = next((i for i in requests if "+" in i.series_key), None)
|
|
88
|
+
if invalid:
|
|
89
|
+
raise ValueError(
|
|
90
|
+
f"Invalid series key '{invalid}' found in individual requests. "
|
|
91
|
+
"Use 'build_request_urls' with 'group_requests=False'."
|
|
92
|
+
)
|
|
93
|
+
requests = sorted(requests, key=group_by, )
|
|
94
|
+
grouped_requests = tuple(
|
|
95
|
+
tuple(group)
|
|
96
|
+
for _, group in _it.groupby(requests, key=group_by, )
|
|
97
|
+
)
|
|
98
|
+
return grouped_requests
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def _group_by(request: _Request, ) -> Any:
|
|
102
|
+
return request.data_flow
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def _build_url_for_grouped_requests(
    grouped_requests: tuple[_Request, ...],
) -> str:
    r"""
    Combine the series keys of a group of same-dataflow requests into a
    single URL, joining the distinct values of each dimension with "+".
    """
    #[
    data_flow = grouped_requests[0].data_flow
    dim_lists = tuple(
        request.series_key_dims
        for request in grouped_requests
    )
    # dict.fromkeys deduplicates while preserving first-seen order, so the
    # resulting URL is deterministic (joining a set would have arbitrary
    # iteration order)
    combined_dims = (
        "+".join(dict.fromkeys(dim_group))
        for dim_group in zip(*dim_lists)
    )
    series_key = _Request._DIM_SEPARATOR.join(combined_dims, )
    url = _Request._URL_TEMPLATE.format(
        data_flow=data_flow,
        data_flow_separator=_Request._DATA_FLOW_SEPARATOR,
        series_key=series_key,
    )
    return url
    #]
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
class _Request:
    #[
    r"""
    One parsed IMF data request: a dataflow ID plus the individual
    dimensions of a series key.
    """

    __slots__ = (
        "data_flow",
        "series_key_dims",
    )

    _DATA_FLOW_SEPARATOR = DATA_FLOW_SEPARATOR

    _DIM_SEPARATOR = DIM_SEPARATOR

    _URL_TEMPLATE = URL_TEMPLATE

    def __init__(
        self,
        data_flow: str = "",
        series_key_dims: Iterable[str] = (),
    ) -> None:
        self.data_flow = data_flow
        self.series_key_dims = tuple(series_key_dims)

    @classmethod
    def from_request_string(cls, request_string: str) -> _Request:
        """Parse a 'DATAFLOW/DIM1.DIM2...' string into a _Request."""
        data_flow, series_key = request_string.split(cls._DATA_FLOW_SEPARATOR, )
        dims = series_key.split(cls._DIM_SEPARATOR, )
        return cls(
            data_flow=data_flow,
            series_key_dims=dims,
        )

    @property
    def series_key(self, ) -> str:
        """The dimension values joined back into a series key."""
        return self._DIM_SEPARATOR.join(self.series_key_dims, )

    @property
    def url(self, ) -> str:
        """The full request URL for this single request."""
        return self._URL_TEMPLATE.format(
            data_flow=self.data_flow,
            data_flow_separator=self._DATA_FLOW_SEPARATOR,
            series_key=self.series_key,
        )

    @property
    def num_dims(self, ) -> int:
        """Number of dimensions in the series key."""
        return len(self.series_key_dims)

    #]
|
|
178
|
+
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
|
|
2
|
+
#[
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
# Standard library imports
|
|
7
|
+
import json
|
|
8
|
+
|
|
9
|
+
# Third-party imports
|
|
10
|
+
import datapie as ap
|
|
11
|
+
|
|
12
|
+
#]
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Separator joining dimension value IDs into the output series name.
SERIES_KEY_DELIMITER = "."
# Separator joining dimension value names into the series description.
DESCRIPTION_DELIMITER = " | "
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def parse_response_json(response_json: dict, ) -> ap.Databox:
    """
    Convert one IMF SDMX JSON response into a Databox of time series.

    The response must have exactly one observation dimension, TIME_PERIOD.
    Series records are keyed by numeric pointer strings like "0:1:2" that
    index into the series-key dimension structure.
    """
    # Dimension metadata and the raw series records
    observation_dims = response_json["structure"]["dimensions"]["observation"]
    series_key_dims = response_json["structure"]["dimensions"]["series"]
    series_data = response_json["dataSets"][0]["series"]

    if len(observation_dims) != 1 or observation_dims[0]["id"] != "TIME_PERIOD":
        raise ValueError("Unexpected observation dimensions structure")

    # Map each observation index (as a string) to a datapie Period
    time_periods_info = observation_dims[0]["values"]
    time_periods_sdmx = tuple(i["id"] for i in time_periods_info)
    time_periods_sdmx = tuple(_clean_sdmx_string(i) for i in time_periods_sdmx)
    time_periods = {
        str(i): ap.Period.from_sdmx_string(j)
        for i, j in enumerate(time_periods_sdmx, )
    }

    def _extract_series_key_dim_value(dim:int, pointer:int) -> str:
        # Dimension value ID (the code used in the series key)
        return series_key_dims[dim]["values"][pointer]["id"]

    def _extract_series_key_dim_info(dim:int, pointer:int) -> str:
        # Human-readable name of the dimension value
        return series_key_dims[dim]["values"][pointer]["name"]

    def _series_key_from_pointer(pointers: tuple[int, ...], ) -> str:
        r"""
        Join the dimension value IDs into the output series name.
        """
        series_key_parts = tuple(
            _extract_series_key_dim_value(i, p, )
            for i, p, in enumerate(pointers)
        )
        return SERIES_KEY_DELIMITER.join(series_key_parts, )

    def _description_from_pointer(pointers: tuple[int, ...], ) -> str:
        # Join the dimension value names into the series description
        description_parts = tuple(
            _extract_series_key_dim_info(i, p, )
            for i, p, in enumerate(pointers)
        )
        return DESCRIPTION_DELIMITER.join(description_parts, )

    output_db = ap.Databox()

    for series_key, series_record, in series_data.items():
        # Resolve the numeric key ("0:1:2") into a readable key and description
        pointers = _pointers_from_numeric_series_key(series_key, )
        series_key = _series_key_from_pointer(pointers, )
        description = _description_from_pointer(pointers, )
        #
        # Only periods that actually have an observation are kept
        series_periods = tuple(
            time_periods[i]
            for i in series_record["observations"].keys()
        )
        #
        # Each observation record is indexed at [0] for its value; None
        # marks a missing observation
        series_values = tuple(
            float(i[0]) if i[0] is not None else None
            for i in series_record["observations"].values()
        )
        #
        time_series = ap.Series(
            periods=series_periods,
            values=series_values,
            description=description,
        )
        output_db[series_key] = time_series

    return output_db
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def _clean_sdmx_string(s: str, ) -> str:
|
|
86
|
+
r"""
|
|
87
|
+
Dates in response do not conform to ISO 8601 due to use of "-M" for months
|
|
88
|
+
"""
|
|
89
|
+
s = s.replace("-M", "-")
|
|
90
|
+
return s
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def _pointers_from_numeric_series_key(numeric_series_key: str) -> tuple[str, str]:
|
|
94
|
+
r"""
|
|
95
|
+
Convert '0:1:2' to (0, 1, 2, ) where each number is an int pointer to
|
|
96
|
+
the dimension value in the series key dimension structure
|
|
97
|
+
"""
|
|
98
|
+
return tuple(
|
|
99
|
+
int(i)
|
|
100
|
+
for i in numeric_series_key.split(":")
|
|
101
|
+
)
|
|
102
|
+
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
|
|
2
|
+
#[

from __future__ import annotations

# Typing imports
from collections.abc import Iterable
from typing import Callable

# Third-party imports
import datapie as _ap
import requests as _rq

# Local imports
from ._imf_requests import build_request_urls, build_grouped_request_urls
from ._imf_response import parse_response_json

#]
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
# Dispatch on the `group_requests` flag of `from_imf`.
REQUEST_DISPATCH = {
    True: build_grouped_request_urls,
    False: build_request_urls,
}

# Ask the IMF API for JSON responses.
_HEADERS = {"accept": "application/json", }
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def from_imf(
    request_strings: str | Iterable[str],
    group_requests: bool = True,
    group_by: Callable | None = None,
    merge_strategy: str = "error",
    return_info: bool = False,
) -> _ap.Databox:
    r"""
    Download time series from the IMF SDMX API into a Databox.

    request_strings: one request string, or an iterable of request strings,
        each of the form "DATAFLOW/DIM1.DIM2...". Duplicates are removed.
    group_requests: when True, requests sharing a data flow are combined
        into a single URL; when False, one URL per request string.
    group_by: optional custom grouping key (used only when grouping).
    merge_strategy: strategy passed to Databox.merge when combining the
        individual responses.
    return_info: when True, also return a list of dicts with the request
        URLs and the raw JSON responses.
    """
    if isinstance(request_strings, str):
        request_strings = (request_strings, )
    # Deduplicate while preserving input order; tuple(set(...)) would make
    # the request (and hence URL) order nondeterministic
    request_strings = tuple(dict.fromkeys(request_strings, ))

    build_request_urls = REQUEST_DISPATCH[group_requests]
    urls = build_request_urls(request_strings, group_by=group_by, )

    output_db = _ap.Databox()
    info = []

    for url in urls:
        response = _rq.get(url, headers=_HEADERS, )
        response_json = response.json()
        response_db = parse_response_json(response_json, )
        output_db.merge(
            response_db,
            strategy=merge_strategy,
        )
        if return_info:
            # Reuse the already-parsed JSON instead of re-parsing the body
            info.append({
                "url": url,
                "response": response_json,
            })

    if return_info:
        return output_db, info,

    return output_db
|
|
63
|
+
|