mcp-bcrp 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_bcrp/__init__.py +1 -1
- mcp_bcrp/_version.py +2 -2
- mcp_bcrp/client.py +21 -0
- mcp_bcrp/search_engine.py +110 -118
- mcp_bcrp/server.py +20 -26
- {mcp_bcrp-0.1.1.dist-info → mcp_bcrp-0.1.3.dist-info}/METADATA +11 -12
- mcp_bcrp-0.1.3.dist-info/RECORD +12 -0
- mcp_bcrp-0.1.1.dist-info/RECORD +0 -12
- {mcp_bcrp-0.1.1.dist-info → mcp_bcrp-0.1.3.dist-info}/WHEEL +0 -0
- {mcp_bcrp-0.1.1.dist-info → mcp_bcrp-0.1.3.dist-info}/entry_points.txt +0 -0
- {mcp_bcrp-0.1.1.dist-info → mcp_bcrp-0.1.3.dist-info}/licenses/LICENSE +0 -0
- {mcp_bcrp-0.1.1.dist-info → mcp_bcrp-0.1.3.dist-info}/top_level.txt +0 -0
mcp_bcrp/__init__.py
CHANGED
mcp_bcrp/_version.py
CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
    commit_id: COMMIT_ID
    __commit_id__: COMMIT_ID

-__version__ = version = '0.1.1'
-__version_tuple__ = version_tuple = (0, 1, 1)
+__version__ = version = '0.1.3'
+__version_tuple__ = version_tuple = (0, 1, 3)

__commit_id__ = commit_id = None
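For reference, the bumped version can be read back from the installed wheel's metadata at runtime; a minimal sketch using only the standard library (assumes the 0.1.3 wheel is installed):

```python
from importlib.metadata import version

# Reads the Version field from the installed mcp_bcrp-0.1.3.dist-info metadata
print(version("mcp-bcrp"))  # -> '0.1.3'
```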
mcp_bcrp/client.py
CHANGED
@@ -154,6 +154,27 @@ class BCRPMetadata:
            mask &= kw_mask
        return self.df[mask].head(limit)

+    def get_series_names(self, codes: List[str]) -> List[str]:
+        """
+        Retrieve original names for a list of series codes.
+
+        Args:
+            codes: List of BCRP series codes.
+
+        Returns:
+            List of names corresponding to the codes.
+            Uses the code itself if name is not found.
+        """
+        if self.df.empty:
+            return codes
+
+        # Standardize columns to search
+        code_col = "Código de serie" if "Código de serie" in self.df.columns else "Codigo de serie"
+        name_col = "Nombre de serie"
+
+        mapping = dict(zip(self.df[code_col], self.df[name_col]))
+        return [mapping.get(code, code) for code in codes]
+
class AsyncBCRPClient:
    """
    Async client for BCRP (Banco Central de Reserva del Perú) Statistical API.
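The new `get_series_names` helper is a plain code→name lookup over the cached metadata frame, falling back to the code itself for unknown entries. A standalone sketch of the same lookup on a toy frame (hypothetical code and name values, not the package's own data):

```python
import pandas as pd
from typing import List

# Toy metadata frame using the column names the diff checks for
df = pd.DataFrame({
    "Código de serie": ["PN01207PM"],               # hypothetical series code
    "Nombre de serie": ["Tipo de cambio - venta"],  # hypothetical series name
})

def get_series_names(df: pd.DataFrame, codes: List[str]) -> List[str]:
    """Mirror of BCRPMetadata.get_series_names: map codes to names, keep unknown codes as-is."""
    if df.empty:
        return codes
    code_col = "Código de serie" if "Código de serie" in df.columns else "Codigo de serie"
    mapping = dict(zip(df[code_col], df["Nombre de serie"]))
    return [mapping.get(code, code) for code in codes]

print(get_series_names(df, ["PN01207PM", "XX00000XX"]))
# ['Tipo de cambio - venta', 'XX00000XX']
```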
mcp_bcrp/search_engine.py
CHANGED
@@ -2,18 +2,18 @@
Deterministic Search Engine for BCRP Series.

Pipeline:
-1. Canonical Normalization (lowercase, remove accents,
-2. Attribute Extraction (currency, horizon, component)
+1. Canonical Normalization (lowercase, remove accents, synonyms)
+2. Attribute Extraction (currency, horizon, component, side)
3. Hard Filters
-4. Fuzzy Scoring with RapidFuzz
-5.
+4. Fuzzy Scoring with RapidFuzz (Token Set Ratio)
+5. Interactive Candidate Resolution
"""

import pandas as pd
import logging
import unicodedata
import re
-from typing import Dict, Any
+from typing import Dict, Any, List

try:
    from rapidfuzz import fuzz
@@ -25,16 +25,24 @@ logger = logging.getLogger("mcp_bcrp")

class SearchEngine:
    """
-
+    Interactive Search Engine for BCRP Series.

-    Implements a pipeline for
-    1. Canonical Normalization
-    2.
-    3. Fuzzy Scoring
-    4.
+    Implements a pipeline for robust series resolution:
+    1. Canonical Normalization with Synonym Support
+    2. Attribute Filtering (Currency, Side)
+    3. Fuzzy Set Scoring
+    4. Multi-candidate Result Generation
    """

-    STOPWORDS = {'de', 'del', 'el', 'la', 'los', 'las', 'y', 'en', 'al', 'con', 'por'}
+    STOPWORDS = {'de', 'del', 'el', 'la', 'los', 'las', 'y', 'en', 'al', 'con', 'por', 'precio', 'valor', 'indicador'}
+
+    # Synonym map for common abbreviations
+    SYNONYMS = {
+        "tc": "tipo cambio",
+        "t.c.": "tipo cambio",
+        "pbi": "producto bruto interno",
+        "internacional": "lme londres Chicago nymex",
+    }

    def __init__(self, metadata_df: pd.DataFrame):
        """
@@ -51,72 +59,60 @@ class SearchEngine:
        Canonical normalization of text.

        Applies: lowercase, accent removal, punctuation removal,
-
-
-        Args:
-            text: Raw input text.
-
-        Returns:
-            Normalized string with clean tokens.
+        synonym expansion, stopword filtering.
        """
        if not isinstance(text, str):
            return ""

        text = text.lower()
+        # Remove accents
        text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8')
+        # Replace punctuation
        text = re.sub(r'[^\w\s]', ' ', text)
+
+        # Apply synonyms (simple replacement)
+        for syn, target in self.SYNONYMS.items():
+            if syn in text.split():
+                text = text.replace(syn, target)
+
        tokens = text.split()
        clean_tokens = [t for t in tokens if t not in self.STOPWORDS]

        return " ".join(clean_tokens)

    def _extract_attributes(self, text_norm: str) -> Dict[str, Any]:
-        """
-        Extract structured attributes from normalized text.
-
-        Args:
-            text_norm: Normalized text string.
-
-        Returns:
-            Dictionary with currency, horizon, component, and scale.
-        """
+        """Extract structured attributes to help disambiguate."""
        attrs = {
            "currency": None,
+            "side": None,  # compra / venta
            "horizon": None,
-            "component": None
-            "scale": None
+            "component": None
        }

        tokens = set(text_norm.split())

-        # Currency
+        # Currency
        if any(t in tokens for t in ['us', 'usd', 'dolares']):
            attrs['currency'] = 'usd'
        elif any(t in tokens for t in ['s', 'pen', 'soles']):
            attrs['currency'] = 'pen'

-        #
-        if "
+        # Side (Critical for FX)
+        if "compra" in tokens:
+            attrs['side'] = 'compra'
+        elif "venta" in tokens:
+            attrs['side'] = 'venta'
+
+        # Horizon
+        if "corto" in tokens:
            attrs['horizon'] = 'corto'
-        elif "largo" in
+        elif "largo" in tokens:
            attrs['horizon'] = 'largo'

-        # Component detection
-        if "activos" in text_norm:
-            attrs['component'] = 'activos'
-        elif "pasivos" in text_norm:
-            attrs['component'] = 'pasivos'
-
-        # Scale detection
-        if "millones" in text_norm:
-            attrs['scale'] = 'millones'
-        elif "miles" in text_norm:
-            attrs['scale'] = 'miles'
-
        return attrs

    def _preprocess_metadata(self):
-        """Pre-calculate normalized
+        """Pre-calculate normalized search corpus."""
        if self.df.empty:
            self.search_corpus = []
            return
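Taken together, `_normalize` and `_extract_attributes` turn a raw query into a clean token string plus a small attribute dict. A standalone sketch of the same steps with abbreviated STOPWORDS/SYNONYMS tables (illustrative only, not the class itself):

```python
import re
import unicodedata

STOPWORDS = {"de", "del", "el", "la", "los", "las", "y", "en", "al", "con", "por"}
SYNONYMS = {"tc": "tipo cambio", "pbi": "producto bruto interno"}

def normalize(text: str) -> str:
    """Lowercase, strip accents and punctuation, expand synonyms, drop stopwords."""
    text = text.lower()
    text = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode("utf-8")
    text = re.sub(r"[^\w\s]", " ", text)          # punctuation -> spaces
    for syn, target in SYNONYMS.items():          # whole-token synonym expansion
        if syn in text.split():
            text = text.replace(syn, target)
    return " ".join(t for t in text.split() if t not in STOPWORDS)

def extract_attributes(text_norm: str) -> dict:
    """Pull out currency and buy/sell side, as _extract_attributes does."""
    tokens = set(text_norm.split())
    attrs = {"currency": None, "side": None}
    if tokens & {"us", "usd", "dolares"}:
        attrs["currency"] = "usd"
    elif tokens & {"s", "pen", "soles"}:
        attrs["currency"] = "pen"
    if "compra" in tokens:
        attrs["side"] = "compra"
    elif "venta" in tokens:
        attrs["side"] = "venta"
    return attrs

q = normalize("TC venta dólares")   # -> 'tipo cambio venta dolares'
print(q, extract_attributes(q))     # {'currency': 'usd', 'side': 'venta'}
```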
@@ -127,16 +123,18 @@
            name_norm = self._normalize(raw_name)
            attrs = self._extract_attributes(name_norm)

+            # Use original code column names if possible
+            code = row.get("Código de serie") or row.get("Codigo de serie")
+
            item = {
                "idx": idx,
-                "codigo_serie":
+                "codigo_serie": code,
                "name_original": raw_name,
                "name_norm": name_norm,
                "tokens": set(name_norm.split()),
                "currency": attrs['currency'],
-                "
-                "
-                "scale": attrs['scale']
+                "side": attrs['side'],
+                "horizon": attrs['horizon']
            }
            processed.append(item)

@@ -144,19 +142,11 @@

    def solve(self, query: str) -> Dict[str, Any]:
        """
-        Resolve query
-
-        Args:
-            query: Search query (e.g., "tipo de cambio USD")
-
-        Returns:
-            Dict with 'codigo_serie' and 'confidence' on success,
-            or 'error' and 'reason' on failure/ambiguity.
+        Resolve query with interactive candidate logic.
        """
        if not self.search_corpus:
            return {"error": "no_match", "reason": "empty_corpus"}

-        # Parse and normalize query
        q_norm = self._normalize(query)
        q_attrs = self._extract_attributes(q_norm)
        q_tokens = set(q_norm.split())
@@ -164,74 +154,76 @@
        if not q_tokens:
            return {"error": "no_match", "reason": "empty_query"}

-
-
-
-
-
-
-
-
-
-
-        candidates = [c for c in candidates if c['component'] == q_attrs['component']]
-
-        if not candidates:
-            return {"error": "no_match", "reason": "filters_eliminated_all"}
-
-        # Score candidates using fuzzy matching
-        scored_candidates = []
-        for c in candidates:
-            score = 0
-            if fuzz:
-                score = fuzz.token_sort_ratio(q_norm, c['name_norm'])
+        # Scoring
+        scored = []
+        for c in self.search_corpus:
+            if not fuzz:
+                # Basic token overlap fallback
+                intersection = len(q_tokens & c['tokens'])
+                score = (intersection / len(q_tokens)) * 100 if q_tokens else 0
+            else:
+                # Token Set Ratio is perfect for finding "query" inside "long technical title"
+                score = fuzz.token_set_ratio(q_norm, c['name_norm'])

-            #
-
-
+            # Boost if specific side (compra/venta) matches
+            if q_attrs['side'] and c['side'] == q_attrs['side']:
+                score += 5
+            elif q_attrs['side'] and c['side'] and c['side'] != q_attrs['side']:
+                score -= 10

-            if
-
-                "
-                "
-                "
-                "missing_query_tokens": q_tokens - c['tokens']
+            if score >= 65:
+                scored.append({
+                    "codigo_serie": c['codigo_serie'],
+                    "name": c['name_original'],
+                    "score": score
                })

-
+        scored.sort(key=lambda x: x['score'], reverse=True)

-        if not
-            return {"error": "no_match", "reason": "
+        if not scored:
+            return {"error": "no_match", "reason": "low_confidence"}

-
+        # Logic for result type
+        top_score = scored[0]['score']

-        #
-
+        # 1. Check for ties or very close matches at the top
+        # If multiple series have top_score, or are very close (within 2 pts), return candidates.
+        high_tier = [s for s in scored if s['score'] >= (top_score - 2)]
+
+        if len(high_tier) > 1 and top_score < 100:
+            # Ambiguity if multiple high matches, unless one is perfect 100 and there are no other 100s
+            pass  # fall through to candidates logic
+        elif len(high_tier) == 1 and top_score >= 85:
+            # Single clear winner with good score
            return {
-                "codigo_serie":
-                "confidence": round(
-                "name":
+                "codigo_serie": high_tier[0]['codigo_serie'],
+                "confidence": round(high_tier[0]['score'] / 100.0, 2),
+                "name": high_tier[0]['name']
            }
-
-        # Multiple matches: check for ambiguity
-        candidates_top_tier = [
-            x for x in scored_candidates
-            if x['score'] >= (top['score'] - 5)
-        ]

-
-
-
-
-
-                "
-                "
-                "reason": "mixed_attributes_in_top_results"
+        # If top_score is 100, but there are multiple 100s, it's ambiguous
+        top_tier_100 = [s for s in scored if s['score'] == 100]
+        if len(top_tier_100) == 1:
+            return {
+                "codigo_serie": top_tier_100[0]['codigo_serie'],
+                "confidence": 1.0,
+                "name": top_tier_100[0]['name']
            }
-
-        #
+
+        # 2. Interactive Candidates
+        # Return top 5 matches if confidence is mixed or tied
+        candidates = []
+        seen_codes = set()
+        for s in scored[:5]:
+            if s['codigo_serie'] not in seen_codes:
+                candidates.append({
+                    "codigo": s['codigo_serie'],
+                    "nombre": s['name']
+                })
+                seen_codes.add(s['codigo_serie'])

        return {
-            "
-            "
-            "
+            "error": "ambiguedad",
+            "reason": "multiple_candidates",
+            "candidates": candidates
        }
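The switch from `token_sort_ratio` to `token_set_ratio` is the key scoring change: BCRP titles are long and queries are short, and the set-based ratio ignores title tokens the query never mentioned, so a query fully contained in a long title still scores near 100. A quick comparison (requires `rapidfuzz`; the strings are made-up examples):

```python
from rapidfuzz import fuzz

query = "tipo cambio venta"
title = "tipo cambio nominal bancario promedio venta s por us dolar"  # long, title-like string

# Sort-based ratio penalizes all the extra tokens in the title
print(fuzz.token_sort_ratio(query, title))   # noticeably below 100

# Set-based ratio: every query token appears in the title, so the score is ~100
print(fuzz.token_set_ratio(query, title))
```

The +5/−10 side adjustment then separates otherwise near-identical "compra"/"venta" series before the 65-point cutoff is applied.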
mcp_bcrp/server.py
CHANGED
@@ -153,29 +153,17 @@ async def get_table(
        if df.empty:
            return "No data found."

-        # 2.
-
-
-
-        # Or is it Index? usebcrp 'variation=1' implies monthly variation.
-
-        # Ensure time is datetime
-        # BCRP returns 'Mmm.YY' or similar sometimes. helper parsing might be needed.
-        # But for now, let's just return the raw data properly formatted,
-        # Maybe adding a simple pct_change if it's numeric.
-
-        # For simplicity and reliability in this refactor, we will return the raw values
-        # but structured neatly. Re-implementing full 'table' logic from usebcrp might be overkill
-        # if the user just wants the data.
-
-        # However, to be helpful, let's try to set names if provided
-        if names:
-            # Map codes to names
-            # columns are 'time' + codes.
-            mapping = {code: name for code, name in zip(series_codes, names)}
-            df.rename(columns=mapping, inplace=True)
+        # 2. Resolve Names if not provided
+        if not names:
+            await metadata_client.load()
+            names = metadata_client.get_series_names(series_codes)

-
+        # 3. Rename columns
+        mapping = {code: name for code, name in zip(series_codes, names)}
+        df.rename(columns=mapping, inplace=True)
+
+        return df.to_json(orient='records', date_format='iso', indent=2)
+

    except Exception as e:
        return f"Table generation failed: {str(e)}"
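With names resolved up front, `get_table` reduces to a column rename followed by JSON serialization. A minimal pandas-only sketch of that final step (toy data; the code/name pair is hypothetical):

```python
import pandas as pd

series_codes = ["PN01207PM"]                 # hypothetical BCRP code
names = ["Tipo de cambio - venta"]           # what get_series_names() might return

df = pd.DataFrame({"time": ["Ene.2024", "Feb.2024"], "PN01207PM": [3.71, 3.82]})

# Same rename-then-serialize pattern as the refactored tool
mapping = {code: name for code, name in zip(series_codes, names)}
df = df.rename(columns=mapping)
print(df.to_json(orient="records", indent=2))
```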
@@ -234,12 +222,18 @@ async def plot_chart(
        df['time'] = df['time'].apply(parse_spanish_date)
        df = df.set_index('time')

-        # 4.
+        # 4. Resolve Names if not provided
+        if not names:
+            await metadata_client.load()
+            names = metadata_client.get_series_names(series_codes)
+
+        # 5. Plot each series
        colors = ['#1a5fb4', '#e01b24', '#33d17a', '#ff7800', '#9141ac']
        for idx, code in enumerate(series_codes):
-            if code in df.columns
-
-
+            col_name = code if code in df.columns else (names[idx] if names and names[idx] in df.columns else None)
+            if col_name:
+                series = df[col_name].dropna()
+                label = names[idx] if names and idx < len(names) else col_name
                color = colors[idx % len(colors)]
                ax.plot(series.index, series.values, linewidth=2.5,
                        label=label, color=color)
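The plotting loop now tolerates frames whose columns were already renamed to human-readable names: it tries the raw code first and falls back to the resolved name. A small standalone sketch of that fallback (toy frame, hypothetical names, no matplotlib needed):

```python
import pandas as pd

series_codes = ["PN01207PM"]             # hypothetical code
names = ["Tipo de cambio - venta"]       # columns may already carry these names

df = pd.DataFrame({"Tipo de cambio - venta": [3.71, 3.82, 3.79]})

for idx, code in enumerate(series_codes):
    # Prefer the raw code column, fall back to the resolved name if present
    col_name = code if code in df.columns else (names[idx] if names and names[idx] in df.columns else None)
    if col_name:
        series = df[col_name].dropna()
        label = names[idx] if names and idx < len(names) else col_name
        print(label, list(series.values))  # stand-in for ax.plot(series.index, series.values, ...)
```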
{mcp_bcrp-0.1.1.dist-info → mcp_bcrp-0.1.3.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
Metadata-Version: 2.4
Name: mcp-bcrp
-Version: 0.1.1
+Version: 0.1.3
Summary: MCP Server for Banco Central de Reserva del Perú (BCRP) Statistical API
Author-email: Maykol Medrano <mmedrano2@uc.cl>
License: MIT
@@ -14,13 +14,14 @@ Classifier: Intended Audience :: Financial and Insurance Industry
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Office/Business :: Financial
Classifier: Topic :: Scientific/Engineering :: Information Analysis
-Requires-Python: >=3.
+Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: fastmcp>=0.1.0
@@ -36,16 +37,14 @@ Dynamic: license-file
# mcp-bcrp

[](https://www.python.org/downloads/)
-[](https://github.com/psf/black)
-
-
-
+[](https://github.com/MaykolMedrano/mcp_bcrp)
+[](https://pypi.org/project/mcp-bcrp/)
+[](https://opensource.org/licenses/MIT)

-
-
-**MCP Server for Banco Central de Reserva del Peru (BCRP) Statistical API**
+[-green?style=for-the-badge&logo=jupyter)](https://github.com/MaykolMedrano/mcp_bcrp/blob/main/examples/Guia_Usuario_BCRP.ipynb)
+[](https://colab.research.google.com/github/MaykolMedrano/mcp_bcrp/blob/main/examples/Guia_Usuario_BCRP.ipynb)

-
+MCP Server and Python library for the **Banco Central de Reserva del Perú (BCRP)** Statistical API. Access over 5,000 macroeconomic indicators directly from your AI agent or Python environment.

---

@@ -122,8 +121,8 @@ pip install -e .
### With Optional Dependencies

```bash
-pip install mcp-bcrp[charts] # Include matplotlib for chart generation
-pip install mcp-bcrp[dev] # Include development dependencies
+pip install "mcp-bcrp[charts]" # Include matplotlib for chart generation
+pip install "mcp-bcrp[dev]" # Include development dependencies
```

---
mcp_bcrp-0.1.3.dist-info/RECORD
ADDED

@@ -0,0 +1,12 @@
+mcp_bcrp/__init__.py,sha256=tDnFxTiEWd9EF_GFYjpFywIVsg3jOTHv8cVr0l5FzUM,371
+mcp_bcrp/__main__.py,sha256=Y12G_44op5TA9oV_KphVmLgGV8GJob5Nxig7yPsib9c,142
+mcp_bcrp/_version.py,sha256=q5nF98G8SoVeJqaknL0xdyxtv0egsqb0fK06_84Izu8,704
+mcp_bcrp/client.py,sha256=8i5nNHecSHh8wE9a7F7XkV0kNfYOImk_JcUNKvVuJSs,12488
+mcp_bcrp/search_engine.py,sha256=SaopNOpRK7k0DN3-kLEOV8I1O_3EMX-Iaja-Qfg23pE,7681
+mcp_bcrp/server.py,sha256=_GUn8xdNB8SfRvZTgJvUsyZfnrI8Z_kUin34XQ1nfDU,11931
+mcp_bcrp-0.1.3.dist-info/licenses/LICENSE,sha256=Btzdu2kIoMbdSp6OyCLupB1aRgpTCJ_szMimgEnpkkE,1056
+mcp_bcrp-0.1.3.dist-info/METADATA,sha256=jdwcGyZOAv_f5gSfjgR2i1jx95AGIcSxvL4qV7K4tWg,12861
+mcp_bcrp-0.1.3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+mcp_bcrp-0.1.3.dist-info/entry_points.txt,sha256=HXAROyWwyye03ILbKKY_wx83ddOAQQD6KI2sDy-yo6E,53
+mcp_bcrp-0.1.3.dist-info/top_level.txt,sha256=AgccxOm73j9R2xAkb1azi9QQQeQjAiTkD1q-1k_625U,9
+mcp_bcrp-0.1.3.dist-info/RECORD,,
mcp_bcrp-0.1.1.dist-info/RECORD
DELETED
@@ -1,12 +0,0 @@
-mcp_bcrp/__init__.py,sha256=4DGcjFkG5LnJ82vAbTS_Zx0VR52FZg56WIZ9H_UXJQE,371
-mcp_bcrp/__main__.py,sha256=Y12G_44op5TA9oV_KphVmLgGV8GJob5Nxig7yPsib9c,142
-mcp_bcrp/_version.py,sha256=m8HxkqoKGw_wAJtc4ZokpJKNLXqp4zwnNhbnfDtro7w,704
-mcp_bcrp/client.py,sha256=yeI2m9v6ThsoTcNw4rud1p-CjKeCSUDgWVGtT3e9Pqo,11754
-mcp_bcrp/search_engine.py,sha256=5vktoUgqJHfPyHZ9qYAuqxgSqioiGunjwdLE9Tfp7jo,7670
-mcp_bcrp/server.py,sha256=QhnsAt-Kf7kTnaO5kzcELJSv0ibdptHWTyxVBeegKMU,12435
-mcp_bcrp-0.1.1.dist-info/licenses/LICENSE,sha256=Btzdu2kIoMbdSp6OyCLupB1aRgpTCJ_szMimgEnpkkE,1056
-mcp_bcrp-0.1.1.dist-info/METADATA,sha256=Fq54BswjdBdzaVr28DgtJM1YTQliKBbBughdbhVuRjE,12679
-mcp_bcrp-0.1.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-mcp_bcrp-0.1.1.dist-info/entry_points.txt,sha256=HXAROyWwyye03ILbKKY_wx83ddOAQQD6KI2sDy-yo6E,53
-mcp_bcrp-0.1.1.dist-info/top_level.txt,sha256=AgccxOm73j9R2xAkb1azi9QQQeQjAiTkD1q-1k_625U,9
-mcp_bcrp-0.1.1.dist-info/RECORD,,
{mcp_bcrp-0.1.1.dist-info → mcp_bcrp-0.1.3.dist-info}/WHEEL
File without changes

{mcp_bcrp-0.1.1.dist-info → mcp_bcrp-0.1.3.dist-info}/entry_points.txt
File without changes

{mcp_bcrp-0.1.1.dist-info → mcp_bcrp-0.1.3.dist-info}/licenses/LICENSE
File without changes

{mcp_bcrp-0.1.1.dist-info → mcp_bcrp-0.1.3.dist-info}/top_level.txt
File without changes