yfin-mcp 0.2.4__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yfin_mcp/__init__.py +1 -0
- yfin_mcp/__main__.py +4 -0
- yfin_mcp/cache_manager.py +150 -0
- yfin_mcp/pagination_utils.py +285 -0
- yfin_mcp/server.py +566 -0
- {yfin_mcp-0.2.4.dist-info → yfin_mcp-0.2.5.dist-info}/METADATA +3 -3
- yfin_mcp-0.2.5.dist-info/RECORD +11 -0
- yfin_mcp-0.2.5.dist-info/entry_points.txt +2 -0
- yfin_mcp-0.2.5.dist-info/top_level.txt +1 -0
- yfin_mcp-0.2.4.dist-info/RECORD +0 -6
- yfin_mcp-0.2.4.dist-info/entry_points.txt +0 -2
- yfin_mcp-0.2.4.dist-info/top_level.txt +0 -1
- {yfin_mcp-0.2.4.dist-info → yfin_mcp-0.2.5.dist-info}/WHEEL +0 -0
- {yfin_mcp-0.2.4.dist-info → yfin_mcp-0.2.5.dist-info}/licenses/LICENSE +0 -0
yfin_mcp/__init__.py
ADDED
@@ -0,0 +1 @@
+# yfin_mcp package
yfin_mcp/__main__.py
ADDED
yfin_mcp/cache_manager.py
ADDED
@@ -0,0 +1,150 @@
+"""
+Thread-safe LRU cache manager with TTL support for Yahoo Finance MCP server.
+"""
+
+import threading
+import time
+from collections import OrderedDict
+from typing import Any, Callable, Optional, Tuple
+
+
+class CacheManager:
+    """Thread-safe LRU cache with TTL (Time To Live) support."""
+
+    def __init__(self, max_size: int = 100):
+        """
+        Initialize cache manager.
+
+        Args:
+            max_size: Maximum number of entries in cache (LRU eviction)
+        """
+        self._cache: OrderedDict[str, Tuple[Any, float, float]] = OrderedDict()
+        self._max_size = max_size
+        self._lock = threading.Lock()
+        self._hits = 0
+        self._misses = 0
+
+    def get(self, key: str) -> Optional[Tuple[Any, float]]:
+        """
+        Get value from cache if it exists and hasn't expired.
+
+        Args:
+            key: Cache key
+
+        Returns:
+            Tuple of (value, age_seconds) if found and valid, None otherwise
+        """
+        with self._lock:
+            if key not in self._cache:
+                self._misses += 1
+                return None
+
+            value, timestamp, ttl = self._cache[key]
+            age = time.time() - timestamp
+
+            # Check if expired
+            if age > ttl:
+                del self._cache[key]
+                self._misses += 1
+                return None
+
+            # Move to end (most recently used)
+            self._cache.move_to_end(key)
+            self._hits += 1
+            return (value, age)
+
+    def set(self, key: str, value: Any, ttl_seconds: float):
+        """
+        Set value in cache with TTL.
+
+        Args:
+            key: Cache key
+            value: Value to cache
+            ttl_seconds: Time to live in seconds
+        """
+        with self._lock:
+            # Remove oldest if at capacity
+            if len(self._cache) >= self._max_size and key not in self._cache:
+                self._cache.popitem(last=False)
+
+            self._cache[key] = (value, time.time(), ttl_seconds)
+            self._cache.move_to_end(key)
+
+    def get_or_set(
+        self,
+        key: str,
+        factory_func: Callable[[], Any],
+        ttl_seconds: float
+    ) -> Tuple[Any, Optional[float]]:
+        """
+        Get from cache or compute and cache if missing/expired.
+
+        Args:
+            key: Cache key
+            factory_func: Function to call if cache miss
+            ttl_seconds: TTL for new cache entries
+
+        Returns:
+            Tuple of (value, age_seconds). age_seconds is None for cache miss.
+        """
+        # Try to get from cache
+        cached = self.get(key)
+        if cached is not None:
+            return cached
+
+        # Cache miss - compute value
+        try:
+            value = factory_func()
+            self.set(key, value, ttl_seconds)
+            return (value, None)  # None age indicates fresh data
+        except Exception as e:
+            # Don't cache errors
+            raise e
+
+    def clear(self):
+        """Clear all cache entries."""
+        with self._lock:
+            self._cache.clear()
+            self._hits = 0
+            self._misses = 0
+
+    def get_stats(self) -> dict:
+        """
+        Get cache statistics.
+
+        Returns:
+            Dict with hits, misses, size, and hit rate
+        """
+        with self._lock:
+            total = self._hits + self._misses
+            hit_rate = (self._hits / total * 100) if total > 0 else 0
+
+            return {
+                "hits": self._hits,
+                "misses": self._misses,
+                "size": len(self._cache),
+                "max_size": self._max_size,
+                "hit_rate": f"{hit_rate:.1f}%"
+            }
+
+
+# Global cache instance
+_global_cache: Optional[CacheManager] = None
+_cache_lock = threading.Lock()
+
+
+def get_cache() -> CacheManager:
+    """
+    Get the global cache instance (singleton pattern).
+
+    Returns:
+        Global CacheManager instance
+    """
+    global _global_cache
+
+    if _global_cache is None:
+        with _cache_lock:
+            if _global_cache is None:
+                _global_cache = CacheManager(max_size=100)
+
+    return _global_cache
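For orientation, a minimal usage sketch of the cache module above (illustrative only, not part of the package; the key name `quote_AAPL` and `slow_fetch` are made-up examples standing in for a real yfinance call, and it assumes the package is installed):

```python
# Illustrative sketch: exercising CacheManager via the module-level singleton.
import time
from yfin_mcp.cache_manager import get_cache

def slow_fetch() -> dict:
    time.sleep(0.1)  # stand-in for a network call
    return {"symbol": "AAPL", "price": 123.45}

cache = get_cache()

value, age = cache.get_or_set("quote_AAPL", slow_fetch, ttl_seconds=300)
print(value, age)   # first call: computed fresh, age is None

value, age = cache.get_or_set("quote_AAPL", slow_fetch, ttl_seconds=300)
print(value, age)   # second call within the TTL: served from cache, age is a small float

print(cache.get_stats())  # e.g. {'hits': 1, 'misses': 1, 'size': 1, 'max_size': 100, 'hit_rate': '50.0%'}
```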
yfin_mcp/pagination_utils.py
ADDED
@@ -0,0 +1,285 @@
+"""
+Pagination utilities for Yahoo Finance MCP server.
+Provides token-based pagination and plain text formatting for LLM-friendly responses.
+"""
+
+import json
+from dataclasses import dataclass
+from typing import Any, List, Optional, Union
+
+import pandas as pd
+
+
+@dataclass
+class PaginationResult:
+    """Result of pagination operation."""
+    formatted_text: str
+    page: int
+    total_pages: int
+    total_items: int
+    items_on_page: int
+
+
+def estimate_tokens(text: str) -> int:
+    """
+    Estimate token count for text.
+    Uses conservative estimate of ~4 characters per token.
+
+    Args:
+        text: Text to estimate tokens for
+
+    Returns:
+        Estimated token count
+    """
+    return len(text) // 4
+
+
+def format_dataframe_as_table(df: pd.DataFrame, max_col_width: int = 15) -> str:
+    """
+    Format DataFrame as plain text table with fixed-width columns.
+
+    Args:
+        df: DataFrame to format
+        max_col_width: Maximum column width
+
+    Returns:
+        Formatted table string
+    """
+    if df.empty:
+        return "No data available"
+
+    # Convert all columns to string and truncate
+    formatted_data = []
+    for _, row in df.iterrows():
+        formatted_row = []
+        for val in row:
+            str_val = str(val)
+            if len(str_val) > max_col_width:
+                str_val = str_val[:max_col_width-2] + ".."
+            formatted_row.append(str_val)
+        formatted_data.append(formatted_row)
+
+    # Get column names
+    columns = [str(col)[:max_col_width] for col in df.columns]
+
+    # Build table
+    lines = []
+
+    # Header
+    header = " | ".join(f"{col:<{max_col_width}}" for col in columns)
+    lines.append(header)
+    lines.append("-" * len(header))
+
+    # Rows
+    for row in formatted_data:
+        row_str = " | ".join(f"{val:<{max_col_width}}" for val in row)
+        lines.append(row_str)
+
+    return "\n".join(lines)
+
+
+def format_dict_as_text(data: dict, indent: int = 0) -> str:
+    """
+    Format dictionary as plain text with key-value pairs.
+
+    Args:
+        data: Dictionary to format
+        indent: Indentation level
+
+    Returns:
+        Formatted text string
+    """
+    lines = []
+    indent_str = " " * indent
+
+    for key, value in data.items():
+        if isinstance(value, dict):
+            lines.append(f"{indent_str}{key}:")
+            lines.append(format_dict_as_text(value, indent + 1))
+        elif isinstance(value, list):
+            lines.append(f"{indent_str}{key}: {len(value)} items")
+        else:
+            lines.append(f"{indent_str}{key}: {value}")
+
+    return "\n".join(lines)
+
+
+def paginate_by_tokens(
+    data: Union[pd.DataFrame, List[dict], dict],
+    page: int,
+    max_tokens: int = 6000,
+    data_type: str = "table",
+    title: Optional[str] = None,
+    cache_age: Optional[float] = None,
+) -> PaginationResult:
+    """
+    Paginate data based on token limit.
+
+    Args:
+        data: Data to paginate (DataFrame, list of dicts, or dict)
+        page: Page number (1-indexed)
+        max_tokens: Maximum tokens per page
+        data_type: Type of data ("table" or "dict")
+        title: Optional title for the page
+        cache_age: Optional cache age in seconds
+
+    Returns:
+        PaginationResult with formatted text and metadata
+    """
+    # Convert data to list of items for pagination
+    if isinstance(data, pd.DataFrame):
+        total_items = len(data)
+        items = data
+    elif isinstance(data, list):
+        total_items = len(data)
+        items = data
+    elif isinstance(data, dict):
+        total_items = len(data)
+        items = data
+    else:
+        raise ValueError(f"Unsupported data type: {type(data)}")
+
+    if total_items == 0:
+        return PaginationResult(
+            formatted_text="No data available",
+            page=1,
+            total_pages=1,
+            total_items=0,
+            items_on_page=0
+        )
+
+    # Calculate items per page dynamically
+    # Start with all items and reduce until under token limit
+    items_per_page = total_items
+    test_data = items
+
+    while items_per_page > 0:
+        # Get subset for this page size
+        if isinstance(data, pd.DataFrame):
+            test_data = data.iloc[:items_per_page]
+            test_text = format_dataframe_as_table(test_data)
+        elif isinstance(data, dict):
+            test_items = dict(list(data.items())[:items_per_page])
+            test_text = format_dict_as_text(test_items)
+        else:
+            test_items = data[:items_per_page]
+            test_text = "\n".join(str(item) for item in test_items)
+
+        # Check token count
+        if estimate_tokens(test_text) <= max_tokens * 0.8:  # Leave 20% margin
+            break
+
+        # Reduce page size
+        items_per_page = max(1, items_per_page // 2)
+
+    # Calculate pagination
+    total_pages = (total_items + items_per_page - 1) // items_per_page
+    page = max(1, min(page, total_pages))  # Clamp page number
+
+    start_idx = (page - 1) * items_per_page
+    end_idx = min(start_idx + items_per_page, total_items)
+
+    # Get page data
+    if isinstance(data, pd.DataFrame):
+        page_data = data.iloc[start_idx:end_idx]
+        content = format_dataframe_as_table(page_data)
+    elif isinstance(data, dict):
+        page_items = dict(list(data.items())[start_idx:end_idx])
+        content = format_dict_as_text(page_items)
+    else:
+        page_items = data[start_idx:end_idx]
+        content = "\n".join(str(item) for item in page_items)
+
+    # Build formatted output
+    lines = []
+    lines.append("=" * 70)
+    if title:
+        lines.append(f"📊 {title}")
+    lines.append("=" * 70)
+    lines.append("")
+    lines.append(content)
+    lines.append("")
+    lines.append("─" * 70)
+    lines.append(f"📄 PAGE {page} of {total_pages} | Showing items {start_idx + 1}-{end_idx} of {total_items} total")
+    lines.append(f"📊 Estimated tokens: {estimate_tokens(content)} / {max_tokens} max")
+    lines.append("─" * 70)
+    lines.append("")
+
+    # Navigation guidance
+    if total_pages > 1:
+        lines.append("🔍 NAVIGATION:")
+        if page < total_pages:
+            lines.append(f"  • Next page: Use page={page + 1} to see items {end_idx + 1}-{min(end_idx + items_per_page, total_items)}")
+        if page > 1:
+            lines.append(f"  • Previous page: Use page={page - 1}")
+        lines.append(f"  • Export all data: Add export_path=\"./data.json\"")
+        lines.append("")
+
+    # Cache info
+    if cache_age is not None:
+        if cache_age == 0 or cache_age is None:
+            lines.append("💾 CACHE: Fresh data (not cached)")
+        else:
+            lines.append(f"💾 CACHE: Data cached (age: {cache_age:.1f} seconds)")
+
+    lines.append("=" * 70)
+
+    formatted_text = "\n".join(lines)
+
+    return PaginationResult(
+        formatted_text=formatted_text,
+        page=page,
+        total_pages=total_pages,
+        total_items=total_items,
+        items_on_page=end_idx - start_idx
+    )
+
+
+def export_to_json(data: Union[pd.DataFrame, List[dict], dict], file_path: str) -> str:
+    """
+    Export data to JSON file.
+
+    Args:
+        data: Data to export
+        file_path: Path to save JSON file
+
+    Returns:
+        Success message with file info
+    """
+    # Convert DataFrame to dict
+    if isinstance(data, pd.DataFrame):
+        export_data = data.to_dict(orient="records")
+    else:
+        export_data = data
+
+    # Write to file
+    with open(file_path, 'w', encoding='utf-8') as f:
+        json.dump(export_data, f, indent=2, default=str)
+
+    # Get file size
+    import os
+    file_size = os.path.getsize(file_path)
+    size_kb = file_size / 1024
+
+    # Count items
+    if isinstance(export_data, list):
+        item_count = len(export_data)
+    elif isinstance(export_data, dict):
+        item_count = len(export_data)
+    else:
+        item_count = 1
+
+    # Build response
+    lines = []
+    lines.append("=" * 70)
+    lines.append("✅ DATA EXPORTED SUCCESSFULLY")
+    lines.append("=" * 70)
+    lines.append("")
+    lines.append(f"📁 File: {file_path}")
+    lines.append(f"📊 Size: {size_kb:.2f} KB")
+    lines.append(f"📝 Items: {item_count}")
+    lines.append("")
+    lines.append("The complete dataset has been saved to the specified file.")
+    lines.append("=" * 70)
+
+    return "\n".join(lines)
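As a rough sketch of how the pagination helper above behaves (illustrative, not part of the package; the DataFrame is synthetic and the 500-token budget is chosen only to force multiple pages):

```python
# Illustrative sketch: paginating a synthetic DataFrame with paginate_by_tokens.
import pandas as pd
from yfin_mcp.pagination_utils import estimate_tokens, paginate_by_tokens

# Synthetic data: 200 rows, enough text that one page cannot hold everything
# under a deliberately small token budget.
df = pd.DataFrame({
    "Date": pd.date_range("2024-01-01", periods=200, freq="D").astype(str),
    "Close": [100.0 + i * 0.25 for i in range(200)],
    "Volume": [1_000_000 + i for i in range(200)],
})

result = paginate_by_tokens(df, page=1, max_tokens=500, title="SYNTHETIC PRICES")
print(result.total_items)      # 200
print(result.total_pages)      # > 1: the row count exceeds the 500-token budget
print(result.items_on_page)    # rows actually rendered on page 1
print(estimate_tokens(result.formatted_text))  # rough size of the page text
```

The page size is found by repeatedly halving the item count until the rendered table fits within 80% of `max_tokens`, so smaller budgets simply yield more, shorter pages.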
yfin_mcp/server.py
ADDED
@@ -0,0 +1,566 @@
+import json
+from enum import Enum
+from typing import Optional
+
+import pandas as pd
+import yfinance as yf
+from mcp.server.fastmcp import FastMCP
+
+from .cache_manager import get_cache
+from .pagination_utils import paginate_by_tokens, export_to_json
+
+
+# Define an enum for the type of financial statement
+class FinancialType(str, Enum):
+    income_stmt = "income_stmt"
+    quarterly_income_stmt = "quarterly_income_stmt"
+    balance_sheet = "balance_sheet"
+    quarterly_balance_sheet = "quarterly_balance_sheet"
+    cashflow = "cashflow"
+    quarterly_cashflow = "quarterly_cashflow"
+
+
+class HolderType(str, Enum):
+    major_holders = "major_holders"
+    institutional_holders = "institutional_holders"
+    mutualfund_holders = "mutualfund_holders"
+    insider_transactions = "insider_transactions"
+    insider_purchases = "insider_purchases"
+    insider_roster_holders = "insider_roster_holders"
+
+
+class RecommendationType(str, Enum):
+    recommendations = "recommendations"
+    upgrades_downgrades = "upgrades_downgrades"
+
+
+# Initialize FastMCP server
+yfinance_server = FastMCP(
+    "yfinance",
+    instructions="""
+# Yahoo Finance MCP Server
+
+This server is used to get information about a given ticker symbol from yahoo finance.
+
+Available tools:
+- get_historical_stock_prices: Get historical stock prices for a given ticker symbol from yahoo finance. Include the following information: Date, Open, High, Low, Close, Volume, Adj Close.
+- get_stock_info: Get stock information for a given ticker symbol from yahoo finance. Include the following information: Stock Price & Trading Info, Company Information, Financial Metrics, Earnings & Revenue, Margins & Returns, Dividends, Balance Sheet, Ownership, Analyst Coverage, Risk Metrics, Other.
+- get_yahoo_finance_news: Get news for a given ticker symbol from yahoo finance.
+- get_stock_actions: Get stock dividends and stock splits for a given ticker symbol from yahoo finance.
+- get_financial_statement: Get financial statement for a given ticker symbol from yahoo finance. You can choose from the following financial statement types: income_stmt, quarterly_income_stmt, balance_sheet, quarterly_balance_sheet, cashflow, quarterly_cashflow.
+- get_holder_info: Get holder information for a given ticker symbol from yahoo finance. You can choose from the following holder types: major_holders, institutional_holders, mutualfund_holders, insider_transactions, insider_purchases, insider_roster_holders.
+- get_option_expiration_dates: Fetch the available options expiration dates for a given ticker symbol.
+- get_option_chain: Fetch the option chain for a given ticker symbol, expiration date, and option type.
+- get_recommendations: Get recommendations or upgrades/downgrades for a given ticker symbol from yahoo finance. You can also specify the number of months back to get upgrades/downgrades for, default is 12.
+""",
+)
+
+
+@yfinance_server.tool(
+    name="get_historical_stock_prices",
+    description="""Get historical stock prices for a given ticker symbol from yahoo finance. Include the following information: Date, Open, High, Low, Close, Volume, Adj Close.
+Args:
+    ticker: str
+        The ticker symbol of the stock to get historical prices for, e.g. "AAPL"
+    period : str
+        Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
+        Either Use period parameter or use start and end
+        Default is "1mo"
+    interval : str
+        Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
+        Intraday data cannot extend last 60 days
+        Default is "1d"
+""",
+)
+async def get_historical_stock_prices(
+    ticker: str,
+    period: str = "1mo",
+    interval: str = "1d",
+    page: int = 1,
+    export_path: Optional[str] = None,
+) -> str:
+    """Get historical stock prices for a given ticker symbol with pagination
+
+    Args:
+        ticker: The ticker symbol, e.g. "AAPL"
+        period: Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max (default: "1mo")
+        interval: Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo (default: "1d")
+        page: Page number for pagination (default: 1)
+        export_path: Optional path to export full data as JSON file
+    """
+    cache = get_cache()
+    cache_key = f"hist_{ticker}_{period}_{interval}"
+
+    # Try to get from cache
+    hist_data, cache_age = cache.get_or_set(
+        cache_key,
+        lambda: yf.Ticker(ticker).history(period=period, interval=interval),
+        ttl_seconds=300  # 5 minutes for price data
+    )
+
+    if hist_data is None or hist_data.empty:
+        return f"No historical data available for {ticker}"
+
+    # Reset index to make Date a column
+    hist_data = hist_data.reset_index()
+
+    # Export if requested
+    if export_path:
+        return export_to_json(hist_data, export_path)
+
+    # Paginate the response
+    result = paginate_by_tokens(
+        data=hist_data,
+        page=page,
+        max_tokens=6000,
+        data_type="table",
+        title=f"HISTORICAL STOCK PRICES - {ticker} ({period}, {interval})",
+        cache_age=cache_age,
+    )
+
+    return result.formatted_text
+
+
+@yfinance_server.tool(
+    name="get_stock_info",
+    description="""Get stock information for a given ticker symbol from yahoo finance. Include the following information:
+Stock Price & Trading Info, Company Information, Financial Metrics, Earnings & Revenue, Margins & Returns, Dividends, Balance Sheet, Ownership, Analyst Coverage, Risk Metrics, Other.
+
+Args:
+    ticker: str
+        The ticker symbol of the stock to get information for, e.g. "AAPL"
+""",
+)
+async def get_stock_info(
+    ticker: str,
+    fields: Optional[list] = None,
+    page: int = 1,
+    export_path: Optional[str] = None,
+) -> str:
+    """Get stock information with optional field filtering and pagination"""
+    cache = get_cache()
+    cache_key = f"info_{ticker}"
+
+    info, cache_age = cache.get_or_set(
+        cache_key,
+        lambda: yf.Ticker(ticker).info,
+        ttl_seconds=300
+    )
+
+    if not info:
+        return f"No information available for {ticker}"
+
+    # Filter fields if specified
+    info_to_display = {k: info.get(k) for k in fields if k in info} if fields else info
+
+    if export_path:
+        return export_to_json(info_to_display, export_path)
+
+    result = paginate_by_tokens(
+        data=info_to_display,
+        page=page,
+        max_tokens=6000,
+        data_type="dict",
+        title=f"STOCK INFO - {ticker}",
+        cache_age=cache_age,
+    )
+
+    return result.formatted_text
+
+
+@yfinance_server.tool(
+    name="get_yahoo_finance_news",
+    description="""Get news for a given ticker symbol from yahoo finance.
+
+Args:
+    ticker: str
+        The ticker symbol of the stock to get news for, e.g. "AAPL"
+""",
+)
+async def get_yahoo_finance_news(
+    ticker: str,
+    page: int = 1,
+    export_path: Optional[str] = None,
+) -> str:
+    """Get news for a given ticker symbol with pagination"""
+    cache = get_cache()
+    cache_key = f"news_{ticker}"
+
+    news_data, cache_age = cache.get_or_set(
+        cache_key,
+        lambda: yf.Ticker(ticker).news,
+        ttl_seconds=300  # 5 minutes
+    )
+
+    if not news_data:
+        return f"No news found for {ticker}"
+
+    # Convert to structured format
+    news_list = []
+    for item in news_data:
+        if item.get("content", {}).get("contentType", "") == "STORY":
+            news_list.append({
+                "title": item.get("content", {}).get("title", ""),
+                "summary": item.get("content", {}).get("summary", ""),
+                "url": item.get("content", {}).get("canonicalUrl", {}).get("url", ""),
+                "provider": item.get("content", {}).get("provider", {}).get("displayName", "")
+            })
+
+    if not news_list:
+        return f"No news articles found for {ticker}"
+
+    if export_path:
+        return export_to_json(news_list, export_path)
+
+    result = paginate_by_tokens(
+        data=pd.DataFrame(news_list),
+        page=page,
+        max_tokens=6000,
+        data_type="table",
+        title=f"YAHOO FINANCE NEWS - {ticker}",
+        cache_age=cache_age,
+    )
+
+    return result.formatted_text
+
+
+@yfinance_server.tool(
+    name="get_stock_actions",
+    description="""Get stock dividends and stock splits for a given ticker symbol from yahoo finance.
+
+Args:
+    ticker: str
+        The ticker symbol of the stock to get stock actions for, e.g. "AAPL"
+""",
+)
+async def get_stock_actions(
+    ticker: str,
+    page: int = 1,
+    export_path: Optional[str] = None,
+) -> str:
+    """Get stock dividends and splits with pagination"""
+    cache = get_cache()
+    cache_key = f"actions_{ticker}"
+
+    actions_data, cache_age = cache.get_or_set(
+        cache_key,
+        lambda: yf.Ticker(ticker).actions,
+        ttl_seconds=3600  # 1 hour - less volatile
+    )
+
+    if actions_data is None or actions_data.empty:
+        return f"No stock actions available for {ticker}"
+
+    actions_data = actions_data.reset_index()
+
+    if export_path:
+        return export_to_json(actions_data, export_path)
+
+    result = paginate_by_tokens(
+        data=actions_data,
+        page=page,
+        max_tokens=6000,
+        data_type="table",
+        title=f"STOCK ACTIONS - {ticker}",
+        cache_age=cache_age,
+    )
+
+    return result.formatted_text
+
+
+@yfinance_server.tool(
+    name="get_financial_statement",
+    description="""Get financial statement for a given ticker symbol from yahoo finance. You can choose from the following financial statement types: income_stmt, quarterly_income_stmt, balance_sheet, quarterly_balance_sheet, cashflow, quarterly_cashflow.
+
+Args:
+    ticker: str
+        The ticker symbol of the stock to get financial statement for, e.g. "AAPL"
+    financial_type: str
+        The type of financial statement to get. You can choose from the following financial statement types: income_stmt, quarterly_income_stmt, balance_sheet, quarterly_balance_sheet, cashflow, quarterly_cashflow.
+""",
+)
+async def get_financial_statement(
+    ticker: str,
+    financial_type: str,
+    export_path: Optional[str] = None,
+) -> str:
+    """Get financial statement with caching and export"""
+    cache = get_cache()
+    cache_key = f"financial_{ticker}_{financial_type}"
+
+    def fetch_statement():
+        company = yf.Ticker(ticker)
+        if financial_type == FinancialType.income_stmt:
+            return company.income_stmt
+        elif financial_type == FinancialType.quarterly_income_stmt:
+            return company.quarterly_income_stmt
+        elif financial_type == FinancialType.balance_sheet:
+            return company.balance_sheet
+        elif financial_type == FinancialType.quarterly_balance_sheet:
+            return company.quarterly_balance_sheet
+        elif financial_type == FinancialType.cashflow:
+            return company.cashflow
+        elif financial_type == FinancialType.quarterly_cashflow:
+            return company.quarterly_cashflow
+        else:
+            return None
+
+    financial_statement, cache_age = cache.get_or_set(
+        cache_key,
+        fetch_statement,
+        ttl_seconds=3600  # 1 hour
+    )
+
+    if financial_statement is None or financial_statement.empty:
+        return f"No financial statement data available for {ticker} ({financial_type})"
+
+    # Convert to list of dicts for export
+    result = []
+    for column in financial_statement.columns:
+        if isinstance(column, pd.Timestamp):
+            date_str = column.strftime("%Y-%m-%d")
+        else:
+            date_str = str(column)
+
+        date_obj = {"date": date_str}
+        for index, value in financial_statement[column].items():
+            date_obj[index] = None if pd.isna(value) else value
+        result.append(date_obj)
+
+    if export_path:
+        return export_to_json(result, export_path)
+
+    # Format as plain text table
+    df = pd.DataFrame(result)
+    result_text = paginate_by_tokens(
+        data=df,
+        page=1,  # Financial statements are small, no pagination needed
+        max_tokens=6000,
+        data_type="table",
+        title=f"FINANCIAL STATEMENT - {ticker} ({financial_type})",
+        cache_age=cache_age,
+    )
+
+    return result_text.formatted_text
+
+
+@yfinance_server.tool(
+    name="get_holder_info",
+    description="""Get holder information for a given ticker symbol from yahoo finance. You can choose from the following holder types: major_holders, institutional_holders, mutualfund_holders, insider_transactions, insider_purchases, insider_roster_holders.
+
+Args:
+    ticker: str
+        The ticker symbol of the stock to get holder information for, e.g. "AAPL"
+    holder_type: str
+        The type of holder information to get. You can choose from the following holder types: major_holders, institutional_holders, mutualfund_holders, insider_transactions, insider_purchases, insider_roster_holders.
+""",
+)
+async def get_holder_info(
+    ticker: str,
+    holder_type: str,
+    page: int = 1,
+    export_path: Optional[str] = None,
+) -> str:
+    """Get holder information with pagination"""
+    cache = get_cache()
+    cache_key = f"holder_{ticker}_{holder_type}"
+
+    def fetch_holder_data():
+        company = yf.Ticker(ticker)
+        if holder_type == HolderType.major_holders:
+            return company.major_holders.reset_index(names="metric")
+        elif holder_type == HolderType.institutional_holders:
+            return company.institutional_holders
+        elif holder_type == HolderType.mutualfund_holders:
+            return company.mutualfund_holders
+        elif holder_type == HolderType.insider_transactions:
+            return company.insider_transactions
+        elif holder_type == HolderType.insider_purchases:
+            return company.insider_purchases
+        elif holder_type == HolderType.insider_roster_holders:
+            return company.insider_roster_holders
+        else:
+            return None
+
+    holder_data, cache_age = cache.get_or_set(
+        cache_key,
+        fetch_holder_data,
+        ttl_seconds=3600  # 1 hour
+    )
+
+    if holder_data is None or holder_data.empty:
+        return f"No holder information available for {ticker} ({holder_type})"
+
+    if export_path:
+        return export_to_json(holder_data, export_path)
+
+    result = paginate_by_tokens(
+        data=holder_data,
+        page=page,
+        max_tokens=6000,
+        data_type="table",
+        title=f"HOLDER INFO - {ticker} ({holder_type})",
+        cache_age=cache_age,
+    )
+
+    return result.formatted_text
+
+
+@yfinance_server.tool(
+    name="get_option_expiration_dates",
+    description="""Fetch the available options expiration dates for a given ticker symbol.
+
+Args:
+    ticker: str
+        The ticker symbol of the stock to get option expiration dates for, e.g. "AAPL"
+""",
+)
+async def get_option_expiration_dates(ticker: str) -> str:
+    """Fetch the available options expiration dates for a given ticker symbol."""
+
+    company = yf.Ticker(ticker)
+    try:
+        if company.isin is None:
+            print(f"Company ticker {ticker} not found.")
+            return f"Company ticker {ticker} not found."
+    except Exception as e:
+        print(f"Error: getting option expiration dates for {ticker}: {e}")
+        return f"Error: getting option expiration dates for {ticker}: {e}"
+    return json.dumps(company.options)
+
+
+@yfinance_server.tool(
+    name="get_option_chain",
+    description="""Fetch the option chain for a given ticker symbol, expiration date, and option type.
+
+Args:
+    ticker: str
+        The ticker symbol of the stock to get option chain for, e.g. "AAPL"
+    expiration_date: str
+        The expiration date for the options chain (format: 'YYYY-MM-DD')
+    option_type: str
+        The type of option to fetch ('calls' or 'puts')
+""",
+)
+async def get_option_chain(
+    ticker: str,
+    expiration_date: str,
+    option_type: str,
+    page: int = 1,
+    export_path: Optional[str] = None,
+) -> str:
+    """Fetch option chain with pagination"""
+    cache = get_cache()
+    cache_key = f"options_{ticker}_{expiration_date}_{option_type}"
+
+    def fetch_options():
+        company = yf.Ticker(ticker)
+        if expiration_date not in company.options:
+            raise ValueError(f"No options for date {expiration_date}")
+        if option_type not in ["calls", "puts"]:
+            raise ValueError("Invalid option type")
+
+        option_chain = company.option_chain(expiration_date)
+        return option_chain.calls if option_type == "calls" else option_chain.puts
+
+    try:
+        option_data, cache_age = cache.get_or_set(
+            cache_key,
+            fetch_options,
+            ttl_seconds=300  # 5 minutes - volatile
+        )
+    except ValueError as e:
+        return f"Error: {str(e)}"
+
+    if option_data is None or option_data.empty:
+        return f"No option chain data available for {ticker}"
+
+    if export_path:
+        return export_to_json(option_data, export_path)
+
+    result = paginate_by_tokens(
+        data=option_data,
+        page=page,
+        max_tokens=6000,
+        data_type="table",
+        title=f"OPTION CHAIN - {ticker} ({option_type.upper()}, {expiration_date})",
+        cache_age=cache_age,
+    )
+
+    return result.formatted_text
+
+
+@yfinance_server.tool(
+    name="get_recommendations",
+    description="""Get recommendations or upgrades/downgrades for a given ticker symbol from yahoo finance. You can also specify the number of months back to get upgrades/downgrades for, default is 12.
+
+Args:
+    ticker: str
+        The ticker symbol of the stock to get recommendations for, e.g. "AAPL"
+    recommendation_type: str
+        The type of recommendation to get. You can choose from the following recommendation types: recommendations, upgrades_downgrades.
+    months_back: int
+        The number of months back to get upgrades/downgrades for, default is 12.
+""",
+)
+async def get_recommendations(
+    ticker: str,
+    recommendation_type: str,
+    months_back: int = 12,
+    page: int = 1,
+    export_path: Optional[str] = None,
+) -> str:
+    """Get recommendations with pagination"""
+    cache = get_cache()
+    cache_key = f"recommendations_{ticker}_{recommendation_type}_{months_back}"
+
+    def fetch_recommendations():
+        company = yf.Ticker(ticker)
+        if recommendation_type == RecommendationType.recommendations:
+            return company.recommendations
+        elif recommendation_type == RecommendationType.upgrades_downgrades:
+            upgrades_downgrades = company.upgrades_downgrades.reset_index()
+            cutoff_date = pd.Timestamp.now() - pd.DateOffset(months=months_back)
+            upgrades_downgrades = upgrades_downgrades[
+                upgrades_downgrades["GradeDate"] >= cutoff_date
+            ]
+            upgrades_downgrades = upgrades_downgrades.sort_values("GradeDate", ascending=False)
+            return upgrades_downgrades.drop_duplicates(subset=["Firm"])
+        else:
+            return None
+
+    try:
+        rec_data, cache_age = cache.get_or_set(
+            cache_key,
+            fetch_recommendations,
+            ttl_seconds=3600  # 1 hour
+        )
+    except Exception as e:
+        return f"Error: getting recommendations for {ticker}: {e}"
+
+    if rec_data is None or rec_data.empty:
+        return f"No recommendations available for {ticker}"
+
+    if export_path:
+        return export_to_json(rec_data, export_path)
+
+    result = paginate_by_tokens(
+        data=rec_data,
+        page=page,
+        max_tokens=6000,
+        data_type="table",
+        title=f"RECOMMENDATIONS - {ticker} ({recommendation_type})",
+        cache_age=cache_age,
+    )
+
+    return result.formatted_text
+
+
+def main() -> None:
+    """Main entry point for the server"""
+    print("Starting Yahoo Finance MCP server...")
+    yfinance_server.run(transport="stdio")
+
+
+if __name__ == "__main__":
+    main()
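The tool functions above are registered with FastMCP but can also be exercised locally; the sketch below is illustrative only (not part of the package) and assumes yfin_mcp, yfinance, pandas, and the mcp SDK are installed, network access to Yahoo Finance is available, and the `@yfinance_server.tool` decorator returns the original coroutine function so it can be awaited directly:

```python
# Illustrative local smoke test for the MCP tools defined in yfin_mcp/server.py.
import asyncio

from yfin_mcp.server import get_historical_stock_prices, get_stock_info

async def main() -> None:
    # One page of recent daily prices, rendered as the plain-text table
    # produced by paginate_by_tokens.
    prices = await get_historical_stock_prices("AAPL", period="5d", interval="1d", page=1)
    print(prices)

    # A filtered slice of the info dict; the field names are ordinary
    # yfinance info keys and serve only as examples.
    info = await get_stock_info("AAPL", fields=["longName", "marketCap", "trailingPE"])
    print(info)

asyncio.run(main())
```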
{yfin_mcp-0.2.4.dist-info → yfin_mcp-0.2.5.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: yfin-mcp
-Version: 0.2.4
+Version: 0.2.5
 Summary: Enhanced Yahoo Finance MCP Server with intelligent pagination, caching, and LLM-optimized responses
 Author: AlexYoung (Original Author)
 Author-email: fritzprix <innocentevil0914@gmail.com>
@@ -280,7 +280,7 @@ For testing with MCP Inspector:
 
 ```bash
 # From source
-uv run
+uv run yfin-mcp
 
 # Or if installed via pip
 python -m yfin_mcp
@@ -315,7 +315,7 @@ chmod +x publish_package.sh
 ```
 
 > [!NOTE]
-> The scripts will build the package into the `dist/` directory and then use `
+> The scripts will build the package into the `dist/` directory and then use `twine` to upload it. Ensure you have your PyPI credentials configured in `~/.pypirc` (or `%HOME%\.pypirc` on Windows) or set the `TWINE_PASSWORD` environment variable.
 
 ## License
 
yfin_mcp-0.2.5.dist-info/RECORD
ADDED
@@ -0,0 +1,11 @@
+yfin_mcp/__init__.py,sha256=o50r3pMClywOYSZNh-saX7Gop-XcJR9ZYoutb49sy18,20
+yfin_mcp/__main__.py,sha256=tsUTknY-RMVcmeThCU7_f58cuicrPOSGRGmGD7zLjCU,68
+yfin_mcp/cache_manager.py,sha256=QXqYpZ6q1rYqpsJspyCgXr7CD3g_d6Tlde-f4_32Td0,4439
+yfin_mcp/pagination_utils.py,sha256=Q1IRJsY76n6ohfTZhQcfZTKAefRuSaDE6UNM9coQ8-M,8737
+yfin_mcp/server.py,sha256=Jwg8L78WCWFkMTY-D02O5R9zBtClB-YC1Q-fAHq9Fj0,20460
+yfin_mcp-0.2.5.dist-info/licenses/LICENSE,sha256=-X17dvA84FUO-t9DRgqDt7JO2d0tSz6UpuZ-ZMtFfyo,1189
+yfin_mcp-0.2.5.dist-info/METADATA,sha256=5WdcLw2-YCcDQpfjQU-dy4JAiPzIgMwYWztLb8O5_QQ,10770
+yfin_mcp-0.2.5.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+yfin_mcp-0.2.5.dist-info/entry_points.txt,sha256=UHi0TxeiN87R8aY4YIVFjEZBwS12NFOdtJRAlpgJvnU,50
+yfin_mcp-0.2.5.dist-info/top_level.txt,sha256=--tJwK65rhM2E1QIObaxg4gTRFb_r1lKInXDSUXPlbE,9
+yfin_mcp-0.2.5.dist-info/RECORD,,
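For reference, the `sha256=` values in the RECORD entries above are URL-safe base64-encoded SHA-256 digests with the trailing `=` padding stripped, per the wheel RECORD format, so an installed file can be re-checked with a few lines of Python (illustrative only; the path assumes the package files are reachable from the working directory or site-packages):

```python
# Illustrative check, not part of the package: recompute a RECORD-style hash.
import base64
import hashlib

def record_hash(path: str) -> str:
    """Return the urlsafe-base64 SHA-256 digest (no padding) used in RECORD files."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For the 0.2.5 wheel, this should print
# o50r3pMClywOYSZNh-saX7Gop-XcJR9ZYoutb49sy18 for yfin_mcp/__init__.py.
print(record_hash("yfin_mcp/__init__.py"))
```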
yfin_mcp-0.2.5.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+yfin_mcp
yfin_mcp-0.2.4.dist-info/RECORD
DELETED
@@ -1,6 +0,0 @@
-yfin_mcp-0.2.4.dist-info/licenses/LICENSE,sha256=-X17dvA84FUO-t9DRgqDt7JO2d0tSz6UpuZ-ZMtFfyo,1189
-yfin_mcp-0.2.4.dist-info/METADATA,sha256=Y_1DaBQxoCpacFq7fFk1oszgDPzPaZBzxsKgQjm_54I,10778
-yfin_mcp-0.2.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-yfin_mcp-0.2.4.dist-info/entry_points.txt,sha256=kpA80BnCqljODfRYZKgWGPco5SvNVQ1zJgQgpm5zz7k,41
-yfin_mcp-0.2.4.dist-info/top_level.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-yfin_mcp-0.2.4.dist-info/RECORD,,
yfin_mcp-0.2.4.dist-info/top_level.txt
DELETED
@@ -1 +0,0 @@
-
{yfin_mcp-0.2.4.dist-info → yfin_mcp-0.2.5.dist-info}/WHEEL
File without changes
{yfin_mcp-0.2.4.dist-info → yfin_mcp-0.2.5.dist-info}/licenses/LICENSE
File without changes