mcp_stata-1.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcp-stata might be problematic. Click here for more details.

@@ -0,0 +1,401 @@
1
+ """
2
+ Graph creation detection for streaming Stata output.
3
+
4
+ This module provides functionality to detect when graphs are created
5
+ during Stata command execution and automatically cache them.
6
+ """
7
+
8
+ import asyncio
9
+ import contextlib
10
+ import inspect
11
+ import re
12
+ import threading
13
+ import time
14
+ from typing import List, Set, Callable, Dict, Any
15
+ import logging
16
+
17
+
18
+ # SFI is always available
19
+ SFI_AVAILABLE = True
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
+
24
+ class GraphCreationDetector:
25
+ """Detects graph creation using SFI-only detection with pystata integration."""
26
+
27
+ def __init__(self, stata_client=None):
28
+ self._lock = threading.Lock()
29
+ self._detected_graphs: Set[str] = set()
30
+ self._removed_graphs: Set[str] = set()
31
+ self._unnamed_graph_counter = 0 # Track unnamed graphs for identification
32
+ self._stata_client = stata_client
33
+ self._last_graph_state: Dict[str, Any] = {} # Track graph state changes
34
+
35
+ def _describe_graph_signature(self, graph_name: str) -> str:
36
+ """Return a stable signature for a graph.
37
+
38
+ We avoid using Stata calls like 'graph describe' here because they are slow
39
+ (each call takes ~35ms) and would be called for every graph on every poll,
40
+ bottlenecking the streaming output.
41
+
42
+ Instead, we use name-based tracking tied to the Stata command execution
43
+ context. The signature is stable within a single command execution but
44
+ changes when a new command starts, allowing us to detect modifications
45
+ between commands without any Stata overhead.
46
+ """
47
+ if not self._stata_client:
48
+ return ""
49
+
50
+ # Access command_idx from stata_client if available
51
+ # NOTE: We only use command_idx for the default 'Graph' name to detect
52
+ # modifications. For named graphs, we only detect creation (name change)
53
+ # to avoid triggering redundant notifications for all existing graphs
54
+ # on every command (since command_idx changes globally).
55
+ cmd_idx = getattr(self._stata_client, "_command_idx", 0)
56
+ if graph_name.lower() == "graph":
57
+ return f"{graph_name}_{cmd_idx}"
58
+ return graph_name
59
+
60
+ def _detect_graphs_via_pystata(self) -> List[str]:
61
+ """Detect newly created graphs using direct pystata state access."""
62
+ if not self._stata_client:
63
+ return []
64
+
65
+ try:
66
+ # Get current graph state using pystata's sfi interface
67
+ current_graphs = self._get_current_graphs_from_pystata()
68
+ current_state = self._get_graph_state_from_pystata()
69
+
70
+ # Compare with last known state to detect new graphs
71
+ new_graphs = []
72
+
73
+ # Check for new graph names
74
+ for graph_name in current_graphs:
75
+ if graph_name not in self._last_graph_state and graph_name not in self._removed_graphs:
76
+ new_graphs.append(graph_name)
77
+
78
+ # Check for state changes in existing graphs (modifications)
79
+ for graph_name, state in current_state.items():
80
+ if graph_name in self._last_graph_state:
81
+ last_state = self._last_graph_state[graph_name]
82
+ # Compare stable signature only.
83
+ if state.get("signature") != last_state.get("signature"):
84
+ if graph_name not in self._removed_graphs:
85
+ new_graphs.append(graph_name)
86
+
87
+ # Update cached state
88
+ self._last_graph_state = current_state.copy()
89
+
90
+ return new_graphs
91
+
92
+ except (ImportError, RuntimeError, ValueError, AttributeError) as e:
93
+ # These are expected exceptions when SFI is not available or Stata state is inaccessible
94
+ logger.debug(f"Failed to detect graphs via pystata (expected): {e}")
95
+ return []
96
+ except Exception as e:
97
+ # Unexpected errors should be logged as errors
98
+ logger.error(f"Unexpected error in pystata graph detection: {e}")
99
+ return []
100
+
101
+ def _get_current_graphs_from_pystata(self) -> List[str]:
102
+ """Get current list of graphs using pystata's sfi interface."""
103
+ try:
104
+ # Use pystata to get graph list directly
105
+ if self._stata_client and hasattr(self._stata_client, 'list_graphs'):
106
+ return self._stata_client.list_graphs(force_refresh=True)
107
+ else:
108
+ # Fallback to sfi Macro interface - only if stata is available
109
+ if self._stata_client and hasattr(self._stata_client, 'stata'):
110
+ # Access the lock from client to prevent concurrency issues with pystata
111
+ exec_lock = getattr(self._stata_client, "_exec_lock", None)
112
+ ctx = exec_lock if exec_lock else contextlib.nullcontext()
113
+
114
+ with ctx:
115
+ try:
116
+ from sfi import Macro
117
+ hold_name = f"_mcp_detector_hold_{int(time.time() * 1000 % 1000000)}"
118
+ self._stata_client.stata.run(f"capture _return hold {hold_name}", echo=False)
119
+ try:
120
+ self._stata_client.stata.run("macro define mcp_graph_list \"\"", echo=False)
121
+ self._stata_client.stata.run("quietly graph dir, memory", echo=False)
122
+ self._stata_client.stata.run("macro define mcp_graph_list `r(list)'", echo=False)
123
+ graph_list_str = Macro.getGlobal("mcp_graph_list")
124
+ finally:
125
+ self._stata_client.stata.run(f"capture _return restore {hold_name}", echo=False)
126
+ return graph_list_str.split() if graph_list_str else []
127
+ except ImportError:
128
+ logger.warning("sfi.Macro not available for fallback graph detection")
129
+ return []
130
+ else:
131
+ return []
132
+ except Exception as e:
133
+ logger.warning(f"Failed to get current graphs from pystata: {e}")
134
+ return []
135
+
136
+ def _get_graph_state_from_pystata(self) -> Dict[str, Any]:
137
+ """Get detailed graph state information using pystata's sfi interface."""
138
+ graph_state = {}
139
+
140
+ try:
141
+ current_graphs = self._get_current_graphs_from_pystata()
142
+
143
+ for graph_name in current_graphs:
144
+ try:
145
+ signature = self._describe_graph_signature(graph_name)
146
+ state_info = {
147
+ "name": graph_name,
148
+ "exists": True,
149
+ "valid": bool(signature),
150
+ "signature": signature,
151
+ }
152
+
153
+ # Only update timestamps when the signature changes.
154
+ prev = self._last_graph_state.get(graph_name)
155
+ if prev is None or prev.get("signature") != signature:
156
+ state_info["timestamp"] = time.time()
157
+ else:
158
+ state_info["timestamp"] = prev.get("timestamp", time.time())
159
+
160
+ graph_state[graph_name] = state_info
161
+
162
+ except Exception as e:
163
+ logger.warning(f"Failed to get state for graph {graph_name}: {e}")
164
+ graph_state[graph_name] = {"name": graph_name, "timestamp": time.time(), "exists": False, "signature": ""}
165
+
166
+ except Exception as e:
167
+ logger.warning(f"Failed to get graph state from pystata: {e}")
168
+
169
+ return graph_state
170
+
171
+
172
+
173
+ def detect_graph_modifications(self, text: str = None) -> dict:
174
+ """Detect graph modification/removal using SFI state comparison."""
175
+ modifications = {"dropped": [], "renamed": [], "cleared": False}
176
+
177
+ if not self._stata_client:
178
+ return modifications
179
+
180
+ try:
181
+ # Get current graph state via SFI
182
+ current_graphs = set(self._get_current_graphs_from_pystata())
183
+
184
+ # Compare with last known state to detect modifications
185
+ if self._last_graph_state:
186
+ last_graphs = set(self._last_graph_state.keys())
187
+
188
+ # Detect dropped graphs (in last state but not current)
189
+ dropped_graphs = last_graphs - current_graphs
190
+ modifications["dropped"].extend(dropped_graphs)
191
+
192
+ # Detect clear all (no graphs remain when there were some before)
193
+ if last_graphs and not current_graphs:
194
+ modifications["cleared"] = True
195
+
196
+ # Update last known state for next comparison (stable signatures)
197
+ new_state: Dict[str, Any] = {}
198
+ for graph in current_graphs:
199
+ sig = self._describe_graph_signature(graph)
200
+ new_state[graph] = {
201
+ "name": graph,
202
+ "exists": True,
203
+ "valid": bool(sig),
204
+ "signature": sig,
205
+ "timestamp": time.time(),
206
+ }
207
+ self._last_graph_state = new_state
208
+
209
+ except Exception as e:
210
+ logger.debug(f"SFI modification detection failed: {e}")
211
+
212
+ return modifications
213
+
214
+
215
+ def should_cache_graph(self, graph_name: str) -> bool:
216
+ """Determine if a graph should be cached."""
217
+ with self._lock:
218
+ # Don't cache if already detected or removed
219
+ if graph_name in self._detected_graphs or graph_name in self._removed_graphs:
220
+ return False
221
+
222
+ # Mark as detected
223
+ self._detected_graphs.add(graph_name)
224
+ return True
225
+
226
+ def mark_graph_removed(self, graph_name: str) -> None:
227
+ """Mark a graph as removed."""
228
+ with self._lock:
229
+ self._removed_graphs.add(graph_name)
230
+ self._detected_graphs.discard(graph_name)
231
+
232
+ def mark_all_cleared(self) -> None:
233
+ """Mark all graphs as cleared."""
234
+ with self._lock:
235
+ self._detected_graphs.clear()
236
+ self._removed_graphs.clear()
237
+
238
+ def clear_detection_state(self) -> None:
239
+ """Clear all detection state."""
240
+ with self._lock:
241
+ self._detected_graphs.clear()
242
+ self._removed_graphs.clear()
243
+ self._unnamed_graph_counter = 0
244
+
245
+ def process_modifications(self, modifications: dict) -> None:
246
+ """Process detected modifications."""
247
+ with self._lock:
248
+ # Handle dropped graphs
249
+ for graph_name in modifications.get("dropped", []):
250
+ self.mark_graph_removed(graph_name)
251
+
252
+ # Handle renamed graphs
253
+ for old_name, new_name in modifications.get("renamed", []):
254
+ self.mark_graph_removed(old_name)
255
+ self._detected_graphs.discard(new_name) # Allow re-detection with new name
256
+
257
+ # Handle clear all
258
+ if modifications.get("cleared", False):
259
+ self.mark_all_cleared()
260
+
261
+
262
class StreamingGraphCache:
    """Integrates graph detection with caching during streaming.

    Maintains a queue of graph names to cache (``_graphs_to_cache``) and a
    record of graphs already cached (``_cached_graphs``).  Both public
    caching entry points share one drain routine, so verification, caching
    and callback notification behave identically for both.
    """

    def __init__(self, stata_client, auto_cache: bool = False):
        self.stata_client = stata_client
        self.auto_cache = auto_cache
        # Use persistent detector from client if available, else create local one
        if hasattr(stata_client, "_graph_detector"):
            self.detector = stata_client._graph_detector
        else:
            self.detector = GraphCreationDetector(stata_client)
        self._lock = threading.Lock()
        self._cache_callbacks: List[Callable[[str, bool], None]] = []
        self._graphs_to_cache: List[str] = []
        self._cached_graphs: Set[str] = set()
        self._removed_graphs = set()  # Track removed graphs directly
        self._initial_graphs: Set[str] = set()  # Captured before execution starts

    def add_cache_callback(self, callback: Callable[[str, bool], None]) -> None:
        """Add callback for graph cache events (sync or async callables)."""
        with self._lock:
            self._cache_callbacks.append(callback)

    async def _notify_cache_callbacks(self, graph_name: str, success: bool) -> None:
        """Invoke every registered callback with the cache outcome.

        A snapshot is taken under the lock so concurrent add_cache_callback()
        calls cannot mutate the list while we iterate it.  Awaitable results
        are awaited; callback failures are logged and do not propagate.
        """
        with self._lock:
            callbacks = list(self._cache_callbacks)
        for callback in callbacks:
            try:
                result = callback(graph_name, success)
                if inspect.isawaitable(result):
                    await result
            except Exception as e:
                logger.warning(f"Cache callback failed for {graph_name}: {e}")

    async def _drain_cache_queue(self) -> List[str]:
        """Drain the pending queue: verify, cache, and notify for each graph.

        Shared by both public caching methods (previously duplicated in each).
        Returns the names successfully cached.  If the current graph list
        cannot be fetched, the queue entries taken so far are dropped and an
        empty result is returned, matching the original behavior.
        """
        cached_names: List[str] = []

        with self._lock:
            graphs_to_process = self._graphs_to_cache.copy()
            self._graphs_to_cache.clear()

        # Get current graph list for verification
        try:
            current_graphs = self.stata_client.list_graphs()
        except Exception as e:
            logger.warning(f"Failed to get current graph list: {e}")
            return cached_names

        for graph_name in graphs_to_process:
            if graph_name in current_graphs and graph_name not in self._cached_graphs:
                try:
                    # cache_graph_on_creation may block; run it off the event loop.
                    success = await asyncio.to_thread(self.stata_client.cache_graph_on_creation, graph_name)
                    if success:
                        cached_names.append(graph_name)
                        with self._lock:
                            self._cached_graphs.add(graph_name)

                    # Notify callbacks (on success or failure)
                    await self._notify_cache_callbacks(graph_name, success)

                except Exception as e:
                    logger.warning(f"Failed to cache graph {graph_name}: {e}")
                    # Still notify callbacks of failure
                    await self._notify_cache_callbacks(graph_name, False)

        return cached_names

    async def cache_detected_graphs_with_pystata(self) -> List[str]:
        """Enhanced caching method that uses pystata for real-time graph detection.

        First asks the detector for newly created/modified graphs and queues
        any not already cached or removed, then drains the queue.
        """
        if not self.auto_cache:
            return []

        # First, try to get any newly detected graphs via pystata state
        if self.stata_client:
            try:
                pystata_detected = self.detector._detect_graphs_via_pystata()

                # Add any newly detected graphs to cache queue
                for graph_name in pystata_detected:
                    if graph_name not in self._cached_graphs and graph_name not in self._removed_graphs:
                        self._graphs_to_cache.append(graph_name)

            except Exception as e:
                logger.warning(f"Failed to get pystata graph updates: {e}")

        return await self._drain_cache_queue()

    async def cache_detected_graphs(self) -> List[str]:
        """Cache all graphs currently queued for caching."""
        if not self.auto_cache:
            return []
        return await self._drain_cache_queue()

    def get_cache_stats(self) -> dict:
        """Get caching statistics (counts of pending/cached/detected/removed)."""
        with self._lock:
            return {
                "auto_cache_enabled": self.auto_cache,
                "pending_cache_count": len(self._graphs_to_cache),
                "cached_graphs_count": len(self._cached_graphs),
                "detected_graphs_count": len(self.detector._detected_graphs),
                "removed_graphs_count": len(self.detector._removed_graphs),
            }

    def reset(self) -> None:
        """Reset the cache state, including removed/initial graph tracking.

        Previously _removed_graphs/_initial_graphs survived a reset and could
        suppress caching of re-created graphs in the next run.
        """
        with self._lock:
            self._graphs_to_cache.clear()
            self._cached_graphs.clear()
            self._removed_graphs.clear()
            self._initial_graphs.clear()
        self.detector.clear_detection_state()
mcp_stata/models.py ADDED
@@ -0,0 +1,62 @@
1
+ from typing import List, Optional, Dict, Any
2
+ from pydantic import BaseModel
3
+
4
+
5
class ErrorEnvelope(BaseModel):
    """Detailed error information for a failed Stata command.

    Only ``message`` is required; the remaining fields are populated when the
    corresponding context (return code, offending line, log file, captured
    output) is available at the point of failure.
    """

    message: str
    rc: Optional[int] = None  # presumably the Stata return code — confirm at call sites
    line: Optional[int] = None
    command: Optional[str] = None
    log_path: Optional[str] = None
    context: Optional[str] = None
    stdout: Optional[str] = None
    stderr: Optional[str] = None
    snippet: Optional[str] = None
    trace: Optional[bool] = None
    smcl_output: Optional[str] = None
17
+
18
+
19
class CommandResponse(BaseModel):
    """Result of executing a single Stata command."""

    command: str
    rc: int
    stdout: str
    stderr: Optional[str] = None
    log_path: Optional[str] = None
    success: bool
    # NOTE(review): error looks like it is set when success is False — verify against producers.
    error: Optional[ErrorEnvelope] = None
    smcl_output: Optional[str] = None
28
+
29
+
30
class DataResponse(BaseModel):
    """A page of dataset rows: ``count`` records beginning at offset ``start``."""

    start: int
    count: int
    data: List[Dict[str, Any]]  # one dict per row — presumably keyed by variable name; confirm
34
+
35
+
36
class VariableInfo(BaseModel):
    """Metadata describing a single dataset variable."""

    name: str
    label: Optional[str] = None
    # NOTE: field name shadows the ``type`` builtin; kept as-is for wire compatibility.
    type: Optional[str] = None
40
+
41
+
42
class VariablesResponse(BaseModel):
    """Response wrapper carrying the list of dataset variables."""

    variables: List[VariableInfo]
44
+
45
+
46
class GraphInfo(BaseModel):
    """A named graph and whether it is the active one (defaults to inactive)."""

    name: str
    active: bool = False
49
+
50
+
51
class GraphListResponse(BaseModel):
    """Response wrapper carrying the list of known graphs."""

    graphs: List[GraphInfo]
53
+
54
+
55
class GraphExport(BaseModel):
    """An exported graph; ``file_path`` is None when no file was produced."""

    name: str
    file_path: Optional[str] = None
58
+
59
+
60
class GraphExportResponse(BaseModel):
    """Response wrapper carrying the list of graph export results."""

    graphs: List[GraphExport]
62
+
@@ -0,0 +1,87 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ from typing import Iterable, Any, Tuple
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
+ try:
9
+ from mcp_stata import _native_ops as _native
10
+ except Exception: # pragma: no cover - optional module
11
+ _native = None
12
+
13
+
14
+ def argsort_numeric(
15
+ columns: Iterable["numpy.ndarray"],
16
+ descending: list[bool],
17
+ nulls_last: list[bool],
18
+ ) -> list[int] | None:
19
+ if _native is None:
20
+ return None
21
+ cols = list(columns)
22
+ if not cols:
23
+ return []
24
+ try:
25
+ return _native.argsort_numeric(cols, descending, nulls_last)
26
+ except Exception as e:
27
+ logger.warning(f"Native numeric sort failed: {e}")
28
+ return None
29
+
30
+
31
+ def argsort_mixed(
32
+ columns: Iterable[object],
33
+ is_string: list[bool],
34
+ descending: list[bool],
35
+ nulls_last: list[bool],
36
+ ) -> list[int] | None:
37
+ if _native is None:
38
+ return None
39
+ cols = list(columns)
40
+ if not cols:
41
+ return []
42
+ try:
43
+ return _native.argsort_mixed(cols, is_string, descending, nulls_last)
44
+ except Exception as e:
45
+ logger.warning(f"Native mixed sort failed: {e}")
46
+ return None
47
+
48
+
49
+ def smcl_to_markdown(smcl_text: str) -> str | None:
50
+ if _native is None:
51
+ return None
52
+ try:
53
+ return _native.smcl_to_markdown(smcl_text)
54
+ except Exception as e:
55
+ logger.warning(f"Native SMCL conversion failed: {e}")
56
+ return None
57
+
58
+
59
+ def fast_scan_log(smcl_content: str, rc_default: int) -> Tuple[str, str, int | None] | None:
60
+ if _native is None:
61
+ return None
62
+ try:
63
+ return _native.fast_scan_log(smcl_content, rc_default)
64
+ except Exception as e:
65
+ logger.warning(f"Native log scanning failed: {e}")
66
+ return None
67
+
68
+
69
+ def compute_filter_indices(
70
+ data_numeric: dict[str, "numpy.ndarray"],
71
+ data_string: dict[str, list[str]],
72
+ filter_json: str,
73
+ row_count: int,
74
+ ) -> list[int] | None:
75
+ if _native is None:
76
+ return None
77
+ try:
78
+ return _native.compute_filter_indices(
79
+ data_numeric,
80
+ data_string,
81
+ filter_json,
82
+ row_count
83
+ )
84
+ except Exception as e:
85
+ logger.warning(f"Native filtering failed: {e}")
86
+ return None
87
+