mcp-stata 1.20.0__cp311-abi3-macosx_11_0_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcp-stata might be problematic. Click here for more details.

@@ -0,0 +1,264 @@
1
+ from __future__ import annotations
2
+ import queue
3
+ import threading
4
+ import time
5
+ from typing import Any, Awaitable, Callable, Optional
6
+ import logging
7
+
8
+ import anyio
9
+
10
+
11
+ _SENTINEL = object()
12
+ logger = logging.getLogger("mcp_stata")
13
+
14
+
15
class StreamBuffer:
    """Thread-safe accumulator for streamed text with a hard size cap.

    Once ``max_total_chars`` characters have been stored, the truncation
    marker is appended exactly once and every later write is silently
    dropped. ``write`` always reports the caller's full text length so
    writers never observe a short write.
    """

    def __init__(
        self,
        *,
        max_total_chars: int = 2_000_000,
        truncation_marker: str = "\n... (output truncated)\n",
    ):
        # Guards _parts/_total_chars/_truncated; writers may be on any thread.
        self._lock = threading.Lock()
        self._parts: list[str] = []
        self._total_chars = 0
        self._max_total_chars = max_total_chars
        self._truncation_marker = truncation_marker
        self._truncated = False

    def write(self, data: Any) -> int:
        """Append *data* (normalized to str); return its normalized length."""
        text = self._normalize(data)
        if not text:
            return 0

        reported = len(text)
        with self._lock:
            if self._truncated:
                # Already capped: swallow the text but report success.
                return reported

            budget = self._max_total_chars - self._total_chars
            if budget <= 0:
                # Cap exactly consumed earlier: emit the marker once.
                self._append_locked(self._truncation_marker)
                self._truncated = True
            elif reported <= budget:
                self._append_locked(text)
            else:
                # Keep the prefix that fits, then mark as truncated.
                self._append_locked(text[:budget])
                self._append_locked(self._truncation_marker)
                self._truncated = True
        return reported

    def _append_locked(self, text: str) -> None:
        # Caller must hold self._lock.
        self._parts.append(text)
        self._total_chars += len(text)

    def get_value(self) -> str:
        """Return everything accumulated so far as a single string."""
        with self._lock:
            return "".join(self._parts)

    @staticmethod
    def _normalize(data: Any) -> str:
        """Coerce an arbitrary write() payload to str ('' for None)."""
        if data is None:
            return ""
        if isinstance(data, bytes):
            return data.decode("utf-8", errors="replace")
        return str(data)
67
+
68
+
69
class StreamingTeeIO:
    """File-like writer that tees text into a StreamBuffer and a queue.

    The queue feeds a streaming consumer; writes larger than
    ``max_fragment_chars`` are split so no single queue item is huge.
    ``close()`` enqueues the module sentinel so the consumer knows to stop.
    """

    def __init__(
        self,
        buffer: StreamBuffer,
        q: queue.Queue,
        *,
        max_fragment_chars: int = 4000,
        on_chunk_callback=None,
    ):
        self._buffer = buffer
        self._queue = q
        self._max_fragment_chars = max_fragment_chars
        self._closed = False
        # Protects _closed and the closed-check/enqueue ordering.
        self._lock = threading.Lock()
        self._on_chunk_callback = on_chunk_callback

    def write(self, data: Any) -> int:
        text = StreamBuffer._normalize(data)
        if not text:
            return 0

        written = self._buffer.write(text)

        # Let the chunk callback (e.g. graph detection) see the raw text,
        # but never let its failures break the streaming path.
        callback = self._on_chunk_callback
        if callback:
            try:
                callback(text)
            except Exception:
                logger.debug("Streaming chunk callback failed", exc_info=True)

        with self._lock:
            if self._closed:
                # Consumer is gone; buffer already captured the text.
                return written
            limit = self._max_fragment_chars
            if len(text) <= limit:
                self._queue.put_nowait(text)
            else:
                for start in range(0, len(text), limit):
                    self._queue.put_nowait(text[start : start + limit])
        return written

    def flush(self) -> None:
        """No-op; every write is pushed through immediately."""

    def isatty(self) -> bool:
        return False

    def close(self) -> None:
        """Mark the stream closed (once) and signal the consumer."""
        with self._lock:
            if self._closed:
                return
            self._closed = True
            self._queue.put_nowait(_SENTINEL)
122
+
123
+
124
class TailBuffer:
    """Thread-safe buffer that keeps only the last ``max_chars`` characters.

    Appends are cheap; when the budget is exceeded, whole fragments are
    discarded from the front and the first surviving fragment is trimmed
    so the buffer holds exactly ``max_chars`` characters.
    """

    def __init__(self, *, max_chars: int = 8000):
        self._lock = threading.Lock()
        self._parts: list[str] = []
        self._total = 0
        self._max_chars = max_chars

    def append(self, data: Any) -> None:
        """Add text, then discard from the front until within budget."""
        text = StreamBuffer._normalize(data)
        if not text:
            return

        with self._lock:
            self._parts.append(text)
            self._total += len(text)

            excess = self._total - self._max_chars
            if excess <= 0:
                return

            # Drop whole leading fragments that fit entirely in the excess.
            drop = 0
            while drop < len(self._parts):
                piece = self._parts[drop]
                if len(piece) > excess:
                    break
                drop += 1
                excess -= len(piece)
            if drop:
                del self._parts[:drop]

            # Trim the (now first) fragment if still over budget.
            if excess > 0 and self._parts:
                self._parts[0] = self._parts[0][excess:]

            # Trimming always lands exactly on the budget.
            self._total = self._max_chars

    def get_value(self) -> str:
        """Return the retained tail as a single string."""
        with self._lock:
            return "".join(self._parts)
160
+
161
+
162
class FileTeeIO:
    """Writer that mirrors text to a real file while keeping a tail copy.

    The file is flushed whenever the written text contains a newline, so
    the on-disk log stays reasonably current. Flush/close failures are
    swallowed: logging must never take the caller down.
    """

    def __init__(self, file_obj, tail: TailBuffer):
        self._file = file_obj
        self._tail = tail
        self._lock = threading.Lock()
        self._closed = False

    def write(self, data: Any) -> int:
        text = StreamBuffer._normalize(data)
        if not text:
            return 0

        with self._lock:
            if self._closed:
                # Pretend the write succeeded so callers don't error out.
                return len(text)

            self._tail.append(text)
            self._file.write(text)

            if "\n" in text:
                try:
                    self._file.flush()
                except Exception:
                    # Best effort only; a failed flush must not raise.
                    pass
        return len(text)

    def flush(self) -> None:
        with self._lock:
            if self._closed:
                return
            try:
                self._file.flush()
            except Exception:
                return

    def isatty(self) -> bool:
        return False

    def close(self) -> None:
        """Flush and close the underlying file exactly once."""
        with self._lock:
            if self._closed:
                return
            self._closed = True
            # Flush then close, tolerating failure of either step.
            for finalize in (self._file.flush, self._file.close):
                try:
                    finalize()
                except Exception:
                    pass
213
+
214
+
215
async def drain_queue_and_notify(
    q: queue.Queue,
    notify_log: Callable[[str], Awaitable[None]],
    *,
    min_interval_ms: int = 200,
    max_chunk_chars: int = 4000,
    on_chunk: Optional[Callable[[str], Awaitable[None]]] = None,
) -> None:
    """Pump text fragments off *q* and deliver them in batched chunks.

    Fragments are coalesced until either ``max_chunk_chars`` characters
    accumulate or ``min_interval_ms`` has elapsed since the last send,
    then forwarded to ``on_chunk`` (when given) and ``notify_log``.
    Terminates when the module sentinel is dequeued; any remainder is
    flushed on the way out.
    """
    pending: list[str] = []
    pending_len = 0
    last_send = time.monotonic()
    sleep_s = min_interval_ms / 1000

    def _interval_elapsed() -> bool:
        # True once min_interval_ms has passed since the last send.
        return (time.monotonic() - last_send) * 1000 >= min_interval_ms

    async def _flush() -> None:
        nonlocal pending, pending_len, last_send
        if not pending:
            return
        chunk = "".join(pending)
        pending = []
        pending_len = 0
        if on_chunk is not None:
            await on_chunk(chunk)
        await notify_log(chunk)
        last_send = time.monotonic()

    while True:
        try:
            item = q.get_nowait()
        except queue.Empty:
            # Idle: opportunistically flush on the interval, then nap.
            if pending and _interval_elapsed():
                await _flush()
            await anyio.sleep(sleep_s)
            continue

        if item is _SENTINEL:
            break

        text = StreamBuffer._normalize(item)
        if not text:
            continue

        pending.append(text)
        pending_len += len(text)

        if pending_len >= max_chunk_chars or _interval_elapsed():
            await _flush()

    await _flush()
@@ -0,0 +1,54 @@
1
import stata_setup
stata_setup.config("/Applications/StataNow/", "mp")
from pystata import stata
import tempfile
import os

print("=== Testing multiple concurrent logs ===\n")


def _make_log_path() -> str:
    """Create an empty temp .smcl file and return its path.

    Uses mkstemp instead of the deprecated, race-prone tempfile.mktemp;
    the fd is closed immediately because Stata reopens the file itself
    (``log using ..., replace`` tolerates the pre-existing empty file).
    """
    fd, path = tempfile.mkstemp(suffix=".smcl")
    os.close(fd)
    return path


# Create temp files for logs
log1_path = _make_log_path()
log2_path = _make_log_path()

stata.run("sysuse auto, clear")

try:
    # Start first (unnamed) log - simulating user's log
    print("1. Starting unnamed user log...")
    stata.run(f'log using "{log1_path}", replace smcl')

    # Start second (named) log - our capture log
    print("2. Starting named capture log...")
    stata.run(f'log using "{log2_path}", replace smcl name(_capture)')

    # Run a command - should go to both logs
    print("3. Running command...")
    stata.run("summarize price mpg")

    # Close named log first
    print("4. Closing named log...")
    stata.run("log close _capture")

    # Close unnamed log
    print("5. Closing unnamed log...")
    stata.run("log close")

    print("\n=== SUCCESS: Multiple concurrent logs work! ===\n")

    # Show contents
    print("--- User log contents (first 500 chars) ---")
    with open(log1_path, 'r') as f:
        print(f.read()[:500])

    print("\n--- Capture log contents (first 500 chars) ---")
    with open(log2_path, 'r') as f:
        print(f.read()[:500])

except Exception as e:
    print(f"\n=== FAILED: {e} ===\n")

finally:
    # Cleanup: remove the temp logs regardless of outcome
    for p in (log1_path, log2_path):
        if os.path.exists(p):
            os.unlink(p)