braintrust 0.4.3__py3-none-any.whl → 0.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- braintrust/__init__.py +3 -0
- braintrust/_generated_types.py +106 -6
- braintrust/auto.py +179 -0
- braintrust/conftest.py +23 -4
- braintrust/framework.py +113 -3
- braintrust/functions/invoke.py +3 -1
- braintrust/functions/test_invoke.py +61 -0
- braintrust/generated_types.py +7 -1
- braintrust/logger.py +127 -45
- braintrust/oai.py +51 -0
- braintrust/span_cache.py +337 -0
- braintrust/span_identifier_v3.py +21 -0
- braintrust/test_bt_json.py +0 -5
- braintrust/test_framework.py +37 -0
- braintrust/test_http.py +444 -0
- braintrust/test_logger.py +295 -5
- braintrust/test_span_cache.py +344 -0
- braintrust/test_trace.py +267 -0
- braintrust/test_util.py +58 -1
- braintrust/trace.py +385 -0
- braintrust/util.py +20 -0
- braintrust/version.py +2 -2
- braintrust/wrappers/agno/__init__.py +2 -3
- braintrust/wrappers/anthropic.py +64 -0
- braintrust/wrappers/claude_agent_sdk/__init__.py +2 -3
- braintrust/wrappers/claude_agent_sdk/_wrapper.py +48 -6
- braintrust/wrappers/claude_agent_sdk/test_wrapper.py +115 -0
- braintrust/wrappers/dspy.py +52 -1
- braintrust/wrappers/google_genai/__init__.py +9 -6
- braintrust/wrappers/litellm.py +6 -43
- braintrust/wrappers/pydantic_ai.py +2 -3
- braintrust/wrappers/test_agno.py +9 -0
- braintrust/wrappers/test_anthropic.py +156 -0
- braintrust/wrappers/test_dspy.py +117 -0
- braintrust/wrappers/test_google_genai.py +9 -0
- braintrust/wrappers/test_litellm.py +57 -55
- braintrust/wrappers/test_openai.py +253 -1
- braintrust/wrappers/test_pydantic_ai_integration.py +9 -0
- braintrust/wrappers/test_utils.py +79 -0
- {braintrust-0.4.3.dist-info → braintrust-0.5.2.dist-info}/METADATA +1 -1
- {braintrust-0.4.3.dist-info → braintrust-0.5.2.dist-info}/RECORD +44 -37
- {braintrust-0.4.3.dist-info → braintrust-0.5.2.dist-info}/WHEEL +1 -1
- {braintrust-0.4.3.dist-info → braintrust-0.5.2.dist-info}/entry_points.txt +0 -0
- {braintrust-0.4.3.dist-info → braintrust-0.5.2.dist-info}/top_level.txt +0 -0
braintrust/test_http.py
ADDED
|
@@ -0,0 +1,444 @@
|
|
|
1
|
+
"""Tests for HTTP connection handling, retries, and timeouts."""
|
|
2
|
+
|
|
3
|
+
import http.server
|
|
4
|
+
import os
|
|
5
|
+
import socketserver
|
|
6
|
+
import threading
|
|
7
|
+
import time
|
|
8
|
+
|
|
9
|
+
import pytest
|
|
10
|
+
import requests
|
|
11
|
+
from braintrust.logger import HTTPConnection, RetryRequestExceptionsAdapter
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class HangingConnectionHandler(http.server.BaseHTTPRequestHandler):
    """HTTP handler that simulates stale connections by HANGING (not responding).

    This simulates what happens when a NAT gateway silently drops packets:
    - The TCP connection appears open
    - Packets are sent but never acknowledged
    - The client waits forever for a response
    """

    # Class-level state shared across all handler instances: how many requests
    # have been seen so far, and how many of the first requests should hang.
    request_count = 0
    hang_count = 1

    def log_message(self, format, *args):
        # Silence per-request logging so test output stays clean.
        pass

    def do_POST(self):
        # Count every request so tests can assert that a retry happened.
        HangingConnectionHandler.request_count += 1

        if HangingConnectionHandler.request_count <= HangingConnectionHandler.hang_count:
            # Simulate stale connection: hang long enough for client to timeout
            for _ in range(100):  # 10 seconds total, interruptible
                time.sleep(0.1)
            return

        # After the configured number of hangs, respond normally.
        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.end_headers()
        self.wfile.write(b'{"status": "ok"}')

    def do_GET(self):
        # GET behaves exactly like POST (hang first, then succeed).
        self.do_POST()
47
|
+
class CloseConnectionHandler(http.server.BaseHTTPRequestHandler):
    """HTTP handler that closes connection immediately (triggers ConnectionError)."""

    # Class-level state shared across handler instances: total requests seen,
    # and how many of the first requests should be dropped.
    request_count = 0
    fail_count = 1

    def log_message(self, format, *args):
        # Silence per-request logging so test output stays clean.
        pass

    def do_POST(self):
        CloseConnectionHandler.request_count += 1

        if CloseConnectionHandler.request_count <= CloseConnectionHandler.fail_count:
            # Drop the TCP connection without writing a response; the client
            # should see a ConnectionError and retry.
            self.connection.close()
            return

        # After the configured number of failures, respond normally.
        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.end_headers()
        self.wfile.write(b'{"status": "ok"}')

    def do_GET(self):
        # GET behaves exactly like POST (fail first, then succeed).
        self.do_POST()
+
|
|
71
|
+
|
|
72
|
+
@pytest.fixture
def hanging_server():
    """Yield the base URL of a server whose first request hangs (stale-NAT simulation)."""
    # Reset the handler's shared counters so every test starts from a clean slate.
    HangingConnectionHandler.request_count = 0
    HangingConnectionHandler.hang_count = 1

    srv = socketserver.ThreadingTCPServer(("127.0.0.1", 0), HangingConnectionHandler)
    srv.daemon_threads = True
    bound_port = srv.server_address[1]

    serve_thread = threading.Thread(target=srv.serve_forever, daemon=True)
    serve_thread.start()

    yield f"http://127.0.0.1:{bound_port}"

    # Teardown: stop the serve loop and release the listening socket.
    srv.shutdown()
    srv.server_close()
|
|
91
|
+
|
|
92
|
+
@pytest.fixture
def closing_server():
    """Yield the base URL of a server that drops the connection on its first request."""
    # Reset the handler's shared counters so every test starts from a clean slate.
    CloseConnectionHandler.request_count = 0
    CloseConnectionHandler.fail_count = 1

    tcp_server = socketserver.ThreadingTCPServer(("127.0.0.1", 0), CloseConnectionHandler)
    tcp_server.daemon_threads = True
    listen_port = tcp_server.server_address[1]

    worker = threading.Thread(target=tcp_server.serve_forever, daemon=True)
    worker.start()

    yield f"http://127.0.0.1:{listen_port}"

    # Teardown: stop the serve loop and release the listening socket.
    tcp_server.shutdown()
    tcp_server.server_close()
|
111
|
+
|
|
112
|
+
class TestRetryRequestExceptionsAdapter:
    """Tests for RetryRequestExceptionsAdapter timeout and retry behavior."""

    def test_adapter_has_default_timeout(self):
        """A freshly constructed adapter exposes default_timeout_secs == 60."""
        retry_adapter = RetryRequestExceptionsAdapter(base_num_retries=3, backoff_factor=0.1)

        assert hasattr(retry_adapter, "default_timeout_secs")
        assert retry_adapter.default_timeout_secs == 60

    def test_adapter_applies_default_timeout_to_requests(self, hanging_server):
        """A request made without an explicit timeout must honor default_timeout_secs."""
        retry_adapter = RetryRequestExceptionsAdapter(
            base_num_retries=3,
            backoff_factor=0.05,
            default_timeout_secs=0.2,
        )
        http_session = requests.Session()
        http_session.mount("http://", retry_adapter)

        started = time.time()
        response = http_session.post(f"{hanging_server}/test", json={"hello": "world"})
        elapsed = time.time() - started

        # The first attempt hangs, so success proves the timeout fired and a retry ran.
        assert response.status_code == 200
        assert elapsed < 2.0, f"Should complete within 2s, took {elapsed:.2f}s"
        assert HangingConnectionHandler.request_count >= 2

    def test_adapter_retries_on_connection_close(self, closing_server):
        """A dropped connection is retried transparently by the adapter."""
        retry_adapter = RetryRequestExceptionsAdapter(base_num_retries=5, backoff_factor=0.05)
        http_session = requests.Session()
        http_session.mount("http://", retry_adapter)

        started = time.time()
        response = http_session.post(f"{closing_server}/test", json={"hello": "world"})
        elapsed = time.time() - started

        assert response.status_code == 200
        assert elapsed < 5.0
        # At least two requests reached the server: the dropped one plus the retry.
        assert CloseConnectionHandler.request_count >= 2

    def test_adapter_resets_pool_on_timeout(self, hanging_server):
        """A timeout resets the connection pool (via self.close()) before retrying.

        This is the key fix for stale NAT connections: once a request times out,
        the next retry must go out over a fresh connection rather than the stale one.
        """
        retry_adapter = RetryRequestExceptionsAdapter(
            base_num_retries=10,
            backoff_factor=0.05,
            default_timeout_secs=0.2,
        )
        http_session = requests.Session()
        http_session.mount("http://", retry_adapter)

        started = time.time()
        response = http_session.post(f"{hanging_server}/test", json={"hello": "world"})
        elapsed = time.time() - started

        assert response.status_code == 200
        assert elapsed < 10.0, f"Request took too long: {elapsed:.2f}s"
        assert HangingConnectionHandler.request_count >= 2
|
|
177
|
+
class TestHTTPConnection:
    """Tests for HTTPConnection timeout configuration."""

    def test_make_long_lived_uses_default_timeout(self, hanging_server):
        """HTTPConnection.make_long_lived() should pick up default_timeout_secs.

        Reproduces the stale-connection bug scenario:
        - a long eval run (15+ minutes)
        - app_conn() goes stale after a NAT gateway idle timeout
        - summarize() calls fetch_base_experiment()
        - without a timeout, that request would hang forever

        With the fix, make_long_lived() applies default_timeout_secs (60s by default).
        """
        os.environ["BRAINTRUST_HTTP_TIMEOUT"] = "0.2"
        try:
            connection = HTTPConnection(hanging_server)
            connection.make_long_lived()

            # The env var must have been threaded through to the adapter.
            assert hasattr(connection.adapter, "default_timeout_secs")
            assert connection.adapter.default_timeout_secs == 0.2

            started = time.time()
            response = connection.post("/test", json={"hello": "world"})
            elapsed = time.time() - started

            assert response.status_code == 200
            # Allow more time due to backoff_factor=0.5 in make_long_lived()
            assert elapsed < 15.0, f"Should complete within 15s, took {elapsed:.2f}s"
        finally:
            del os.environ["BRAINTRUST_HTTP_TIMEOUT"]

    def test_env_var_configures_timeout(self):
        """BRAINTRUST_HTTP_TIMEOUT env var configures timeout via make_long_lived()."""
        os.environ["BRAINTRUST_HTTP_TIMEOUT"] = "30"
        try:
            connection = HTTPConnection("http://localhost:8080")
            connection.make_long_lived()

            assert hasattr(connection.adapter, "default_timeout_secs")
            assert connection.adapter.default_timeout_secs == 30
        finally:
            del os.environ["BRAINTRUST_HTTP_TIMEOUT"]
|
|
222
|
+
class TestAdapterCloseAndReuse:
    """Tests verifying that adapter.close() allows subsequent requests.

    This addresses the review concern about whether calling self.close()
    (which calls PoolManager.clear()) breaks subsequent request handling.
    """

    @pytest.fixture
    def simple_server(self):
        """Fixture that creates a server that always succeeds."""

        class SimpleHandler(http.server.BaseHTTPRequestHandler):
            # Class-level counter shared across requests for assertions.
            request_count = 0

            def log_message(self, format, *args):
                # Silence per-request logging.
                pass

            def do_GET(self):
                SimpleHandler.request_count += 1
                self.send_response(200)
                self.send_header("Content-Type", "application/json")
                self.end_headers()
                self.wfile.write(b'{"status": "ok"}')

        SimpleHandler.request_count = 0
        server = socketserver.ThreadingTCPServer(("127.0.0.1", 0), SimpleHandler)
        server.daemon_threads = True
        port = server.server_address[1]

        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()

        # Yield both the URL and the handler class so tests can read the counter.
        yield f"http://127.0.0.1:{port}", SimpleHandler

        server.shutdown()
        server.server_close()

    def test_adapter_works_after_close(self, simple_server):
        """Verify adapter.close() does not break subsequent requests.

        This is the key test for the PR feedback: after calling close(),
        the PoolManager should create new connection pools on demand.
        """
        url, handler = simple_server

        adapter = RetryRequestExceptionsAdapter(base_num_retries=3, backoff_factor=0.1)
        session = requests.Session()
        session.mount("http://", adapter)

        # First request works
        resp1 = session.get(f"{url}/test1")
        assert resp1.status_code == 200
        assert handler.request_count == 1

        # Explicitly close the adapter (simulates what happens on timeout)
        adapter.close()

        # Second request should still work after close()
        resp2 = session.get(f"{url}/test2")
        assert resp2.status_code == 200
        assert handler.request_count == 2

    def test_adapter_works_after_multiple_closes(self, simple_server):
        """Verify adapter works even after multiple close() calls."""
        url, handler = simple_server

        adapter = RetryRequestExceptionsAdapter(base_num_retries=3, backoff_factor=0.1)
        session = requests.Session()
        session.mount("http://", adapter)

        # Close after every single request; each following request must still succeed.
        for i in range(3):
            resp = session.get(f"{url}/test{i}")
            assert resp.status_code == 200
            adapter.close()

        assert handler.request_count == 3

    def test_concurrent_requests_with_close(self):
        """Test thread safety: close() called while requests are in-flight.

        This tests a potential race condition where one thread calls close()
        while another thread is mid-request. Requests are staggered to ensure
        close() happens while some requests are in-flight.
        """
        import concurrent.futures

        class SlowHandler(http.server.BaseHTTPRequestHandler):
            # Class-level counter shared across requests.
            request_count = 0

            def log_message(self, format, *args):
                # Silence per-request logging.
                pass

            def do_GET(self):
                SlowHandler.request_count += 1
                # Simulate slow response
                time.sleep(0.1)
                self.send_response(200)
                self.send_header("Content-Type", "application/json")
                self.end_headers()
                self.wfile.write(b'{"status": "ok"}')

        SlowHandler.request_count = 0
        server = socketserver.ThreadingTCPServer(("127.0.0.1", 0), SlowHandler)
        server.daemon_threads = True
        port = server.server_address[1]
        url = f"http://127.0.0.1:{port}"

        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        try:
            adapter = RetryRequestExceptionsAdapter(base_num_retries=3, backoff_factor=0.1)
            session = requests.Session()
            session.mount("http://", adapter)

            errors = []

            def make_request(i):
                try:
                    time.sleep(i * 0.02)  # Stagger requests
                    resp = session.get(f"{url}/test{i}")
                    return resp.status_code
                except Exception as e:
                    errors.append(e)
                    return None

            def close_adapter():
                time.sleep(0.05)  # Close while requests are in-flight
                adapter.close()

            # Launch concurrent requests and a close() call
            with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
                # Start several requests (staggered)
                request_futures = [executor.submit(make_request, i) for i in range(5)]
                # Start close() call mid-flight
                close_future = executor.submit(close_adapter)

                close_future.result()
                results = [f.result() for f in request_futures]

            # All requests should succeed (retry on failure)
            assert all(r == 200 for r in results), f"Some requests failed: {results}, errors: {errors}"

        finally:
            server.shutdown()
            server.server_close()

    def test_stress_concurrent_close_and_requests(self):
        """Stress test: many close() calls interleaved with requests.

        Requests are staggered to ensure close() calls happen during requests.
        """
        import concurrent.futures

        class FastHandler(http.server.BaseHTTPRequestHandler):
            # Class-level counter shared across requests.
            request_count = 0

            def log_message(self, format, *args):
                # Silence per-request logging.
                pass

            def do_GET(self):
                FastHandler.request_count += 1
                self.send_response(200)
                self.send_header("Content-Type", "application/json")
                self.end_headers()
                self.wfile.write(b'{"status": "ok"}')

        FastHandler.request_count = 0
        server = socketserver.ThreadingTCPServer(("127.0.0.1", 0), FastHandler)
        server.daemon_threads = True
        port = server.server_address[1]
        url = f"http://127.0.0.1:{port}"

        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        try:
            adapter = RetryRequestExceptionsAdapter(base_num_retries=5, backoff_factor=0.01)
            session = requests.Session()
            session.mount("http://", adapter)

            errors = []
            success_count = 0
            # Lock guards both success_count and errors from concurrent workers.
            lock = threading.Lock()

            def make_request(i):
                nonlocal success_count
                try:
                    time.sleep(i * 0.005)  # Stagger requests
                    resp = session.get(f"{url}/test{i}")
                    if resp.status_code == 200:
                        with lock:
                            success_count += 1
                    return resp.status_code
                except Exception as e:
                    with lock:
                        errors.append(str(e))
                    return None

            def close_repeatedly():
                for _ in range(20):
                    time.sleep(0.01)  # Close throughout the request window
                    adapter.close()

            # Launch many concurrent requests while repeatedly closing
            with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
                request_futures = [executor.submit(make_request, i) for i in range(50)]
                close_futures = [executor.submit(close_repeatedly) for _ in range(3)]

                # Wait for all
                for f in close_futures:
                    f.result()
                results = [f.result() for f in request_futures]

            failed = [r for r in results if r != 200]
            assert len(failed) == 0, f"Failed requests: {len(failed)}, errors: {errors[:5]}"

        finally:
            server.shutdown()
            server.server_close()