helix.fhir.client.sdk 4.2.3 (py3-none-any.whl) → 4.2.19 (py3-none-any.whl)

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (37)
  1. helix_fhir_client_sdk/fhir_auth_mixin.py +17 -10
  2. helix_fhir_client_sdk/fhir_client.py +152 -79
  3. helix_fhir_client_sdk/fhir_delete_mixin.py +62 -48
  4. helix_fhir_client_sdk/fhir_merge_mixin.py +188 -166
  5. helix_fhir_client_sdk/fhir_merge_resources_mixin.py +200 -15
  6. helix_fhir_client_sdk/fhir_patch_mixin.py +97 -84
  7. helix_fhir_client_sdk/fhir_update_mixin.py +71 -57
  8. helix_fhir_client_sdk/graph/simulated_graph_processor_mixin.py +147 -49
  9. helix_fhir_client_sdk/open_telemetry/__init__.py +0 -0
  10. helix_fhir_client_sdk/open_telemetry/attribute_names.py +7 -0
  11. helix_fhir_client_sdk/open_telemetry/span_names.py +12 -0
  12. helix_fhir_client_sdk/queue/request_queue_mixin.py +17 -12
  13. helix_fhir_client_sdk/responses/fhir_client_protocol.py +10 -6
  14. helix_fhir_client_sdk/responses/fhir_get_response.py +3 -4
  15. helix_fhir_client_sdk/responses/fhir_response_processor.py +73 -54
  16. helix_fhir_client_sdk/responses/get/fhir_get_bundle_response.py +49 -28
  17. helix_fhir_client_sdk/responses/get/fhir_get_error_response.py +0 -1
  18. helix_fhir_client_sdk/responses/get/fhir_get_list_by_resource_type_response.py +1 -1
  19. helix_fhir_client_sdk/responses/get/fhir_get_list_response.py +1 -1
  20. helix_fhir_client_sdk/responses/get/fhir_get_response_factory.py +0 -1
  21. helix_fhir_client_sdk/responses/get/fhir_get_single_response.py +1 -1
  22. helix_fhir_client_sdk/responses/merge/fhir_merge_resource_response_entry.py +30 -0
  23. helix_fhir_client_sdk/responses/resource_separator.py +35 -40
  24. helix_fhir_client_sdk/utilities/cache/request_cache.py +32 -43
  25. helix_fhir_client_sdk/utilities/retryable_aiohttp_client.py +185 -154
  26. helix_fhir_client_sdk/utilities/retryable_aiohttp_response.py +2 -1
  27. helix_fhir_client_sdk/validators/async_fhir_validator.py +3 -0
  28. helix_fhir_client_sdk-4.2.19.dist-info/METADATA +200 -0
  29. {helix_fhir_client_sdk-4.2.3.dist-info → helix_fhir_client_sdk-4.2.19.dist-info}/RECORD +36 -29
  30. tests/async/test_benchmark_compress.py +448 -0
  31. tests/async/test_benchmark_merge.py +506 -0
  32. tests/async/test_retryable_client_session_management.py +159 -0
  33. tests/test_fhir_client_clone.py +155 -0
  34. helix_fhir_client_sdk-4.2.3.dist-info/METADATA +0 -115
  35. {helix_fhir_client_sdk-4.2.3.dist-info → helix_fhir_client_sdk-4.2.19.dist-info}/WHEEL +0 -0
  36. {helix_fhir_client_sdk-4.2.3.dist-info → helix_fhir_client_sdk-4.2.19.dist-info}/licenses/LICENSE +0 -0
  37. {helix_fhir_client_sdk-4.2.3.dist-info → helix_fhir_client_sdk-4.2.19.dist-info}/top_level.txt +0 -0
tests/async/test_benchmark_merge.py (new file)
@@ -0,0 +1,506 @@
+"""
+Benchmark tests for comparing compressed vs uncompressed FHIR client merge operations.
+
+These tests measure the performance of:
+- merge_async() with compress=True vs compress=False
+
+=============================================================================
+HOW TO RUN THESE TESTS
+=============================================================================
+
+1. Start services using docker-compose:
+   docker-compose up -d mock-server
+
+2. First time only - rebuild dev container to include pytest-benchmark:
+   docker-compose build dev
+
+   OR install pytest-benchmark in the running container:
+   docker-compose run --rm dev pip install pytest-benchmark
+
+3. Run benchmark tests inside docker container:
+   docker-compose run --rm dev pytest tests/async/test_benchmark_merge.py -v --benchmark-only
+
+4. Or run all benchmark variations:
+   docker-compose run --rm dev pytest tests/async/test_benchmark_merge.py -v --benchmark-only --benchmark-group-by=func
+
+5. Save benchmark results for comparison:
+   docker-compose run --rm dev pytest tests/async/test_benchmark_merge.py -v --benchmark-autosave
+
+6. Compare with previous runs:
+   docker-compose run --rm dev pytest tests/async/test_benchmark_merge.py -v --benchmark-compare
+
+7. Run with more iterations for accuracy:
+   docker-compose run --rm dev pytest tests/async/test_benchmark_merge.py -v --benchmark-min-rounds=10
+
+8. To stop mock-server:
+   docker-compose down mock-server
+
+=============================================================================
+"""
+
+import asyncio
+import json
+import socket
+from typing import Any
+
+import pytest
+from mockserver_client.mockserver_client import (
+    MockServerFriendlyClient,
+    mock_request,
+    mock_response,
+    times,
+)
+
+from helix_fhir_client_sdk.fhir_client import FhirClient
+from helix_fhir_client_sdk.responses.fhir_merge_response import FhirMergeResponse
+
+
+def is_mock_server_running(host: str = "mock-server", port: int = 1080) -> bool:
+    """Check if mock-server is reachable."""
+    try:
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        sock.settimeout(2)
+        result = sock.connect_ex((host, port))
+        sock.close()
+        return result == 0
+    except OSError:
+        return False
+
+
+# Skip all tests if the mock-server is not running
+pytestmark = pytest.mark.skipif(
+    not is_mock_server_running(), reason="Mock server not running. Start with: docker-compose up -d mock-server"
+)
+
+
+def generate_patient_resource(index: int) -> dict[str, Any]:
+    """Generate a realistic FHIR Patient resource."""
+    return {
+        "resourceType": "Patient",
+        "id": f"patient-{index}",
+        "meta": {
+            "versionId": "1",
+            "lastUpdated": "2025-01-15T10:30:00.000Z",
+            "source": "http://example.org/fhir",
+            "profile": ["http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient"],
+        },
+        "identifier": [
+            {
+                "use": "official",
+                "type": {
+                    "coding": [
+                        {
+                            "system": "http://terminology.hl7.org/CodeSystem/v2-0203",
+                            "code": "MR",
+                            "display": "Medical Record Number",
+                        }
+                    ]
+                },
+                "system": "http://hospital.example.org/mrn",
+                "value": f"MRN-{index:08d}",
+            },
+            {
+                "use": "official",
+                "type": {
+                    "coding": [
+                        {
+                            "system": "http://terminology.hl7.org/CodeSystem/v2-0203",
+                            "code": "SS",
+                            "display": "Social Security Number",
+                        }
+                    ]
+                },
+                "system": "http://hl7.org/fhir/sid/us-ssn",
+                "value": f"{100 + index:03d}-{50 + index:02d}-{1000 + index:04d}",
+            },
+        ],
+        "active": True,
+        "name": [
+            {
+                "use": "official",
+                "family": f"TestFamily{index}",
+                "given": [f"TestGiven{index}", f"MiddleName{index}"],
+                "prefix": ["Mr."],
+                "suffix": ["Jr."],
+            },
+            {
+                "use": "nickname",
+                "given": [f"Nick{index}"],
+            },
+        ],
+        "telecom": [
+            {"system": "phone", "value": f"555-{100 + index:03d}-{1000 + index:04d}", "use": "home"},
+            {"system": "phone", "value": f"555-{200 + index:03d}-{2000 + index:04d}", "use": "mobile"},
+            {"system": "email", "value": f"patient{index}@example.com", "use": "home"},
+        ],
+        "gender": "male" if index % 2 == 0 else "female",
+        "birthDate": f"{1950 + (index % 50)}-{(index % 12) + 1:02d}-{(index % 28) + 1:02d}",
+        "deceasedBoolean": False,
+        "address": [
+            {
+                "use": "home",
+                "type": "physical",
+                "line": [f"{100 + index} Main Street", f"Apt {index}"],
+                "city": "Boston",
+                "state": "MA",
+                "postalCode": f"02{100 + (index % 900):03d}",
+                "country": "USA",
+            },
+            {
+                "use": "work",
+                "type": "postal",
+                "line": [f"{200 + index} Business Ave"],
+                "city": "Cambridge",
+                "state": "MA",
+                "postalCode": f"02{200 + (index % 800):03d}",
+                "country": "USA",
+            },
+        ],
+        "maritalStatus": {
+            "coding": [
+                {
+                    "system": "http://terminology.hl7.org/CodeSystem/v3-MaritalStatus",
+                    "code": "M" if index % 2 == 0 else "S",
+                    "display": "Married" if index % 2 == 0 else "Never Married",
+                }
+            ]
+        },
+        "communication": [
+            {
+                "language": {
+                    "coding": [
+                        {
+                            "system": "urn:ietf:bcp:47",
+                            "code": "en-US",
+                            "display": "English (United States)",
+                        }
+                    ]
+                },
+                "preferred": True,
+            }
+        ],
+        "generalPractitioner": [{"reference": f"Practitioner/practitioner-{index % 10}"}],
+        "managingOrganization": {"reference": "Organization/org-1"},
+    }
+
+
+def generate_patient_resources_list(count: int) -> list[dict[str, Any]]:
+    """Generate a list of FHIR Patient resources."""
+    return [generate_patient_resource(i) for i in range(count)]
+
+
+def generate_merge_response(count: int) -> list[dict[str, Any]]:
+    """Generate a merge response for the given count of resources."""
+    return [{"created": 1, "updated": 0} for _ in range(count)]
+
+
+@pytest.fixture(scope="module")
+def mock_server_url() -> str:
+    return "http://mock-server:1080"
+
+
+@pytest.fixture(scope="module")
+def mock_client(mock_server_url: str) -> MockServerFriendlyClient:
+    return MockServerFriendlyClient(base_url=mock_server_url)
+
+
+@pytest.fixture(scope="module")
+def setup_mock_merge_endpoints(mock_client: MockServerFriendlyClient, mock_server_url: str) -> str:
+    """Set up mock endpoints for merge operations with different payload sizes."""
+    test_name = "benchmark_merge"
+
+    mock_client.clear(f"/{test_name}/*.*")
+    mock_client.reset()
+
+    # Create payloads of different sizes for benchmarking
+    payload_sizes = {
+        "small": 10,  # 10 patients
+        "medium": 100,  # 100 patients
+        "large": 500,  # 500 patients
+    }
+
+    # Setup mock endpoints for each payload size - using regex to match any request body
+    for size, count in payload_sizes.items():
+        response_body = json.dumps(generate_merge_response(count))
+
+        # Endpoint for POST /Patient/1/$merge (single resource merge)
+        mock_client.expect(
+            request=mock_request(
+                path=f"/{test_name}/{size}/Patient/1/$merge",
+                method="POST",
+            ),
+            response=mock_response(body=response_body),
+            timing=times(10000),  # Allow many requests for benchmarking
+        )
+
+        # Endpoint for POST /Patient/$merge (batch merge)
+        mock_client.expect(
+            request=mock_request(
+                path=f"/{test_name}/{size}/Patient/$merge",
+                method="POST",
+            ),
+            response=mock_response(body=response_body),
+            timing=times(10000),
+        )
+
+    return f"{mock_server_url}/{test_name}"
+
+
+# ============================================================================
+# Benchmark Tests for merge_async() - Small Payload (10 patients)
+# ============================================================================
+
+
+def test_benchmark_merge_async_compress_false_small(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark merge_async with compress=False and small payload (10 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/small"
+    resources = generate_patient_resources_list(10)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(False)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(id_="1", json_data_list=json_data_list)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+def test_benchmark_merge_async_compress_true_small(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark merge_async with compress=True and small payload (10 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/small"
+    resources = generate_patient_resources_list(10)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(True)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(id_="1", json_data_list=json_data_list)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+# ============================================================================
+# Benchmark Tests for merge_async() - Medium Payload (100 patients)
+# ============================================================================
+
+
+def test_benchmark_merge_async_compress_false_medium(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark merge_async with compress=False and medium payload (100 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/medium"
+    resources = generate_patient_resources_list(100)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(False)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(id_="1", json_data_list=json_data_list)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+def test_benchmark_merge_async_compress_true_medium(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark merge_async with compress=True and medium payload (100 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/medium"
+    resources = generate_patient_resources_list(100)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(True)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(id_="1", json_data_list=json_data_list)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+# ============================================================================
+# Benchmark Tests for merge_async() - Large Payload (500 patients)
+# ============================================================================
+
+
+def test_benchmark_merge_async_compress_false_large(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark merge_async with compress=False and large payload (500 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/large"
+    resources = generate_patient_resources_list(500)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(False)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(id_="1", json_data_list=json_data_list)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+def test_benchmark_merge_async_compress_true_large(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark merge_async with compress=True and large payload (500 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/large"
+    resources = generate_patient_resources_list(500)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(True)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(id_="1", json_data_list=json_data_list)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+# ============================================================================
+# Benchmark Tests for batch merge_async() - Multiple resources in single call
+# ============================================================================
+
+
+def test_benchmark_batch_merge_async_compress_false_small(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark batch merge_async with compress=False and small payload (10 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/small"
+    resources = generate_patient_resources_list(10)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(False)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(json_data_list=json_data_list, batch_size=10)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+def test_benchmark_batch_merge_async_compress_true_small(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark batch merge_async with compress=True and small payload (10 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/small"
+    resources = generate_patient_resources_list(10)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(True)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(json_data_list=json_data_list, batch_size=10)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+def test_benchmark_batch_merge_async_compress_false_medium(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark batch merge_async with compress=False and medium payload (100 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/medium"
+    resources = generate_patient_resources_list(100)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(False)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(json_data_list=json_data_list, batch_size=50)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+def test_benchmark_batch_merge_async_compress_true_medium(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark batch merge_async with compress=True and medium payload (100 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/medium"
+    resources = generate_patient_resources_list(100)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(True)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(json_data_list=json_data_list, batch_size=50)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+def test_benchmark_batch_merge_async_compress_false_large(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark batch merge_async with compress=False and large payload (500 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/large"
+    resources = generate_patient_resources_list(500)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(False)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(json_data_list=json_data_list, batch_size=100)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
+
+
+def test_benchmark_batch_merge_async_compress_true_large(benchmark: Any, setup_mock_merge_endpoints: str) -> None:
+    """Benchmark batch merge_async with compress=True and large payload (500 patients)."""
+    base_url = f"{setup_mock_merge_endpoints}/large"
+    resources = generate_patient_resources_list(500)
+    json_data_list = [json.dumps(r) for r in resources]
+
+    async def run_merge_async() -> FhirMergeResponse | None:
+        fhir_client = FhirClient().url(base_url).resource("Patient")
+        fhir_client = fhir_client.compress(True)
+        return await FhirMergeResponse.from_async_generator(
+            fhir_client.merge_async(json_data_list=json_data_list, batch_size=100)
+        )
+
+    def run_sync() -> FhirMergeResponse | None:
+        return asyncio.run(run_merge_async())
+
+    result = benchmark(run_sync)
+    assert result is not None
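The twelve benchmarks above vary only the payload size, the batch mode, and a single client flag. For orientation, here is a minimal sketch (not part of the diff) of the pattern they all exercise, assuming the fluent FhirClient API shown in the tests; the server URL is a placeholder:

import asyncio
import json

from helix_fhir_client_sdk.fhir_client import FhirClient
from helix_fhir_client_sdk.responses.fhir_merge_response import FhirMergeResponse


async def merge_patients(compress: bool) -> FhirMergeResponse | None:
    # Placeholder URL; the benchmarks point this at mock-server instead
    fhir_client = FhirClient().url("http://fhir-server:3000/4_0_0").resource("Patient")
    # The one switch the benchmarks compare: send request bodies compressed or not
    fhir_client = fhir_client.compress(compress)
    json_data_list = [json.dumps({"resourceType": "Patient", "id": "example-1"})]
    # merge_async yields responses as an async generator; collect them into one response
    return await FhirMergeResponse.from_async_generator(
        fhir_client.merge_async(id_="1", json_data_list=json_data_list)
    )


if __name__ == "__main__":
    print(asyncio.run(merge_patients(compress=True)))

The small/medium/large variants then answer whether the CPU cost of compressing the body is repaid by the smaller payload on the wire at each size.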
tests/async/test_retryable_client_session_management.py (new file)
@@ -0,0 +1,159 @@
+"""
+Tests for RetryableAioHttpClient session lifecycle management.
+
+This module tests that sessions are properly closed or kept open depending on
+whether they were created internally or provided by the user.
+"""
+
+import aiohttp
+import pytest
+
+from helix_fhir_client_sdk.utilities.retryable_aiohttp_client import RetryableAioHttpClient
+
+
+@pytest.mark.asyncio
+async def test_internal_session_is_closed_after_exit() -> None:
+    """Test that internally created sessions are closed when context exits"""
+    client = RetryableAioHttpClient(
+        retries=1,
+        refresh_token_func=None,
+        tracer_request_func=None,
+        fn_get_session=None,  # No custom factory - SDK will create session
+        use_data_streaming=False,
+        access_token=None,
+        access_token_expiry_date=None,
+    )
+
+    async with client:
+        # Session should be created
+        assert client.session is not None
+        assert not client.session.closed
+        session_ref = client.session
+
+    # After exiting context, the internal session should be closed
+    assert session_ref.closed
+
+
+@pytest.mark.asyncio
+async def test_user_provided_session_is_not_closed_after_exit() -> None:
+    """Test that user-provided sessions are NOT closed when context exits"""
+    # User creates their own session
+    user_session = aiohttp.ClientSession()
+
+    try:
+        # Provide a factory that returns the user's session
+        # Set caller_managed_session=True so SDK will NOT close the session
+        client = RetryableAioHttpClient(
+            retries=1,
+            refresh_token_func=None,
+            tracer_request_func=None,
+            fn_get_session=lambda: user_session,  # User provides a custom factory
+            caller_managed_session=True,  # User manages session lifecycle
+            use_data_streaming=False,
+            access_token=None,
+            access_token_expiry_date=None,
+        )
+
+        async with client:
+            # Session should be the user's session
+            assert client.session is user_session
+            assert not client.session.closed
+
+        # After exiting context, the user's session should still be open
+        # because caller_managed_session=True (caller manages session lifecycle)
+        assert not user_session.closed
+
+    finally:
+        # User closes their own session
+        await user_session.close()
+        assert user_session.closed
+
+
+@pytest.mark.asyncio
+async def test_multiple_clients_can_share_user_session() -> None:
+    """Test that multiple RetryableAioHttpClient instances can share the same user session"""
+    # User creates a persistent session
+    shared_session = aiohttp.ClientSession()
+
+    try:
+        # Multiple clients share the same session
+        async with RetryableAioHttpClient(
+            retries=1,
+            refresh_token_func=None,
+            tracer_request_func=None,
+            fn_get_session=lambda: shared_session,
+            caller_managed_session=True,  # User manages session lifecycle
+            use_data_streaming=False,
+            access_token=None,
+            access_token_expiry_date=None,
+        ) as client1:
+            assert client1.session is shared_session
+            assert not shared_session.closed
+
+        # Session should still be open after the first client exits
+        assert not shared_session.closed
+
+        # The second client can reuse the same session
+        async with RetryableAioHttpClient(
+            retries=1,
+            refresh_token_func=None,
+            tracer_request_func=None,
+            fn_get_session=lambda: shared_session,
+            caller_managed_session=True,  # User manages session lifecycle
+            use_data_streaming=False,
+            access_token=None,
+            access_token_expiry_date=None,
+        ) as client2:
+            assert client2.session is shared_session
+            assert not shared_session.closed
+
+        # Session should still be open after the second client exits
+        assert not shared_session.closed
+
+    finally:
+        # User closes the shared session when done
+        await shared_session.close()
+        assert shared_session.closed
+
+
+@pytest.mark.asyncio
+async def test_user_can_recreate_closed_session_via_factory() -> None:
+    """Test that a user's factory can be called multiple times if session gets closed"""
+    call_count = 0
+
+    def session_factory() -> aiohttp.ClientSession:
+        nonlocal call_count
+        call_count += 1
+        return aiohttp.ClientSession()
+
+    created_sessions = []
+
+    try:
+        # First client call
+        async with RetryableAioHttpClient(
+            retries=1,
+            refresh_token_func=None,
+            tracer_request_func=None,
+            fn_get_session=session_factory,
+            caller_managed_session=True,  # User manages session lifecycle
+            use_data_streaming=False,
+            access_token=None,
+            access_token_expiry_date=None,
+        ) as client1:
+            assert client1.session is not None
+            created_sessions.append(client1.session)
+            assert call_count == 1  # Factory called once in __aenter__
+
+        # SDK doesn't close session (caller_managed_session=True)
+        assert created_sessions[0] is not None
+        assert not created_sessions[0].closed
+
+        # User could manually close and recreate via factory if needed
+        # (This demonstrates the pattern, though in practice the factory
+        # would handle closed session detection)
+
+    finally:
+        # Clean up all created sessions
+        for session in created_sessions:
+            if session is not None and not session.closed:
+                await session.close()
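The contract these four tests pin down (the SDK closes only sessions it created itself) comes down to two constructor arguments, fn_get_session and caller_managed_session. A minimal usage sketch (not part of the diff), assuming the RetryableAioHttpClient arguments shown above and a caller-owned session:

import asyncio

import aiohttp

from helix_fhir_client_sdk.utilities.retryable_aiohttp_client import RetryableAioHttpClient


async def main() -> None:
    # One long-lived session owned by the caller, reusable across clients
    shared_session = aiohttp.ClientSession()
    try:
        async with RetryableAioHttpClient(
            retries=1,
            refresh_token_func=None,
            tracer_request_func=None,
            fn_get_session=lambda: shared_session,
            caller_managed_session=True,  # SDK will not close this session on exit
            use_data_streaming=False,
            access_token=None,
            access_token_expiry_date=None,
        ) as client:
            # The client uses the caller's session rather than creating its own
            assert client.session is shared_session
    finally:
        # The caller owns the session, so the caller closes it
        await shared_session.close()


asyncio.run(main())

Omitting fn_get_session (leaving it None) falls back to the first test's behavior: the SDK creates its own session and closes it when the context exits.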