hccinfhir 0.1.3__tar.gz → 0.1.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hccinfhir-0.1.5/PKG-INFO +693 -0
- hccinfhir-0.1.5/README.md +679 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/__init__.py +2 -2
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/datamodels.py +17 -15
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/extractor_837.py +89 -22
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/hccinfhir.py +6 -5
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/model_calculate.py +19 -15
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/model_interactions.py +1 -0
- hccinfhir-0.1.5/hccinfhir/sample_files/sample_837_12.txt +113 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/samples.py +15 -15
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/pyproject.toml +1 -1
- hccinfhir-0.1.3/PKG-INFO +0 -390
- hccinfhir-0.1.3/README.md +0 -376
- hccinfhir-0.1.3/hccinfhir/sample_utils.py +0 -252
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/.gitignore +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/LICENSE +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/__init__.py +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/hcc_is_chronic.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/ra_coefficients_2025.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/ra_coefficients_2026.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/ra_dx_to_cc_2025.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/ra_dx_to_cc_2026.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/ra_eligible_cpt_hcpcs_2023.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/ra_eligible_cpt_hcpcs_2024.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/ra_eligible_cpt_hcpcs_2025.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/ra_eligible_cpt_hcpcs_2026.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/ra_hierarchies_2025.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/data/ra_hierarchies_2026.csv +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/extractor.py +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/extractor_fhir.py +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/filter.py +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/model_coefficients.py +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/model_demographics.py +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/model_dx_to_cc.py +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/model_hierarchies.py +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/__init__.py +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_0.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_1.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_10.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_11.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_2.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_3.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_4.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_5.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_6.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_7.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_8.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_837_9.txt +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_eob_1.json +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_eob_2.json +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_eob_200.ndjson +0 -0
- {hccinfhir-0.1.3/hccinfhir/samples → hccinfhir-0.1.5/hccinfhir/sample_files}/sample_eob_3.json +0 -0
- {hccinfhir-0.1.3 → hccinfhir-0.1.5}/hccinfhir/utils.py +0 -0
hccinfhir-0.1.5/PKG-INFO
ADDED
|
@@ -0,0 +1,693 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: hccinfhir
|
|
3
|
+
Version: 0.1.5
|
|
4
|
+
Summary: HCC Algorithm for FHIR Resources
|
|
5
|
+
Project-URL: Homepage, https://github.com/mimilabs/hccinfhir
|
|
6
|
+
Project-URL: Issues, https://github.com/mimilabs/hccinfhir/issues
|
|
7
|
+
Author-email: Yubin Park <yubin.park@mimilabs.ai>
|
|
8
|
+
Classifier: License :: OSI Approved :: Apache Software License
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Requires-Python: >=3.8
|
|
12
|
+
Requires-Dist: pydantic>=2.10.3
|
|
13
|
+
Description-Content-Type: text/markdown
|
|
14
|
+
|
|
15
|
+
# HCCInFHIR
|
|
16
|
+
|
|
17
|
+
[](https://badge.fury.io/py/hccinfhir)
|
|
18
|
+
[](https://www.python.org/downloads/)
|
|
19
|
+
[](https://opensource.org/licenses/Apache-2.0)
|
|
20
|
+
|
|
21
|
+
A Python library for calculating HCC (Hierarchical Condition Category) risk adjustment scores from healthcare claims data. Supports multiple data sources including FHIR resources, X12 837 claims, and CMS encounter data.
|
|
22
|
+
|
|
23
|
+
## 🚀 Quick Start
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
pip install hccinfhir
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
```python
|
|
30
|
+
from hccinfhir import HCCInFHIR, Demographics
|
|
31
|
+
|
|
32
|
+
# Initialize processor
|
|
33
|
+
processor = HCCInFHIR(model_name="CMS-HCC Model V28")
|
|
34
|
+
|
|
35
|
+
# Calculate from diagnosis codes
|
|
36
|
+
demographics = Demographics(age=67, sex="F")
|
|
37
|
+
diagnosis_codes = ["E11.9", "I10", "N18.3"]
|
|
38
|
+
|
|
39
|
+
result = processor.calculate_from_diagnosis(diagnosis_codes, demographics)
|
|
40
|
+
print(f"Risk Score: {result.risk_score}")
|
|
41
|
+
print(f"HCCs: {result.hcc_list}")
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
## 📋 Table of Contents
|
|
45
|
+
|
|
46
|
+
- [Data Sources & Use Cases](#data-sources--use-cases)
|
|
47
|
+
- [Installation](#installation)
|
|
48
|
+
- [How-To Guides](#how-to-guides)
|
|
49
|
+
- [Working with CMS Encounter Data (EDRs)](#working-with-cms-encounter-data-edrs)
|
|
50
|
+
- [Processing 837 Claims from Clearinghouses](#processing-837-claims-from-clearinghouses)
|
|
51
|
+
- [Using CMS BCDA API Data](#using-cms-bcda-api-data)
|
|
52
|
+
- [Direct Diagnosis Code Processing](#direct-diagnosis-code-processing)
|
|
53
|
+
- [Model Configuration](#model-configuration)
|
|
54
|
+
- [API Reference](#api-reference)
|
|
55
|
+
- [Sample Data](#sample-data)
|
|
56
|
+
- [Advanced Usage](#advanced-usage)
|
|
57
|
+
|
|
58
|
+
## 📊 Data Sources & Use Cases
|
|
59
|
+
|
|
60
|
+
HCCInFHIR supports three primary data sources for HCC risk adjustment calculations:
|
|
61
|
+
|
|
62
|
+
### 1. **CMS Encounter Data Records (EDRs)**
|
|
63
|
+
- **Input**: X12 837 5010 transaction files (text format) + demographic data from payers
|
|
64
|
+
- **Use Case**: Medicare Advantage plans processing encounter data for CMS submissions
|
|
65
|
+
- **Output**: Risk scores with detailed HCC mappings and interactions
|
|
66
|
+
|
|
67
|
+
### 2. **Clearinghouse 837 Claims**
|
|
68
|
+
- **Input**: X12 837 5010 institutional/professional claim files + patient demographics
|
|
69
|
+
- **Use Case**: Health plans and providers calculating risk scores from claims data
|
|
70
|
+
- **Output**: Service-level analysis with filtering and risk score calculations
|
|
71
|
+
|
|
72
|
+
### 3. **CMS BCDA API Data**
|
|
73
|
+
- **Input**: FHIR ExplanationOfBenefit resources from Blue Button 2.0 API
|
|
74
|
+
- **Use Case**: Applications processing Medicare beneficiary data via BCDA
|
|
75
|
+
- **Output**: Standardized risk adjustment calculations from FHIR resources
|
|
76
|
+
|
|
77
|
+
### 4. **Direct Diagnosis Processing**
|
|
78
|
+
- **Input**: Diagnosis codes + demographics
|
|
79
|
+
- **Use Case**: Quick risk score validation or research applications
|
|
80
|
+
- **Output**: HCC mappings and risk scores without claims context
|
|
81
|
+
|
|
82
|
+
## 🛠️ Installation
|
|
83
|
+
|
|
84
|
+
### Basic Installation
|
|
85
|
+
```bash
|
|
86
|
+
pip install hccinfhir
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
### Development Installation
|
|
90
|
+
```bash
|
|
91
|
+
git clone https://github.com/mimilabs/hccinfhir.git
|
|
92
|
+
cd hccinfhir
|
|
93
|
+
pip install -e .
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
### Requirements
|
|
97
|
+
- Python 3.8+
|
|
98
|
+
- Pydantic >= 2.10.3
|
|
99
|
+
|
|
100
|
+
## 📖 How-To Guides
|
|
101
|
+
|
|
102
|
+
### Working with CMS Encounter Data (EDRs)
|
|
103
|
+
|
|
104
|
+
**Scenario**: You're a Medicare Advantage plan processing encounter data for CMS risk adjustment.
|
|
105
|
+
|
|
106
|
+
**What you need**:
|
|
107
|
+
- X12 837 envelope files from your claims system
|
|
108
|
+
- Demographic data (age, sex, eligibility status) for each beneficiary
|
|
109
|
+
- Knowledge of your plan's model year and HCC model version
|
|
110
|
+
|
|
111
|
+
```python
|
|
112
|
+
from hccinfhir import HCCInFHIR, Demographics, get_837_sample
|
|
113
|
+
from hccinfhir.extractor import extract_sld
|
|
114
|
+
|
|
115
|
+
# Step 1: Configure processor for your model year and version
|
|
116
|
+
processor = HCCInFHIR(
|
|
117
|
+
model_name="CMS-HCC Model V28", # CMS model version
|
|
118
|
+
filter_claims=True, # Apply CMS filtering rules
|
|
119
|
+
dx_cc_mapping_filename="ra_dx_to_cc_2026.csv",
|
|
120
|
+
proc_filtering_filename="ra_eligible_cpt_hcpcs_2026.csv"
|
|
121
|
+
)
|
|
122
|
+
|
|
123
|
+
# Step 2: Load your 837 data
|
|
124
|
+
with open("encounter_data.txt", "r") as f:
|
|
125
|
+
raw_837_data = f.read()
|
|
126
|
+
|
|
127
|
+
# Step 3: Extract service-level data
|
|
128
|
+
service_data = extract_sld(raw_837_data, format="837")
|
|
129
|
+
|
|
130
|
+
# Step 4: Define beneficiary demographics
|
|
131
|
+
demographics = Demographics(
|
|
132
|
+
age=72,
|
|
133
|
+
sex="M",
|
|
134
|
+
dual_elgbl_cd="00", # Dual eligibility status
|
|
135
|
+
orig_disabled=False, # Original disability status
|
|
136
|
+
new_enrollee=False, # New enrollee flag
|
|
137
|
+
esrd=False # ESRD status
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
# Step 5: Calculate risk score
|
|
141
|
+
result = processor.run_from_service_data(service_data, demographics)
|
|
142
|
+
|
|
143
|
+
# Step 6: Review results
|
|
144
|
+
print(f"Beneficiary Risk Score: {result.risk_score}")
|
|
145
|
+
print(f"Active HCCs: {result.hcc_list}")
|
|
146
|
+
print(f"Disease Interactions: {result.interactions}")
|
|
147
|
+
|
|
148
|
+
# Export results for CMS submission
|
|
149
|
+
encounter_summary = {
|
|
150
|
+
"beneficiary_id": "example_id",
|
|
151
|
+
"risk_score": result.risk_score,
|
|
152
|
+
"hcc_list": result.hcc_list,
|
|
153
|
+
"model_version": "V28",
|
|
154
|
+
"payment_year": 2026
|
|
155
|
+
}
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
### Processing 837 Claims from Clearinghouses
|
|
159
|
+
|
|
160
|
+
**Scenario**: You're a health plan receiving 837 files from clearinghouses and need to calculate member risk scores.
|
|
161
|
+
|
|
162
|
+
```python
|
|
163
|
+
from hccinfhir import HCCInFHIR, Demographics
|
|
164
|
+
from hccinfhir.extractor import extract_sld_list
|
|
165
|
+
|
|
166
|
+
# Configure for institutional and professional claims
|
|
167
|
+
processor = HCCInFHIR(
|
|
168
|
+
model_name="CMS-HCC Model V28",
|
|
169
|
+
filter_claims=True, # Enable CMS filtering
|
|
170
|
+
dx_cc_mapping_filename="ra_dx_to_cc_2026.csv"
|
|
171
|
+
)
|
|
172
|
+
|
|
173
|
+
# Process multiple 837 files
|
|
174
|
+
claim_files = ["inst_claims.txt", "prof_claims.txt"]
|
|
175
|
+
all_service_data = []
|
|
176
|
+
|
|
177
|
+
for file_path in claim_files:
|
|
178
|
+
with open(file_path, "r") as f:
|
|
179
|
+
claims_data = f.read()
|
|
180
|
+
|
|
181
|
+
# Extract service data from each file
|
|
182
|
+
service_data = extract_sld_list([claims_data], format="837")
|
|
183
|
+
all_service_data.extend(service_data)
|
|
184
|
+
|
|
185
|
+
# Member demographics (typically from your enrollment system)
|
|
186
|
+
member_demographics = Demographics(
|
|
187
|
+
age=45,
|
|
188
|
+
sex="F",
|
|
189
|
+
dual_elgbl_cd="02", # Full dual eligible
|
|
190
|
+
orig_disabled=True, # Originally disabled
|
|
191
|
+
new_enrollee=False
|
|
192
|
+
)
|
|
193
|
+
|
|
194
|
+
# Calculate comprehensive risk score
|
|
195
|
+
result = processor.run_from_service_data(all_service_data, member_demographics)
|
|
196
|
+
|
|
197
|
+
# Analyze by service type
|
|
198
|
+
professional_services = [svc for svc in result.service_level_data if svc.claim_type == "71"]
|
|
199
|
+
institutional_services = [svc for svc in result.service_level_data if svc.claim_type == "72"]
|
|
200
|
+
|
|
201
|
+
print(f"Member Risk Score: {result.risk_score}")
|
|
202
|
+
print(f"Professional Claims: {len(professional_services)}")
|
|
203
|
+
print(f"Institutional Claims: {len(institutional_services)}")
|
|
204
|
+
```
|
|
205
|
+
|
|
206
|
+
### Using CMS BCDA API Data
|
|
207
|
+
|
|
208
|
+
**Scenario**: You're building an application that processes Medicare beneficiary data from the BCDA API.
|
|
209
|
+
|
|
210
|
+
```python
|
|
211
|
+
from hccinfhir import HCCInFHIR, Demographics, get_eob_sample_list
|
|
212
|
+
from hccinfhir.extractor import extract_sld_list
|
|
213
|
+
import requests
|
|
214
|
+
|
|
215
|
+
# Configure processor for BCDA data
|
|
216
|
+
processor = HCCInFHIR(
|
|
217
|
+
model_name="CMS-HCC Model V24", # BCDA typically uses V24
|
|
218
|
+
filter_claims=True,
|
|
219
|
+
dx_cc_mapping_filename="ra_dx_to_cc_2025.csv"
|
|
220
|
+
)
|
|
221
|
+
|
|
222
|
+
# Fetch EOB data from BCDA (example using sample data)
|
|
223
|
+
eob_resources = get_eob_sample_list(limit=50) # Replace with actual BCDA API call
|
|
224
|
+
|
|
225
|
+
# Extract service-level data from FHIR resources
|
|
226
|
+
service_data = extract_sld_list(eob_resources, format="fhir")
|
|
227
|
+
|
|
228
|
+
# BCDA provides beneficiary demographics in the EOB
|
|
229
|
+
# Extract demographics from the first EOB resource
|
|
230
|
+
first_eob = eob_resources[0]
|
|
231
|
+
beneficiary_ref = first_eob.get("patient", {}).get("reference", "")
|
|
232
|
+
|
|
233
|
+
# You would typically look up demographics from your system
|
|
234
|
+
demographics = Demographics(
|
|
235
|
+
age=68,
|
|
236
|
+
sex="M",
|
|
237
|
+
dual_elgbl_cd="00",
|
|
238
|
+
new_enrollee=False,
|
|
239
|
+
esrd=False
|
|
240
|
+
)
|
|
241
|
+
|
|
242
|
+
# Process FHIR data
|
|
243
|
+
result = processor.run(eob_resources, demographics)
|
|
244
|
+
|
|
245
|
+
# BCDA-specific analysis
|
|
246
|
+
print(f"Beneficiary: {beneficiary_ref}")
|
|
247
|
+
print(f"Risk Score: {result.risk_score}")
|
|
248
|
+
print(f"Data Source: BCDA API")
|
|
249
|
+
print(f"HCC Categories: {', '.join(map(str, result.hcc_list))}")
|
|
250
|
+
|
|
251
|
+
# Examine service utilization patterns
|
|
252
|
+
service_dates = [svc.service_date for svc in result.service_level_data if svc.service_date]
|
|
253
|
+
if service_dates:
|
|
254
|
+
print(f"Service Period: {min(service_dates)} to {max(service_dates)}")
|
|
255
|
+
```
|
|
256
|
+
|
|
257
|
+
### Direct Diagnosis Code Processing
|
|
258
|
+
|
|
259
|
+
**Scenario**: You need to quickly validate HCC mappings or calculate risk scores for research purposes.
|
|
260
|
+
|
|
261
|
+
```python
|
|
262
|
+
from hccinfhir import HCCInFHIR, Demographics
|
|
263
|
+
|
|
264
|
+
# Simple setup for diagnosis-only processing
|
|
265
|
+
processor = HCCInFHIR(model_name="CMS-HCC Model V28")
|
|
266
|
+
|
|
267
|
+
# Define patient population
|
|
268
|
+
demographics = Demographics(
|
|
269
|
+
age=75,
|
|
270
|
+
sex="F",
|
|
271
|
+
dual_elgbl_cd="02", # Dual eligible
|
|
272
|
+
orig_disabled=False,
|
|
273
|
+
new_enrollee=False,
|
|
274
|
+
esrd=False
|
|
275
|
+
)
|
|
276
|
+
|
|
277
|
+
# Diagnosis codes from clinical encounter
|
|
278
|
+
diagnosis_codes = [
|
|
279
|
+
"E11.9", # Type 2 diabetes without complications
|
|
280
|
+
"I10", # Essential hypertension
|
|
281
|
+
"N18.3", # Chronic kidney disease, stage 3
|
|
282
|
+
"F32.9", # Major depressive disorder
|
|
283
|
+
"M79.3" # Panniculitis
|
|
284
|
+
]
|
|
285
|
+
|
|
286
|
+
# Calculate risk score
|
|
287
|
+
result = processor.calculate_from_diagnosis(diagnosis_codes, demographics)
|
|
288
|
+
|
|
289
|
+
# Detailed analysis
|
|
290
|
+
print("=== HCC Risk Analysis ===")
|
|
291
|
+
print(f"Risk Score: {result.risk_score:.3f}")
|
|
292
|
+
print(f"HCC Categories: {result.hcc_list}")
|
|
293
|
+
|
|
294
|
+
# Show diagnosis to HCC mappings
|
|
295
|
+
print("\nDiagnosis Mappings:")
|
|
296
|
+
for cc, dx_list in result.cc_to_dx.items():
|
|
297
|
+
print(f" CC {cc}: {', '.join(dx_list)}")
|
|
298
|
+
|
|
299
|
+
# Show applied coefficients
|
|
300
|
+
print(f"\nApplied Coefficients: {len(result.coefficients)}")
|
|
301
|
+
for coeff_name, value in result.coefficients.items():
|
|
302
|
+
print(f" {coeff_name}: {value}")
|
|
303
|
+
|
|
304
|
+
# Check for interactions
|
|
305
|
+
if result.interactions:
|
|
306
|
+
print(f"\nDisease Interactions: {len(result.interactions)}")
|
|
307
|
+
for interaction, value in result.interactions.items():
|
|
308
|
+
print(f" {interaction}: {value}")
|
|
309
|
+
```
|
|
310
|
+
|
|
311
|
+
## ⚙️ Model Configuration
|
|
312
|
+
|
|
313
|
+
### Supported HCC Models
|
|
314
|
+
|
|
315
|
+
| Model Name | Model Years | Use Case |
|
|
316
|
+
|------------|-------------|----------|
|
|
317
|
+
| `"CMS-HCC Model V22"` | 2024-2025 | Community populations |
|
|
318
|
+
| `"CMS-HCC Model V24"` | 2024-2026 | Community populations (current) |
|
|
319
|
+
| `"CMS-HCC Model V28"` | 2025-2026 | Community populations (latest) |
|
|
320
|
+
| `"CMS-HCC ESRD Model V21"` | 2024-2025 | ESRD populations |
|
|
321
|
+
| `"CMS-HCC ESRD Model V24"` | 2025-2026 | ESRD populations |
|
|
322
|
+
| `"RxHCC Model V08"` | 2024-2026 | Part D prescription drug coverage |
|
|
323
|
+
|
|
324
|
+
### Configuration Parameters
|
|
325
|
+
|
|
326
|
+
```python
|
|
327
|
+
processor = HCCInFHIR(
|
|
328
|
+
# Core model settings
|
|
329
|
+
model_name="CMS-HCC Model V28", # Required: HCC model version
|
|
330
|
+
|
|
331
|
+
# Filtering options
|
|
332
|
+
filter_claims=True, # Apply CMS filtering rules
|
|
333
|
+
|
|
334
|
+
# Custom data files (optional)
|
|
335
|
+
dx_cc_mapping_filename="ra_dx_to_cc_2026.csv", # Diagnosis to CC mapping
|
|
336
|
+
proc_filtering_filename="ra_eligible_cpt_hcpcs_2026.csv", # Procedure code filtering
|
|
337
|
+
)
|
|
338
|
+
```
|
|
339
|
+
|
|
340
|
+
### Demographics Configuration
|
|
341
|
+
|
|
342
|
+
```python
|
|
343
|
+
from hccinfhir import Demographics
|
|
344
|
+
|
|
345
|
+
demographics = Demographics(
|
|
346
|
+
# Required fields
|
|
347
|
+
age=67, # Age in years
|
|
348
|
+
sex="F", # "M" or "F"
|
|
349
|
+
|
|
350
|
+
# CMS-specific fields
|
|
351
|
+
dual_elgbl_cd="00", # Dual eligibility: "00"=Non-dual, "01"=Partial, "02"=Full
|
|
352
|
+
orig_disabled=False, # Original reason for Medicare entitlement was disability
|
|
353
|
+
new_enrollee=False, # New Medicare enrollee (< 12 months)
|
|
354
|
+
esrd=False, # End-Stage Renal Disease status
|
|
355
|
+
|
|
356
|
+
# Optional fields
|
|
357
|
+
snp=False, # Special Needs Plan member
|
|
358
|
+
low_income=False, # Low-income subsidy
|
|
359
|
+
graft_months=None, # Months since kidney transplant (for ESRD models)
|
|
360
|
+
category="CNA" # Beneficiary category (auto-calculated if not provided)
|
|
361
|
+
)
|
|
362
|
+
```
|
|
363
|
+
|
|
364
|
+
### Data File Specifications
|
|
365
|
+
|
|
366
|
+
The library includes CMS reference data files for 2025 and 2026. You can override with custom files:
|
|
367
|
+
|
|
368
|
+
```python
|
|
369
|
+
# Use custom mapping files
|
|
370
|
+
processor = HCCInFHIR(
|
|
371
|
+
model_name="CMS-HCC Model V28",
|
|
372
|
+
dx_cc_mapping_filename="custom_dx_mapping.csv", # Format: diagnosis_code,cc,model_name
|
|
373
|
+
proc_filtering_filename="custom_procedures.csv" # Format: cpt_hcpcs_code
|
|
374
|
+
)
|
|
375
|
+
```
|
|
376
|
+
|
|
377
|
+
## 📚 API Reference
|
|
378
|
+
|
|
379
|
+
### Main Classes
|
|
380
|
+
|
|
381
|
+
#### `HCCInFHIR`
|
|
382
|
+
Main processor class for HCC calculations.
|
|
383
|
+
|
|
384
|
+
**Methods**:
|
|
385
|
+
- `run(eob_list, demographics)` - Process FHIR ExplanationOfBenefit resources
|
|
386
|
+
- `run_from_service_data(service_data, demographics)` - Process service-level data
|
|
387
|
+
- `calculate_from_diagnosis(diagnosis_codes, demographics)` - Calculate from diagnosis codes only
|
|
388
|
+
|
|
389
|
+
#### `Demographics`
|
|
390
|
+
Patient demographic information for risk adjustment.
|
|
391
|
+
|
|
392
|
+
**Fields**:
|
|
393
|
+
- `age: int` - Patient age in years
|
|
394
|
+
- `sex: str` - Patient sex ("M" or "F")
|
|
395
|
+
- `dual_elgbl_cd: str` - Dual eligibility code
|
|
396
|
+
- `orig_disabled: bool` - Original disability status
|
|
397
|
+
- `new_enrollee: bool` - New enrollee flag
|
|
398
|
+
- `esrd: bool` - ESRD status
|
|
399
|
+
|
|
400
|
+
#### `RAFResult`
|
|
401
|
+
Risk adjustment calculation results.
|
|
402
|
+
|
|
403
|
+
**Fields**:
|
|
404
|
+
- `risk_score: float` - Final RAF score
|
|
405
|
+
- `risk_score_demographics: float` - Demographics-only risk score
|
|
406
|
+
- `risk_score_chronic_only: float` - Chronic conditions risk score
|
|
407
|
+
- `risk_score_hcc: float` - HCC conditions risk score
|
|
408
|
+
- `hcc_list: List[str]` - List of active HCC categories
|
|
409
|
+
- `cc_to_dx: Dict[str, Set[str]]` - Condition categories mapped to diagnosis codes
|
|
410
|
+
- `coefficients: Dict[str, float]` - Applied model coefficients
|
|
411
|
+
- `interactions: Dict[str, float]` - Disease interaction coefficients
|
|
412
|
+
- `demographics: Demographics` - Patient demographics used in calculation
|
|
413
|
+
- `model_name: ModelName` - HCC model used for calculation
|
|
414
|
+
- `version: str` - Library version
|
|
415
|
+
- `diagnosis_codes: List[str]` - Input diagnosis codes
|
|
416
|
+
- `service_level_data: Optional[List[ServiceLevelData]]` - Processed service records (when applicable)
|
|
417
|
+
|
|
418
|
+
### Utility Functions
|
|
419
|
+
|
|
420
|
+
```python
|
|
421
|
+
from hccinfhir import (
|
|
422
|
+
get_eob_sample, # Get sample FHIR EOB data
|
|
423
|
+
get_837_sample, # Get sample 837 claim data
|
|
424
|
+
list_available_samples, # List all available sample data
|
|
425
|
+
extract_sld, # Extract service-level data from single resource
|
|
426
|
+
extract_sld_list, # Extract service-level data from multiple resources
|
|
427
|
+
apply_filter # Apply CMS filtering rules to service data
|
|
428
|
+
)
|
|
429
|
+
```
|
|
430
|
+
|
|
431
|
+
## 📝 Sample Data
|
|
432
|
+
|
|
433
|
+
The library includes comprehensive sample data for testing and development:
|
|
434
|
+
|
|
435
|
+
```python
|
|
436
|
+
from hccinfhir import (
    get_eob_sample, get_eob_sample_list,
    get_837_sample, get_837_sample_list,
    list_available_samples
)
|
|
437
|
+
|
|
438
|
+
# FHIR ExplanationOfBenefit samples
|
|
439
|
+
eob_data = get_eob_sample(1) # Individual EOB (cases 1, 2, 3)
|
|
440
|
+
eob_list = get_eob_sample_list(limit=10) # Up to 200 EOB resources
|
|
441
|
+
|
|
442
|
+
# X12 837 claim samples
|
|
443
|
+
claim_data = get_837_sample(0) # Individual 837 claim (cases 0-12)
|
|
444
|
+
claim_list = get_837_sample_list([0, 1, 2]) # Multiple 837 claims
|
|
445
|
+
|
|
446
|
+
# Sample information
|
|
447
|
+
sample_info = list_available_samples()
|
|
448
|
+
print(f"Available EOB samples: {len(sample_info['eob_case_numbers'])}")
|
|
449
|
+
print(f"Available 837 samples: {len(sample_info['837_case_numbers'])}")
|
|
450
|
+
```
|
|
451
|
+
|
|
452
|
+
## 🔧 Advanced Usage
|
|
453
|
+
|
|
454
|
+
### Converting to Dictionary Format
|
|
455
|
+
|
|
456
|
+
If you need to work with regular Python dictionaries (e.g., for JSON serialization, database storage, or legacy code compatibility), you can easily convert Pydantic models using built-in methods:
|
|
457
|
+
|
|
458
|
+
```python
|
|
459
|
+
from hccinfhir import HCCInFHIR, Demographics
|
|
460
|
+
|
|
461
|
+
processor = HCCInFHIR(model_name="CMS-HCC Model V28")
|
|
462
|
+
demographics = Demographics(age=67, sex="F")
|
|
463
|
+
diagnosis_codes = ["E11.9", "I10"]
|
|
464
|
+
|
|
465
|
+
# Get Pydantic model result
|
|
466
|
+
result = processor.calculate_from_diagnosis(diagnosis_codes, demographics)
|
|
467
|
+
print(f"Risk Score: {result.risk_score}") # Pydantic attribute access
|
|
468
|
+
|
|
469
|
+
# Convert to dictionary
|
|
470
|
+
result_dict = result.model_dump()
|
|
471
|
+
print(f"Risk Score: {result_dict['risk_score']}") # Dictionary access
|
|
472
|
+
|
|
473
|
+
# Convert with different modes
|
|
474
|
+
result_json_compatible = result.model_dump(mode='json') # JSON-serializable types
|
|
475
|
+
result_python_types = result.model_dump(mode='python') # Python native types (default)
|
|
476
|
+
|
|
477
|
+
# Convert only specific fields
|
|
478
|
+
partial_dict = result.model_dump(include={'risk_score', 'hcc_list', 'demographics'})
|
|
479
|
+
|
|
480
|
+
# Convert excluding certain fields
|
|
481
|
+
summary_dict = result.model_dump(exclude={'service_level_data', 'interactions'})
|
|
482
|
+
|
|
483
|
+
# Convert to JSON string directly
|
|
484
|
+
json_string = result.model_dump_json() # Returns JSON string
|
|
485
|
+
```
|
|
486
|
+
|
|
487
|
+
#### Working with Nested Models
|
|
488
|
+
|
|
489
|
+
```python
|
|
490
|
+
# Demographics also support dictionary conversion
|
|
491
|
+
demographics_dict = result.demographics.model_dump()
|
|
492
|
+
print(demographics_dict)
|
|
493
|
+
# Output: {'age': 67, 'sex': 'F', 'dual_elgbl_cd': '00', ...}
|
|
494
|
+
|
|
495
|
+
# Service data conversion (list of Pydantic models)
|
|
496
|
+
if result.service_level_data:
|
|
497
|
+
service_dicts = [svc.model_dump() for svc in result.service_level_data]
|
|
498
|
+
```
|
|
499
|
+
|
|
500
|
+
#### Common Use Cases
|
|
501
|
+
|
|
502
|
+
**1. API Responses:**
|
|
503
|
+
```python
|
|
504
|
+
# FastAPI automatically handles Pydantic models, but for other frameworks:
|
|
505
|
+
@app.route('/calculate')
|
|
506
|
+
def calculate_risk():
|
|
507
|
+
result = processor.calculate_from_diagnosis(diagnosis_codes, demographics)
|
|
508
|
+
return jsonify(result.model_dump(mode='json')) # JSON-safe types
|
|
509
|
+
```
|
|
510
|
+
|
|
511
|
+
**2. Database Storage:**
|
|
512
|
+
```python
|
|
513
|
+
# Store in database
|
|
514
|
+
result_data = result.model_dump(exclude={'service_level_data'}) # Exclude large nested data
|
|
515
|
+
db.risks.insert_one(result_data)
|
|
516
|
+
```
|
|
517
|
+
|
|
518
|
+
**3. Legacy Code Integration:**
|
|
519
|
+
```python
|
|
520
|
+
# Working with existing code that expects dictionaries
|
|
521
|
+
def legacy_function(risk_data):
|
|
522
|
+
return risk_data['risk_score'] * risk_data['demographics']['age']
|
|
523
|
+
|
|
524
|
+
# Easy conversion
|
|
525
|
+
result_dict = result.model_dump()
|
|
526
|
+
legacy_result = legacy_function(result_dict)
|
|
527
|
+
```
|
|
528
|
+
|
|
529
|
+
**4. Custom Serialization:**
|
|
530
|
+
```python
|
|
531
|
+
# Custom formatting for specific needs
|
|
532
|
+
export_data = result.model_dump(
|
|
533
|
+
include={'risk_score', 'hcc_list', 'model_name'},
|
|
534
|
+
mode='json'
|
|
535
|
+
)
|
|
536
|
+
```
|
|
537
|
+
|
|
538
|
+
### Custom Filtering Rules
|
|
539
|
+
|
|
540
|
+
```python
|
|
541
|
+
from hccinfhir.filter import apply_filter
|
|
542
|
+
|
|
543
|
+
# Apply custom filtering to service data
|
|
544
|
+
filtered_data = apply_filter(
|
|
545
|
+
service_data,
|
|
546
|
+
include_inpatient=True,
|
|
547
|
+
include_outpatient=True,
|
|
548
|
+
eligible_cpt_hcpcs_file="custom_procedures.csv"
|
|
549
|
+
)
|
|
550
|
+
```
|
|
551
|
+
|
|
552
|
+
### Batch Processing
|
|
553
|
+
|
|
554
|
+
```python
|
|
555
|
+
# Process multiple beneficiaries efficiently
|
|
556
|
+
results = []
|
|
557
|
+
for beneficiary_data in beneficiary_list:
|
|
558
|
+
demographics = Demographics(**beneficiary_data['demographics'])
|
|
559
|
+
service_data = beneficiary_data['service_data']
|
|
560
|
+
|
|
561
|
+
result = processor.run_from_service_data(service_data, demographics)
|
|
562
|
+
results.append({
|
|
563
|
+
'beneficiary_id': beneficiary_data['id'],
|
|
564
|
+
'risk_score': result.risk_score,
|
|
565
|
+
'hcc_list': result.hcc_list
|
|
566
|
+
})
|
|
567
|
+
```
|
|
568
|
+
|
|
569
|
+
### Error Handling
|
|
570
|
+
|
|
571
|
+
```python
|
|
572
|
+
from hccinfhir.exceptions import ValidationError, ModelNotFoundError
|
|
573
|
+
|
|
574
|
+
try:
|
|
575
|
+
result = processor.calculate_from_diagnosis(diagnosis_codes, demographics)
|
|
576
|
+
except ValidationError as e:
|
|
577
|
+
print(f"Data validation error: {e}")
|
|
578
|
+
except ModelNotFoundError as e:
|
|
579
|
+
print(f"Model configuration error: {e}")
|
|
580
|
+
```
|
|
581
|
+
|
|
582
|
+
### Custom Valuesets
|
|
583
|
+
|
|
584
|
+
Users can generate custom and more specific valuesets using the mimilabs data lakehouse.
|
|
585
|
+
|
|
586
|
+
For example, the valuesets in the package are created as follows:
|
|
587
|
+
|
|
588
|
+
`ra_dx_to_cc_2026.csv`
|
|
589
|
+
```sql
|
|
590
|
+
WITH latest_years AS (
|
|
591
|
+
SELECT
|
|
592
|
+
model_name,
|
|
593
|
+
MAX(year) as latest_year
|
|
594
|
+
FROM mimi_ws_1.cmspayment.ra_dx_to_cc_mapping
|
|
595
|
+
WHERE model_type = 'Initial'
|
|
596
|
+
AND year <= 2026 -- Don't go beyond 2026
|
|
597
|
+
GROUP BY model_name
|
|
598
|
+
)
|
|
599
|
+
SELECT
|
|
600
|
+
r.diagnosis_code,
|
|
601
|
+
r.cc,
|
|
602
|
+
r.model_name
|
|
603
|
+
FROM mimi_ws_1.cmspayment.ra_dx_to_cc_mapping r
|
|
604
|
+
INNER JOIN latest_years l
|
|
605
|
+
ON r.model_name = l.model_name
|
|
606
|
+
AND r.year = l.latest_year
|
|
607
|
+
WHERE r.model_type = 'Initial'
|
|
608
|
+
ORDER BY r.model_name, r.diagnosis_code;
|
|
609
|
+
```
|
|
610
|
+
|
|
611
|
+
`ra_hierarchies_2026.csv`
|
|
612
|
+
```sql
|
|
613
|
+
WITH latest_dates AS (
|
|
614
|
+
SELECT
|
|
615
|
+
model_domain,
|
|
616
|
+
model_version,
|
|
617
|
+
model_fullname,
|
|
618
|
+
MAX(eff_last_date) as latest_eff_last_date
|
|
619
|
+
FROM mimi_ws_1.cmspayment.ra_hierarchies
|
|
620
|
+
GROUP BY model_domain, model_version, model_fullname
|
|
621
|
+
)
|
|
622
|
+
SELECT
|
|
623
|
+
r.cc_parent,
|
|
624
|
+
r.cc_child,
|
|
625
|
+
r.model_domain,
|
|
626
|
+
r.model_version,
|
|
627
|
+
r.model_fullname
|
|
628
|
+
FROM mimi_ws_1.cmspayment.ra_hierarchies r
|
|
629
|
+
INNER JOIN latest_dates l
|
|
630
|
+
ON r.model_domain = l.model_domain
|
|
631
|
+
AND r.model_version = l.model_version
|
|
632
|
+
AND r.model_fullname = l.model_fullname
|
|
633
|
+
AND r.eff_last_date = l.latest_eff_last_date
|
|
634
|
+
ORDER BY r.model_domain, r.model_version, r.model_fullname, r.cc_parent, r.cc_child;
|
|
635
|
+
```
|
|
636
|
+
|
|
637
|
+
|
|
638
|
+
`ra_coefficients_2026.csv`
|
|
639
|
+
```sql
|
|
640
|
+
WITH preferred_records AS (
|
|
641
|
+
SELECT
|
|
642
|
+
model_domain,
|
|
643
|
+
model_version,
|
|
644
|
+
MAX(eff_last_date) as latest_eff_last_date
|
|
645
|
+
FROM mimi_ws_1.cmspayment.ra_coefficients
|
|
646
|
+
GROUP BY model_domain, model_version
|
|
647
|
+
)
|
|
648
|
+
SELECT
|
|
649
|
+
r.coefficient,
|
|
650
|
+
r.value,
|
|
651
|
+
r.model_domain,
|
|
652
|
+
r.model_version
|
|
653
|
+
FROM mimi_ws_1.cmspayment.ra_coefficients r
|
|
654
|
+
INNER JOIN preferred_records p
|
|
655
|
+
ON r.model_domain = p.model_domain
|
|
656
|
+
AND r.model_version = p.model_version
|
|
657
|
+
AND r.eff_last_date = p.latest_eff_last_date
|
|
658
|
+
ORDER BY r.model_domain, r.model_version, r.coefficient;
|
|
659
|
+
```
|
|
660
|
+
|
|
661
|
+
`ra_eligible_cpt_hcpcs_2026.csv`
|
|
662
|
+
```sql
|
|
663
|
+
SELECT DISTINCT cpt_hcpcs_code
|
|
664
|
+
FROM mimi_ws_1.cmspayment.ra_eligible_cpt_hcpcs
|
|
665
|
+
WHERE is_included = 'yes' AND YEAR(mimi_src_file_date) = 2025;
|
|
666
|
+
```
|
|
667
|
+
|
|
668
|
+
|
|
669
|
+
## 🧪 Testing
|
|
670
|
+
|
|
671
|
+
```bash
|
|
672
|
+
$ hatch shell
|
|
673
|
+
$ pip install -e .
|
|
674
|
+
$ pytest tests/*
|
|
675
|
+
```
|
|
676
|
+
|
|
677
|
+
## 📄 License
|
|
678
|
+
|
|
679
|
+
Apache License 2.0. See [LICENSE](LICENSE) for details.
|
|
680
|
+
|
|
681
|
+
## 🤝 Contributing
|
|
682
|
+
|
|
683
|
+
We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
|
|
684
|
+
|
|
685
|
+
## 📞 Support
|
|
686
|
+
|
|
687
|
+
- **Documentation**: [https://hccinfhir.readthedocs.io](https://hccinfhir.readthedocs.io)
|
|
688
|
+
- **Issues**: [GitHub Issues](https://github.com/mimilabs/hccinfhir/issues)
|
|
689
|
+
- **Discussions**: [GitHub Discussions](https://github.com/mimilabs/hccinfhir/discussions)
|
|
690
|
+
|
|
691
|
+
---
|
|
692
|
+
|
|
693
|
+
**Made with ❤️ by the HCCInFHIR team**
|