h_adminsim-1.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- h_adminsim/__init__.py +5 -0
- h_adminsim/admin_staff.py +280 -0
- h_adminsim/assets/configs/data4primary.yaml +47 -0
- h_adminsim/assets/configs/data4secondary.yaml +47 -0
- h_adminsim/assets/configs/data4tertiary.yaml +47 -0
- h_adminsim/assets/country/address.json +141859 -0
- h_adminsim/assets/country/country_code.json +244 -0
- h_adminsim/assets/departments/department.json +85 -0
- h_adminsim/assets/departments/symptom.json +4530 -0
- h_adminsim/assets/fhir.schema.json +75253 -0
- h_adminsim/assets/names/firstname.txt +1219 -0
- h_adminsim/assets/names/lastname.txt +88799 -0
- h_adminsim/assets/prompts/cancel_patient_system.txt +38 -0
- h_adminsim/assets/prompts/intake_staff_task_user.txt +16 -0
- h_adminsim/assets/prompts/intake_supervisor_system.txt +8 -0
- h_adminsim/assets/prompts/intake_supervisor_user.txt +31 -0
- h_adminsim/assets/prompts/reschedule_patient_system.txt +38 -0
- h_adminsim/assets/prompts/schedule_patient_rejected_system.txt +42 -0
- h_adminsim/assets/prompts/schedule_patient_system.txt +36 -0
- h_adminsim/assets/prompts/schedule_staff_reasoning.txt +57 -0
- h_adminsim/assets/prompts/schedule_staff_sc_tool_calling.txt +13 -0
- h_adminsim/assets/prompts/schedule_staff_system.txt +10 -0
- h_adminsim/assets/prompts/schedule_staff_tool_calling.txt +41 -0
- h_adminsim/client/__init__.py +3 -0
- h_adminsim/client/google_client.py +209 -0
- h_adminsim/client/openai_client.py +199 -0
- h_adminsim/client/vllm_client.py +160 -0
- h_adminsim/environment/__init__.py +1 -0
- h_adminsim/environment/hospital.py +462 -0
- h_adminsim/environment/op_scheduling_simulation.py +1126 -0
- h_adminsim/pipeline/__init__.py +3 -0
- h_adminsim/pipeline/data_generator.py +192 -0
- h_adminsim/pipeline/evaluator.py +33 -0
- h_adminsim/pipeline/simulation.py +231 -0
- h_adminsim/registry/__init__.py +5 -0
- h_adminsim/registry/errors.py +89 -0
- h_adminsim/registry/models.py +126 -0
- h_adminsim/registry/phrases.py +10 -0
- h_adminsim/registry/pydantic_models.py +21 -0
- h_adminsim/registry/variables.py +9 -0
- h_adminsim/supervisor.py +182 -0
- h_adminsim/task/agent_task.py +900 -0
- h_adminsim/task/fhir_manager.py +222 -0
- h_adminsim/task/schedule_assign.py +151 -0
- h_adminsim/tools/__init__.py +5 -0
- h_adminsim/tools/agent_data_builder.py +124 -0
- h_adminsim/tools/data_converter.py +536 -0
- h_adminsim/tools/data_synthesizer.py +365 -0
- h_adminsim/tools/evaluator.py +258 -0
- h_adminsim/tools/sanity_checker.py +216 -0
- h_adminsim/tools/scheduling_rule.py +420 -0
- h_adminsim/utils/__init__.py +136 -0
- h_adminsim/utils/common_utils.py +698 -0
- h_adminsim/utils/fhir_utils.py +190 -0
- h_adminsim/utils/filesys_utils.py +135 -0
- h_adminsim/utils/image_preprocess_utils.py +188 -0
- h_adminsim/utils/random_utils.py +358 -0
- h_adminsim/version.txt +1 -0
- h_adminsim-1.0.0.dist-info/LICENSE +30 -0
- h_adminsim-1.0.0.dist-info/METADATA +494 -0
- h_adminsim-1.0.0.dist-info/RECORD +62 -0
- h_adminsim-1.0.0.dist-info/WHEEL +4 -0
h_adminsim/admin_staff.py
ADDED
@@ -0,0 +1,280 @@
import os
from importlib import resources
from typing import Optional, Tuple
from langchain_openai import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents import (
    AgentExecutor,
    create_openai_tools_agent,
    create_tool_calling_agent,
)
from patientsim.utils.common_utils import set_seed

from h_adminsim.utils import colorstr, log
from h_adminsim.tools import SchedulingRule, create_tools
from h_adminsim.client import GeminiClient, GPTClient, VLLMClient



class AdminStaffAgent:
    def __init__(self,
                 target_task: str,
                 model: str,
                 api_key: Optional[str] = None,
                 use_vllm: bool = False,
                 vllm_endpoint: Optional[str] = None,
                 system_prompt_path: Optional[str] = None,
                 scheduling_user_prompt_path: Optional[str] = None,
                 tool_calling_prompt_path: Optional[str] = None,
                 sc_tool_calling_prompt_path: Optional[str] = None,
                 **kwargs):

        # Initialize environment
        self.target_task = target_task
        self._init_env(**kwargs)

        # Initialize model, API client, and other parameters
        self.model = model
        self._init_model(
            model=self.model,
            api_key=api_key,
            use_vllm=use_vllm,
            vllm_endpoint=vllm_endpoint,
        )

        # Initialize prompts
        self.system_prompt, self.scheduling_user_prompt_template, self.tool_calling_prompt, self.sc_tool_calling_prompt = \
            self._init_prompt(
                system_prompt_path=system_prompt_path,
                scheduling_user_prompt_path=scheduling_user_prompt_path,
                tool_calling_prompt_path=tool_calling_prompt_path,
                sc_tool_calling_prompt_path=sc_tool_calling_prompt_path,
            )

        log("Administrative staff agent initialized successfully", color=True)


    def _init_env(self, **kwargs):
        """
        Initialize the environment with default settings.
        """
        assert self.target_task in ['first_outpatient_intake', 'first_outpatient_scheduling'], \
            log(colorstr("red", f"Unsupported target task: {self.target_task}. Supported tasks are 'first_outpatient_intake' and 'first_outpatient_scheduling'."))

        self.random_seed = kwargs.get('random_seed', None)
        self.temperature = kwargs.get('temperature', 0.2)  # For varied responses. If you want deterministic responses, set it to 0.
        self.general_staff_greet = kwargs.get('general_staff_greet', "How can I help you?")
        self.staff_greet = kwargs.get('staff_greet', "How would you like to schedule the appointment?")
        self.staff_suggestion = kwargs.get('staff_suggestion', "How about this schedule: {schedule}")

        # Set random seed for reproducibility
        if self.random_seed:
            set_seed(self.random_seed)


    def _init_model(self,
                    model: str,
                    api_key: Optional[str] = None,
                    use_vllm: bool = False,
                    vllm_endpoint: Optional[str] = None):
        """
        Initialize the model and API client based on the specified model type.

        Args:
            model (str): The administration office agent model to use.
            api_key (Optional[str], optional): API key for the model. If not provided, it will be fetched from environment variables.
                Defaults to None.
            use_vllm (bool): Whether to use the vLLM client.
            vllm_endpoint (Optional[str], optional): Path to the vLLM server. Defaults to None.

        Raises:
            ValueError: If the specified model is not supported.
        """
        if 'gemini' in model.lower():
            self.client = GeminiClient(model, api_key)
        elif 'gpt' in model.lower():  # TODO: Support o3, o4 models etc.
            self.client = GPTClient(model, api_key)
        elif use_vllm:
            self.client = VLLMClient(model, vllm_endpoint)
        else:
            raise ValueError(colorstr("red", f"Unsupported model: {model}. Supported models are 'gemini' and 'gpt'."))


    def _init_prompt(self,
                     system_prompt_path: Optional[str] = None,
                     scheduling_user_prompt_path: Optional[str] = None,
                     tool_calling_prompt_path: Optional[str] = None,
                     sc_tool_calling_prompt_path: Optional[str] = None) -> Tuple[str, str, str, str]:
        """
        Initialize the prompts for the administration staff agent.

        Args:
            system_prompt_path (Optional[str], optional): Path to a custom system prompt file.
                If not provided, the default system prompt will be used. Defaults to None.
            scheduling_user_prompt_path (Optional[str], optional): Path to a custom user prompt file.
                If not provided, the default user prompt will be used. Defaults to None.
            tool_calling_prompt_path (Optional[str], optional): Path to a custom tool calling prompt file.
                If not provided, the default tool calling prompt will be used. Defaults to None.
        Raises:
            FileNotFoundError: If the specified system prompt file does not exist.

        Returns:
            Tuple[str, str, str, str]: The system prompt, user prompt template, tool calling prompt, and scheduling-only tool calling prompt.
        """
        # Initialize with the default system prompt
        if not system_prompt_path:
            prompt_file_name = 'schedule_staff_system.txt'
            file_path = resources.files("h_adminsim.assets.prompts").joinpath(prompt_file_name)
            system_prompt = file_path.read_text()

        # User can specify a custom system prompt
        else:
            if not os.path.exists(system_prompt_path):
                raise FileNotFoundError(colorstr("red", f"System prompt file not found: {system_prompt_path}"))
            with open(system_prompt_path, 'r') as f:
                system_prompt = f.read()

        # Initialize with the default user prompt for the scheduling task
        if not scheduling_user_prompt_path:
            prompt_file_name = 'schedule_staff_reasoning.txt'
            file_path = resources.files("h_adminsim.assets.prompts").joinpath(prompt_file_name)
            scheduling_user_prompt_template = file_path.read_text()

        # User can specify a custom user prompt
        else:
            if not os.path.exists(scheduling_user_prompt_path):
                raise FileNotFoundError(colorstr("red", f"User prompt file not found: {scheduling_user_prompt_path}"))
            with open(scheduling_user_prompt_path, 'r') as f:
                scheduling_user_prompt_template = f.read()

        # Initialize with the default tool calling prompt
        if not tool_calling_prompt_path:
            prompt_file_name = 'schedule_staff_tool_calling.txt'
            file_path = resources.files("h_adminsim.assets.prompts").joinpath(prompt_file_name)
            tool_calling_prompt = file_path.read_text()

        # User can specify a custom tool calling prompt
        else:
            if not os.path.exists(tool_calling_prompt_path):
                raise FileNotFoundError(colorstr("red", f"User prompt file not found: {tool_calling_prompt_path}"))
            else:
                with open(tool_calling_prompt_path, 'r') as f:
                    tool_calling_prompt = f.read()

        # Initialize with the default scheduling-only tool calling prompt
        if not sc_tool_calling_prompt_path:
            prompt_file_name = 'schedule_staff_sc_tool_calling.txt'
            file_path = resources.files("h_adminsim.assets.prompts").joinpath(prompt_file_name)
            sc_tool_calling_prompt = file_path.read_text()

        # User can specify a custom scheduling tool calling prompt
        else:
            if not os.path.exists(sc_tool_calling_prompt_path):
                raise FileNotFoundError(colorstr("red", f"User prompt file not found: {sc_tool_calling_prompt_path}"))
            else:
                with open(sc_tool_calling_prompt_path, 'r') as f:
                    sc_tool_calling_prompt = f.read()

        return system_prompt, scheduling_user_prompt_template, tool_calling_prompt, sc_tool_calling_prompt


    def reset_history(self, verbose: bool = True):
        """
        Reset the conversation history.

        Args:
            verbose (bool): Whether to print verbose output. Defaults to True.
        """
        self.client.reset_history(verbose=verbose)


    def build_agent(self,
                    rule: SchedulingRule,
                    doctor_info: dict,
                    patient_schedule_list: Optional[list[dict]] = None,
                    gt_idx: Optional[int] = None,
                    only_schedule_tool: bool = False) -> AgentExecutor:
        """
        Build a LangChain agent with scheduling tools.

        Args:
            rule (SchedulingRule): An instance of SchedulingRule containing scheduling logic.
            doctor_info (dict): A dictionary containing information about doctors.
            patient_schedule_list (Optional[list[dict]], optional): A list of the patient's scheduled appointments. Defaults to None.
            gt_idx (Optional[int], optional): Ground-truth index of the appointment to be canceled or rescheduled. Defaults to None.
            only_schedule_tool (bool, optional): Whether to use only the scheduling tools. Defaults to False.

        Returns:
            AgentExecutor: A LangChain agent executor with the scheduling tools.
        """
        tools = create_tools(rule, doctor_info, patient_schedule_list, gt_idx, only_schedule_tool)
        tool_calling_prompt = self.sc_tool_calling_prompt if only_schedule_tool else self.tool_calling_prompt
        prompt = ChatPromptTemplate.from_messages([
            ("system", tool_calling_prompt),
            MessagesPlaceholder("chat_history"),
            ("user", "{input}"),
            ("assistant", "{agent_scratchpad}"),
        ])

        if 'gemini' in self.model.lower():
            llm = ChatGoogleGenerativeAI(
                model=self.model,
                temperature=0,
            )
            agent = create_tool_calling_agent(
                llm=llm,
                tools=tools,
                prompt=prompt
            )

        elif 'gpt' in self.model.lower():
            llm = ChatOpenAI(
                model_name=self.model,
                temperature=0 if not 'gpt-5' in self.model.lower() else 1
            )
            agent = create_openai_tools_agent(
                llm=llm,
                tools=tools,
                prompt=prompt
            )

        else:
            log('Currently, we have supported only Gemini and GPT API-based models.', 'error')

        executor = AgentExecutor(
            agent=agent,
            tools=tools,
            verbose=False,
            max_iterations=1,
            return_intermediate_steps=True,
        )
        return executor


    def __call__(self,
                 user_prompt: str,
                 using_multi_turn: bool = True,
                 verbose: bool = True,
                 **kwargs) -> str:
        """
        Call the administrative staff agent with a user prompt and return the response.

        Args:
            user_prompt (str): The user prompt to send to the agent.
            using_multi_turn (bool, optional): Whether to use multi-turn conversation. Defaults to True.
            verbose (bool, optional): Whether to print verbose output. Defaults to True.

        Returns:
            str: The response from the administrative staff agent.
        """
        response = self.client(
            user_prompt=user_prompt,
            system_prompt=self.system_prompt,
            using_multi_turn=using_multi_turn,
            verbose=verbose,
            temperature=self.temperature,
            **kwargs
        )
        return response

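For orientation, below is a minimal usage sketch of the class above. It is not part of the package diff: it assumes the package and its LangChain dependencies are installed, that valid API credentials are exported as environment variables, and that "gpt-4o" is an acceptable model name; every argument shown comes from the constructor signature or the kwargs consumed by _init_env.

# Hedged usage sketch (not from the released package).
from h_adminsim.admin_staff import AdminStaffAgent

staff = AdminStaffAgent(
    target_task="first_outpatient_scheduling",  # must be one of the two supported tasks
    model="gpt-4o",                              # routed to GPTClient because the name contains "gpt"
    random_seed=42,                              # optional kwarg read by _init_env; enables set_seed
    temperature=0.2,                             # optional kwarg; 0 gives deterministic replies
)

# Multi-turn call; the system prompt loaded by _init_prompt is attached automatically.
reply = staff("Hello, I'd like to book a first outpatient appointment.")
print(reply)

# Clear the stored conversation before simulating the next patient.
staff.reset_history(verbose=False)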
h_adminsim/assets/configs/data4primary.yaml
ADDED
@@ -0,0 +1,47 @@
# Base
seed: 9999

# FHIR server url
fhir_url: http://localhost:8080/fhir

# Data configs
project: ./hospital_data/
data_name: primary
hospital_data:
  hospital_n: 3
  start_date:
    min: 2025-03-17 # ISO format: YYYY-MM-DD
    max: 2025-09-21 # ISO format: YYYY-MM-DD
  days: 7
  interval_hour: 0.25
  start_hour:
    min: 9
    max: 10
  end_hour:
    min: 18
    max: 19
  department_per_hospital:
    min: 2
    max: 3
  doctor_per_department:
    min: 1
    max: 1
  working_days: # The number of days a doctor can work during the given simulation period, with a random number of days assigned to each doctor.
    min: 5
    max: 7
  doctor_capacity_per_hour:
    min: 4
    max: 4
  doctor_has_schedule_prob: 0 # The probability that a doctor has at least one fixed schedule.
  schedule_coverage_ratio: # If the doctor has a fixed schedule, the proportion of that schedule relative to the total working hours.
    min: 0.4
    max: 0.6
  appointment_coverage_ratio: # Proportion of appointment time scheduled with patients outside the doctor's fixed schedule.
    min: 0.2
    max: 0.5
  preference:
    type: ['asap', 'doctor', 'date'] # Types of patient preferences.
    probs: [0.6, 0.2, 0.2] # Types probability distribution.
  symptom:
    type: ['simple', 'with_history'] # Types of patient history.
    probs: [0.9, 0.1] # Types probability distribution.
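A small sketch of how such a config file might be read follows. It is not from the package; the key nesting (for example, preference and symptom under hospital_data) reflects the reconstruction above and is an assumption, and h_adminsim/pipeline/data_generator.py may consume the file differently.

# Hedged sketch: load the primary-care config with PyYAML and inspect a few ranges.
import yaml

with open("h_adminsim/assets/configs/data4primary.yaml") as f:
    cfg = yaml.safe_load(f)

hospital = cfg["hospital_data"]
print(cfg["data_name"])                     # "primary"
print(hospital["department_per_hospital"])  # {"min": 2, "max": 3}
print(hospital["preference"]["probs"])      # [0.6, 0.2, 0.2] (assumed location of this key)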
h_adminsim/assets/configs/data4secondary.yaml
ADDED
@@ -0,0 +1,47 @@
# Base
seed: 9999

# FHIR server url
fhir_url: http://localhost:8080/fhir

# Data configs
project: ./hospital_data/
data_name: secondary
hospital_data:
  hospital_n: 3
  start_date:
    min: 2025-03-17 # ISO format: YYYY-MM-DD
    max: 2025-09-21 # ISO format: YYYY-MM-DD
  days: 7
  interval_hour: 0.25
  start_hour:
    min: 9
    max: 10
  end_hour:
    min: 18
    max: 19
  department_per_hospital:
    min: 7
    max: 9
  doctor_per_department:
    min: 1
    max: 2
  working_days: # The number of days a doctor can work during the given simulation period, with a random number of days assigned to each doctor.
    min: 3
    max: 4
  doctor_capacity_per_hour:
    min: 1
    max: 4
  doctor_has_schedule_prob: 0 # The probability that a doctor has at least one fixed schedule.
  schedule_coverage_ratio: # If the doctor has a fixed schedule, the proportion of that schedule relative to the total working hours.
    min: 0.4
    max: 0.6
  appointment_coverage_ratio: # Proportion of appointment time scheduled with patients outside the doctor's fixed schedule.
    min: 0.2
    max: 0.5
  preference:
    type: ['asap', 'doctor', 'date'] # Types of patient preferences.
    probs: [0.4, 0.4, 0.2] # Types probability distribution.
  symptom:
    type: ['simple', 'with_history'] # Types of patient history.
    probs: [0.6, 0.4] # Types probability distribution.
h_adminsim/assets/configs/data4tertiary.yaml
ADDED
@@ -0,0 +1,47 @@
# Base
seed: 9999

# FHIR server url
fhir_url: http://localhost:8080/fhir

# Data configs
project: ./hospital_data/
data_name: tertiary
hospital_data:
  hospital_n: 3
  start_date:
    min: 2025-03-17 # ISO format: YYYY-MM-DD
    max: 2025-09-21 # ISO format: YYYY-MM-DD
  days: 7
  interval_hour: 0.05
  start_hour:
    min: 9
    max: 10
  end_hour:
    min: 18
    max: 19
  department_per_hospital:
    min: 9
    max: 9
  doctor_per_department:
    min: 2
    max: 3
  working_days: # The number of days a doctor can work during the given simulation period, with a random number of days assigned to each doctor.
    min: 3
    max: 4
  doctor_capacity_per_hour:
    min: 1
    max: 20
  doctor_has_schedule_prob: 0 # The probability that a doctor has at least one fixed schedule.
  schedule_coverage_ratio: # If the doctor has a fixed schedule, the proportion of that schedule relative to the total working hours.
    min: 0.4
    max: 0.6
  appointment_coverage_ratio: # Proportion of appointment time scheduled with patients outside the doctor's fixed schedule.
    min: 0.2
    max: 0.5
  preference:
    type: ['asap', 'doctor', 'date'] # Types of patient preferences.
    probs: [0.4, 0.4, 0.2] # Types probability distribution.
  symptom:
    type: ['simple', 'with_history'] # Types of patient history.
    probs: [0.2, 0.8] # Types probability distribution.