h-adminsim 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- h_adminsim/__init__.py +5 -0
- h_adminsim/admin_staff.py +280 -0
- h_adminsim/assets/configs/data4primary.yaml +47 -0
- h_adminsim/assets/configs/data4secondary.yaml +47 -0
- h_adminsim/assets/configs/data4tertiary.yaml +47 -0
- h_adminsim/assets/country/address.json +141859 -0
- h_adminsim/assets/country/country_code.json +244 -0
- h_adminsim/assets/departments/department.json +85 -0
- h_adminsim/assets/departments/symptom.json +4530 -0
- h_adminsim/assets/fhir.schema.json +75253 -0
- h_adminsim/assets/names/firstname.txt +1219 -0
- h_adminsim/assets/names/lastname.txt +88799 -0
- h_adminsim/assets/prompts/cancel_patient_system.txt +38 -0
- h_adminsim/assets/prompts/intake_staff_task_user.txt +16 -0
- h_adminsim/assets/prompts/intake_supervisor_system.txt +8 -0
- h_adminsim/assets/prompts/intake_supervisor_user.txt +31 -0
- h_adminsim/assets/prompts/reschedule_patient_system.txt +38 -0
- h_adminsim/assets/prompts/schedule_patient_rejected_system.txt +42 -0
- h_adminsim/assets/prompts/schedule_patient_system.txt +36 -0
- h_adminsim/assets/prompts/schedule_staff_reasoning.txt +57 -0
- h_adminsim/assets/prompts/schedule_staff_sc_tool_calling.txt +13 -0
- h_adminsim/assets/prompts/schedule_staff_system.txt +10 -0
- h_adminsim/assets/prompts/schedule_staff_tool_calling.txt +41 -0
- h_adminsim/client/__init__.py +3 -0
- h_adminsim/client/google_client.py +209 -0
- h_adminsim/client/openai_client.py +199 -0
- h_adminsim/client/vllm_client.py +160 -0
- h_adminsim/environment/__init__.py +1 -0
- h_adminsim/environment/hospital.py +462 -0
- h_adminsim/environment/op_scheduling_simulation.py +1126 -0
- h_adminsim/pipeline/__init__.py +3 -0
- h_adminsim/pipeline/data_generator.py +192 -0
- h_adminsim/pipeline/evaluator.py +33 -0
- h_adminsim/pipeline/simulation.py +231 -0
- h_adminsim/registry/__init__.py +5 -0
- h_adminsim/registry/errors.py +89 -0
- h_adminsim/registry/models.py +126 -0
- h_adminsim/registry/phrases.py +10 -0
- h_adminsim/registry/pydantic_models.py +21 -0
- h_adminsim/registry/variables.py +9 -0
- h_adminsim/supervisor.py +182 -0
- h_adminsim/task/agent_task.py +900 -0
- h_adminsim/task/fhir_manager.py +222 -0
- h_adminsim/task/schedule_assign.py +151 -0
- h_adminsim/tools/__init__.py +5 -0
- h_adminsim/tools/agent_data_builder.py +124 -0
- h_adminsim/tools/data_converter.py +536 -0
- h_adminsim/tools/data_synthesizer.py +365 -0
- h_adminsim/tools/evaluator.py +258 -0
- h_adminsim/tools/sanity_checker.py +216 -0
- h_adminsim/tools/scheduling_rule.py +420 -0
- h_adminsim/utils/__init__.py +136 -0
- h_adminsim/utils/common_utils.py +698 -0
- h_adminsim/utils/fhir_utils.py +190 -0
- h_adminsim/utils/filesys_utils.py +135 -0
- h_adminsim/utils/image_preprocess_utils.py +188 -0
- h_adminsim/utils/random_utils.py +358 -0
- h_adminsim/version.txt +1 -0
- h_adminsim-1.0.0.dist-info/LICENSE +30 -0
- h_adminsim-1.0.0.dist-info/METADATA +494 -0
- h_adminsim-1.0.0.dist-info/RECORD +62 -0
- h_adminsim-1.0.0.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
# Natural-language templates for an appointment preference, keyed by kind:
#   'asap'   - earliest available doctor in the department
#   'doctor' - a specific preferred doctor
#   'date'   - earliest available doctor starting from a given date
#              (the '{date}' placeholder is filled in via str.format)

# Patient-facing voice ("You ...").
PREFERENCE_PHRASE_PATIENT = dict(
    asap='You want the earliest available doctor in the department for the outpatient visit.',
    doctor='You have a preferred doctor for the outpatient visit.',
    date='You want the earliest available doctor in the department for the outpatient visit, starting from **{date}**.',
)

# Staff-facing voice ("The patient ...").
PREFERENCE_PHRASE_STAFF = dict(
    asap='The patient wants the earliest available doctor in the department for the outpatient visit.',
    doctor='The patient has a preferred doctor for the outpatient visit.',
    date='The patient wants the earliest available doctor in the department for the outpatient visit, starting from **{date}**.',
)
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
from pydantic import BaseModel, Field
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class PromptRequest(BaseModel):
    """Request payload pairing a user prompt with an optional system prompt."""
    # The main prompt text sent to the model.
    user_prompt: str
    # Optional system-level instruction; None leaves the client's default in place.
    system_prompt: Optional[str] = None
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class ScheduleItem(BaseModel):
    """A single appointment slot expressed as a numeric time interval."""
    # NOTE(review): the unit/epoch of these floats is not defined here —
    # confirm against the scheduler that produces them.
    start: float = Field(description="Start time of the appointment")
    end: float = Field(description="End time of the appointment")
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class ScheduleModel(BaseModel):
    """A doctor's schedule plus the appointments changed while producing it."""
    # Mapping of slot identifier -> appointment interval.
    schedule: dict[str, ScheduleItem] = Field(description="Doctor's schedule")
    # NOTE(review): element type of this list is not declared — tighten to
    # list[str] / list[ScheduleItem] once the producer's payload is confirmed.
    # Typo fix in the exposed description: "exised" -> "existing".
    changed_existing_schedule_list: list = Field(description="List of changed appointments among the existing schedules")
|
|
21
|
+
|
h_adminsim/supervisor.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from importlib import resources
|
|
3
|
+
from typing import Optional, Tuple
|
|
4
|
+
|
|
5
|
+
from h_adminsim.utils import colorstr, log
|
|
6
|
+
from h_adminsim.client import GeminiClient, GPTClient, VLLMClient
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class SupervisorAgent:
    """LLM-backed supervisor agent for hospital-administration simulation tasks.

    Wraps one of the supported clients (Gemini, GPT, or a vLLM server) together
    with the task-specific system prompt and user prompt template.
    """

    # Single source of truth for supported tasks and their default prompt
    # assets: task -> (system prompt file, user prompt template file).
    # NOTE(review): the schedule_supervisor_*.txt files do not appear in the
    # packaged asset listing — confirm they ship with the wheel.
    _PROMPT_FILES = {
        'first_outpatient_intake': ('intake_supervisor_system.txt', 'intake_supervisor_user.txt'),
        'first_outpatient_scheduling': ('schedule_supervisor_system.txt', 'schedule_supervisor_user.txt'),
    }

    def __init__(self,
                 target_task: str,
                 model: str,
                 api_key: Optional[str] = None,
                 use_vllm: bool = False,
                 vllm_endpoint: Optional[str] = None,
                 system_prompt_path: Optional[str] = None,
                 user_prompt_path: Optional[str] = None,
                 reasoning_effort: str = 'low',
                 **kwargs):
        """
        Initialize the supervisor agent.

        Args:
            target_task (str): One of 'first_outpatient_intake' or 'first_outpatient_scheduling'.
            model (str): Model identifier; routed to a Gemini, GPT, or vLLM client by name.
            api_key (Optional[str], optional): API key for the model. If not provided, it will be
                fetched from environment variables by the client. Defaults to None.
            use_vllm (bool): Whether to use the vLLM client when the model name matches
                neither hosted API family.
            vllm_endpoint (Optional[str], optional): Path to the vLLM server. Defaults to None.
            system_prompt_path (Optional[str], optional): Custom system prompt file;
                the packaged default is used if None. Defaults to None.
            user_prompt_path (Optional[str], optional): Custom user prompt template file;
                the packaged default is used if None. Defaults to None.
            reasoning_effort (str, optional): Reasoning effort level (GPT-5 family only).
                Defaults to 'low'.
        """
        # Validate the task first: everything below depends on it.
        self.target_task = target_task
        self._init_env(**kwargs)

        # Initialize model, API client, and other parameters.
        self.model = model
        self._init_model(
            model=self.model,
            api_key=api_key,
            use_vllm=use_vllm,
            vllm_endpoint=vllm_endpoint,
            reasoning_effort=reasoning_effort
        )

        # Load the (fixed) system prompt and the user prompt template.
        self.system_prompt, self.user_prompt_template = self._init_prompt(
            system_prompt_path=system_prompt_path,
            user_prompt_path=user_prompt_path
        )

        log(f"Supervisor agent for {self.target_task} initialized successfully", color=True)


    def _init_env(self, **kwargs):
        """
        Validate the environment settings.

        Raises:
            ValueError: If ``self.target_task`` is not a supported task.
        """
        # Explicit raise instead of the original `assert`: asserts are stripped
        # under `python -O`, and the assert's message expression was `log(...)`,
        # whose return value (not the logged text) became the AssertionError
        # message. ValueError matches the convention used in `_init_model`.
        if self.target_task not in self._PROMPT_FILES:
            raise ValueError(colorstr(
                "red",
                f"Unsupported target task: {self.target_task}. Supported tasks are 'first_outpatient_intake' and 'first_outpatient_scheduling'."
            ))


    def _init_model(self,
                    model: str,
                    api_key: Optional[str] = None,
                    use_vllm: bool = False,
                    vllm_endpoint: Optional[str] = None,
                    reasoning_effort: str = 'low'):
        """
        Initialize the model and API client based on the specified model type.

        Args:
            model (str): The administration office agent model to use.
            api_key (Optional[str], optional): API key for the model. If not provided, it will be fetched from environment variables.
                Defaults to None.
            use_vllm (bool): Whether to use vLLM client.
            vllm_endpoint (Optional[str], optional): Path to the vLLM server. Defaults to None.
            reasoning_effort (str, optional): Reasoning effort level for the model. Defaults to 'low'.

        Raises:
            ValueError: If the specified model is not supported.
        """
        # NOTE(review): the name-based branches run before `use_vllm`, so a
        # vLLM-served model whose name contains 'gemini' or 'gpt' is still
        # routed to the hosted API client — confirm this ordering is intended.
        if 'gemini' in model.lower():
            self.client = GeminiClient(model, api_key)
            self.reasoning_kwargs = {}
            # `reasoning_effort` defaults to 'low', so this warns unless the
            # caller explicitly passes a falsy value.
            if reasoning_effort:
                log("'reasoning_effort' is not supported for Gemini models and will be ignored.", level='warning')

        elif 'gpt' in model.lower():  # TODO: Support o3, o4 models etc.
            self.client = GPTClient(model, api_key)
            # Only the GPT-5 family accepts a reasoning-effort parameter.
            self.reasoning_kwargs = {'reasoning_effort': reasoning_effort} if 'gpt-5' in model.lower() else {}
            if 'gpt-5' not in model.lower() and reasoning_effort:
                log(f"'reasoning_effort' is not supported for {model} model and will be ignored.", level='warning')

        elif use_vllm:
            self.client = VLLMClient(model, vllm_endpoint)
            self.reasoning_kwargs = {}
            if reasoning_effort:
                log("'reasoning_effort' is not supported for vLLM models and will be ignored.", level='warning')

        else:
            raise ValueError(colorstr("red", f"Unsupported model: {model}. Supported models are 'gemini' and 'gpt'."))


    def _read_prompt(self, custom_path: Optional[str], default_file_name: str, label: str) -> str:
        """
        Return prompt text from ``custom_path`` if given, else from the packaged default asset.

        Args:
            custom_path (Optional[str]): User-supplied prompt file path, or None for the default.
            default_file_name (str): File name under ``h_adminsim.assets.prompts``.
            label (str): 'System' or 'User'; used only in the error message.

        Raises:
            FileNotFoundError: If ``custom_path`` is provided but does not exist.
        """
        if custom_path:
            if not os.path.exists(custom_path):
                raise FileNotFoundError(colorstr("red", f"{label} prompt file not found: {custom_path}"))
            with open(custom_path, 'r') as f:
                return f.read()
        return resources.files("h_adminsim.assets.prompts").joinpath(default_file_name).read_text()


    def _init_prompt(self,
                     system_prompt_path: Optional[str] = None,
                     user_prompt_path: Optional[str] = None) -> Tuple[str, str]:
        """
        Initialize the system prompt and user prompt template.

        Args:
            system_prompt_path (Optional[str], optional): Path to a custom system prompt file.
                If not provided, the default system prompt will be used. Defaults to None.
            user_prompt_path (Optional[str], optional): Path to a custom user prompt file.
                If not provided, the default user prompt will be used. Defaults to None.

        Raises:
            FileNotFoundError: If a specified prompt file does not exist.

        Returns:
            Tuple[str, str]: The system prompt and user prompt templates.
        """
        # The task was validated in `_init_env`, so this lookup cannot fail
        # (the original if/elif chain left the file name unbound — and would
        # have raised NameError — for an unknown task).
        system_file, user_file = self._PROMPT_FILES[self.target_task]
        system_prompt = self._read_prompt(system_prompt_path, system_file, 'System')
        user_prompt_template = self._read_prompt(user_prompt_path, user_file, 'User')
        return system_prompt, user_prompt_template


    def reset_history(self, verbose: bool = True):
        """
        Reset the conversation history.

        Args:
            verbose (bool): Whether to print verbose output. Defaults to True.
        """
        self.client.reset_history(verbose=verbose)


    def __call__(self,
                 user_prompt: str,
                 using_multi_turn: bool = False,
                 verbose: bool = True,
                 **kwargs) -> str:
        """
        Call the supervisor agent with a user prompt and return the response.

        Args:
            user_prompt (str): The user prompt to send to the supervisor agent.
            using_multi_turn (bool, optional): Whether to use multi-turn conversation. Defaults to False.
            verbose (bool, optional): Whether to print verbose output. Defaults to True.

        Returns:
            str: The response from the supervisor agent.
        """
        # `reasoning_kwargs` intentionally overrides any caller-passed duplicates.
        kwargs.update(self.reasoning_kwargs)
        response = self.client(
            user_prompt=user_prompt,
            system_prompt=self.system_prompt,
            using_multi_turn=using_multi_turn,
            verbose=verbose,
            **kwargs
        )
        return response
|
|
182
|
+
|