h_adminsim-1.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- h_adminsim/__init__.py +5 -0
- h_adminsim/admin_staff.py +280 -0
- h_adminsim/assets/configs/data4primary.yaml +47 -0
- h_adminsim/assets/configs/data4secondary.yaml +47 -0
- h_adminsim/assets/configs/data4tertiary.yaml +47 -0
- h_adminsim/assets/country/address.json +141859 -0
- h_adminsim/assets/country/country_code.json +244 -0
- h_adminsim/assets/departments/department.json +85 -0
- h_adminsim/assets/departments/symptom.json +4530 -0
- h_adminsim/assets/fhir.schema.json +75253 -0
- h_adminsim/assets/names/firstname.txt +1219 -0
- h_adminsim/assets/names/lastname.txt +88799 -0
- h_adminsim/assets/prompts/cancel_patient_system.txt +38 -0
- h_adminsim/assets/prompts/intake_staff_task_user.txt +16 -0
- h_adminsim/assets/prompts/intake_supervisor_system.txt +8 -0
- h_adminsim/assets/prompts/intake_supervisor_user.txt +31 -0
- h_adminsim/assets/prompts/reschedule_patient_system.txt +38 -0
- h_adminsim/assets/prompts/schedule_patient_rejected_system.txt +42 -0
- h_adminsim/assets/prompts/schedule_patient_system.txt +36 -0
- h_adminsim/assets/prompts/schedule_staff_reasoning.txt +57 -0
- h_adminsim/assets/prompts/schedule_staff_sc_tool_calling.txt +13 -0
- h_adminsim/assets/prompts/schedule_staff_system.txt +10 -0
- h_adminsim/assets/prompts/schedule_staff_tool_calling.txt +41 -0
- h_adminsim/client/__init__.py +3 -0
- h_adminsim/client/google_client.py +209 -0
- h_adminsim/client/openai_client.py +199 -0
- h_adminsim/client/vllm_client.py +160 -0
- h_adminsim/environment/__init__.py +1 -0
- h_adminsim/environment/hospital.py +462 -0
- h_adminsim/environment/op_scheduling_simulation.py +1126 -0
- h_adminsim/pipeline/__init__.py +3 -0
- h_adminsim/pipeline/data_generator.py +192 -0
- h_adminsim/pipeline/evaluator.py +33 -0
- h_adminsim/pipeline/simulation.py +231 -0
- h_adminsim/registry/__init__.py +5 -0
- h_adminsim/registry/errors.py +89 -0
- h_adminsim/registry/models.py +126 -0
- h_adminsim/registry/phrases.py +10 -0
- h_adminsim/registry/pydantic_models.py +21 -0
- h_adminsim/registry/variables.py +9 -0
- h_adminsim/supervisor.py +182 -0
- h_adminsim/task/agent_task.py +900 -0
- h_adminsim/task/fhir_manager.py +222 -0
- h_adminsim/task/schedule_assign.py +151 -0
- h_adminsim/tools/__init__.py +5 -0
- h_adminsim/tools/agent_data_builder.py +124 -0
- h_adminsim/tools/data_converter.py +536 -0
- h_adminsim/tools/data_synthesizer.py +365 -0
- h_adminsim/tools/evaluator.py +258 -0
- h_adminsim/tools/sanity_checker.py +216 -0
- h_adminsim/tools/scheduling_rule.py +420 -0
- h_adminsim/utils/__init__.py +136 -0
- h_adminsim/utils/common_utils.py +698 -0
- h_adminsim/utils/fhir_utils.py +190 -0
- h_adminsim/utils/filesys_utils.py +135 -0
- h_adminsim/utils/image_preprocess_utils.py +188 -0
- h_adminsim/utils/random_utils.py +358 -0
- h_adminsim/version.txt +1 -0
- h_adminsim-1.0.0.dist-info/LICENSE +30 -0
- h_adminsim-1.0.0.dist-info/METADATA +494 -0
- h_adminsim-1.0.0.dist-info/RECORD +62 -0
- h_adminsim-1.0.0.dist-info/WHEEL +4 -0
h_adminsim/client/openai_client.py
@@ -0,0 +1,199 @@
import os
from openai import OpenAI
from dotenv import load_dotenv, find_dotenv
from typing import List, Tuple, Optional

from h_adminsim.utils import log
from h_adminsim.utils.image_preprocess_utils import *


########### For langchain integration (currently not used) ############
# from langchain_openai import ChatOpenAI
# from langchain_core.prompts import ChatPromptTemplate
# from langchain_core.output_parsers import JsonOutputParser
# from h_adminsim.registry import ScheduleModel
#######################################################################



class GPTClient:
    def __init__(self, model: str, api_key: Optional[str] = None):
        # Initialize
        self.model = model
        self._init_environment(api_key)
        self.histories = list()
        self.token_usages = dict()
        self.__first_turn = True


    def _init_environment(self, api_key: Optional[str] = None):
        """
        Initialize the OpenAI client.

        Args:
            api_key (Optional[str]): API key for OpenAI. If not provided, it is
                loaded from environment variables (via a .env file if present).
        """
        if not api_key:
            dotenv_path = find_dotenv(usecwd=True)
            load_dotenv(dotenv_path, override=True)
            api_key = os.environ.get("OPENAI_API_KEY", None)
        self.client = OpenAI(api_key=api_key)


    def reset_history(self, verbose: bool = True):
        """
        Reset the conversation history.

        Args:
            verbose (bool): Whether to print verbose output. Defaults to True.
        """
        self.__first_turn = True
        self.histories = list()
        self.token_usages = dict()
        if verbose:
            log('Conversation history has been reset.', color=True)


    def __make_payload(self,
                       user_prompt: str,
                       image_path: Optional[str] = None,
                       image_size: Optional[Tuple[int, int]] = None) -> List[dict]:
        """
        Create a payload for API calls to the GPT model.

        Args:
            user_prompt (str): User prompt.
            image_path (Optional[str], optional): Path to an image to include in the request. Defaults to None.
            image_size (Optional[Tuple[int, int]], optional): Target size to resize the image to. Defaults to None.

        Returns:
            List[dict]: Payload including prompts and image data.
        """
        payloads = list()
        user_contents = {"role": "user", "content": []}

        # User prompt
        user_contents["content"].append(
            {"type": "text", "text": user_prompt}
        )

        # Optional image, sent inline as a base64 data URL
        if image_path:
            base64_image = encode_resize_image(image_path, image_size) if image_size else encode_image(image_path)
            extension = 'jpeg' if image_size else get_image_extension(image_path)
            user_contents["content"].append(
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/{extension};base64,{base64_image}"
                    }
                }
            )

        payloads.append(user_contents)

        return payloads


    def __call__(self,
                 user_prompt: str,
                 system_prompt: Optional[str] = None,
                 image_path: Optional[str] = None,
                 image_size: Optional[Tuple[int, int]] = None,
                 using_multi_turn: bool = False,
                 verbose: bool = True,
                 **kwargs) -> str:
        """
        Send a chat completion request to the model with optional image input and system prompt.

        Args:
            user_prompt (str): The main user prompt or query to send to the model.
            system_prompt (Optional[str], optional): An optional system-level prompt to set context or behavior. Defaults to None.
            image_path (Optional[str], optional): Path to an image file to be included in the prompt. Defaults to None.
            image_size (Optional[Tuple[int, int]], optional): The target image size in (width, height) format, if resizing is needed. Defaults to None.
            using_multi_turn (bool): Whether to keep the conversation history across calls. Defaults to False.
            verbose (bool): Whether to print verbose output. Defaults to True.

        Raises:
            FileNotFoundError: If `image_path` is provided but the file does not exist.
            Exception: Any exception raised during the API call is re-raised.

        Returns:
            str: The model's response message.
        """
        if image_path and not os.path.exists(image_path):
            raise FileNotFoundError(f"Image file not found: {image_path}")

        try:
            # Ensure an empty history for single-turn calls
            if not using_multi_turn:
                self.reset_history(verbose)

            # System prompt (added only once, at the start of a conversation)
            if self.__first_turn:
                if system_prompt:
                    self.histories.append({"role": "system", "content": [{"type": "text", "text": system_prompt}]})
                self.__first_turn = False

            # User prompt
            self.histories += self.__make_payload(user_prompt, image_path, image_size)

            # Model response
            response = self.client.chat.completions.create(
                model=self.model,
                messages=self.histories,
                **kwargs
            )
            assistant_msg = response.choices[0].message
            self.histories.append({"role": assistant_msg.role, "content": [{"type": "text", "text": assistant_msg.content}]})

            # Log token usage
            if response.usage:
                self.token_usages.setdefault("prompt_tokens", []).append(response.usage.prompt_tokens)
                self.token_usages.setdefault("completion_tokens", []).append(response.usage.completion_tokens)
                self.token_usages.setdefault("total_tokens", []).append(response.usage.total_tokens)
                self.token_usages.setdefault("reasoning_tokens", []).append(response.usage.completion_tokens_details.reasoning_tokens)

            return assistant_msg.content

        except Exception:
            raise



# class GPTLangChainClient(GPTClient):
#     def __init__(self, model: str):
#         super(GPTLangChainClient, self).__init__(model)
#         self.client_lc = ChatOpenAI(
#             model=self.model,
#             api_key=self.client.api_key
#         )
#
#     def __call__(self,
#                  user_prompt: str,
#                  system_prompt: Optional[str] = None,
#                  image_path: Optional[str] = None,
#                  image_size: Optional[Tuple[int]] = None,
#                  using_multi_turn: bool = False,
#                  **kwargs) -> str:
#         try:
#             # To ensure empty history
#             self.reset_history()
#
#             # Prompts
#             parser = JsonOutputParser(pydantic_object=ScheduleModel)
#             prompt = ChatPromptTemplate.from_messages(
#                 [
#                     ('system', system_prompt),
#                     ('human', user_prompt)
#                 ]
#             ).partial(format_instructions=parser.get_format_instructions())
#             chain = prompt | self.client_lc | parser
#
#             # Model response
#             response = chain.invoke(kwargs)
#
#             return response
#
#         except Exception as e:
#             raise e
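For reference, a minimal usage sketch for GPTClient follows. The model name and prompts are illustrative assumptions; the client expects an API key passed to the constructor or an OPENAI_API_KEY discoverable via the environment or a .env file.

# Hypothetical usage sketch for GPTClient (model name and prompts are
# illustrative assumptions; assumes OPENAI_API_KEY is set or in a .env file).
from h_adminsim.client.openai_client import GPTClient

client = GPTClient(model="gpt-4o")  # assumed model name

# Single-turn call: history is reset automatically because using_multi_turn=False.
answer = client(
    user_prompt="Summarize the patient's chief complaint in one sentence.",
    system_prompt="You are a hospital intake assistant.",
)

# Follow-up call that keeps the history accumulated so far.
follow_up = client(
    user_prompt="Which department should handle it?",
    using_multi_turn=True,
)

print(answer, follow_up, client.token_usages, sep="\n")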
h_adminsim/client/vllm_client.py
@@ -0,0 +1,160 @@
import os
import requests
from openai import OpenAI
from typing import List, Tuple, Optional

from h_adminsim.utils import colorstr, log
from h_adminsim.utils.image_preprocess_utils import *



class VLLMClient:
    def __init__(self, model: str, vllm_endpoint: str):
        # Initialize
        self.model = model
        self.vllm_endpoint = vllm_endpoint
        self._init_environment()
        self.histories = list()
        self.token_usages = dict()
        self.__first_turn = True
        self.__sanity_check()


    def _init_environment(self):
        """
        Initialize the vLLM client via the OpenAI-compatible API.
        """
        self.client = OpenAI(
            base_url=f'{self.vllm_endpoint}/v1',
            api_key='EMPTY'
        )


    def __sanity_check(self):
        # Verify that the endpoint is reachable and actually serves the requested model
        response = requests.get(f'{self.vllm_endpoint}/v1/models')
        if response.status_code != 200:
            raise ValueError(colorstr("red", f"Failed to retrieve models: {response.text}"))

        models = response.json()
        if not models.get("data"):
            raise ValueError(colorstr("red", "No models found."))
        available_model_ids = [m['id'] for m in models['data']]
        if self.model not in available_model_ids:
            raise ValueError(colorstr("red", f"Model '{self.model}' not found in available models: {', '.join(available_model_ids)}"))


    def reset_history(self, verbose: bool = True):
        """
        Reset the conversation history.

        Args:
            verbose (bool): Whether to print verbose output. Defaults to True.
        """
        self.__first_turn = True
        self.histories = list()
        self.token_usages = dict()
        if verbose:
            log('Conversation history has been reset.', color=True)


    def __make_payload(self,
                       user_prompt: str,
                       image_path: Optional[str] = None,
                       image_size: Optional[Tuple[int, int]] = None) -> List[dict]:
        """
        Create a payload for API calls to the model.

        Args:
            user_prompt (str): User prompt.
            image_path (Optional[str], optional): Path to an image to include in the request. Defaults to None.
            image_size (Optional[Tuple[int, int]], optional): Target size to resize the image to. Defaults to None.

        Returns:
            List[dict]: Payload including prompts and image data.
        """
        payloads = list()
        user_contents = {"role": "user", "content": []}

        # User prompt
        user_contents["content"].append(
            {"type": "text", "text": user_prompt}
        )

        # Optional image, sent inline as a base64 data URL
        if image_path:
            base64_image = encode_resize_image(image_path, image_size) if image_size else encode_image(image_path)
            extension = 'jpeg' if image_size else get_image_extension(image_path)
            user_contents["content"].append(
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/{extension};base64,{base64_image}"
                    }
                }
            )

        payloads.append(user_contents)

        return payloads


    def __call__(self,
                 user_prompt: str,
                 system_prompt: Optional[str] = None,
                 image_path: Optional[str] = None,
                 image_size: Optional[Tuple[int, int]] = None,
                 using_multi_turn: bool = False,
                 verbose: bool = True,
                 **kwargs) -> str:
        """
        Send a chat completion request to the model with optional image input and system prompt.

        Args:
            user_prompt (str): The main user prompt or query to send to the model.
            system_prompt (Optional[str], optional): An optional system-level prompt to set context or behavior. Defaults to None.
            image_path (Optional[str], optional): Path to an image file to be included in the prompt. Defaults to None.
            image_size (Optional[Tuple[int, int]], optional): The target image size in (width, height) format, if resizing is needed. Defaults to None.
            using_multi_turn (bool): Whether to keep the conversation history across calls. Defaults to False.
            verbose (bool): Whether to print verbose output. Defaults to True.

        Raises:
            FileNotFoundError: If `image_path` is provided but the file does not exist.
            Exception: Any exception raised during the API call is re-raised.

        Returns:
            str: The model's response message.
        """
        if image_path and not os.path.exists(image_path):
            raise FileNotFoundError(f"Image file not found: {image_path}")

        try:
            # Ensure an empty history for single-turn calls
            if not using_multi_turn:
                self.reset_history(verbose)

            # System prompt (added only once, at the start of a conversation)
            if self.__first_turn:
                if system_prompt:
                    self.histories.append({"role": "system", "content": [{"type": "text", "text": system_prompt}]})
                self.__first_turn = False

            # User prompt
            self.histories += self.__make_payload(user_prompt, image_path, image_size)

            # Model response
            response = self.client.chat.completions.create(
                model=self.model,
                messages=self.histories,
                **kwargs
            )
            assistant_msg = response.choices[0].message
            self.histories.append({"role": assistant_msg.role, "content": [{"type": "text", "text": assistant_msg.content}]})

            # Log token usage
            if response.usage:
                self.token_usages.setdefault("prompt_tokens", []).append(response.usage.prompt_tokens)
                self.token_usages.setdefault("completion_tokens", []).append(response.usage.completion_tokens)
                self.token_usages.setdefault("total_tokens", []).append(response.usage.total_tokens)

            return assistant_msg.content

        except Exception:
            raise
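A similar sketch for VLLMClient follows; the endpoint address and model id are assumptions, and the code requires a running vLLM server exposing the OpenAI-compatible API.

# Hypothetical usage sketch for VLLMClient (endpoint and model id are
# assumptions; requires a running vLLM server with the OpenAI-compatible API).
from h_adminsim.client.vllm_client import VLLMClient

client = VLLMClient(
    model="meta-llama/Llama-3.1-8B-Instruct",  # assumed model id
    vllm_endpoint="http://localhost:8000",     # assumed server address
)
# The constructor has already verified the endpoint and model via its sanity check.

reply = client(
    user_prompt="List three red-flag symptoms that should escalate a triage case.",
    system_prompt="You are a hospital scheduling assistant.",
    temperature=0.2,  # extra keyword arguments are forwarded to chat.completions.create
)
print(reply)
print(client.token_usages)  # per-call prompt/completion/total token counts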
h_adminsim/environment/__init__.py
@@ -0,0 +1 @@
from .op_scheduling_simulation import OPScehdulingSimulation
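This re-export makes the simulation class importable directly from the environment subpackage (note that the class name is spelled OPScehdulingSimulation in the released code):

# Package-level import enabled by the re-export above (spelling as released).
from h_adminsim.environment import OPScehdulingSimulation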