ob-metaflow-extensions 1.1.130__py2.py3-none-any.whl → 1.5.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ob-metaflow-extensions might be problematic. Click here for more details.

Files changed (105)
  1. metaflow_extensions/outerbounds/__init__.py +1 -1
  2. metaflow_extensions/outerbounds/plugins/__init__.py +34 -4
  3. metaflow_extensions/outerbounds/plugins/apps/__init__.py +0 -0
  4. metaflow_extensions/outerbounds/plugins/apps/app_cli.py +0 -0
  5. metaflow_extensions/outerbounds/plugins/apps/app_utils.py +187 -0
  6. metaflow_extensions/outerbounds/plugins/apps/consts.py +3 -0
  7. metaflow_extensions/outerbounds/plugins/apps/core/__init__.py +15 -0
  8. metaflow_extensions/outerbounds/plugins/apps/core/_state_machine.py +506 -0
  9. metaflow_extensions/outerbounds/plugins/apps/core/_vendor/__init__.py +0 -0
  10. metaflow_extensions/outerbounds/plugins/apps/core/_vendor/spinner/__init__.py +4 -0
  11. metaflow_extensions/outerbounds/plugins/apps/core/_vendor/spinner/spinners.py +478 -0
  12. metaflow_extensions/outerbounds/plugins/apps/core/app_config.py +128 -0
  13. metaflow_extensions/outerbounds/plugins/apps/core/app_deploy_decorator.py +330 -0
  14. metaflow_extensions/outerbounds/plugins/apps/core/artifacts.py +0 -0
  15. metaflow_extensions/outerbounds/plugins/apps/core/capsule.py +958 -0
  16. metaflow_extensions/outerbounds/plugins/apps/core/click_importer.py +24 -0
  17. metaflow_extensions/outerbounds/plugins/apps/core/code_package/__init__.py +3 -0
  18. metaflow_extensions/outerbounds/plugins/apps/core/code_package/code_packager.py +618 -0
  19. metaflow_extensions/outerbounds/plugins/apps/core/code_package/examples.py +125 -0
  20. metaflow_extensions/outerbounds/plugins/apps/core/config/__init__.py +15 -0
  21. metaflow_extensions/outerbounds/plugins/apps/core/config/cli_generator.py +165 -0
  22. metaflow_extensions/outerbounds/plugins/apps/core/config/config_utils.py +966 -0
  23. metaflow_extensions/outerbounds/plugins/apps/core/config/schema_export.py +299 -0
  24. metaflow_extensions/outerbounds/plugins/apps/core/config/typed_configs.py +233 -0
  25. metaflow_extensions/outerbounds/plugins/apps/core/config/typed_init_generator.py +537 -0
  26. metaflow_extensions/outerbounds/plugins/apps/core/config/unified_config.py +1125 -0
  27. metaflow_extensions/outerbounds/plugins/apps/core/config_schema.yaml +337 -0
  28. metaflow_extensions/outerbounds/plugins/apps/core/dependencies.py +115 -0
  29. metaflow_extensions/outerbounds/plugins/apps/core/deployer.py +959 -0
  30. metaflow_extensions/outerbounds/plugins/apps/core/experimental/__init__.py +89 -0
  31. metaflow_extensions/outerbounds/plugins/apps/core/perimeters.py +87 -0
  32. metaflow_extensions/outerbounds/plugins/apps/core/secrets.py +164 -0
  33. metaflow_extensions/outerbounds/plugins/apps/core/utils.py +233 -0
  34. metaflow_extensions/outerbounds/plugins/apps/core/validations.py +17 -0
  35. metaflow_extensions/outerbounds/plugins/apps/deploy_decorator.py +201 -0
  36. metaflow_extensions/outerbounds/plugins/apps/supervisord_utils.py +243 -0
  37. metaflow_extensions/outerbounds/plugins/aws/__init__.py +4 -0
  38. metaflow_extensions/outerbounds/plugins/aws/assume_role.py +3 -0
  39. metaflow_extensions/outerbounds/plugins/aws/assume_role_decorator.py +118 -0
  40. metaflow_extensions/outerbounds/plugins/card_utilities/injector.py +1 -1
  41. metaflow_extensions/outerbounds/plugins/checkpoint_datastores/__init__.py +2 -0
  42. metaflow_extensions/outerbounds/plugins/checkpoint_datastores/coreweave.py +71 -0
  43. metaflow_extensions/outerbounds/plugins/checkpoint_datastores/external_chckpt.py +85 -0
  44. metaflow_extensions/outerbounds/plugins/checkpoint_datastores/nebius.py +73 -0
  45. metaflow_extensions/outerbounds/plugins/fast_bakery/baker.py +110 -0
  46. metaflow_extensions/outerbounds/plugins/fast_bakery/docker_environment.py +43 -9
  47. metaflow_extensions/outerbounds/plugins/fast_bakery/fast_bakery.py +12 -0
  48. metaflow_extensions/outerbounds/plugins/kubernetes/kubernetes_client.py +18 -44
  49. metaflow_extensions/outerbounds/plugins/kubernetes/pod_killer.py +374 -0
  50. metaflow_extensions/outerbounds/plugins/nim/card.py +2 -16
  51. metaflow_extensions/outerbounds/plugins/nim/{__init__.py → nim_decorator.py} +13 -49
  52. metaflow_extensions/outerbounds/plugins/nim/nim_manager.py +294 -233
  53. metaflow_extensions/outerbounds/plugins/nim/utils.py +36 -0
  54. metaflow_extensions/outerbounds/plugins/nvcf/constants.py +2 -2
  55. metaflow_extensions/outerbounds/plugins/nvcf/nvcf.py +100 -19
  56. metaflow_extensions/outerbounds/plugins/nvcf/nvcf_decorator.py +6 -1
  57. metaflow_extensions/outerbounds/plugins/nvct/__init__.py +0 -0
  58. metaflow_extensions/outerbounds/plugins/nvct/exceptions.py +71 -0
  59. metaflow_extensions/outerbounds/plugins/nvct/nvct.py +131 -0
  60. metaflow_extensions/outerbounds/plugins/nvct/nvct_cli.py +289 -0
  61. metaflow_extensions/outerbounds/plugins/nvct/nvct_decorator.py +286 -0
  62. metaflow_extensions/outerbounds/plugins/nvct/nvct_runner.py +218 -0
  63. metaflow_extensions/outerbounds/plugins/nvct/utils.py +29 -0
  64. metaflow_extensions/outerbounds/plugins/ollama/__init__.py +225 -0
  65. metaflow_extensions/outerbounds/plugins/ollama/constants.py +1 -0
  66. metaflow_extensions/outerbounds/plugins/ollama/exceptions.py +22 -0
  67. metaflow_extensions/outerbounds/plugins/ollama/ollama.py +1924 -0
  68. metaflow_extensions/outerbounds/plugins/ollama/status_card.py +292 -0
  69. metaflow_extensions/outerbounds/plugins/optuna/__init__.py +48 -0
  70. metaflow_extensions/outerbounds/plugins/profilers/simple_card_decorator.py +96 -0
  71. metaflow_extensions/outerbounds/plugins/s3_proxy/__init__.py +7 -0
  72. metaflow_extensions/outerbounds/plugins/s3_proxy/binary_caller.py +132 -0
  73. metaflow_extensions/outerbounds/plugins/s3_proxy/constants.py +11 -0
  74. metaflow_extensions/outerbounds/plugins/s3_proxy/exceptions.py +13 -0
  75. metaflow_extensions/outerbounds/plugins/s3_proxy/proxy_bootstrap.py +59 -0
  76. metaflow_extensions/outerbounds/plugins/s3_proxy/s3_proxy_api.py +93 -0
  77. metaflow_extensions/outerbounds/plugins/s3_proxy/s3_proxy_decorator.py +250 -0
  78. metaflow_extensions/outerbounds/plugins/s3_proxy/s3_proxy_manager.py +225 -0
  79. metaflow_extensions/outerbounds/plugins/secrets/secrets.py +38 -2
  80. metaflow_extensions/outerbounds/plugins/snowflake/snowflake.py +81 -11
  81. metaflow_extensions/outerbounds/plugins/snowpark/snowpark.py +18 -8
  82. metaflow_extensions/outerbounds/plugins/snowpark/snowpark_cli.py +6 -0
  83. metaflow_extensions/outerbounds/plugins/snowpark/snowpark_client.py +45 -18
  84. metaflow_extensions/outerbounds/plugins/snowpark/snowpark_decorator.py +18 -9
  85. metaflow_extensions/outerbounds/plugins/snowpark/snowpark_job.py +10 -4
  86. metaflow_extensions/outerbounds/plugins/torchtune/__init__.py +163 -0
  87. metaflow_extensions/outerbounds/plugins/vllm/__init__.py +255 -0
  88. metaflow_extensions/outerbounds/plugins/vllm/constants.py +1 -0
  89. metaflow_extensions/outerbounds/plugins/vllm/exceptions.py +1 -0
  90. metaflow_extensions/outerbounds/plugins/vllm/status_card.py +352 -0
  91. metaflow_extensions/outerbounds/plugins/vllm/vllm_manager.py +621 -0
  92. metaflow_extensions/outerbounds/remote_config.py +46 -9
  93. metaflow_extensions/outerbounds/toplevel/global_aliases_for_metaflow_package.py +94 -2
  94. metaflow_extensions/outerbounds/toplevel/ob_internal.py +4 -0
  95. metaflow_extensions/outerbounds/toplevel/plugins/ollama/__init__.py +1 -0
  96. metaflow_extensions/outerbounds/toplevel/plugins/optuna/__init__.py +1 -0
  97. metaflow_extensions/outerbounds/toplevel/plugins/torchtune/__init__.py +1 -0
  98. metaflow_extensions/outerbounds/toplevel/plugins/vllm/__init__.py +1 -0
  99. metaflow_extensions/outerbounds/toplevel/s3_proxy.py +88 -0
  100. {ob_metaflow_extensions-1.1.130.dist-info → ob_metaflow_extensions-1.5.1.dist-info}/METADATA +2 -2
  101. ob_metaflow_extensions-1.5.1.dist-info/RECORD +133 -0
  102. metaflow_extensions/outerbounds/plugins/nim/utilities.py +0 -5
  103. ob_metaflow_extensions-1.1.130.dist-info/RECORD +0 -56
  104. {ob_metaflow_extensions-1.1.130.dist-info → ob_metaflow_extensions-1.5.1.dist-info}/WHEEL +0 -0
  105. {ob_metaflow_extensions-1.1.130.dist-info → ob_metaflow_extensions-1.5.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,255 @@
1
+ from metaflow.decorators import StepDecorator
2
+ from metaflow import current
3
+ import functools
4
+ from enum import Enum
5
+ import threading
6
+ from metaflow.unbounded_foreach import UBF_CONTROL, UBF_TASK
7
+ from metaflow.metaflow_config import from_conf
8
+
9
+ from .vllm_manager import VLLMOpenAIManager, VLLMPyManager
10
+ from .status_card import VLLMStatusCard, CardDecoratorInjector
11
+
12
+ __mf_promote_submodules__ = ["plugins.vllm"]
13
+
14
+
15
# Containers published on Metaflow's `current` object so that user step code
# can discover the vLLM sidecar started by the @vllm decorator.
class OpenAIAPIInfo:
    """Connection details for a locally running OpenAI-compatible vLLM server."""

    def __init__(self, local_endpoint, local_api_key):
        # Base URL of the local server (e.g. http://127.0.0.1:<port>/v1) and
        # the API key clients must present; both supplied by the decorator.
        self.local_endpoint = local_endpoint
        self.local_api_key = local_api_key
21
+
22
+
23
class VLLM:
    """Handle to a native vLLM engine instance, exposed as `current.vllm`."""

    def __init__(self, llm):
        # The underlying engine object handed over by the decorator.
        self.llm = llm
26
+
27
+
28
class VLLMDecorator(StepDecorator, CardDecoratorInjector):
    """
    This decorator is used to run vllm APIs as Metaflow task sidecars.

    User code call
    --------------
    @vllm(
        model="...",
        ...
    )

    Valid backend options
    ---------------------
    - 'local': Run as a separate process on the local task machine.

    Valid model options
    -------------------
    Any HuggingFace model identifier, e.g. 'meta-llama/Llama-3.2-1B'

    NOTE: vLLM's OpenAI-compatible server serves ONE model per server instance.
    If you need multiple models, you must create multiple @vllm decorators.

    Parameters
    ----------
    model: str
        HuggingFace model identifier to be served by vLLM.
    backend: str
        Determines where and how to run the vLLM process.
    openai_api_server: bool
        Whether to use OpenAI-compatible API server mode (subprocess) instead of native engine.
        Default is False (uses native engine).
        Set to True for backward compatibility with existing code.
    debug: bool
        Whether to turn on verbose debugging logs.
    stream_logs_to_card: bool
        Whether to stream vLLM server logs into the status card.
        Only used when openai_api_server=True.
    card_refresh_interval: int
        Interval in seconds for refreshing the vLLM status card.
        Only used when openai_api_server=True.
    max_retries: int
        Maximum number of retries checking for vLLM server startup.
        Only used when openai_api_server=True.
    retry_alert_frequency: int
        Frequency of alert logs for vLLM server startup retries.
        Only used when openai_api_server=True.
    engine_args : dict
        Additional keyword arguments to pass to the vLLM engine.
        For example, `tensor_parallel_size=2`.
    """

    name = "vllm"
    defaults = {
        "model": None,
        "backend": "local",
        "openai_api_server": False,  # Default to native engine
        "debug": False,
        "stream_logs_to_card": False,
        "card_refresh_interval": 10,
        "max_retries": 60,
        "retry_alert_frequency": 5,
        "engine_args": {},
    }

    def step_init(
        self, flow, graph, step_name, decorators, environment, flow_datastore, logger
    ):
        super().step_init(
            flow, graph, step_name, decorators, environment, flow_datastore, logger
        )

        # Validate that a model is specified; fail at graph-init time rather
        # than deep inside task execution.
        if not self.attributes["model"]:
            raise ValueError(
                f"@vllm decorator on step '{step_name}' requires a 'model' parameter. "
                f"Example: @vllm(model='meta-llama/Llama-3.2-1B')"
            )

        # Attach the vllm status card only for API server mode
        # (native engine mode has no card-driven monitoring).
        if self.attributes["openai_api_server"]:
            self.attach_card_decorator(
                flow,
                step_name,
                "vllm_status",
                "blank",
                refresh_interval=self.attributes["card_refresh_interval"],
            )

    def task_decorate(
        self, step_func, flow, graph, retry_count, max_user_code_retries, ubf_context
    ):
        # Wrap the user's step so the vLLM sidecar is set up before it runs
        # and torn down afterwards.
        @functools.wraps(step_func)
        def vllm_wrapper():
            # FIXME: Kind of ugly branch. Causing branching elsewhere.
            # Other possible code paths:
            # - OpenAI batch API
            # - Embedding
            # - Special types of models
            if self.attributes["openai_api_server"]:
                # API Server mode (existing functionality)
                self._run_api_server_mode(step_func)
            else:
                # Native engine mode (new functionality)
                self._run_native_engine_mode(step_func)

        return vllm_wrapper

    def _run_api_server_mode(self, step_func):
        """Run vLLM in API server mode (subprocess, existing functionality)"""
        self.vllm_manager = None
        self.status_card = None
        self.card_monitor_thread = None

        try:
            self.status_card = VLLMStatusCard(
                refresh_interval=self.attributes["card_refresh_interval"]
            )

            def monitor_card():
                # Background renderer: refreshes the status card periodically
                # until the thread's `_stop_event` attribute is flipped.
                try:
                    self.status_card.on_startup(current.card["vllm_status"])

                    # NOTE(review): `_stop_event` is a plain bool attribute set
                    # from the main thread, not a threading.Event — relies on
                    # attribute assignment visibility between threads.
                    while not getattr(self.card_monitor_thread, "_stop_event", False):
                        try:
                            self.status_card.on_update(
                                current.card["vllm_status"], None
                            )
                            import time

                            time.sleep(self.attributes["card_refresh_interval"])
                        except Exception as e:
                            # Stop refreshing on render errors; the card is
                            # best-effort and must not affect the task.
                            if self.attributes["debug"]:
                                print(f"[@vllm] Card monitoring error: {e}")
                            break
                except Exception as e:
                    if self.attributes["debug"]:
                        print(f"[@vllm] Card monitor thread error: {e}")
                    self.status_card.on_error(current.card["vllm_status"], str(e))

            # Daemon thread so a wedged card renderer never blocks task exit.
            self.card_monitor_thread = threading.Thread(
                target=monitor_card, daemon=True
            )
            self.card_monitor_thread._stop_event = False
            self.card_monitor_thread.start()
            # Boot the vLLM server subprocess; the manager handles startup
            # retries and health checking.
            self.vllm_manager = VLLMOpenAIManager(
                model=self.attributes["model"],
                backend=self.attributes["backend"],
                debug=self.attributes["debug"],
                status_card=self.status_card,
                max_retries=self.attributes["max_retries"],
                retry_alert_frequency=self.attributes["retry_alert_frequency"],
                stream_logs_to_card=self.attributes["stream_logs_to_card"],
                **self.attributes["engine_args"],
            )
            # Publish connection info to user code as `current.vllm`.
            current._update_env(
                dict(
                    vllm=OpenAIAPIInfo(
                        local_endpoint=f"http://127.0.0.1:{self.vllm_manager.port}/v1",
                        local_api_key="token123",
                    )
                )
            )

            if self.attributes["debug"]:
                print("[@vllm] API server mode initialized.")

        except Exception as e:
            if self.status_card:
                self.status_card.add_event("error", f"Initialization failed: {str(e)}")
                try:
                    self.status_card.on_error(current.card["vllm_status"], str(e))
                except:
                    # NOTE(review): bare except is deliberate best-effort here —
                    # a card failure must not mask the original error re-raised
                    # below.
                    pass
            print(f"[@vllm] Error initializing API server mode: {e}")
            raise

        try:
            if self.status_card:
                self.status_card.add_event("info", "Starting user step function")
            step_func()
            if self.status_card:
                self.status_card.add_event(
                    "success", "User step function completed successfully"
                )
        finally:
            # Always tear down the server, even if the user step raised.
            if self.vllm_manager:
                self.vllm_manager.terminate_models()

            if self.card_monitor_thread and self.status_card:
                import time

                try:
                    # One final render so terminal state reaches the card.
                    self.status_card.on_update(current.card["vllm_status"], None)
                except Exception as e:
                    if self.attributes["debug"]:
                        print(f"[@vllm] Final card update error: {e}")
                # Give the card service a moment to flush the final update.
                time.sleep(2)

            if self.card_monitor_thread:
                self.card_monitor_thread._stop_event = True
                self.card_monitor_thread.join(timeout=5)
                if self.attributes["debug"]:
                    print("[@vllm] Card monitoring thread stopped.")

    def _run_native_engine_mode(self, step_func):
        """Run vLLM in native engine mode (direct LLM API access)"""
        self.vllm = None

        try:
            if self.attributes["debug"]:
                print("[@vllm] Initializing native engine mode")

            self.vllm = VLLMPyManager(
                model=self.attributes["model"],
                debug=self.attributes["debug"],
                **self.attributes["engine_args"],
            )
            # Publish the raw engine to user code as `current.vllm.llm`.
            current._update_env(dict(vllm=VLLM(llm=self.vllm.engine)))

            if self.attributes["debug"]:
                print("[@vllm] Native engine mode initialized.")

        except Exception as e:
            print(f"[@vllm] Error initializing native engine mode: {e}")
            raise

        try:
            step_func()
        finally:
            # Always release the engine (presumably frees GPU/accelerator
            # resources — see VLLMPyManager.terminate_engine).
            if self.vllm:
                self.vllm.terminate_engine()
@@ -0,0 +1 @@
1
# Shared constant for the vLLM integration. The value suggests a
# Metaflow-scoped vLLM namespace/suffix; consumers are not visible in this
# module — TODO confirm against the importing code.
VLLM_SUFFIX = "mf.vllm"
@@ -0,0 +1 @@
1
+ from metaflow.exception import MetaflowException
@@ -0,0 +1,352 @@
1
+ from metaflow.cards import Markdown, Table, VegaChart
2
+ from metaflow.metaflow_current import current
3
+ from datetime import datetime
4
+ import threading
5
+ import time
6
+
7
+
8
+ from metaflow.exception import MetaflowException
9
+ from collections import defaultdict
10
+
11
+
12
class CardDecoratorInjector:
    """
    Mixin that lets other first-class Metaflow step decorators inject a
    @card decorator onto the step they are attached to.
    """

    # Class-level cache: step_name -> {card_id: bool}. True means this mixin
    # attached the card itself; False means a user-supplied @card with the
    # same id already existed on the step.
    _first_time_init = defaultdict(dict)

    @classmethod
    def _get_first_time_init_cached_value(cls, step_name, card_id):
        # None signals "not decided yet" for this (step, card) pair.
        return cls._first_time_init.get(step_name, {}).get(card_id, None)

    @classmethod
    def _set_first_time_init_cached_value(cls, step_name, card_id, value):
        cls._first_time_init[step_name][card_id] = value

    def _card_deco_already_attached(self, step, card_id):
        # A matching @card must carry an explicit, equal id.
        return any(
            deco.name == "card"
            and deco.attributes["id"]
            and deco.attributes["id"] == card_id
            for deco in step.decorators
        )

    def _get_step(self, flow, step_name):
        # Linear scan over the flow's steps; None when absent.
        return next((s for s in flow if s.name == step_name), None)

    def _first_time_init_check(self, step_dag_node, card_id):
        """Return True when no @card with this id is attached to the step yet."""
        return not self._card_deco_already_attached(step_dag_node, card_id)

    def attach_card_decorator(
        self,
        flow,
        step_name,
        card_id,
        card_type,
        refresh_interval=5,
    ):
        """
        This method is called `step_init` in your StepDecorator code since
        this class is used as a Mixin
        """
        from metaflow import decorators as _decorators

        if not all([card_id, card_type]):
            raise MetaflowException(
                "`INJECTED_CARD_ID` and `INJECTED_CARD_TYPE` must be set in the `CardDecoratorInjector` Mixin"
            )

        step_dag_node = self._get_step(flow, step_name)
        # Decide at most once per (step, card); later calls are no-ops.
        if self._get_first_time_init_cached_value(step_name, card_id) is not None:
            return
        if self._first_time_init_check(step_dag_node, card_id):
            self._set_first_time_init_cached_value(step_name, card_id, True)
            spec = "card:type=%s,id=%s,refresh_interval=%s" % (
                card_type,
                card_id,
                str(refresh_interval),
            )
            _decorators._attach_decorators_to_step(step_dag_node, [spec])
        else:
            self._set_first_time_init_cached_value(step_name, card_id, False)
78
+
79
+
80
class CardRefresher:
    """
    Interface for periodically refreshed status cards.

    Subclasses set `CARD_ID` and implement the lifecycle hooks below.
    """

    CARD_ID = None

    def on_startup(self, current_card):
        """Called once when monitoring starts, before any updates."""
        # Bug fix: the messages previously referenced nonexistent method names
        # (make_card / error_card / update_card); they now name the actual hooks.
        raise NotImplementedError("on_startup method must be implemented")

    def on_error(self, current_card, error_message):
        """Called when an error occurs while rendering/refreshing the card."""
        raise NotImplementedError("on_error method must be implemented")

    def on_update(self, current_card, data_object):
        """Called on every refresh tick with the latest data object."""
        raise NotImplementedError("on_update method must be implemented")

    def sqlite_fetch_func(self, conn):
        """Fetch the data object for `on_update` from a sqlite connection."""
        raise NotImplementedError("sqlite_fetch_func must be implemented")
95
+
96
+
97
class VLLMStatusCard(CardRefresher):
    """
    Real-time status card for vLLM system monitoring.
    Shows server health, model status, and recent events.

    Intended to be inherited from in a step decorator like this:
    class VLLMDecorator(StepDecorator, VLLMStatusCard):
    """

    CARD_ID = "vllm_status"

    def __init__(self, refresh_interval=10):
        # Seconds between renders; callers use this to pace on_update calls.
        self.refresh_interval = refresh_interval
        # Single shared state blob rendered by render_card_fresh();
        # every mutation happens under self._lock.
        self.status_data = {
            "server": {
                "status": "Starting",
                "uptime_start": None,
                "last_health_check": None,
                "health_status": "Unknown",
                "models": [],
            },
            "models": {},  # model_name -> {status, load_time, etc}
            "performance": {
                "install_time": None,
                "server_startup_time": None,
                "total_initialization_time": None,
            },
            "versions": {
                "vllm": "Detecting...",
            },
            "events": [],  # Recent events log
            "logs": [],
        }
        self._lock = threading.Lock()
        self._already_rendered = False

    def update_status(self, category, data):
        """Thread-safe method to update status data"""
        with self._lock:
            # Silently ignores unknown categories.
            if category in self.status_data:
                self.status_data[category].update(data)

    def add_log_line(self, log_line):
        """Add a log line to the logs."""
        with self._lock:
            self.status_data["logs"].append(log_line)
            # Keep only last 20 lines
            self.status_data["logs"] = self.status_data["logs"][-20:]

    def add_event(self, event_type, message, timestamp=None):
        """Add an event to the timeline"""
        if timestamp is None:
            timestamp = datetime.now()

        with self._lock:
            # Newest first, so the card shows the most recent events on top.
            self.status_data["events"].insert(
                0,
                {
                    "type": event_type,  # 'info', 'warning', 'error', 'success'
                    "message": message,
                    "timestamp": timestamp,
                },
            )
            # Keep only last 10 events
            self.status_data["events"] = self.status_data["events"][:10]

    # def get_circuit_breaker_emoji(self, state):
    #     """Get status emoji for circuit breaker state"""
    #     emoji_map = {"CLOSED": "đŸŸĸ", "OPEN": "🔴", "HALF_OPEN": "🟡"}
    #     return emoji_map.get(state, "âšĒ")

    def get_uptime_string(self, start_time):
        """Calculate uptime string"""
        if not start_time:
            return "Not started"

        uptime = datetime.now() - start_time
        hours, remainder = divmod(int(uptime.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)

        # Render only the most significant units: "2h 3m 4s", "3m 4s", or "4s".
        if hours > 0:
            return f"{hours}h {minutes}m {seconds}s"
        elif minutes > 0:
            return f"{minutes}m {seconds}s"
        else:
            return f"{seconds}s"

    def on_startup(self, current_card):
        """Initialize the card when monitoring starts"""
        current_card.append(Markdown("# 🚀 `@vllm` Status Dashboard"))
        current_card.append(Markdown("_Initializing vLLM system..._"))
        current_card.refresh()

    def render_card_fresh(self, current_card, data):
        """Render the complete card with all status information"""
        self._already_rendered = True
        current_card.clear()

        current_card.append(Markdown("# 🚀 `@vllm` Status Dashboard"))

        versions = data.get("versions", {})
        vllm_version = versions.get("vllm", "Unknown")
        current_card.append(Markdown(f"**vLLM Version:** `{vllm_version}`"))

        current_card.append(
            Markdown(f"_Last updated: {datetime.now().strftime('%H:%M:%S')}_")
        )

        server_data = data["server"]
        uptime = self.get_uptime_string(server_data.get("uptime_start"))
        server_status = server_data.get("status", "Unknown")
        # NOTE(review): reads server_data["model"] (singular), but __init__
        # seeds only server_data["models"] — the producer presumably sets
        # "model" later; verify against VLLMOpenAIManager.
        model = server_data.get("model", "Unknown")

        # Determine status emoji
        if server_status == "Running":
            status_emoji = "đŸŸĸ"
            model_emoji = "✅"
        elif server_status == "Failed":
            status_emoji = "🔴"
            model_emoji = "❌"
        elif server_status == "Starting":
            status_emoji = "🟡"
            model_emoji = "âŗ"
        else:  # Stopped, etc.
            status_emoji = "âšĢ"
            model_emoji = "âšī¸"

        # Main status section
        current_card.append(
            Markdown(f"## {status_emoji} Server Status: {server_status}")
        )

        if server_status == "Running" and uptime:
            current_card.append(Markdown(f"**Uptime:** {uptime}"))

        # Model information - only show detailed status if server is running
        if server_status == "Running":
            current_card.append(Markdown(f"## {model_emoji} Model: `{model}`"))

            # Show model-specific status if available
            models_data = data.get("models", {})
            if models_data and model in models_data:
                model_info = models_data[model]
                model_status = model_info.get("status", "Unknown")
                load_time = model_info.get("load_time")
                location = model_info.get("location")

                current_card.append(Markdown(f"**Status:** {model_status}"))
                if location:
                    current_card.append(Markdown(f"**Location:** `{location}`"))
                if load_time and isinstance(load_time, (int, float)):
                    current_card.append(Markdown(f"**Load Time:** {load_time:.1f}s"))
        elif model != "Unknown":
            current_card.append(
                Markdown(f"## {model_emoji} Model: `{model}` (Server Stopped)")
            )

        # Simplified monitoring note
        # current_card.append(
        #     Markdown(
        #         "## 🔧 Monitoring\n**Advanced Features:** Disabled (Circuit Breaker, Request Interception)"
        #     )
        # )

        # Performance metrics
        perf_data = data["performance"]
        if any(v is not None for v in perf_data.values()):
            current_card.append(Markdown("## ⚡ Performance"))

            init_metrics = []
            shutdown_metrics = []

            for metric, value in perf_data.items():
                if value is not None:
                    display_value = (
                        f"{value:.1f}s" if isinstance(value, (int, float)) else value
                    )
                    metric_display = metric.replace("_", " ").title()

                    # Split metrics into init vs shutdown tables.
                    if "shutdown" in metric.lower():
                        shutdown_metrics.append([metric_display, display_value])
                    elif metric in [
                        "install_time",
                        "server_startup_time",
                        "total_initialization_time",
                    ]:
                        init_metrics.append([metric_display, display_value])

            if init_metrics:
                current_card.append(Markdown("### Initialization"))
                current_card.append(Table(init_metrics, headers=["Metric", "Duration"]))

            if shutdown_metrics:
                current_card.append(Markdown("### Shutdown"))
                current_card.append(
                    Table(shutdown_metrics, headers=["Metric", "Value"])
                )

        # Recent events
        events = data.get("events", [])
        if events:
            current_card.append(Markdown("## 📝 Recent Events"))
            for event in events[:5]:  # Show last 5 events
                event_type = event.get("type", "info")
                message = event.get("message", "")
                timestamp = event.get("timestamp", datetime.now())

                emoji_map = {
                    "info": "â„šī¸",
                    "success": "✅",
                    "warning": "âš ī¸",
                    "error": "❌",
                }
                emoji = emoji_map.get(event_type, "â„šī¸")

                time_str = (
                    timestamp.strftime("%H:%M:%S")
                    if isinstance(timestamp, datetime)
                    else str(timestamp)
                )
                current_card.append(Markdown(f"- {emoji} `{time_str}` {message}"))

        # Server Logs
        logs = data.get("logs", [])
        if logs:
            current_card.append(Markdown("## 📜 Server Logs"))
            # The logs are appended, so they are in chronological order.
            log_content = "\n".join(logs)
            current_card.append(Markdown(f"```\n{log_content}\n```"))

        current_card.refresh()

    def on_error(self, current_card, error_message):
        """Handle errors in card rendering"""
        # Only replace the card if nothing useful has been rendered yet;
        # otherwise keep the last good render on screen.
        if not self._already_rendered:
            current_card.clear()
            current_card.append(Markdown("# 🚀 `@vllm` Status Dashboard"))
            current_card.append(Markdown(f"## ❌ Error: {str(error_message)}"))
            current_card.refresh()

    def on_update(self, current_card, data_object):
        """Update the card with new data"""
        with self._lock:
            # Shallow copy: nested dicts/lists are still shared with writers.
            # The renderer only reads, so this is OK in practice — TODO confirm.
            current_data = self.status_data.copy()

        if not self._already_rendered:
            self.render_card_fresh(current_card, current_data)
        else:
            # For frequent updates, we could implement incremental updates here
            # For now, just re-render the whole card
            self.render_card_fresh(current_card, current_data)

    def sqlite_fetch_func(self, conn):
        """Required by CardRefresher (which needs a refactor), but we use in-memory data instead"""
        with self._lock:
            return {"status": self.status_data}