truss 0.10.9rc514__py3-none-any.whl → 0.10.9rc601__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of truss has been flagged as potentially problematic.

truss/base/constants.py CHANGED
@@ -29,6 +29,7 @@ BEI_REQUIRED_MAX_NUM_TOKENS = 16384
  TRTLLM_MIN_MEMORY_REQUEST_GI = 10
  HF_MODELS_API_URL = "https://huggingface.co/api/models"
  HF_ACCESS_TOKEN_KEY = "hf_access_token"
+ HF_ACCESS_TOKEN_FILE_NAME = "hf_access_token"
  TRUSSLESS_MAX_PAYLOAD_SIZE = "64M"
  # Alias for TEMPLATES_DIR
  SERVING_DIR: pathlib.Path = TEMPLATES_DIR
@@ -9,7 +9,7 @@ from truss.remote.baseten.api import BasetenApi

  POLL_INTERVAL_SEC = 2
  # NB(nikhil): This helps account for (1) log processing delays (2) clock skews
- CLOCK_SKEW_BUFFER_MS = 10000
+ CLOCK_SKEW_BUFFER_MS = 60000


  class LogWatcher(ABC):
@@ -14,12 +14,19 @@ from .deploy_checkpoints_helpers import (
  setup_environment_variables_and_secrets,
  )

+ # NB(aghilan): Transformers was recently changed to save a chat_template.jinja file instead of inside the tokenizer_config.json file.
+ # Old Models will not have this file, so we check for it and use it if it exists.
+ # vLLM will not automatically resolve the chat_template.jinja file, so we need to pass it to the start command.
+ # This logic is needed for any models trained using Transformers v4.51.3 or later
  VLLM_FULL_START_COMMAND = Template(
- 'sh -c "{%if envvars %}{{ envvars }} {% endif %}vllm serve {{ model_path }}'
- + " --port 8000"
- + " --tensor-parallel-size {{ specify_tensor_parallelism }}"
- + " --dtype bfloat16"
- + '"'
+ "sh -c '{% if envvars %}{{ envvars }} {% endif %}"
+ 'HF_TOKEN="$$(cat /secrets/hf_access_token)" && export HF_TOKEN && '
+ "if [ -f {{ model_path }}/chat_template.jinja ]; then "
+ " vllm serve {{ model_path }} --chat-template {{ model_path }}/chat_template.jinja "
+ " --port 8000 --tensor-parallel-size {{ specify_tensor_parallelism }} --dtype bfloat16; "
+ "else "
+ " vllm serve {{ model_path }} --port 8000 --tensor-parallel-size {{ specify_tensor_parallelism }} --dtype bfloat16; "
+ "fi'"
  )


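For orientation, a minimal sketch of how the new start command renders, assuming the Template above behaves like a Jinja2 template (as the {% if %} / {{ }} syntax suggests) and using made-up values for the checkpoint path and tensor parallelism. The string the package actually produces is asserted in test_render_vllm_full_truss_config further down.

from jinja2 import Template  # assumption: same templating behavior as deploy_full_checkpoints

# Re-declared locally for the sketch; the package defines VLLM_FULL_START_COMMAND as shown above.
vllm_full_start_command = Template(
    "sh -c '{% if envvars %}{{ envvars }} {% endif %}"
    'HF_TOKEN="$$(cat /secrets/hf_access_token)" && export HF_TOKEN && '
    "if [ -f {{ model_path }}/chat_template.jinja ]; then "
    " vllm serve {{ model_path }} --chat-template {{ model_path }}/chat_template.jinja "
    " --port 8000 --tensor-parallel-size {{ specify_tensor_parallelism }} --dtype bfloat16; "
    "else "
    " vllm serve {{ model_path }} --port 8000 --tensor-parallel-size {{ specify_tensor_parallelism }} --dtype bfloat16; "
    "fi'"
)

# Hypothetical inputs, for illustration only.
print(
    vllm_full_start_command.render(
        envvars="", model_path="/tmp/checkpoints/ckpt-1", specify_tensor_parallelism=2
    )
)
# Roughly: sh -c 'HF_TOKEN="$$(cat /secrets/hf_access_token)" && export HF_TOKEN &&
#   if [ -f /tmp/checkpoints/ckpt-1/chat_template.jinja ]; then vllm serve ... --chat-template ...;
#   else vllm serve ...; fi'
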
@@ -4,6 +4,7 @@ import traceback
  from typing import Any, Dict, List, Optional, Tuple, cast

  from rich.columns import Columns
+ from rich.layout import Layout
  from rich.live import Live
  from rich.table import Table
  from rich.text import Text
@@ -96,90 +97,200 @@ class MetricsWatcher(TrainingPollerMixin):
  )
  return True

- def create_metrics_table(self, metrics_data: Dict) -> Columns:
+ def create_metrics_table(self, metrics_data: Dict) -> Layout:
  """Create a Rich table with the metrics"""
- compute_table = self._create_compute_table(metrics_data)
- storage_table = self._maybe_create_storage_table(metrics_data)
- tables = [compute_table]
- if storage_table:
- tables.append(storage_table)
- return Columns(tables, title="Training Job Metrics")
-
- def _create_compute_table(self, metrics_data: Dict) -> Table:
- table = Table(title="Compute Metrics")
- table.add_column("Metric")
- table.add_column("Value")
+ tables = []
+
+ timestamp = self._get_timestamp_from_metrics(metrics_data)
+
+ node_tables = self._create_unified_node_metrics_tables(metrics_data)
+ tables.extend(node_tables)
+
+ storage_tables = self._create_storage_tables(metrics_data)
+ tables.extend(storage_tables)
+
+ columns = Columns(tables, title="Training Job Metrics")
+
+ layout = Layout()
+
+ if timestamp:
+ from rich.panel import Panel
+
+ layout.split_column(
+ Layout(
+ Panel(
+ f"🕐 Last Updated: {timestamp}\n💡 Press Ctrl+C to exit",
+ style="bold cyan",
+ ),
+ size=4,
+ ),
+ Layout(columns),
+ )
+ else:
+ layout.split_column(Layout(columns))
+
+ return layout
+
+ def _get_timestamp_from_metrics(self, metrics_data: Dict) -> Optional[str]:
+ """Extract timestamp from metrics data for display"""
+ # Try to get timestamp from per_node_metrics first. Fall back to main metrics if not there.
+ per_node_metrics = metrics_data.get("per_node_metrics", [])
+ if per_node_metrics and len(per_node_metrics) > 0:
+ first_node_metrics = per_node_metrics[0].get("metrics", {})
+ cpu_usage_data = first_node_metrics.get("cpu_usage", [])
+ if cpu_usage_data and len(cpu_usage_data) > 0:
+ timestamp = cpu_usage_data[-1].get("timestamp")
+ if timestamp:
+ return common.format_localized_time(timestamp)

- # Add timestamp if available
  cpu_usage_data = metrics_data.get("cpu_usage", [])
  if cpu_usage_data and len(cpu_usage_data) > 0:
- latest_timestamp = cpu_usage_data[-1].get("timestamp")
- # TODO: API result has missing timezone info.
- if latest_timestamp:
- table.add_row(
- "Timestamp", common.format_localized_time(latest_timestamp)
- )
- table.add_section()
+ timestamp = cpu_usage_data[-1].get("timestamp")
+ if timestamp:
+ return common.format_localized_time(timestamp)
+
+ return None
+
+ def _create_unified_node_metrics_tables(self, metrics_data: Dict) -> List[Table]:
+ """Create tables for node metrics, handling both single and multi-node scenarios"""
+ tables = []
+
+ per_node_metrics = metrics_data.get("per_node_metrics", [])
+
+ if not per_node_metrics:
+ # Job is likely just starting up - it takes some type for the
+ # the metrics to become available after the job starts running.
+ from rich.text import Text
+
+ waiting_table = Table(title="Training Job Status")
+ waiting_table.add_column("Status")
+ waiting_table.add_column("Message")
+
+ waiting_table.add_row(
+ "Status",
+ Text("⏳ Waiting for metrics to become available...", style="yellow"),
+ )
+ waiting_table.add_row(
+ "Note",
+ Text(
+ "Metrics will appear once the training job starts running.",
+ style="dim",
+ ),
+ )
+
+ tables.append(waiting_table)
+ return tables
+
+ for node_metrics in per_node_metrics:
+ node_id = node_metrics.get("node_id", "Unknown")
+ metrics = node_metrics.get("metrics", {})
+
+ if not metrics:
+ continue

- # CPU metrics
- cpu_usage = self._get_latest_metric(metrics_data.get("cpu_usage", []))
+ table = self._create_node_table(node_id, metrics)
+ tables.append(table)
+
+ return tables
+
+ def _create_node_table(self, node_id: str, metrics: Dict) -> Table:
+ """Create a table for a single node's metrics"""
+ table = Table(title=f"Node: {node_id}")
+ table.add_column("Metric")
+ table.add_column("Value")
+
+ cpu_usage = self._get_latest_metric(metrics.get("cpu_usage", []))
  if cpu_usage is not None:
- table.add_row("CPU Usage", f"{cpu_usage:.2f} cores")
+ table.add_row("CPU usage", f"{cpu_usage:.2f} cores")

- cpu_memory = self._get_latest_metric(
- metrics_data.get("cpu_memory_usage_bytes", [])
- )
+ cpu_memory = self._get_latest_metric(metrics.get("cpu_memory_usage_bytes", []))
  if cpu_memory is not None:
  formatted_value, color = self._format_bytes(cpu_memory)
- table.add_row("CPU Memory", Text(formatted_value, style=color))
+ table.add_row("CPU memory", Text(formatted_value, style=color))

- # Add separator after CPU metrics
- table.add_section()
+ if cpu_usage is not None or cpu_memory is not None:
+ table.add_section()

- # GPU metrics - grouped by GPU ID
- gpu_metrics = metrics_data.get("gpu_utilization", {})
- gpu_memory = metrics_data.get("gpu_memory_usage_bytes", {})
+ gpu_utilization = metrics.get("gpu_utilization", {})
+ gpu_memory = metrics.get("gpu_memory_usage_bytes", {})

- for gpu_id in sorted(set(gpu_metrics.keys()) | set(gpu_memory.keys())):
- # Add GPU utilization
- latest_util = self._get_latest_metric(gpu_metrics.get(gpu_id, []))
+ # API should return same GPU IDs for utilization and memory
+ keys = gpu_utilization.keys()
+ for idx, gpu_id in enumerate(keys):
+ latest_util = self._get_latest_metric(gpu_utilization.get(gpu_id, []))
  if latest_util is not None:
- table.add_row(f"GPU {gpu_id} Usage", f"{latest_util * 100:.1f}%")
+ table.add_row(f"GPU {gpu_id} utilization", f"{latest_util * 100:.1f}%")

- # Add GPU memory right after its utilization
  latest_memory = self._get_latest_metric(gpu_memory.get(gpu_id, []))
  if latest_memory is not None:
  formatted_value, color = self._format_bytes(latest_memory)
  table.add_row(
- f"GPU {gpu_id} Memory", Text(formatted_value, style=color)
+ f"GPU {gpu_id} memory", Text(formatted_value, style=color)
  )

- # Add separator after each GPU's metrics (except for the last one)
- if gpu_id != max(set(gpu_metrics.keys()) | set(gpu_memory.keys())):
+ if idx != len(keys) - 1:
  table.add_section()

- # Add separator before storage metrics
- if gpu_metrics or gpu_memory:
- table.add_section()
- return table
+ ephemeral_storage = metrics.get("ephemeral_storage")
+ if ephemeral_storage:
+ if gpu_utilization or gpu_memory:
+ table.add_section()

- def _maybe_create_storage_table(self, metrics_data: Dict) -> Optional[Table]:
- ephemeral_storage_metrics = metrics_data.get("ephemeral_storage")
- cache_storage_metrics = metrics_data.get("cache")
- if ephemeral_storage_metrics or cache_storage_metrics:
- storage_table = Table(title="Storage Metrics")
- storage_table.add_column("Storage Type")
- storage_table.add_column("Usage")
- storage_table.add_column("Utilization")
- did_add_ephemeral = self._maybe_format_storage_table_row(
- storage_table, "Ephemeral Storage", ephemeral_storage_metrics
+ usage_bytes = self._get_latest_metric(
+ ephemeral_storage.get("usage_bytes", [])
  )
- did_add_cache = self._maybe_format_storage_table_row(
- storage_table, "Cache Storage", cache_storage_metrics
+ utilization = self._get_latest_metric(
+ ephemeral_storage.get("utilization", [])
  )
- if did_add_ephemeral or did_add_cache:
- return storage_table
- return None
+
+ if usage_bytes is not None:
+ formatted_value, color = self._format_bytes(usage_bytes)
+ table.add_row("Eph. storage usage", Text(formatted_value, style=color))
+
+ if utilization is not None:
+ utilization_percent = utilization * 100
+ if utilization_percent > 90:
+ color = "red"
+ elif utilization_percent > 70:
+ color = "yellow"
+ else:
+ color = "green"
+ table.add_row(
+ "Eph. storage utilization",
+ Text(f"{utilization_percent:.1f}%", style=color),
+ )
+
+ return table
+
+ def _create_storage_tables(self, metrics_data: Dict) -> List[Table]:
+ """Create storage tables - only cache per job (ephemeral is now in node tables)"""
+ tables = []
+
+ # Create cache storage table (job-level, shown once)
+ cache_storage = metrics_data.get("cache")
+ if cache_storage:
+ table = self._create_cache_storage_table(cache_storage)
+ if table:
+ tables.append(table)
+
+ return tables
+
+ def _create_cache_storage_table(self, cache_storage: Dict) -> Optional[Table]:
+ """Create table for cache storage metrics (job-level)"""
+ usage_bytes = self._get_latest_metric(cache_storage.get("usage_bytes", []))
+ utilization = self._get_latest_metric(cache_storage.get("utilization", []))
+
+ if usage_bytes is None and utilization is None:
+ return None
+
+ table = Table(title="Cache storage")
+ table.add_column("Storage Type")
+ table.add_column("Usage")
+ table.add_column("Utilization")
+
+ self._maybe_format_storage_table_row(table, "Cache storage", cache_storage)
+
+ return table

  def watch(self, refresh_rate: int = METRICS_POLL_INTERVAL_SEC):
  """Display continuously updating metrics"""
@@ -15,6 +15,8 @@ from botocore.exceptions import ClientError, NoCredentialsError
  from google.cloud import storage
  from huggingface_hub import hf_hub_download

+ from truss.base import constants
+
  B10CP_PATH_TRUSS_ENV_VAR_NAME = "B10CP_PATH_TRUSS"

  GCS_CREDENTIALS = "/app/data/service_account.json"
@@ -108,7 +110,7 @@ class RepositoryFile(ABC):

  class HuggingFaceFile(RepositoryFile):
  def download_to_cache(self):
- secret_path = Path("/etc/secrets/hf-access-token")
+ secret_path = Path(f"/etc/secrets/{constants.HF_ACCESS_TOKEN_FILE_NAME}")
  secret = secret_path.read_text().strip() if secret_path.exists() else None
  try:
  hf_hub_download(
@@ -93,8 +93,6 @@ USER_TRUSS_IGNORE_FILE = ".truss_ignore"
  GCS_CREDENTIALS = "service_account.json"
  S3_CREDENTIALS = "s3_credentials.json"

- HF_ACCESS_TOKEN_FILE_NAME = "hf-access-token"
-
  CLOUD_BUCKET_CACHE = MODEL_CACHE_PATH

  HF_SOURCE_DIR = Path("./root/.cache/huggingface/hub/")
@@ -821,7 +819,7 @@ class ServingImageBuilder(ImageBuilder):
  model_cache_v1=config.model_cache.is_v1,
  model_cache_v2=config.model_cache.is_v2,
  hf_access_token=hf_access_token,
- hf_access_token_file_name=HF_ACCESS_TOKEN_FILE_NAME,
+ hf_access_token_file_name=constants.HF_ACCESS_TOKEN_FILE_NAME,
  external_data_files=external_data_files,
  build_commands=build_commands,
  use_local_src=config.use_local_src,
@@ -8,30 +8,6 @@ FROM {{ base_image_name_and_tag }} AS truss_server
  {%- set python_executable = config.base_image.python_executable_path or 'python3' %}
  ENV PYTHON_EXECUTABLE="{{ python_executable }}"

- # Non-root user for the app.
- ENV APP_USERNAME app
- # We choose a high number for user ID to avoid conflicts with existing users.
- ENV APP_USER_UID 60000
- # Directories owned by the non-root user.
- # Directory containing inference server code.
- ENV APP_HOME /app
- # Directory containing control server code.
- ENV CONTROL_SERVER_DIR /control
- # The non-root user's home directory.
- ENV HOME /home/app
- # Create a non-root user to run model containers.
- RUN useradd -u ${APP_USER_UID} -ms /bin/bash ${APP_USERNAME} \
- && mkdir ${APP_HOME} \
- && mkdir ${CONTROL_SERVER_DIR} \
- && chmod -R a=rwx ${APP_HOME} ${HOME} ${CONTROL_SERVER_DIR}
- ENV PATH=${PATH}:${HOME}/.local/bin
- # CAUTION: COPY directives use the root user, so we need to chown the directories to the non-root user.
- # If we have COPY directives after this, we need to run chown again. (or use COPY --chown ...)
- # RUN chown -R ${APP_USERNAME}:${APP_USERNAME} ${HOME} ${APP_HOME} ${CONTROL_SERVER_DIR}
- # Now switch to the non-root user early, we will switch back to root where needed.
- # USER ${APP_USERNAME}
- ENV PATH="${HOME}/.local/bin:$PATH"
-
  {%- set UV_VERSION = "0.7.19" %}
  {#
  NB(nikhil): We use a semi-complex uv installation command across the board:
@@ -63,6 +39,7 @@ RUN if ! command -v uv >/dev/null 2>&1; then \
  command -v curl >/dev/null 2>&1 || (apt update && apt install -y curl) && \
  curl -LsSf --retry 5 --retry-delay 5 https://astral.sh/uv/{{ UV_VERSION }}/install.sh | sh; \
  fi
+ ENV PATH="/root/.local/bin:$PATH"
  {% endblock %}

  {% block base_image_patch %}
@@ -59,6 +59,7 @@ class MethodName(str, enum.Enum):
  CHAT_COMPLETIONS = enum.auto()
  COMPLETIONS = enum.auto()
  IS_HEALTHY = enum.auto()
+ MESSAGES = enum.auto()
  POSTPROCESS = enum.auto()
  PREDICT = enum.auto()
  PREPROCESS = enum.auto()
@@ -244,6 +245,7 @@ class ModelDescriptor:
  is_healthy: Optional[MethodDescriptor]
  completions: Optional[MethodDescriptor]
  chat_completions: Optional[MethodDescriptor]
+ messages: Optional[MethodDescriptor]
  websocket: Optional[MethodDescriptor]

  @cached_property
@@ -291,6 +293,7 @@ class ModelDescriptor:
  setup = cls._safe_extract_descriptor(model_cls, MethodName.SETUP_ENVIRONMENT)
  completions = cls._safe_extract_descriptor(model_cls, MethodName.COMPLETIONS)
  chats = cls._safe_extract_descriptor(model_cls, MethodName.CHAT_COMPLETIONS)
+ messages = cls._safe_extract_descriptor(model_cls, MethodName.MESSAGES)
  is_healthy = cls._safe_extract_descriptor(model_cls, MethodName.IS_HEALTHY)
  if is_healthy and is_healthy.arg_config != ArgConfig.NONE:
  raise errors.ModelDefinitionError(
@@ -359,6 +362,7 @@ class ModelDescriptor:
  is_healthy=is_healthy,
  completions=completions,
  chat_completions=chats,
+ messages=messages,
  websocket=websocket,
  )

@@ -925,6 +929,14 @@ class ModelWrapper:
  )
  return await self._execute_model_endpoint(inputs, request, descriptor)

+ async def messages(
+ self, inputs: InputType, request: starlette.requests.Request
+ ) -> OutputType:
+ descriptor = self._get_descriptor_or_raise(
+ self.model_descriptor.messages, MethodName.MESSAGES
+ )
+ return await self._execute_model_endpoint(inputs, request, descriptor)
+
  async def websocket(self, ws: WebSocket) -> None:
  descriptor = self.model_descriptor.websocket
  assert descriptor, "websocket can only be invoked if present on model."
@@ -231,6 +231,13 @@ class BasetenEndpoints:
  method=self._model.completions, request=request, body_raw=body_raw
  )

+ async def messages(
+ self, request: Request, body_raw: bytes = Depends(parse_body)
+ ) -> Response:
+ return await self._execute_request(
+ method=self._model.messages, request=request, body_raw=body_raw
+ )
+
  async def websocket(self, ws: WebSocket) -> None:
  self.check_healthy()
  trace_ctx = otel_propagate.extract(ws.headers) or None
@@ -428,6 +435,12 @@ class TrussServer:
  methods=["POST"],
  tags=["V1"],
  ),
+ FastAPIRoute(
+ r"/v1/messages",
+ self._endpoints.messages,
+ methods=["POST"],
+ tags=["V1"],
+ ),
  # Websocket endpoint
  FastAPIWebSocketRoute(r"/v1/websocket", self._endpoints.websocket),
  # Endpoint aliases for Sagemaker hosting
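Taken together, the model_wrapper.py and truss_server.py changes above let a model class define a messages method (the test_openai fixture below adds one) and expose it at a new POST /v1/messages route next to the existing completions and chat-completions routes. A client-side sketch, assuming a hypothetical Truss server on localhost:8080 and a made-up request body; the body is forwarded to the model's messages method:

import requests

# Hypothetical local deployment and payload shape; adjust to what the model expects.
response = requests.post(
    "http://localhost:8080/v1/messages",
    json={"messages": [{"role": "user", "content": "Hello"}]},
)
print(response.status_code, response.text)
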
@@ -120,26 +120,16 @@ RUN mkdir -p {{ supervisor_log_dir }}
  COPY supervisord.conf {{ supervisor_config_path }}
  ENV SUPERVISOR_SERVER_URL="{{ supervisor_server_url }}"
  ENV SERVER_START_CMD="/docker_server/.venv/bin/supervisord -c {{ supervisor_config_path }}"
- RUN chown -R ${APP_USERNAME}:${APP_USERNAME} ${HOME} ${APP_HOME}
- {#- nginx writes to these directories #}
- {% set nginx_dirs = ["/var/lib/nginx", "/var/log/nginx", "/run"] %}
- RUN mkdir -p {{ nginx_dirs | join(" ") }} && \
- chown -R ${APP_USERNAME}:${APP_USERNAME} {{ nginx_dirs | join(" ") }}
- USER ${APP_USERNAME}
  ENTRYPOINT ["/docker_server/.venv/bin/supervisord", "-c", "{{ supervisor_config_path }}"]
  {%- elif requires_live_reload %} {#- elif requires_live_reload #}
  ENV HASH_TRUSS="{{ truss_hash }}"
  ENV CONTROL_SERVER_PORT="8080"
  ENV INFERENCE_SERVER_PORT="8090"
  ENV SERVER_START_CMD="/control/.env/bin/python /control/control/server.py"
- RUN chown -R ${APP_USERNAME}:${APP_USERNAME} ${HOME} ${APP_HOME}
- USER ${APP_USERNAME}
  ENTRYPOINT ["/control/.env/bin/python", "/control/control/server.py"]
  {%- else %} {#- else (default inference server) #}
  ENV INFERENCE_SERVER_PORT="8080"
  ENV SERVER_START_CMD="{{ python_executable }} /app/main.py"
- RUN chown -R ${APP_USERNAME}:${APP_USERNAME} ${HOME} ${APP_HOME}
- USER ${APP_USERNAME}
  ENTRYPOINT ["{{ python_executable }}", "/app/main.py"]
  {%- endif %} {#- endif config.docker_server / live_reload #}
  {% endblock %} {#- endblock run #}
@@ -505,8 +505,16 @@ def test_render_vllm_full_truss_config():
  )

  result = render_vllm_full_truss_config(deploy_config)
-
- expected_vllm_command = 'sh -c "HF_TOKEN=$(cat /secrets/hf_token) vllm serve /tmp/training_checkpoints/job123/rank-0/checkpoint-1 --port 8000 --tensor-parallel-size 2 --dtype bfloat16"'
+ expected_vllm_command = (
+ "sh -c 'HF_TOKEN=$(cat /secrets/hf_token) "
+ 'HF_TOKEN="$$(cat /secrets/hf_access_token)" && export HF_TOKEN && '
+ "if [ -f /tmp/training_checkpoints/job123/rank-0/checkpoint-1/chat_template.jinja ]; then "
+ "vllm serve /tmp/training_checkpoints/job123/rank-0/checkpoint-1 "
+ "--chat-template /tmp/training_checkpoints/job123/rank-0/checkpoint-1/chat_template.jinja "
+ "--port 8000 --tensor-parallel-size 2 --dtype bfloat16; else "
+ "vllm serve /tmp/training_checkpoints/job123/rank-0/checkpoint-1 "
+ "--port 8000 --tensor-parallel-size 2 --dtype bfloat16; fi'"
+ )

  assert isinstance(result, truss_config.TrussConfig)
  assert result.model_name == "test-full-model"
@@ -51,6 +51,38 @@ def test_view_training_job_metrics(time_sleep, capfd):
  "0": [{"timestamp": "", "value": 4321}],
  "1": [{"timestamp": "", "value": 2222}],
  },
+ "per_node_metrics": [
+ {
+ "node_id": "node-0",
+ "metrics": {
+ "cpu_usage": [{"timestamp": "", "value": 3.2}],
+ "cpu_memory_usage_bytes": [{"timestamp": "", "value": 1234}],
+ "gpu_utilization": {
+ "0": [{"timestamp": "", "value": 0.2}],
+ "1": [{"timestamp": "", "value": 0.3}],
+ },
+ "gpu_memory_usage_bytes": {
+ "0": [{"timestamp": "", "value": 4321}],
+ "1": [{"timestamp": "", "value": 2222}],
+ },
+ },
+ },
+ {
+ "node_id": "node-1",
+ "metrics": {
+ "cpu_usage": [{"timestamp": "", "value": 2.8}],
+ "cpu_memory_usage_bytes": [{"timestamp": "", "value": 1000}],
+ "gpu_utilization": {
+ "0": [{"timestamp": "", "value": 0.15}],
+ "1": [{"timestamp": "", "value": 0.25}],
+ },
+ "gpu_memory_usage_bytes": {
+ "0": [{"timestamp": "", "value": 4000}],
+ "1": [{"timestamp": "", "value": 2000}],
+ },
+ },
+ },
+ ],
  },
  {
  "training_job": {
@@ -68,6 +100,38 @@ def test_view_training_job_metrics(time_sleep, capfd):
  "0": [{"timestamp": "", "value": 4321}],
  "1": [{"timestamp": "", "value": 2222}],
  },
+ "per_node_metrics": [
+ {
+ "node_id": "node-0",
+ "metrics": {
+ "cpu_usage": [{"timestamp": "", "value": 3.2}],
+ "cpu_memory_usage_bytes": [{"timestamp": "", "value": 1234}],
+ "gpu_utilization": {
+ "0": [{"timestamp": "", "value": 0.2}],
+ "1": [{"timestamp": "", "value": 0.3}],
+ },
+ "gpu_memory_usage_bytes": {
+ "0": [{"timestamp": "", "value": 4321}],
+ "1": [{"timestamp": "", "value": 2222}],
+ },
+ },
+ },
+ {
+ "node_id": "node-1",
+ "metrics": {
+ "cpu_usage": [{"timestamp": "", "value": 2.8}],
+ "cpu_memory_usage_bytes": [{"timestamp": "", "value": 1000}],
+ "gpu_utilization": {
+ "0": [{"timestamp": "", "value": 0.15}],
+ "1": [{"timestamp": "", "value": 0.25}],
+ },
+ "gpu_memory_usage_bytes": {
+ "0": [{"timestamp": "", "value": 4000}],
+ "1": [{"timestamp": "", "value": 2000}],
+ },
+ },
+ },
+ ],
  },
  {
  "training_job": {
@@ -85,6 +149,38 @@ def test_view_training_job_metrics(time_sleep, capfd):
  "0": [{"timestamp": "", "value": 4321}],
  "1": [{"timestamp": "", "value": 2222}],
  },
+ "per_node_metrics": [
+ {
+ "node_id": "node-0",
+ "metrics": {
+ "cpu_usage": [{"timestamp": "", "value": 3.2}],
+ "cpu_memory_usage_bytes": [{"timestamp": "", "value": 1234}],
+ "gpu_utilization": {
+ "0": [{"timestamp": "", "value": 0.2}],
+ "1": [{"timestamp": "", "value": 0.3}],
+ },
+ "gpu_memory_usage_bytes": {
+ "0": [{"timestamp": "", "value": 4321}],
+ "1": [{"timestamp": "", "value": 2222}],
+ },
+ },
+ },
+ {
+ "node_id": "node-1",
+ "metrics": {
+ "cpu_usage": [{"timestamp": "", "value": 2.8}],
+ "cpu_memory_usage_bytes": [{"timestamp": "", "value": 1000}],
+ "gpu_utilization": {
+ "0": [{"timestamp": "", "value": 0.15}],
+ "1": [{"timestamp": "", "value": 0.25}],
+ },
+ "gpu_memory_usage_bytes": {
+ "0": [{"timestamp": "", "value": 4000}],
+ "1": [{"timestamp": "", "value": 2002}],
+ },
+ },
+ },
+ ],
  },
  ]

@@ -10,6 +10,7 @@ import pytest
  import yaml

  from truss.base.constants import (
+ HF_ACCESS_TOKEN_FILE_NAME,
  TRTLLM_BASE_IMAGE,
  TRTLLM_PREDICT_CONCURRENCY,
  TRTLLM_PYTHON_EXECUTABLE,
@@ -17,7 +18,6 @@ from truss.base.constants import (
  )
  from truss.base.truss_config import ModelCache, ModelRepo, TrussConfig
  from truss.contexts.image_builder.serving_image_builder import (
- HF_ACCESS_TOKEN_FILE_NAME,
  ServingImageBuilderContext,
  get_files_to_model_cache_v1,
  )
@@ -13,3 +13,6 @@ class Model:

  def predict(self, input: Dict) -> str:
  return "predict"
+
+ def messages(self, input: Dict) -> str:
+ return "messages"
@@ -106,6 +106,7 @@ class DockerURLs:
  self.predict_url = f"{base_url}/v1/models/model:predict"
  self.completions_url = f"{base_url}/v1/completions"
  self.chat_completions_url = f"{base_url}/v1/chat/completions"
+ self.messages_url = f"{base_url}/v1/messages"

  self.schema_url = f"{base_url}/v1/models/model/schema"
  self.metrics_url = f"{base_url}/metrics"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: truss
- Version: 0.10.9rc514
+ Version: 0.10.9rc601
  Summary: A seamless bridge from model development to model delivery
  Project-URL: Repository, https://github.com/basetenlabs/truss
  Project-URL: Homepage, https://truss.baseten.co
@@ -2,7 +2,7 @@ truss/__init__.py,sha256=CoUcP6vx_pocyemRmpbCPlndkHhdMkABAlr0ZXVuPCk,1163
  truss/api/__init__.py,sha256=spBAa_m1pItiid97iDLKPmumgAkSirPkv-E8RWMZyOk,5090
  truss/api/definitions.py,sha256=QAaIBqL59Q-R7HtLcXcoeCIWBN2HqOzApdFX0PpCq2s,1604
  truss/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- truss/base/constants.py,sha256=qwNNkd9EOAuiTxYLVccJaiPCNRayBAFvyj_GisYOT3I,3488
+ truss/base/constants.py,sha256=ohZalB5yu9yoCIMSEEe5GF4swNDBVj2dAjG39vktW-s,3534
  truss/base/custom_types.py,sha256=FUSIT2lPOQb6gfg6IzT63YBV8r8L6NIZ0D74Fp3e_jQ,2835
  truss/base/errors.py,sha256=zDVLEvseTChdPP0oNhBBQCtQUtZJUaof5zeWMIjqz6o,691
  truss/base/trt_llm_config.py,sha256=CRz3AqGDAyv8YpcBWXUrnfjvNAauyo3yf8ZOGVsSt6g,32782
@@ -12,29 +12,29 @@ truss/cli/chains_commands.py,sha256=y6pdIAGCcKOPG9bPuCXPfSA0onQm5x-tT_3blSBfPYg,
  truss/cli/cli.py,sha256=PaMkuwXZflkU7sa1tEoT_Zmy-iBkEZs1m4IVqcieaeo,30367
  truss/cli/remote_cli.py,sha256=G_xCKRXzgkCmkiZJhUFfsv5YSVgde1jLA5LPQitpZgI,1905
  truss/cli/train_commands.py,sha256=VPwlyfCUumRpwN1m9hkBxHKqobUmIbeQnfqnsl6_uPo,12078
- truss/cli/logs/base_watcher.py,sha256=1qu0bKLKSB7aVociYrdpJD_ItjW8C1t5P6zr1Ku87YU,2785
+ truss/cli/logs/base_watcher.py,sha256=KKyd7lIrdaEeDVt8EtjMioSPGVpLyOcF0ewyzE_GGdQ,2785
  truss/cli/logs/model_log_watcher.py,sha256=NACcP-wkcaroYa2Cb9BZC7Yr0554WZa_FSM2LXOf4A8,1263
  truss/cli/logs/training_log_watcher.py,sha256=r6HRqrLnz-PiKTUXiDYYxg4ZnP8vYcXlEX1YmgHhzlo,1173
  truss/cli/logs/utils.py,sha256=z-U_FG4BUzdZLbE3BnXb4DZQ0zt3LSZ3PiQpLaDuc3o,1031
  truss/cli/train/common.py,sha256=Es1yllSYxjM9x2uBzTGbYwyd8ML66cqqge0XO8_G_X0,992
  truss/cli/train/core.py,sha256=MBOhPSVYOU7wVh09uWQrJDEVOhJQug_2Odv3u6tCVTA,13855
  truss/cli/train/deploy_from_checkpoint_config.yml,sha256=mktaVrfhN8Kjx1UveC4xr-gTW-kjwbHvq6bx_LpO-Wg,371
- truss/cli/train/metrics_watcher.py,sha256=iDD06zt7ze6thy9APE-SlyHwUdetWmbKbN7NUp1jE1U,9128
+ truss/cli/train/metrics_watcher.py,sha256=ftrLQ5m7V1lAqcAvdGbMv5r0aF4D0lypfKjokCBQvLw,12798
  truss/cli/train/poller.py,sha256=TGRzELxsicga0bEXewSX1ujw6lfPmDnHd6nr8zvOFO8,3550
  truss/cli/train/types.py,sha256=alGtr4Q71GeB65PpGMhsoKygw4k_ncR6MKIP1ioP8rI,951
  truss/cli/train/deploy_checkpoints/__init__.py,sha256=wL-M2yu8PxO2tFvjwshXAfPnB-5TlvsBp2v_bdzimRU,99
  truss/cli/train/deploy_checkpoints/deploy_checkpoints.py,sha256=wWFCpaMjM868-f_ChECrGHyMHka1F2VS1pEKPcEw3eM,16667
  truss/cli/train/deploy_checkpoints/deploy_checkpoints_helpers.py,sha256=7CdYgsxDF7nHITyCGpjjTMeaaOvtlCYwF7NSxpKedS0,1723
- truss/cli/train/deploy_checkpoints/deploy_full_checkpoints.py,sha256=pFj7rDcnvb9C4MMfr3wc4aBXfziqFkzw0H883NtQ1Es,3245
+ truss/cli/train/deploy_checkpoints/deploy_full_checkpoints.py,sha256=onY-Xk6n1M5H_QGcV47LhiCq1f9p4bOLkHLTJzH8MnI,3970
  truss/cli/train/deploy_checkpoints/deploy_lora_checkpoints.py,sha256=P91dIAzuhl2GlzmrWwCcYI7uCMT1Lm7C79JQHM_exN4,4442
  truss/cli/utils/common.py,sha256=aWnla4qMSEz57dRMTl7R-EaScsuEpnQUeziGUaIeqeU,6149
  truss/cli/utils/output.py,sha256=GNjU85ZAMp5BI6Yij5wYXcaAvpm_kmHV0nHNmdkMxb0,646
  truss/cli/utils/self_upgrade.py,sha256=eTJZA4Wc8uUp4Qh6viRQp6bZm--wnQp7KWe5KRRpPtg,5427
  truss/contexts/docker_build_setup.py,sha256=cF4ExZgtYvrWxvyCAaUZUvV_DB_7__MqVomUDpalvKo,3925
  truss/contexts/truss_context.py,sha256=uS6L-ACHxNk0BsJwESOHh1lA0OGGw0pb33aFKGsASj4,436
- truss/contexts/image_builder/cache_warmer.py,sha256=TGMV1Mh87n2e_dSowH0sf0rZhZraDOR-LVapZL3a5r8,7377
+ truss/contexts/image_builder/cache_warmer.py,sha256=wKE5zE3efnEpjmiLqAtQZX1pb2z57aRU0uElOxxb5f4,7434
  truss/contexts/image_builder/image_builder.py,sha256=IuRgDeeoHVLzIkJvKtX3807eeqEyaroCs_KWDcIHZUg,1461
- truss/contexts/image_builder/serving_image_builder.py,sha256=21a25Dea0ZOEgs41PKmhXjzEZJr1AuvXgcQBPZi1g4s,33923
+ truss/contexts/image_builder/serving_image_builder.py,sha256=NdjlQDZqKsmFDXx1UqL2HszL4ozPuNC6MFoTbXB5l0w,33886
  truss/contexts/image_builder/util.py,sha256=y2-CjUKv0XV-0w2sr1fUCflysDJLsoU4oPp6tvvoFnk,1203
  truss/contexts/local_loader/docker_build_emulator.py,sha256=rmf7I28zksSmHjwvJMx2rIa6xK4KeR5fBm5YFth_fQg,2464
  truss/contexts/local_loader/dockerfile_parser.py,sha256=GoRJ0Af_3ILyLhjovK5lrCGn1rMxz6W3l681ro17ZzI,1344
@@ -63,12 +63,12 @@ truss/remote/baseten/utils/tar.py,sha256=pMUv--YkwXDngUx1WUOK-KmAIKMcOg2E-CD5x4h
  truss/remote/baseten/utils/transfer.py,sha256=d3VptuQb6M1nyS6kz0BAfeOYDLkMKUjatJXpY-mp-As,1548
  truss/templates/README.md.jinja,sha256=N7CJdyldZuJamj5jLh47le0hFBdu9irVsTBqoxhPNPQ,2476
  truss/templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- truss/templates/base.Dockerfile.jinja,sha256=AXEKM5XyBW5qndMxYQpnz9ldVU1F5ox9uZCqtjkabAI,5084
+ truss/templates/base.Dockerfile.jinja,sha256=vFAJH1lC9jg90-076H2DCmkXUAlpseitIN6c4UwagxA,4020
  truss/templates/cache.Dockerfile.jinja,sha256=LhsVP9F3BATKQGkgya_YT4v6ABTUkpy-Jb3N36zsw10,1030
  truss/templates/cache_requirements.txt,sha256=xoPoJ-OVnf1z6oq_RVM3vCr3ionByyqMLj7wGs61nUs,87
  truss/templates/copy_cache_files.Dockerfile.jinja,sha256=arHldnuclt7vUFHyRz6vus5NGMDkIofm-1RU37A0xZM,98
  truss/templates/docker_server_requirements.txt,sha256=PyhOPKAmKW1N2vLvTfLMwsEtuGpoRrbWuNo7tT6v2Mc,18
- truss/templates/server.Dockerfile.jinja,sha256=-WZ7a4OoV65_25gvvWAZ6_cf5vEENjSPkq59KcRyi0Q,6486
+ truss/templates/server.Dockerfile.jinja,sha256=DkNyshoLWISSb9k4s1SJoT8LycPHjUI0EaKhKaCh5Rg,5994
  truss/templates/control/requirements.txt,sha256=Kk0tYID7trPk5gwX38Wrt2-YGWZAXFJCJRcqJ8ZzCjc,251
  truss/templates/control/control/application.py,sha256=jYeta6hWe1SkfLL3W4IDmdYjg3ZuKqI_UagWYs5RB_E,3793
  truss/templates/control/control/endpoints.py,sha256=FM-sgao7I3gMoUTasM3Xq_g2LDoJQe75JxIoaQxzeNo,10031
@@ -92,9 +92,9 @@ truss/templates/docker_server/proxy.conf.jinja,sha256=Lg-PcZzKflG85exZKHNgW_I6r0
  truss/templates/docker_server/supervisord.conf.jinja,sha256=CoaSLv0Lr8t1tS_q102IFufNX2lWrlbCHJLjMhYjOwM,1711
  truss/templates/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  truss/templates/server/main.py,sha256=kWXrdD8z8IpamyWxc8qcvd5ck9gM1Kz2QH5qHJCnmOQ,222
- truss/templates/server/model_wrapper.py,sha256=k75VVISwwlsx5EGb82UZsu8kCM_i6Yi3-Hd0-Kpm1yo,42055
+ truss/templates/server/model_wrapper.py,sha256=3xrEVKdFMCEnBJ4sZB14Vt_kwGoBxUibkFlCfonBOTE,42554
  truss/templates/server/requirements.txt,sha256=iRR2BEpBQnt-YOiTEKOnaab7tlR4C23V1cuURuIt7ZY,672
- truss/templates/server/truss_server.py,sha256=ob_nceeGtFPZzKKdk_ZZGLoZrJOGE6hR52xM1sPR97A,19498
+ truss/templates/server/truss_server.py,sha256=FuouBLIQZ3-lsW_FGunY-S7hJ9KUNuqjLMOwYKVx8mE,19945
  truss/templates/server/common/__init__.py,sha256=qHIqr68L5Tn4mV6S-PbORpcuJ4jmtBR8aCuRTIWDvNo,85
  truss/templates/server/common/errors.py,sha256=qWeZlmNI8ZGbZbOIp_mtS6IKvUFIzhj3QH8zp-xTp9o,8554
  truss/templates/server/common/patches.py,sha256=uEOzvDnXsHOkTSa8zygGYuR4GHhrFNVHNQc5peJcwvo,1393
@@ -136,10 +136,10 @@ truss/tests/test_truss_gatherer.py,sha256=bn288OEkC49YY0mhly4cAl410ktZPfElNdWwZy
  truss/tests/test_truss_handle.py,sha256=-xz9VXkecXDTslmQZ-dmUmQLnvD0uumRqHS2uvGlMBA,30750
  truss/tests/test_util.py,sha256=hs1bNMkXKEdoPRx4Nw-NAEdoibR92OubZuADGmbiYsQ,1344
  truss/tests/cli/test_cli.py,sha256=yfbVS5u1hnAmmA8mJ539vj3lhH-JVGUvC4Q_Mbort44,787
- truss/tests/cli/train/test_deploy_checkpoints.py,sha256=pPTqyGqvSsrsEHXguo6swLXkVObytLdupfSElqlujPc,25733
- truss/tests/cli/train/test_train_cli_core.py,sha256=ONI4Oc1R0pL-M3WuNsnnHLzqEfMTE0u9g5dE9Ii9yW8,3424
+ truss/tests/cli/train/test_deploy_checkpoints.py,sha256=yCPE5S4D_92hz692QKYDaHvwye4imuz2e1pd1K1pXkE,26203
+ truss/tests/cli/train/test_train_cli_core.py,sha256=T1Xa6-NRk2nTJGX6sXaA8x4qCwL3Ini72PBI2gW7rYM,7879
  truss/tests/cli/train/resources/test_deploy_from_checkpoint_config.yml,sha256=GF7r9l0KaeXiUYCPSBpeMPd2QG6PeWWyI12NdbqLOgc,1930
- truss/tests/contexts/image_builder/test_serving_image_builder.py,sha256=iJA7nxcLXhBmyjhLIKeN64ql0OI_R53l-qSt3SsENV8,22368
+ truss/tests/contexts/image_builder/test_serving_image_builder.py,sha256=ycOxhGIfe5OVcgqZqJvn1Ca2368AN1KdNmIO0vSZ4ko,22368
  truss/tests/contexts/local_loader/test_load_local.py,sha256=D1qMH2IpYA2j5009v50QMgUnKdeOsX15ndkwXe10a4E,801
  truss/tests/contexts/local_loader/test_truss_module_finder.py,sha256=oN1K2lg3ATHY5yOVUTfQIaSqusTF9I2wFaYaTSo5-O4,5342
  truss/tests/local/test_local_config_handler.py,sha256=aLvcOyfppskA2MziVLy_kMcagjxMpO4mjar9zxUN6g0,2245
@@ -236,7 +236,7 @@ truss/tests/test_data/test_go_custom_server_truss/docker/main.go,sha256=WR3mJU1o
  truss/tests/test_data/test_openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  truss/tests/test_data/test_openai/config.yaml,sha256=ByY_Smgx0lw24Yj0hqgofEmL3nrGNj7gZE5iBKlvwxk,235
  truss/tests/test_data/test_openai/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- truss/tests/test_data/test_openai/model/model.py,sha256=GEtIJnWlU1snBid2sS-bZHrjQpP8UzL8tanzyH_tdgE,319
+ truss/tests/test_data/test_openai/model/model.py,sha256=NMyZH6QcJv4TMw2Cd8M02wvmwDqlWJB4ZwtD4n8QU0Y,390
  truss/tests/test_data/test_pyantic_v1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  truss/tests/test_data/test_pyantic_v1/config.yaml,sha256=fqWpH3E4UPEnjvAw6Q9_F5oZZLy69RAfycbgtmCFsXo,270
  truss/tests/test_data/test_pyantic_v1/requirements.txt,sha256=OpG4JAdJME9VWjoNftdHYg-y94k2gbhqdM1_NwOgcT8,13
@@ -316,7 +316,7 @@ truss/truss_handle/build.py,sha256=BKFV-S57tnWcfRffvQ7SPp78BrjmRy3GhgF6ThaIrDM,3
  truss/truss_handle/decorators.py,sha256=PUR5w2rl_cvcsVtAUpcYLzNXuOml9R0-wtpXy-9hDPk,407
  truss/truss_handle/readme_generator.py,sha256=B4XbGwUjzMNOr71DWNAL8kCu5_ZHq7YOM8yVGaOZMSE,716
  truss/truss_handle/truss_gatherer.py,sha256=Xysl_UnCVhehPfZeHa8p7WFp94ENqh-VVpbuqnCui3A,2870
- truss/truss_handle/truss_handle.py,sha256=WF2MQSly9DQ1SoAvqfi87Ulu4llTadpXoncsDjpL79E,40886
+ truss/truss_handle/truss_handle.py,sha256=pGsrsfCSEa5DmrFAoA5Nxr9TBke0Dr2eaq81Ht82j9U,40940
  truss/truss_handle/patch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  truss/truss_handle/patch/calc_patch.py,sha256=Qyk1QmacK4jy9Ia8-93L8VtAWJhw15z22DdZUkBKlys,18334
  truss/truss_handle/patch/constants.py,sha256=pCEi5Pwi8Rnqthrr3VEsWL9EP1P1VV1T8DEYuitHLmc,139
@@ -361,8 +361,8 @@ truss_train/definitions.py,sha256=yFQYJoxK2tDBeKFHR-IJz12jU1CtWRXN-ZERh9zjMHo,66
  truss_train/deployment.py,sha256=zmeJ66kg1Wc7l7bwA_cXqv85uMF77hYl7NPHuhc1NPs,2493
  truss_train/loader.py,sha256=0o66EjBaHc2YY4syxxHVR4ordJWs13lNXnKjKq2wq0U,1630
  truss_train/public_api.py,sha256=9N_NstiUlmBuLUwH_fNG_1x7OhGCytZLNvqKXBlStrM,1220
- truss-0.10.9rc514.dist-info/METADATA,sha256=6LiDk-raxa4I2VayuLXZuzc42A3VTETrJfR2W98ZlH8,6674
- truss-0.10.9rc514.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- truss-0.10.9rc514.dist-info/entry_points.txt,sha256=-MwKfHHQHQ6j0HqIgvxrz3CehCmczDLTD-OsRHnjjuU,130
- truss-0.10.9rc514.dist-info/licenses/LICENSE,sha256=FTqGzu85i-uw1Gi8E_o0oD60bH9yQ_XIGtZbA1QUYiw,1064
- truss-0.10.9rc514.dist-info/RECORD,,
+ truss-0.10.9rc601.dist-info/METADATA,sha256=lVGZYss-iZy1X7xth17tJozGiJKzPr6cpFKm9kb-7qY,6674
+ truss-0.10.9rc601.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ truss-0.10.9rc601.dist-info/entry_points.txt,sha256=-MwKfHHQHQ6j0HqIgvxrz3CehCmczDLTD-OsRHnjjuU,130
+ truss-0.10.9rc601.dist-info/licenses/LICENSE,sha256=FTqGzu85i-uw1Gi8E_o0oD60bH9yQ_XIGtZbA1QUYiw,1064
+ truss-0.10.9rc601.dist-info/RECORD,,