guidellm 0.1.0__py3-none-any.whl → 0.2.0rc20250418__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of guidellm might be problematic. Click here for more details.

Files changed (69)
  1. guidellm/__init__.py +38 -6
  2. guidellm/__main__.py +294 -0
  3. guidellm/backend/__init__.py +19 -6
  4. guidellm/backend/backend.py +238 -0
  5. guidellm/backend/openai.py +532 -122
  6. guidellm/backend/response.py +132 -0
  7. guidellm/benchmark/__init__.py +73 -0
  8. guidellm/benchmark/aggregator.py +760 -0
  9. guidellm/benchmark/benchmark.py +838 -0
  10. guidellm/benchmark/benchmarker.py +334 -0
  11. guidellm/benchmark/entrypoints.py +141 -0
  12. guidellm/benchmark/output.py +946 -0
  13. guidellm/benchmark/profile.py +409 -0
  14. guidellm/benchmark/progress.py +720 -0
  15. guidellm/config.py +34 -56
  16. guidellm/data/__init__.py +4 -0
  17. guidellm/data/prideandprejudice.txt.gz +0 -0
  18. guidellm/dataset/__init__.py +22 -0
  19. guidellm/dataset/creator.py +213 -0
  20. guidellm/dataset/entrypoints.py +42 -0
  21. guidellm/dataset/file.py +90 -0
  22. guidellm/dataset/hf_datasets.py +62 -0
  23. guidellm/dataset/in_memory.py +132 -0
  24. guidellm/dataset/synthetic.py +262 -0
  25. guidellm/objects/__init__.py +18 -0
  26. guidellm/objects/pydantic.py +60 -0
  27. guidellm/objects/statistics.py +947 -0
  28. guidellm/request/__init__.py +12 -10
  29. guidellm/request/loader.py +281 -0
  30. guidellm/request/request.py +79 -0
  31. guidellm/scheduler/__init__.py +51 -3
  32. guidellm/scheduler/result.py +137 -0
  33. guidellm/scheduler/scheduler.py +382 -0
  34. guidellm/scheduler/strategy.py +493 -0
  35. guidellm/scheduler/types.py +7 -0
  36. guidellm/scheduler/worker.py +511 -0
  37. guidellm/utils/__init__.py +16 -29
  38. guidellm/utils/colors.py +8 -0
  39. guidellm/utils/hf_transformers.py +35 -0
  40. guidellm/utils/random.py +43 -0
  41. guidellm/utils/text.py +118 -357
  42. {guidellm-0.1.0.dist-info → guidellm-0.2.0rc20250418.dist-info}/METADATA +96 -79
  43. guidellm-0.2.0rc20250418.dist-info/RECORD +48 -0
  44. {guidellm-0.1.0.dist-info → guidellm-0.2.0rc20250418.dist-info}/WHEEL +1 -1
  45. guidellm-0.2.0rc20250418.dist-info/entry_points.txt +2 -0
  46. guidellm/backend/base.py +0 -320
  47. guidellm/core/__init__.py +0 -24
  48. guidellm/core/distribution.py +0 -190
  49. guidellm/core/report.py +0 -321
  50. guidellm/core/request.py +0 -44
  51. guidellm/core/result.py +0 -545
  52. guidellm/core/serializable.py +0 -169
  53. guidellm/executor/__init__.py +0 -10
  54. guidellm/executor/base.py +0 -213
  55. guidellm/executor/profile_generator.py +0 -343
  56. guidellm/main.py +0 -336
  57. guidellm/request/base.py +0 -194
  58. guidellm/request/emulated.py +0 -391
  59. guidellm/request/file.py +0 -76
  60. guidellm/request/transformers.py +0 -100
  61. guidellm/scheduler/base.py +0 -374
  62. guidellm/scheduler/load_generator.py +0 -196
  63. guidellm/utils/injector.py +0 -70
  64. guidellm/utils/progress.py +0 -196
  65. guidellm/utils/transformers.py +0 -151
  66. guidellm-0.1.0.dist-info/RECORD +0 -35
  67. guidellm-0.1.0.dist-info/entry_points.txt +0 -3
  68. {guidellm-0.1.0.dist-info → guidellm-0.2.0rc20250418.dist-info/licenses}/LICENSE +0 -0
  69. {guidellm-0.1.0.dist-info → guidellm-0.2.0rc20250418.dist-info}/top_level.txt +0 -0
guidellm/core/result.py DELETED
@@ -1,545 +0,0 @@
1
- from time import time
2
- from typing import Any, Dict, List, Literal, Optional, Union
3
-
4
- from loguru import logger
5
- from pydantic import Field
6
-
7
- from guidellm.core.distribution import Distribution
8
- from guidellm.core.request import TextGenerationRequest
9
- from guidellm.core.serializable import Serializable
10
-
11
# Public API of this module, kept alphabetical.
__all__ = [
    "RequestConcurrencyMeasurement",
    "TextGenerationBenchmark",
    "TextGenerationBenchmarkReport",
    "TextGenerationError",
    "TextGenerationResult",
]
18
-
19
-
20
class TextGenerationResult(Serializable):
    """
    A class to represent the result of a text generation request
    for generative AI workloads.

    Tracks the prompt, the streamed output, and the timing measurements
    (start/end times, time to first token, per-token decode times)
    collected while the request runs via start() / output_token() / end().
    """

    request: TextGenerationRequest = Field(
        description="The text generation request used to generate the result.",
    )
    prompt: str = Field(
        default_factory=str,
        description="The input prompt for the text generation.",
    )
    prompt_word_count: int = Field(
        default=0,
        description="The number of words in the input prompt.",
    )
    prompt_token_count: int = Field(
        default=0,
        description="The number of tokens in the input prompt.",
    )
    output: str = Field(
        default_factory=str,
        description="The generated output for the text generation.",
    )
    output_word_count: int = Field(
        default=0,
        description="The number of words in the output.",
    )
    output_token_count: int = Field(
        default=0,
        description="The number of tokens in the output.",
    )
    last_time: Optional[float] = Field(
        default=None,
        description="The last time recorded.",
    )
    first_token_set: bool = Field(
        default=False,
        description="Whether the first token time is set.",
    )
    start_time: Optional[float] = Field(
        default=None,
        description="The start time of the text generation.",
    )
    end_time: Optional[float] = Field(
        default=None,
        description="The end time of the text generation.",
    )
    first_token_time: Optional[float] = Field(
        default=None,
        description="The time taken to decode the first token.",
    )
    decode_times: Distribution = Field(
        default_factory=Distribution,
        description="The distribution of decode times.",
    )

    def start(self, prompt: str):
        """
        Start the text generation by recording the prompt and start time.

        :param prompt: The input prompt for the text generation.
        :type prompt: str
        """
        self.prompt = prompt
        self.prompt_word_count = len(prompt.split())
        # NOTE: character count is a token-count placeholder; a real count
        # may be supplied later via end(prompt_token_count=...).
        self.prompt_token_count = len(prompt)
        # Capture a single timestamp so start_time and last_time are equal;
        # two separate time() calls would make last_time drift past start_time.
        now = time()
        self.start_time = now
        self.last_time = now
        self.first_token_set = False

        logger.info("Text generation started with prompt: '{}'", prompt)

    def output_token(self, token: str):
        """
        Add a token to the output and record the decode time.

        :param token: The decoded token.
        :type token: str
        """
        self._check_recording_started()

        if self.last_time is None:
            raise ValueError(
                "last time is not specified. "
                "Did you call `text_generation_benchmark.start()`?"
            )

        current_counter = time()

        if not self.first_token_set:
            # First token: measured separately as time-to-first-token.
            self.first_token_time = current_counter - self.last_time
            self.first_token_set = True
            logger.debug(f"First token decode time: {self.first_token_time}")
        else:
            # Subsequent tokens feed the inter-token decode distribution.
            decode_time = current_counter - self.last_time
            self.decode_times.add_data([decode_time])
            logger.debug(f"Token '{token}' decoded in {decode_time} seconds")

        self.last_time = current_counter
        self.output += token
        logger.debug("Added token {} to output", token)

    def end(
        self,
        output: Optional[str] = None,
        prompt_token_count: Optional[int] = None,
        output_token_count: Optional[int] = None,
    ):
        """
        End the text generation by recording the output and end time.

        :param output: The generated output for the text generation.
        :type output: str
        :param prompt_token_count: Optional token count for the prompt,
            defaults to word count.
        :type prompt_token_count: Optional[int]
        :param output_token_count: Optional token count for the output,
            defaults to word count.
        :type output_token_count: Optional[int]
        """
        self._check_recording_started()
        self.end_time = time()

        if output:
            self.output = output

        self.output_word_count = len(self.output.split())
        # Word counts act as fallbacks when exact token counts are not given.
        self.output_token_count = output_token_count or self.output_word_count
        self.prompt_token_count = prompt_token_count or self.prompt_word_count

        logger.info(f"Text generation ended with output: '{self.output}'")

    def _check_recording_started(
        self,
    ):
        # Guard: start() must have been called before recording tokens/end.
        if self.start_time is None:
            raise ValueError(
                "start time is not specified. "
                "Did you call `text_generation_benchmark.start()`?",
            )
162
-
163
-
164
class TextGenerationError(Serializable):
    """
    A class to represent an error that occurred during a text generation request
    for generative AI workloads.
    """

    # The request whose execution failed.
    request: TextGenerationRequest = Field(
        description="The text generation request that resulted in an error.",
    )
    # Human-readable description of the failure.
    message: str = Field(
        description="The error message that occurred during text generation.",
    )
176
-
177
-
178
class RequestConcurrencyMeasurement(Serializable):
    """
    A dataclass to represent the concurrency measurement of a request.
    """

    # Timestamp (seconds, as produced by time.time()) of the measurement.
    time: float = Field(description="The time of the measurement.")
    # Cumulative count of successfully completed requests at this point.
    completed: int = Field(description="The number of completed requests.")
    # Cumulative count of failed requests at this point.
    errored: int = Field(description="The number of errored requests.")
    # Number of requests currently in flight at this point.
    processing: int = Field(description="The number of processing requests.")
187
-
188
-
189
class TextGenerationBenchmark(Serializable):
    """
    A class to represent a report of text generation requests
    (results and errors) for generative AI workloads.
    This is a set of results and errors for a specific mode and rate.
    """

    mode: Literal["asynchronous", "synchronous", "throughput"] = Field(
        # Fixed: the description previously named 'async'/'sync', which do not
        # match the allowed Literal values.
        description=(
            "The generation mode, one of 'asynchronous', 'synchronous', "
            "or 'throughput'."
        )
    )
    rate: Optional[float] = Field(
        default=None,
        description="The requested rate of requests per second.",
    )
    results: List[TextGenerationResult] = Field(
        default_factory=list,
        description="The results of the text generation requests.",
    )
    errors: List[TextGenerationError] = Field(
        default_factory=list,
        description="The errors of the text generation requests.",
    )
    concurrencies: List[RequestConcurrencyMeasurement] = Field(
        default_factory=list,
        description="The concurrency measurements of the requests.",
    )

    def __iter__(self):
        """
        Provide an iterator interface to iterate over the results.

        :return: An iterator over the results.
        """
        return iter(self.results)

    @property
    def request_count(self) -> int:
        """
        Get the number of requests in the result.

        :return: The number of requests.
        :rtype: int
        """
        return len(self.results)

    @property
    def error_count(self) -> int:
        """
        Get the number of errors in the result.

        :return: The number of errors.
        :rtype: int
        """
        return len(self.errors)

    @property
    def total_count(self) -> int:
        """
        Get the total number of requests in the result.

        :return: The total number of requests.
        :rtype: int
        """
        return self.request_count + self.error_count

    @property
    def start_time(self) -> Optional[float]:
        """
        Get the start time of the first request in the result.

        :return: The start time of the first request.
        :rtype: Optional[float]
        """
        if not self.results:
            return None

        return self.results[0].start_time

    @property
    def end_time(self) -> Optional[float]:
        """
        Get the end time of the last request in the result.

        :return: The end time of the last request.
        :rtype: Optional[float]
        """
        if not self.results:
            return None

        return self.results[-1].end_time

    @property
    def duration(self) -> float:
        """
        Get the duration of the result in seconds.

        :return: The duration of the result.
        :rtype: float
        """
        if not self.results or not self.start_time or not self.end_time:
            return 0.0

        return self.end_time - self.start_time

    @property
    def completed_request_rate(self) -> float:
        """
        Get the rate of requests per second in the result.

        :return: The rate of requests per second.
        :rtype: float
        """
        if not self.results or not self.duration:
            return 0.0

        return len(self.results) / self.duration

    @property
    def request_latency(self) -> float:
        """
        Get the average request latency in seconds.

        :return: The average request latency in seconds.
        :rtype: float
        """
        if not self.results:
            return 0.0

        return self.request_latency_distribution.mean

    @property
    def request_latency_distribution(self) -> Distribution:
        """
        Get the distribution of request latencies.

        :return: The distribution of request latencies.
        :rtype: Distribution
        """
        return Distribution(
            data=[
                result.end_time - result.start_time
                for result in self.results
                if result.end_time is not None and result.start_time is not None
            ]
        )

    @property
    def time_to_first_token(self) -> float:
        """
        Get the time taken to decode the first token in milliseconds.

        :return: The time taken to decode the first token in milliseconds.
        :rtype: float
        """
        if not self.results:
            return 0.0

        # Distribution values are in seconds; convert to milliseconds.
        return 1000 * self.ttft_distribution.mean

    @property
    def ttft_distribution(self) -> Distribution:
        """
        Get the distribution of time taken to decode the first token.

        :return: The distribution of time taken to decode the first token.
        :rtype: Distribution
        """
        return Distribution(
            data=[
                result.first_token_time
                for result in self.results
                if result.first_token_time is not None
            ]
        )

    @property
    def inter_token_latency(self) -> float:
        """
        Get the average time between tokens in milliseconds.

        :return: The average time between tokens.
        :rtype: float
        """
        if not self.results:
            return 0.0

        # Distribution values are in seconds; convert to milliseconds.
        return 1000 * self.itl_distribution.mean

    @property
    def itl_distribution(self) -> Distribution:
        """
        Get the distribution of time between tokens.

        :return: The distribution of time between tokens.
        :rtype: Distribution
        """
        return Distribution(
            data=[
                decode for result in self.results for decode in result.decode_times.data
            ]
        )

    @property
    def output_token_throughput(self) -> float:
        """
        Get the average token throughput in tokens per second.

        :return: The average token throughput.
        :rtype: float
        """
        if not self.results or not self.duration:
            return 0.0

        total_tokens = sum(result.output_token_count for result in self.results)

        return total_tokens / self.duration

    @property
    def prompt_token_distribution(self) -> Distribution:
        """
        Get the distribution of prompt token counts.

        :return: The distribution of prompt token counts.
        :rtype: Distribution
        """
        return Distribution(data=[result.prompt_token_count for result in self.results])

    @property
    def output_token_distribution(self) -> Distribution:
        """
        Get the distribution of output token counts.

        :return: The distribution of output token counts.
        :rtype: Distribution
        """
        return Distribution(data=[result.output_token_count for result in self.results])

    @property
    def overloaded(self) -> bool:
        """
        Whether the measured completion rate fell well below the requested
        rate, suggesting the system under test could not keep up.

        :return: True if the benchmark appears overloaded, False otherwise.
        :rtype: bool
        """
        if (
            self.rate is None
            or not self.results
            or not self.concurrencies
            or len(self.concurrencies) < 2  # noqa: PLR2004
        ):
            # if rate was not set, sync mode is assumed,
            # or we have less than 2 data points,
            # then we cannot be overloaded by definition
            return False

        # if the calculated rate is less than 75% of the requested rate,
        # safe to assume the system is overloaded
        return self.completed_request_rate < 0.75 * self.rate

    def request_started(self):
        """
        Record the start of a generation request.
        """
        if not self.concurrencies:
            self.concurrencies = [
                RequestConcurrencyMeasurement(
                    time=time(),
                    completed=0,
                    errored=0,
                    processing=1,
                ),
            ]
        else:
            last = self.concurrencies[-1]
            self.concurrencies.append(
                RequestConcurrencyMeasurement(
                    time=time(),
                    completed=last.completed,
                    errored=last.errored,
                    processing=last.processing + 1,
                ),
            )

        logger.info("Text generation request started")

    def request_completed(
        self,
        result: Union[TextGenerationResult, TextGenerationError],
    ):
        """
        Record the completion of a text generation request.

        :param result: The completed result or error.
        :type result: Union[TextGenerationResult, TextGenerationError]
        """
        if not self.concurrencies:
            raise ValueError("Request completed without starting")

        if isinstance(result, TextGenerationError):
            is_error = True
            self.errors.append(result)
            logger.info(
                "Text generation request resulted in error: {}",
                result.message,
            )
        else:
            if not result.start_time or not result.end_time:
                raise ValueError("Start time and End time are not defined")

            is_error = False
            self.results.append(result)
            logger.info("Text generation request completed successfully: {}", result)

        last = self.concurrencies[-1]
        self.concurrencies.append(
            RequestConcurrencyMeasurement(
                time=time(),
                # bool arithmetic: exactly one of completed/errored increments
                completed=last.completed + (not is_error),
                errored=last.errored + is_error,
                processing=last.processing - 1,
            )
        )
506
-
507
-
508
class TextGenerationBenchmarkReport(Serializable):
    """
    A report aggregating text generation benchmarks for generative AI
    workloads: one benchmark per mode/rate combination, plus the arguments
    that produced them.
    """

    benchmarks: List[TextGenerationBenchmark] = Field(
        default_factory=list,
        description="The benchmarks of text generation requests.",
    )
    args: Dict[str, Any] = Field(
        default_factory=dict,
        description="The arguments used for the benchmarks.",
    )

    def __iter__(self):
        """Iterate over the contained benchmarks."""
        return iter(self.benchmarks)

    @property
    def benchmarks_sorted(self) -> List[TextGenerationBenchmark]:
        """
        The benchmarks ordered from lowest to highest completed request rate.

        :return: The sorted list of benchmarks.
        :rtype: List[TextGenerationBenchmark]
        """

        def completed_rate(benchmark: TextGenerationBenchmark) -> float:
            return benchmark.completed_request_rate

        return sorted(self.benchmarks, key=completed_rate)

    def add_benchmark(self, benchmark: TextGenerationBenchmark):
        """
        Append a benchmark to this report.

        :param benchmark: The result to add.
        :type benchmark: TextGenerationBenchmark
        """
        self.benchmarks.append(benchmark)
        logger.debug("Added result: {}", benchmark)
@@ -1,169 +0,0 @@
1
- from pathlib import Path
2
- from typing import Any, Literal, Union, get_args
3
-
4
- import yaml
5
- from loguru import logger
6
- from pydantic import BaseModel, ConfigDict
7
-
8
__all__ = ["Serializable", "SerializableFileType"]


# File formats supported for serialization/deserialization.
SerializableFileType = Literal["yaml", "json"]
12
-
13
-
14
class Serializable(BaseModel):
    """
    A base class for models that require YAML and JSON serialization and
    deserialization.
    """

    model_config = ConfigDict(
        extra="forbid",
        use_enum_values=True,
        validate_assignment=True,
        from_attributes=True,
    )

    def __init__(self, /, **data: Any) -> None:
        super().__init__(**data)
        logger.debug(
            "Initialized new instance of {} with data: {}",
            self.__class__.__name__,
            data,
        )

    def to_yaml(self) -> str:
        """
        Serialize the model to a YAML string.

        :return: YAML string representation of the model.
        """
        logger.debug("Serializing to YAML... {}", self)

        return yaml.dump(self.model_dump())

    @classmethod
    def from_yaml(cls, data: str):
        """
        Deserialize a YAML string to a model instance.

        :param data: YAML string to deserialize.
        :return: An instance of the model.
        """
        logger.debug("Deserializing from YAML... {}", data)

        return cls.model_validate(yaml.safe_load(data))

    def to_json(self) -> str:
        """
        Serialize the model to a JSON string.

        :return: JSON string representation of the model.
        """
        logger.debug("Serializing to JSON... {}", self)

        return self.model_dump_json()

    @classmethod
    def from_json(cls, data: str):
        """
        Deserialize a JSON string to a model instance.

        :param data: JSON string to deserialize.
        :return: An instance of the model.
        """
        logger.debug("Deserializing from JSON... {}", data)

        return cls.model_validate_json(data)

    def save_file(
        self,
        path: Union[str, Path],
        type_: SerializableFileType = "yaml",
    ) -> str:
        """
        Save the model to a file in either YAML or JSON format.

        :param path: Path to the exact file or the containing directory.
            If it is a directory, the file name will be inferred from the class name.
        :param type_: Optional type to save ('yaml' or 'json').
            If not provided and the path has an extension,
            it will be inferred to save in that format.
            If not provided and the path does not have an extension,
            it will save in YAML format.
        :return: The path to the saved file.
        :raises ValueError: If the file extension is not a supported format.
        """
        logger.debug("Saving to file... {} with format: {}", path, type_)

        if isinstance(path, str):
            path = Path(path)

        if path.suffix:
            # Explicit file path: the extension overrides type_.
            ext = path.suffix[1:].lower()
            # Fixed: validate the extracted extension, not the type_ parameter
            # (type_ is a Literal and was always valid, making the old check dead).
            if ext not in get_args(SerializableFileType):
                raise ValueError(
                    f"Unsupported file extension: {ext}. "
                    f"Expected one of {SerializableFileType} "
                    f"for {path}"
                )
            type_ = ext  # type: ignore # noqa: PGH003
        else:
            # Directory: derive the file name from the class name.
            file_name = f"{self.__class__.__name__.lower()}.{type_}"
            path = path / file_name

        path.parent.mkdir(parents=True, exist_ok=True)

        with path.open("w") as file:
            if type_ == "yaml":
                file.write(self.to_yaml())
            elif type_ == "json":
                file.write(self.to_json())
            else:
                raise ValueError(
                    f"Unsupported file extension: {type_}. "
                    f"Expected one of {SerializableFileType} "
                    f"for {path}"
                )

        logger.info("Successfully saved {} to {}", self.__class__.__name__, path)

        return str(path)

    @classmethod
    def load_file(cls, path: Union[str, Path]):
        """
        Load a model from a file in either YAML or JSON format.

        :param path: Path to the file.
        :return: An instance of the model.
        :raises FileNotFoundError: If the path does not exist.
        :raises ValueError: If the path is not a file or has an
            unsupported extension.
        """
        logger.debug("Loading from file... {}", path)

        if isinstance(path, str):
            path = Path(path)

        if not path.exists():
            raise FileNotFoundError(f"File not found: {path}")

        if not path.is_file():
            raise ValueError(f"Path is not a file: {path}")

        extension = path.suffix[1:].lower()

        with path.open() as file:
            data = file.read()

        if extension == "yaml":
            obj = cls.from_yaml(data)
        elif extension == "json":
            obj = cls.from_json(data)
        else:
            raise ValueError(
                f"Unsupported file extension: {extension}. "
                f"Expected one of {SerializableFileType} "
                f"for {path}"
            )

        return obj
- return obj