lemonade-sdk 8.0.3-py3-none-any.whl → 8.0.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lemonade-sdk has been flagged by the registry. See the package page for more details.

@@ -24,7 +24,7 @@ class AccuracyHumaneval(Tool):
         - pass@10: Percentage of problems solved within 10 generation attempts
         - pass@100: Percentage of problems solved within 100 generation attempts
 
-    See docs/lemonade/humaneval_accuracy.md for more details
+    See docs/dev_cli/humaneval_accuracy.md for more details
     """
 
     unique_name = "accuracy-humaneval"
@@ -1,12 +1,17 @@
 import argparse
 import abc
+import json
 from typing import List
 import lemonade.common.filesystem as fs
 import lemonade.common.exceptions as exp
 import lemonade.common.printing as printing
 from lemonade.tools.tool import ToolParser
 from lemonade.version import __version__ as lemonade_version
-from lemonade.common.system_info import get_system_info_dict
+from lemonade.common.system_info import (
+    get_system_info_dict,
+    get_device_info_dict,
+    get_system_info,
+)
 from lemonade.common.build import output_dir
 import lemonade.cache as lemonade_cache
 
@@ -245,28 +250,69 @@ class SystemInfo(ManagementTool):
     @staticmethod
     def parser(add_help: bool = True) -> argparse.ArgumentParser:
         parser = __class__.helpful_parser(
-            short_description="Print system information",
+            short_description="Print system and device information",
            add_help=add_help,
        )
 
+        parser.add_argument(
+            "--format", choices=["table", "json"], default="table", help="Output format"
+        )
+
+        parser.add_argument(
+            "--verbose",
+            action="store_true",
+            help="Show detailed system information",
+        )
+
         return parser
 
     @staticmethod
     def pretty_print(my_dict: dict, level=0):
         for k, v in my_dict.items():
+            if k == "available" and v is True:
+                continue
+
             if isinstance(v, dict):
-                print(" " * level + f"{k}:")
-                SystemInfo.pretty_print(v, level + 1)
+                # Special handling for device availability
+                if v.get("available") is False:
+                    error_msg = v.get("error", "Not available")
+                    print(" " * level + f"{k}: {error_msg}")
+                else:
+                    print(" " * level + f"{k}:")
+                    SystemInfo.pretty_print(v, level + 1)
             elif isinstance(v, list):
                 print(" " * level + f"{k}:")
                 for item in v:
-                    print(" " * (level + 1) + f"{item}")
+                    if isinstance(item, dict):
+                        SystemInfo.pretty_print(item, level + 1)
+                        print()
+                    else:
+                        print(" " * (level + 1) + f"{item}")
             else:
                 print(" " * level + f"{k}: {v}")
 
-    def run(self, _):
+    def run(self, _, format="table", verbose=False):
+        # Get basic system info
         system_info_dict = get_system_info_dict()
-        self.pretty_print(system_info_dict)
+
+        # Always include devices
+        system_info_dict["Devices"] = get_device_info_dict()
+
+        # Filter out verbose-only information if not in verbose mode
+        if not verbose:
+            essential_keys = ["OS Version", "Processor", "Physical Memory", "Devices"]
+            system_info_dict = {
+                k: v for k, v in system_info_dict.items() if k in essential_keys
+            }
+        else:
+            # In verbose mode, add Python packages at the end
+            system_info = get_system_info()
+            system_info_dict["Python Packages"] = system_info.get_python_packages()
+
+        if format == "json":
+            print(json.dumps(system_info_dict, indent=2))
+        else:
+            self.pretty_print(system_info_dict)
 
 
 # This file was originally licensed under Apache 2.0. It has been modified.
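
As a side note on the SystemInfo changes above: the reworked pretty_print suppresses "available: True" entries and collapses unavailable devices to a single line. Below is a trimmed, self-contained sketch of just the dict-handling branches, using made-up device data, to show the intended rendering (the real tool feeds it the output of get_device_info_dict()):

    # Trimmed re-statement of the dict branches from the diff above; the
    # device names and error text are hypothetical, for illustration only.
    def pretty_print(my_dict: dict, level=0):
        for k, v in my_dict.items():
            if k == "available" and v is True:
                continue  # suppress redundant "available: True" lines
            if isinstance(v, dict):
                if v.get("available") is False:
                    # Unavailable devices collapse to "<name>: <error>"
                    print(" " * level + f"{k}: {v.get('error', 'Not available')}")
                else:
                    print(" " * level + f"{k}:")
                    pretty_print(v, level + 1)
            else:
                print(" " * level + f"{k}: {v}")

    devices = {
        "cpu": {"available": True, "name": "Example CPU"},
        "npu": {"available": False, "error": "Driver not installed"},
    }
    pretty_print(devices)
    # cpu:
    #  name: Example CPU
    # npu: Driver not installed
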
lemonade/tools/mmlu.py CHANGED
@@ -27,7 +27,7 @@ def min_handle_none(*args: int):
 
 class AccuracyMMLU(Tool):
     """
-    See docs/lemonade/mmlu_accuracy.md for more details
+    See docs/dev_cli/mmlu_accuracy.md for more details
     """
 
     unique_name = "accuracy-mmlu"
@@ -58,7 +58,7 @@ class OgaLoad(FirstTool):
     Input: path to a checkpoint.
         Supported choices for cpu and igpu from HF model repository:
             LLM models on Huggingface supported by model_builder. See documentation
-            (https://github.com/lemonade-sdk/lemonade/blob/main/docs/ort_genai_igpu.md)
+            (https://github.com/lemonade-sdk/lemonade/blob/main/docs/dev_cli/ort_genai_igpu.md)
             for supported models.
         Supported choices for npu from HF model repository:
             Models on Hugging Face that follow the "amd/**-onnx-ryzen-strix" pattern
@@ -17,7 +17,7 @@ class AccuracyPerplexity(Tool):
 
     Output state produced: None
 
-    See docs/lemonade/perplexity.md for more details.
+    See docs/dev_cli/perplexity.md for more details.
     """
 
     unique_name = "accuracy-perplexity"
@@ -63,7 +63,7 @@ class AccuracyPerplexity(Tool):
         # try-except will allow a few more LLMs to work
         max_length = 2048
         # Set stride to half of the maximum input length for overlapping window processing
-        # Refer to docs/perplexity.md for more information on sliding window
+        # Refer to docs/dev_cli/perplexity.md for more information on sliding window
         stride = max_length // 2
         # Determine the total sequence length of the tokenized input
         seq_len = encodings.input_ids.size(1)
@@ -18,7 +18,7 @@ class QuarkLoad(Tool):
     Output:
       - state of the loaded model
 
-    See docs/quark.md for more details.
+    See docs/dev_cli/quark.md for more details.
     """
 
     unique_name = "quark-load"
@@ -25,7 +25,7 @@ class QuarkQuantize(Tool):
     Output:
       - Modifies `state` with quantized and optionally exported model.
 
-    See docs/quark.md for more details.
+    See docs/dev_cli/quark.md for more details.
     """
 
     unique_name = "quark-quantize"
@@ -94,7 +94,7 @@ class QuarkQuantize(Tool):
             help="Number of samples for calibration.",
         )
 
-        # See docs/quark.md for more details.
+        # See docs/dev_cli/quark.md for more details.
         parser.add_argument(
             "--quant-scheme",
             type=str,
@@ -16,11 +16,29 @@ from fastapi.responses import StreamingResponse
 
 from openai import OpenAI
 
-from lemonade_server.pydantic_models import ChatCompletionRequest, PullConfig
+from lemonade_server.pydantic_models import (
+    ChatCompletionRequest,
+    PullConfig,
+    EmbeddingsRequest,
+    RerankingRequest,
+)
 from lemonade_server.model_manager import ModelManager
 from lemonade.tools.server.utils.port import find_free_port
 
-LLAMA_VERSION = "b5699"
+LLAMA_VERSION = "b5787"
+
+
+def llamacpp_address(port: int) -> str:
+    """
+    Generate the base URL for the llamacpp server.
+
+    Args:
+        port: The port number the llamacpp server is running on
+
+    Returns:
+        The base URL for the llamacpp server
+    """
+    return f"http://127.0.0.1:{port}/v1"
 
 
 def get_llama_server_paths():
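
The new llamacpp_address helper centralizes the base URL that chat_completion (and the embeddings helper added further down) pass to an OpenAI client. A minimal sketch of that usage with a hypothetical port (at runtime the port comes from telemetry.port, assigned via find_free_port):

    from openai import OpenAI

    port = 8080  # hypothetical port, for illustration only
    base_url = f"http://127.0.0.1:{port}/v1"  # what llamacpp_address(port) returns

    # Same client construction used by chat_completion() in this module
    client = OpenAI(base_url=base_url, api_key="lemonade")
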
@@ -244,10 +262,24 @@ def _wait_for_load(llama_server_process: subprocess.Popen, port: int):
 
 
 def _launch_llama_subprocess(
-    snapshot_files: dict, use_gpu: bool, telemetry: LlamaTelemetry
+    snapshot_files: dict,
+    use_gpu: bool,
+    telemetry: LlamaTelemetry,
+    supports_embeddings: bool = False,
+    supports_reranking: bool = False,
 ) -> subprocess.Popen:
     """
-    Launch llama server subprocess with GPU or CPU configuration
+    Launch llama server subprocess with appropriate configuration.
+
+    Args:
+        snapshot_files: Dictionary of model files to load
+        use_gpu: Whether to use GPU acceleration
+        telemetry: Telemetry object for tracking performance metrics
+        supports_embeddings: Whether the model supports embeddings
+        supports_reranking: Whether the model supports reranking
+
+    Returns:
+        Subprocess handle for the llama server
     """
 
     # Get the current executable path (handles both Windows and Ubuntu structures)
@@ -271,6 +303,14 @@ def _launch_llama_subprocess(
     # reasoning_content field
     base_command.extend(["--reasoning-format", "none"])
 
+    # Add embeddings support if the model supports it
+    if supports_embeddings:
+        base_command.append("--embeddings")
+
+    # Add reranking support if the model supports it
+    if supports_reranking:
+        base_command.append("--reranking")
+
     # Configure GPU layers: 99 for GPU, 0 for CPU-only
     ngl_value = "99" if use_gpu else "0"
     command = base_command + ["-ngl", ngl_value]
@@ -310,7 +350,6 @@
 
 
 def server_load(model_config: PullConfig, telemetry: LlamaTelemetry):
-
     # Validate platform support before proceeding
     validate_platform_support()
 
@@ -367,15 +406,26 @@ def server_load(model_config: PullConfig, telemetry: LlamaTelemetry):
         logging.info("Cleaned up zip file")
 
     # Download the gguf to the hugging face cache
-    snapshot_files = ModelManager().download_gguf(model_config)
+    model_manager = ModelManager()
+    snapshot_files = model_manager.download_gguf(model_config)
     logging.debug(f"GGUF file paths: {snapshot_files}")
 
+    # Check if model supports embeddings
+    supported_models = model_manager.supported_models
+    model_info = supported_models.get(model_config.model_name, {})
+    supports_embeddings = "embeddings" in model_info.get("labels", [])
+    supports_reranking = "reranking" in model_info.get("labels", [])
+
     # Start the llama-serve.exe process
     logging.debug(f"Using llama_server for GGUF model: {llama_server_exe_path}")
 
     # Attempt loading on GPU first
     llama_server_process = _launch_llama_subprocess(
-        snapshot_files, use_gpu=True, telemetry=telemetry
+        snapshot_files,
+        use_gpu=True,
+        telemetry=telemetry,
+        supports_embeddings=supports_embeddings,
+        supports_reranking=supports_reranking,
     )
 
     # Check the /health endpoint until GPU server is ready
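
For context, the capability detection above is a plain label lookup on the model registry entry; a small sketch with a hypothetical entry (the model name and labels are made up for illustration):

    # Hypothetical shape of an entry in ModelManager().supported_models
    supported_models = {
        "example-embedding-model-GGUF": {"labels": ["embeddings"]},
    }

    model_info = supported_models.get("example-embedding-model-GGUF", {})
    supports_embeddings = "embeddings" in model_info.get("labels", [])  # True
    supports_reranking = "reranking" in model_info.get("labels", [])    # False

    # These booleans are forwarded to _launch_llama_subprocess, which appends
    # --embeddings / --reranking to the llama-server command line accordingly.
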
@@ -395,7 +445,11 @@ def server_load(model_config: PullConfig, telemetry: LlamaTelemetry):
         raise Exception("llamacpp GPU loading failed")
 
     llama_server_process = _launch_llama_subprocess(
-        snapshot_files, use_gpu=False, telemetry=telemetry
+        snapshot_files,
+        use_gpu=False,
+        telemetry=telemetry,
+        supports_embeddings=supports_embeddings,
+        supports_reranking=supports_reranking,
     )
 
     # Check the /health endpoint until CPU server is ready
@@ -416,7 +470,7 @@ def server_load(model_config: PullConfig, telemetry: LlamaTelemetry):
 def chat_completion(
     chat_completion_request: ChatCompletionRequest, telemetry: LlamaTelemetry
 ):
-    base_url = f"http://127.0.0.1:{telemetry.port}/v1"
+    base_url = llamacpp_address(telemetry.port)
     client = OpenAI(
         base_url=base_url,
         api_key="lemonade",
@@ -467,3 +521,70 @@ def chat_completion(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             detail=f"Chat completion error: {str(e)}",
         )
+
+
+def embeddings(embeddings_request: EmbeddingsRequest, telemetry: LlamaTelemetry):
+    """
+    Generate embeddings using the llamacpp server.
+
+    Args:
+        embeddings_request: The embeddings request containing input text/tokens
+        telemetry: Telemetry object containing the server port
+
+    Returns:
+        Embeddings response from the llamacpp server
+    """
+    base_url = llamacpp_address(telemetry.port)
+    client = OpenAI(
+        base_url=base_url,
+        api_key="lemonade",
+    )
+
+    # Convert Pydantic model to dict and remove unset/null values
+    request_dict = embeddings_request.model_dump(exclude_unset=True, exclude_none=True)
+
+    try:
+        # Call the embeddings endpoint
+        response = client.embeddings.create(**request_dict)
+        return response
+
+    except Exception as e:  # pylint: disable=broad-exception-caught
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"Embeddings error: {str(e)}",
+        )
+
+
+def reranking(reranking_request: RerankingRequest, telemetry: LlamaTelemetry):
+    """
+    Rerank documents based on their relevance to a query using the llamacpp server.
+
+    Args:
+        reranking_request: The reranking request containing query and documents
+        telemetry: Telemetry object containing the server port
+
+    Returns:
+        Reranking response from the llamacpp server containing ranked documents and scores
+    """
+    base_url = llamacpp_address(telemetry.port)
+
+    try:
+        # Convert Pydantic model to dict and exclude unset/null values
+        request_dict = reranking_request.model_dump(
+            exclude_unset=True, exclude_none=True
+        )
+
+        # Call the reranking endpoint directly since it's not supported by the OpenAI API
+        response = requests.post(
+            f"{base_url}/rerank",
+            json=request_dict,
+        )
+        response.raise_for_status()
+        return response.json()
+
+    except Exception as e:
+        logging.error("Error during reranking: %s", str(e))
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"Reranking error: {str(e)}",
+        ) from e
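
The reranking helper above boils down to a plain HTTP POST against llama-server's rerank route. A standalone, hedged equivalent is sketched below; the port, model name, query, and documents are made up, and the exact response schema depends on the llama.cpp build:

    import requests

    port = 8080  # hypothetical; at runtime this comes from telemetry.port
    payload = {
        "model": "example-reranker-GGUF",  # hypothetical model name
        "query": "What is the capital of France?",
        "documents": [
            "Paris is the capital of France.",
            "Berlin is the capital of Germany.",
        ],
    }

    # Same call shape as reranking() above: POST <base_url>/rerank with the JSON body
    response = requests.post(f"http://127.0.0.1:{port}/v1/rerank", json=payload)
    response.raise_for_status()
    print(response.json())  # typically per-document relevance scores
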
@@ -54,6 +54,8 @@ from lemonade_server.pydantic_models import (
     LoadConfig,
     CompletionRequest,
     ChatCompletionRequest,
+    EmbeddingsRequest,
+    RerankingRequest,
     ResponsesRequest,
     PullConfig,
     DeleteConfig,
@@ -226,13 +228,19 @@ class Server(ManagementTool):
         self.app.get(f"{prefix}/health")(self.health)
         self.app.get(f"{prefix}/halt")(self.halt_generation)
         self.app.get(f"{prefix}/stats")(self.send_stats)
+        self.app.get(f"{prefix}/system-info")(self.get_system_info)
         self.app.post(f"{prefix}/completions")(self.completions)
         self.app.post(f"{prefix}/responses")(self.responses)
 
         # OpenAI-compatible routes
         self.app.post(f"{prefix}/chat/completions")(self.chat_completions)
+        self.app.post(f"{prefix}/embeddings")(self.embeddings)
         self.app.get(f"{prefix}/models")(self.models)
 
+        # JinaAI routes (jina.ai/reranker/)
+        self.app.post(f"{prefix}/reranking")(self.reranking)
+        self.app.post(f"{prefix}/rerank")(self.reranking)
+
     @staticmethod
     def parser(add_help: bool = True) -> argparse.ArgumentParser:
         parser = __class__.helpful_parser(
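
With the routes above registered, a client can reach the new embeddings endpoint through the OpenAI SDK. A hedged sketch follows; the host, port, prefix, and model name are assumptions, so substitute whatever your Lemonade Server install actually exposes:

    from openai import OpenAI

    # Assumes a locally running Lemonade Server; the base URL below is illustrative
    client = OpenAI(base_url="http://localhost:8000/api/v1", api_key="lemonade")

    response = client.embeddings.create(
        model="example-embedding-model-GGUF",  # hypothetical model carrying the "embeddings" label
        input=["lemonade is a local LLM server"],
    )
    print(len(response.data[0].embedding))
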
@@ -796,6 +804,72 @@ class Server(ManagementTool):
             created=int(time.time()),
         )
 
+    async def embeddings(self, embeddings_request: EmbeddingsRequest):
+        """
+        Generate embeddings for the provided input.
+        """
+        # Initialize load config from embeddings request
+        lc = LoadConfig(model_name=embeddings_request.model)
+
+        # Load the model if it's different from the currently loaded one
+        await self.load_llm(lc)
+
+        if self.llm_loaded.recipe == "llamacpp":
+            try:
+                return llamacpp.embeddings(embeddings_request, self.llama_telemetry)
+            except Exception as e:  # pylint: disable=broad-exception-caught
+                # Check if model has embeddings label
+                model_info = ModelManager().supported_models.get(
+                    self.llm_loaded.model_name, {}
+                )
+                if "embeddings" not in model_info.get("labels", []):
+                    raise HTTPException(
+                        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+                        detail="You tried to generate embeddings for a model that is "
+                        "not labeled as an embeddings model. Please use another model "
+                        "or re-register the current model with the 'embeddings' label.",
+                    ) from e
+                else:
+                    raise e
+        else:
+            raise HTTPException(
+                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+                detail=f"Embeddings not supported for recipe: {self.llm_loaded.recipe}",
+            )
+
+    async def reranking(self, reranking_request: RerankingRequest):
+        """
+        Rerank documents based on their relevance to a query using the llamacpp server.
+        """
+        # Initialize load config from reranking request
+        lc = LoadConfig(model_name=reranking_request.model)
+
+        # Load the model if it's different from the currently loaded one
+        await self.load_llm(lc)
+
+        if self.llm_loaded.recipe == "llamacpp":
+            try:
+                return llamacpp.reranking(reranking_request, self.llama_telemetry)
+            except Exception as e:  # pylint: disable=broad-exception-caught
+                # Check if model has reranking label
+                model_info = ModelManager().supported_models.get(
+                    self.llm_loaded.model_name, {}
+                )
+                if "reranking" not in model_info.get("labels", []):
+                    raise HTTPException(
+                        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+                        detail="You tried to use reranking for a model that is "
+                        "not labeled as a reranking model. Please use another model "
+                        "or re-register the current model with the 'reranking' label.",
+                    ) from e
+                else:
+                    raise e
+        else:
+            raise HTTPException(
+                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+                detail=f"Reranking not supported for recipe: {self.llm_loaded.recipe}",
+            )
+
     def apply_chat_template(
         self, messages: list[dict], tools: list[dict] | None = None
     ):
@@ -1203,6 +1277,34 @@ class Server(ManagementTool):
             ),
         }
 
+    async def get_system_info(self, request: Request):
+        """
+        Return system and device enumeration information.
+        Supports optional 'verbose' query parameter.
+        """
+        from lemonade.common.system_info import (
+            get_system_info_dict,
+            get_device_info_dict,
+            get_system_info as get_system_info_obj,
+        )
+
+        # Get verbose parameter from query string (default to False)
+        verbose = request.query_params.get("verbose", "false").lower() in ["true", "1"]
+
+        info = get_system_info_dict()
+        info["devices"] = get_device_info_dict()
+
+        # Filter out verbose-only information if not in verbose mode
+        if not verbose:
+            essential_keys = ["OS Version", "Processor", "Physical Memory", "devices"]
+            info = {k: v for k, v in info.items() if k in essential_keys}
+        else:
+            # In verbose mode, add Python packages at the end
+            system_info_obj = get_system_info_obj()
+            info["Python Packages"] = system_info_obj.get_python_packages()
+
+        return info
+
     def model_load_failure(self, model_reference: str, message: Optional[str] = None):
         """
         Clean up after a model load failure, then log it and raise
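
Finally, a hedged example of querying the new system-info route registered earlier; the host, port, and prefix are assumptions:

    import requests

    base = "http://localhost:8000/api/v1"  # illustrative base URL for a local Lemonade Server

    # Default (non-verbose) response keeps OS Version, Processor, Physical Memory, devices
    print(requests.get(f"{base}/system-info").json())

    # verbose=true additionally includes Python Packages and the remaining system fields
    print(requests.get(f"{base}/system-info", params={"verbose": "true"}).json())
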