crfm-helm 0.5.1__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crfm-helm might be problematic.

Files changed (98)
  1. {crfm_helm-0.5.1.dist-info → crfm_helm-0.5.2.dist-info}/METADATA +13 -3
  2. {crfm_helm-0.5.1.dist-info → crfm_helm-0.5.2.dist-info}/RECORD +96 -63
  3. helm/benchmark/adaptation/adapter_spec.py +32 -31
  4. helm/benchmark/annotation/air_bench_annotator.py +64 -0
  5. helm/benchmark/annotation/annotator_factory.py +6 -0
  6. helm/benchmark/annotation/live_qa_annotator.py +84 -0
  7. helm/benchmark/annotation/medication_qa_annotator.py +81 -0
  8. helm/benchmark/augmentations/translate_perturbation.py +1 -0
  9. helm/benchmark/huggingface_registration.py +16 -6
  10. helm/benchmark/metrics/air_bench_metrics.py +56 -0
  11. helm/benchmark/metrics/fin_qa_metrics.py +60 -0
  12. helm/benchmark/metrics/fin_qa_metrics_helper.py +398 -0
  13. helm/benchmark/metrics/gpt4v_originality_critique_metrics.py +126 -0
  14. helm/benchmark/metrics/instruction_following_critique_metrics.py +1 -0
  15. helm/benchmark/metrics/live_qa_metrics.py +23 -0
  16. helm/benchmark/metrics/medication_qa_metrics.py +23 -0
  17. helm/benchmark/metrics/prometheus_vision_critique_metrics.py +185 -0
  18. helm/benchmark/metrics/reka_vibe_critique_metrics.py +158 -0
  19. helm/benchmark/metrics/unitxt_metrics.py +20 -10
  20. helm/benchmark/metrics/vision_language/emd_utils.py +4 -0
  21. helm/benchmark/metrics/vision_language/image_metrics.py +29 -71
  22. helm/benchmark/presentation/schema.py +54 -4
  23. helm/benchmark/presentation/test_schema.py +11 -0
  24. helm/benchmark/run.py +16 -2
  25. helm/benchmark/run_expander.py +77 -0
  26. helm/benchmark/run_spec_factory.py +4 -0
  27. helm/benchmark/run_specs/air_bench_run_specs.py +40 -0
  28. helm/benchmark/run_specs/classic_run_specs.py +15 -11
  29. helm/benchmark/run_specs/decodingtrust_run_specs.py +3 -1
  30. helm/benchmark/run_specs/experimental_run_specs.py +33 -0
  31. helm/benchmark/run_specs/finance_run_specs.py +33 -0
  32. helm/benchmark/run_specs/vlm_run_specs.py +168 -45
  33. helm/benchmark/scenarios/air_bench_scenario.py +50 -0
  34. helm/benchmark/scenarios/ci_mcqa_scenario.py +80 -0
  35. helm/benchmark/scenarios/entity_data_imputation_scenario.py +8 -2
  36. helm/benchmark/scenarios/fin_qa_scenario.py +117 -0
  37. helm/benchmark/scenarios/test_air_bench_scenario.py +27 -0
  38. helm/benchmark/scenarios/vision_language/bingo_scenario.py +3 -3
  39. helm/benchmark/scenarios/vision_language/image2structure/image2structure_scenario.py +13 -2
  40. helm/benchmark/scenarios/vision_language/image2structure/latex_scenario.py +1 -5
  41. helm/benchmark/scenarios/vision_language/image2structure/musicsheet_scenario.py +0 -4
  42. helm/benchmark/scenarios/vision_language/image2structure/webpage_scenario.py +4 -2
  43. helm/benchmark/scenarios/vision_language/pairs_scenario.py +6 -5
  44. helm/benchmark/scenarios/vision_language/unicorn_scenario.py +3 -3
  45. helm/benchmark/scenarios/vision_language/vibe_eval_scenario.py +95 -0
  46. helm/benchmark/static/schema_air_bench.yaml +3149 -0
  47. helm/benchmark/static/schema_classic.yaml +3 -59
  48. helm/benchmark/static/schema_finance.yaml +143 -0
  49. helm/benchmark/static/schema_image2structure.yaml +254 -111
  50. helm/benchmark/static/schema_instruction_following.yaml +3 -52
  51. helm/benchmark/static/schema_lite.yaml +3 -61
  52. helm/benchmark/static/schema_medical.yaml +255 -0
  53. helm/benchmark/static/schema_mmlu.yaml +3 -61
  54. helm/benchmark/static/schema_tables.yaml +200 -0
  55. helm/benchmark/static/schema_thai.yaml +223 -0
  56. helm/benchmark/static/schema_unitxt.yaml +3 -61
  57. helm/benchmark/static/{schema_vlm.yaml → schema_vhelm.yaml} +294 -293
  58. helm/benchmark/static/schema_vhelm_lite.yaml +4 -59
  59. helm/benchmark/static_build/assets/air-overview-d2e6c49f.png +0 -0
  60. helm/benchmark/static_build/assets/index-30dbceba.js +10 -0
  61. helm/benchmark/static_build/assets/index-66b02d40.css +1 -0
  62. helm/benchmark/static_build/assets/overview-74aea3d8.png +0 -0
  63. helm/benchmark/static_build/assets/process-flow-bd2eba96.png +0 -0
  64. helm/benchmark/static_build/index.html +2 -2
  65. helm/clients/anthropic_client.py +43 -9
  66. helm/clients/auto_client.py +11 -0
  67. helm/clients/client.py +24 -7
  68. helm/clients/cohere_client.py +98 -3
  69. helm/clients/huggingface_client.py +71 -12
  70. helm/clients/openai_client.py +9 -2
  71. helm/clients/reka_client.py +189 -0
  72. helm/clients/test_client.py +3 -3
  73. helm/clients/test_huggingface_client.py +19 -3
  74. helm/clients/test_together_client.py +72 -2
  75. helm/clients/together_client.py +129 -23
  76. helm/clients/vertexai_client.py +62 -18
  77. helm/clients/vision_language/huggingface_vlm_client.py +1 -0
  78. helm/clients/vision_language/paligemma_client.py +146 -0
  79. helm/clients/vision_language/palmyra_vision_client.py +84 -0
  80. helm/clients/yi_client.py +31 -0
  81. helm/common/critique_request.py +10 -1
  82. helm/common/images_utils.py +19 -0
  83. helm/config/model_deployments.yaml +412 -18
  84. helm/config/model_metadata.yaml +447 -25
  85. helm/config/tokenizer_configs.yaml +93 -1
  86. helm/proxy/critique/model_critique_client.py +32 -4
  87. helm/proxy/services/server_service.py +1 -1
  88. helm/tokenizers/auto_tokenizer.py +1 -1
  89. helm/tokenizers/cohere_tokenizer.py +44 -2
  90. helm/tokenizers/huggingface_tokenizer.py +36 -13
  91. helm/tokenizers/test_cohere_tokenizer.py +39 -0
  92. helm/tokenizers/test_huggingface_tokenizer.py +5 -1
  93. helm/benchmark/static_build/assets/index-737eef9e.js +0 -10
  94. helm/benchmark/static_build/assets/index-878a1094.css +0 -1
  95. {crfm_helm-0.5.1.dist-info → crfm_helm-0.5.2.dist-info}/LICENSE +0 -0
  96. {crfm_helm-0.5.1.dist-info → crfm_helm-0.5.2.dist-info}/WHEEL +0 -0
  97. {crfm_helm-0.5.1.dist-info → crfm_helm-0.5.2.dist-info}/entry_points.txt +0 -0
  98. {crfm_helm-0.5.1.dist-info → crfm_helm-0.5.2.dist-info}/top_level.txt +0 -0
helm/benchmark/static_build/index.html CHANGED
@@ -7,11 +7,11 @@
  <title>Holistic Evaluation of Language Models (HELM)</title>
  <meta name="description" content="The Holistic Evaluation of Language Models (HELM) serves as a living benchmark for transparency in language models. Providing broad coverage and recognizing incompleteness, multi-metric measurements, and standardization. All data and analysis are freely accessible on the website for exploration and study." />
  <script type="text/javascript" src="./config.js"></script>
- <script type="module" crossorigin src="./assets/index-737eef9e.js"></script>
+ <script type="module" crossorigin src="./assets/index-30dbceba.js"></script>
  <link rel="modulepreload" crossorigin href="./assets/react-d4a0b69b.js">
  <link rel="modulepreload" crossorigin href="./assets/recharts-6d337683.js">
  <link rel="modulepreload" crossorigin href="./assets/tremor-54a99cc4.js">
- <link rel="stylesheet" href="./assets/index-878a1094.css">
+ <link rel="stylesheet" href="./assets/index-66b02d40.css">
  </head>
  <body class="block">
  <div id="root"></div>
helm/clients/anthropic_client.py CHANGED
@@ -1,5 +1,6 @@
  from typing import Any, Dict, List, Optional, TypedDict, Union, cast
  import json
+ import os
  import requests
  import tempfile
  import time
@@ -244,6 +245,8 @@ class AnthropicMessagesClient(CachingClient):
  # Source: https://docs.anthropic.com/claude/docs/models-overview
  MAX_OUTPUT_TOKENS: int = 4096

+ MAX_IMAGE_SIZE_BYTES: int = 5242880 # 5MB
+
  def __init__(
  self, tokenizer: Tokenizer, tokenizer_name: str, cache_config: CacheConfig, api_key: Optional[str] = None
  ):
@@ -286,7 +289,12 @@ class AnthropicMessagesClient(CachingClient):
  if not media_object.location:
  raise Exception("MediaObject of image type has missing location field value")

- from helm.common.images_utils import encode_base64, get_dimensions, copy_image
+ from helm.common.images_utils import (
+ encode_base64,
+ get_dimensions,
+ copy_image,
+ resize_image_to_max_file_size,
+ )

  image_location: str = media_object.location
  base64_image: str
@@ -310,6 +318,21 @@ class AnthropicMessagesClient(CachingClient):
  height=min(image_height, AnthropicClient.MAX_IMAGE_DIMENSION),
  )
  base64_image = encode_base64(temp_file.name, format="JPEG")
+
+ elif os.path.getsize(image_location) > AnthropicMessagesClient.MAX_IMAGE_SIZE_BYTES:
+ hlog(
+ f"WARNING: Image {image_location} exceeds max allowed size: "
+ f"{AnthropicMessagesClient.MAX_IMAGE_SIZE_BYTES} bytes"
+ )
+ # Resize the image so it is smaller than the max allowed size
+ with tempfile.NamedTemporaryFile(suffix=".jpg") as temp_file:
+ hlog(f"Resizing image to temporary path: {temp_file.name}")
+ resize_image_to_max_file_size(
+ src=image_location,
+ dest=temp_file.name,
+ max_size_in_bytes=AnthropicMessagesClient.MAX_IMAGE_SIZE_BYTES,
+ )
+ base64_image = encode_base64(temp_file.name, format="JPEG")
  else:
  base64_image = encode_base64(image_location, format="JPEG")

@@ -368,14 +391,25 @@ class AnthropicMessagesClient(CachingClient):
  return response
  raise

- cache_key = CachingClient.make_cache_key(
- {
- "completion_index": completion_index,
- **raw_request,
- },
- request,
- )
- response, cached = self.cache.get(cache_key, wrap_request_time(do_it))
+ try:
+ cache_key = CachingClient.make_cache_key(
+ {
+ "completion_index": completion_index,
+ **raw_request,
+ },
+ request,
+ )
+ response, cached = self.cache.get(cache_key, wrap_request_time(do_it))
+ except AnthropicMessagesResponseError:
+ hlog("WARNING: Response has empty content")
+ return RequestResult(
+ success=False,
+ cached=False,
+ error="Anthropic response has empty content",
+ completions=[],
+ embedding=[],
+ error_flags=ErrorFlags(is_retriable=False, is_fatal=False),
+ )

  if _is_content_moderation_failure(response):
  hlog(
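The image-handling hunk above calls resize_image_to_max_file_size, which this release adds to helm/common/images_utils.py (+19 lines, not shown in this diff). A minimal sketch of what such a helper could look like, purely an assumption and not the actual implementation, using Pillow to downscale the image until the saved JPEG fits under the limit; only the signature (src, dest, max_size_in_bytes) is taken from the call site above:

    # Hypothetical sketch of resize_image_to_max_file_size (not the code shipped in this release).
    import os
    from PIL import Image

    def resize_image_to_max_file_size(src: str, dest: str, max_size_in_bytes: int) -> None:
        """Repeatedly downscale `src` and save it as a JPEG at `dest` until the file fits in `max_size_in_bytes`."""
        image = Image.open(src).convert("RGB")
        scale: float = 1.0
        while True:
            resized = image.resize((max(1, int(image.width * scale)), max(1, int(image.height * scale))))
            resized.save(dest, format="JPEG")
            if os.path.getsize(dest) <= max_size_in_bytes or scale < 0.05:
                break
            scale *= 0.9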
helm/clients/auto_client.py CHANGED
@@ -5,6 +5,7 @@ from typing import Any, Dict, Mapping, Optional
  from retrying import Attempt, RetryError

  from helm.benchmark.model_deployment_registry import ModelDeployment, get_model_deployment
+ from helm.benchmark.tokenizer_config_registry import get_tokenizer_config
  from helm.common.file_caches.file_cache import FileCache
  from helm.common.file_caches.local_file_cache import LocalFileCache
  from helm.common.credentials_utils import provide_api_key
@@ -88,6 +89,10 @@ class AutoClient(Client):
  "location": lambda: self.credentials.get(host_organization + "Location", None), # VertexAI
  "hf_auth_token": lambda: self.credentials.get("huggingfaceAuthToken", None), # HuggingFace
  "file_cache": lambda: self._get_file_cache(host_organization), # Text-to-image models
+ "endpoint": lambda: self.credentials.get(host_organization + "Endpoint", None), # Palmyra
+ "end_of_text_token": lambda: self._get_end_of_text_token(
+ tokenizer_name=model_deployment.tokenizer_name or model_deployment.name
+ ),
  },
  )
  client = create_object(client_spec)
@@ -213,3 +218,9 @@ class AutoClient(Client):
  # Initialize `FileCache` for text-to-image model APIs
  local_file_cache_path: str = os.path.join(self.file_storage_path, "output", host_organization)
  return LocalFileCache(local_file_cache_path, file_extension="png")
+
+ def _get_end_of_text_token(self, tokenizer_name: str) -> Optional[str]:
+ tokenizer_config = get_tokenizer_config(tokenizer_name)
+ if tokenizer_config is None:
+ raise ValueError(f"Could not find tokenizer_config for tokenizer {tokenizer_name}")
+ return tokenizer_config.end_of_text_token
helm/clients/client.py CHANGED
@@ -39,13 +39,17 @@ class CachingClient(Client):
  """
  if request.random is not None:
  assert "random" not in raw_request
- cache_key: Mapping = {**raw_request, "random": request.random}
+ return {**raw_request, "random": request.random}
  else:
- cache_key = raw_request
- return cache_key
+ return {**raw_request}


- def truncate_sequence(sequence: GeneratedOutput, request: Request, print_warning: bool = True) -> GeneratedOutput:
+ def truncate_sequence(
+ sequence: GeneratedOutput,
+ request: Request,
+ end_of_text_token: Optional[str] = None,
+ print_warning: bool = True,
+ ) -> GeneratedOutput:
  """
  Certain providers have bugs where they aren't respecting max_tokens,
  stop_sequences and the end of text token, so as a hack, we have to manually
@@ -64,7 +68,11 @@ def truncate_sequence(sequence: GeneratedOutput, request: Request, print_warning
  hlog("WARNING: don't know how to handle echo_prompt and max_tokens > 0, not truncating")
  return sequence

- for stop in request.stop_sequences:
+ if end_of_text_token:
+ stop_sequences = request.stop_sequences + [end_of_text_token]
+ else:
+ stop_sequences = request.stop_sequences
+ for stop in stop_sequences:
  # Find `stop` in the text
  try:
  new_text = sequence.text[: sequence.text.index(stop)]
@@ -116,7 +124,12 @@ def truncate_sequence(sequence: GeneratedOutput, request: Request, print_warning


  def truncate_and_tokenize_response_text(
- text: str, request: Request, tokenizer: Tokenizer, tokenizer_name: str, original_finish_reason: str = "endoftext"
+ text: str,
+ request: Request,
+ tokenizer: Tokenizer,
+ tokenizer_name: str,
+ end_of_text_token: Optional[str] = None,
+ original_finish_reason: str = "endoftext",
  ) -> GeneratedOutput:
  """Truncate a string-only response to respect stop_sequences and max_tokens.

@@ -139,7 +152,11 @@
  if request.echo_prompt:
  raise Exception("truncate_and_tokenize_response_text() does not support requests with echo_prompt = True")

- for stop_sequence in request.stop_sequences:
+ if end_of_text_token:
+ stop_sequences = request.stop_sequences + [end_of_text_token]
+ else:
+ stop_sequences = request.stop_sequences
+ for stop_sequence in stop_sequences:
  try:
  text = text[: text.index(stop_sequence)]
  finish_reason = "stop"
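The new end_of_text_token parameter is simply folded into the stop sequences before truncation. A small illustrative sketch of the effect, assuming the remaining Request fields keep their defaults and that the token text below is just an example:

    # Illustrative only: shows how end_of_text_token participates in truncation.
    from helm.common.request import GeneratedOutput, Request
    from helm.clients.client import truncate_sequence

    request = Request(prompt="Q: 1 + 1?\nA:", max_tokens=10, stop_sequences=["\n"])
    raw = GeneratedOutput(text=" 2<|endoftext|>ignored tail", logprob=0.0, tokens=[])

    # Without end_of_text_token only "\n" would be considered; with it, the text is cut at "<|endoftext|>".
    truncated = truncate_sequence(raw, request, end_of_text_token="<|endoftext|>")
    print(truncated.text)  # " 2"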
helm/clients/cohere_client.py CHANGED
@@ -1,8 +1,9 @@
  import json
  import requests
- from typing import List
+ from typing import List, Optional, Sequence, TypedDict

  from helm.common.cache import CacheConfig
+ from helm.common.optional_dependencies import handle_module_not_found_error
  from helm.common.request import (
  wrap_request_time,
  EMBEDDING_UNAVAILABLE_REQUEST_RESULT,
@@ -11,8 +12,13 @@ from helm.common.request import (
  GeneratedOutput,
  Token,
  )
- from .client import CachingClient, truncate_sequence
- from .cohere_utils import get_cohere_url, DEFAULT_COHERE_API_VERSION
+ from helm.clients.client import CachingClient, truncate_sequence
+ from helm.clients.cohere_utils import get_cohere_url, DEFAULT_COHERE_API_VERSION
+
+ try:
+ import cohere
+ except ModuleNotFoundError as e:
+ handle_module_not_found_error(e, ["cohere"])


  class CohereClient(CachingClient):
@@ -152,3 +158,92 @@ class CohereClient(CachingClient):
  completions=completions,
  embedding=[],
  )
+
+
+ class CohereRawChatRequest(TypedDict):
+ message: str
+ model: Optional[str]
+ preamble: Optional[str]
+ chat_history: Optional[Sequence[cohere.ChatMessage]]
+ temperature: Optional[float]
+ max_tokens: Optional[int]
+ k: Optional[int]
+ p: Optional[float]
+ seed: Optional[float]
+ stop_sequences: Optional[Sequence[str]]
+ frequency_penalty: Optional[float]
+ presence_penalty: Optional[float]
+
+
+ def convert_to_raw_chat_request(request: Request) -> CohereRawChatRequest:
+ # TODO: Support chat
+ model = request.model.replace("cohere/", "")
+ return {
+ "message": request.prompt,
+ "model": model,
+ "preamble": None,
+ "chat_history": None,
+ "temperature": request.temperature,
+ "max_tokens": request.max_tokens,
+ "k": request.top_k_per_token,
+ "p": request.top_p,
+ "stop_sequences": request.stop_sequences,
+ "seed": float(request.random) if request.random is not None else None,
+ "frequency_penalty": request.frequency_penalty,
+ "presence_penalty": request.presence_penalty,
+ }
+
+
+ class CohereChatClient(CachingClient):
+ """
+ Leverages the chat endpoint: https://docs.cohere.com/reference/chat
+
+ Cohere models will only support chat soon: https://docs.cohere.com/docs/migrating-from-cogenerate-to-cochat
+ """
+
+ def __init__(self, api_key: str, cache_config: CacheConfig):
+ super().__init__(cache_config=cache_config)
+ self.client = cohere.Client(api_key=api_key)
+
+ def make_request(self, request: Request) -> RequestResult:
+ if request.embedding:
+ return EMBEDDING_UNAVAILABLE_REQUEST_RESULT
+ # TODO: Support multiple completions
+ assert request.num_completions == 1, "CohereChatClient only supports num_completions=1"
+ # TODO: Support messages
+ assert not request.messages, "CohereChatClient currently does not support the messages API"
+
+ raw_request: CohereRawChatRequest = convert_to_raw_chat_request(request)
+
+ try:
+
+ def do_it():
+ """
+ Send the request to the Cohere Chat API. Responses will be structured like this:
+ cohere.Chat {
+ message: What's up?
+ text: Hey there! How's it going? I'm doing well, thank you for asking 😊.
+ ...
+ }
+ """
+ raw_response = self.client.chat(**raw_request).dict()
+ assert "text" in raw_response, f"Response does not contain text: {raw_response}"
+ return raw_response
+
+ response, cached = self.cache.get(raw_request, wrap_request_time(do_it))
+ except (requests.exceptions.RequestException, AssertionError) as e:
+ error: str = f"CohereClient error: {e}"
+ return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
+
+ completions: List[GeneratedOutput] = []
+ completion: GeneratedOutput = GeneratedOutput(text=response["text"], logprob=0.0, tokens=[])
+ completions.append(completion)
+
+ return RequestResult(
+ success=True,
+ cached=cached,
+ request_time=response["request_time"],
+ request_datetime=response["request_datetime"],
+ completions=completions,
+ embedding=[],
+ )
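A hedged usage sketch of the new CohereChatClient; the API key, cache path, and model name below are placeholders, not values taken from this release:

    # Illustrative only: exercising CohereChatClient as added above.
    from helm.common.cache import SqliteCacheConfig
    from helm.common.request import Request
    from helm.clients.cohere_client import CohereChatClient

    client = CohereChatClient(api_key="YOUR_COHERE_API_KEY", cache_config=SqliteCacheConfig("cohere.sqlite"))
    result = client.make_request(
        Request(model="cohere/command-r", model_deployment="cohere/command-r", prompt="Name one ocean.", max_tokens=20)
    )
    print(result.success, result.completions[0].text if result.completions else None)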
helm/clients/huggingface_client.py CHANGED
@@ -17,6 +17,7 @@ from helm.common.request import (
  GeneratedOutput,
  Token,
  )
+ from helm.tokenizers.tokenizer import Tokenizer
  from .client import CachingClient, truncate_sequence
  from helm.tokenizers.huggingface_tokenizer import HuggingFaceTokenizer, WrappedPreTrainedTokenizer
  from threading import Lock
@@ -53,7 +54,13 @@ class HuggingFaceRequest(TypedDict):
  class HuggingFaceServer:
  """A thin wrapper around a Hugging Face AutoModelForCausalLM for HuggingFaceClient to call."""

- def __init__(self, pretrained_model_name_or_path: str, **kwargs):
+ def __init__(
+ self,
+ pretrained_model_name_or_path: str,
+ wrapped_tokenizer: WrappedPreTrainedTokenizer,
+ openvino=False,
+ **kwargs,
+ ):
  if torch.cuda.is_available():
  hlog("CUDA is available, initializing with a GPU...")
  self.device: str = "cuda:0"
@@ -61,13 +68,44 @@ class HuggingFaceServer:
  self.device = "cpu"
  with htrack_block(f"Loading Hugging Face model {pretrained_model_name_or_path}"):
  # WARNING this may fail if your GPU does not have enough memory
- self.model = AutoModelForCausalLM.from_pretrained(
- pretrained_model_name_or_path, trust_remote_code=True, **kwargs
- ).to(self.device)
- with htrack_block(f"Loading Hugging Face tokenizer for model {pretrained_model_name_or_path}"):
- self.wrapped_tokenizer: WrappedPreTrainedTokenizer = HuggingFaceTokenizer.create_tokenizer(
- pretrained_model_name_or_path, **kwargs
- )
+ if openvino:
+ """
+ Optimum Intel provides a simple interface to optimize Transformer models and convert them to \
+ OpenVINO™ Intermediate Representation (IR) format to accelerate end-to-end pipelines on \
+ Intel® architectures using OpenVINO™ runtime.
+ """
+ from helm.common.optional_dependencies import handle_module_not_found_error
+
+ try:
+ from optimum.intel.openvino import OVModelForCausalLM
+ except ModuleNotFoundError as e:
+ handle_module_not_found_error(e, ["openvino"])
+
+ self.device = "cpu"
+ # Security issue: currently we trust remote code by default.
+ # We retain this temporarily to maintain reverse compatibility.
+ # TODO: Delete if-else and don't set trust_remote_code=True
+ if "trust_remote_code" in kwargs:
+ self.model = OVModelForCausalLM.from_pretrained(
+ pretrained_model_name_or_path, export=True, **kwargs
+ ).to(self.device)
+ else:
+ self.model = OVModelForCausalLM.from_pretrained(
+ pretrained_model_name_or_path, export=True, trust_remote_code=True, **kwargs
+ ).to(self.device)
+ else:
+ # Security issue: currently we trust remote code by default.
+ # We retain this temporarily to maintain reverse compatibility.
+ # TODO: Delete if-else and don't set trust_remote_code=True
+ if "trust_remote_code" in kwargs:
+ self.model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, **kwargs).to(
+ self.device
+ )
+ else:
+ self.model = AutoModelForCausalLM.from_pretrained(
+ pretrained_model_name_or_path, trust_remote_code=True, **kwargs
+ ).to(self.device)
+ self.wrapped_tokenizer = wrapped_tokenizer

  def serve_request(self, raw_request: HuggingFaceRequest) -> Dict:
  with self.wrapped_tokenizer as tokenizer:
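For context on the openvino branch above, this is roughly how Optimum Intel loads a causal LM as an OpenVINO model outside of HELM. A minimal sketch, assuming optimum-intel and openvino are installed; gpt2 is only a placeholder model id:

    # Standalone sketch of the OpenVINO loading path (not HELM code).
    from optimum.intel.openvino import OVModelForCausalLM
    from transformers import AutoTokenizer

    model = OVModelForCausalLM.from_pretrained("gpt2", export=True)  # export=True converts the PyTorch weights to OpenVINO IR
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    inputs = tokenizer("The capital of France is", return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=5)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))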
@@ -170,7 +208,12 @@ class HuggingFaceServerFactory:
  _servers_lock: Lock = Lock()

  @staticmethod
- def get_server(helm_model_name: str, pretrained_model_name_or_path: str, **kwargs) -> Any:
+ def get_server(
+ helm_model_name: str,
+ pretrained_model_name_or_path: str,
+ wrapped_tokenizer: WrappedPreTrainedTokenizer,
+ **kwargs,
+ ) -> Any:
  """
  Checks if the desired HuggingFaceModel is cached. Creates the HuggingFaceModel if it's not cached.
  Returns the HuggingFaceModel.
@@ -182,7 +225,7 @@
  f"for HELM model {helm_model_name} with Hugging Face Transformers"
  ):
  HuggingFaceServerFactory._servers[helm_model_name] = HuggingFaceServer(
- pretrained_model_name_or_path, **kwargs
+ pretrained_model_name_or_path, wrapped_tokenizer, **kwargs
  )

  return HuggingFaceServerFactory._servers[helm_model_name]
@@ -214,10 +257,25 @@ def _process_huggingface_client_kwargs(raw_kwargs: Dict[str, Any]):


  class HuggingFaceClient(CachingClient):
- def __init__(self, cache_config: CacheConfig, pretrained_model_name_or_path: Optional[str] = None, **kwargs):
+ def __init__(
+ self,
+ cache_config: CacheConfig,
+ tokenizer: Tokenizer,
+ pretrained_model_name_or_path: Optional[str] = None,
+ end_of_text_token: Optional[str] = None,
+ **kwargs,
+ ):
  super().__init__(cache_config=cache_config)
  self._pretrained_model_name_or_path = pretrained_model_name_or_path
+ if not isinstance(tokenizer, HuggingFaceTokenizer):
+ raise ValueError(
+ f"Tokenizer for Hugging Face model {pretrained_model_name_or_path} must be a HuggingFaceTokenizer, "
+ "but instead it is {tokenizer}"
+ )
+ self._wrapped_tokenizer: WrappedPreTrainedTokenizer = tokenizer.get_wrapped_tokenizer()
+ self._tokenizer = tokenizer
  self._kwargs = _process_huggingface_client_kwargs(kwargs)
+ self._end_of_text_token = end_of_text_token

  def make_request(self, request: Request) -> RequestResult:
  # Embedding not supported for this model
@@ -242,6 +300,7 @@ class HuggingFaceClient(CachingClient):
  huggingface_model: HuggingFaceServer = HuggingFaceServerFactory.get_server(
  helm_model_name=request.model,
  pretrained_model_name_or_path=pretrained_model_name_or_path,
+ wrapped_tokenizer=self._wrapped_tokenizer,
  **self._kwargs,
  )

@@ -284,7 +343,7 @@ class HuggingFaceClient(CachingClient):
  sequence_logprob += logprob

  completion = GeneratedOutput(text=raw_completion["text"], logprob=sequence_logprob, tokens=tokens)
- completion = truncate_sequence(completion, request)
+ completion = truncate_sequence(completion, request, end_of_text_token=self._end_of_text_token)
  completions.append(completion)

  return RequestResult(
helm/clients/openai_client.py CHANGED
@@ -60,8 +60,7 @@ class OpenAIClient(CachingClient):

  def _get_cache_key(self, raw_request: Dict, request: Request):
  cache_key = CachingClient.make_cache_key(raw_request, request)
- if is_vlm(request.model):
- assert request.multimodal_prompt is not None
+ if request.multimodal_prompt:
  prompt_key: str = generate_uid_for_multimodal_prompt(request.multimodal_prompt)
  cache_key = {**cache_key, "multimodal_prompt": prompt_key}
  del cache_key["messages"]
@@ -103,6 +102,14 @@

  def _make_chat_request(self, request: Request) -> RequestResult:
  messages: Optional[List[Dict[str, Union[str, Any]]]] = request.messages
+ if (
+ (request.prompt and request.messages)
+ or (request.prompt and request.multimodal_prompt)
+ or (request.messages and request.multimodal_prompt)
+ ):
+ raise ValueError(
+ f"More than one of `prompt`, `messages` and `multimodal_prompt` was set in request: {request}"
+ )
  if request.messages is not None:
  # Checks that all messages have a role and some content
  for message in request.messages:
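With this check, a chat request should carry exactly one of prompt, messages, or multimodal_prompt. An illustrative sketch, with a placeholder model name and field names from helm.common.request:

    # Illustrative only: the first request is fine; passing the second through
    # OpenAIClient._make_chat_request now raises ValueError because both `prompt` and `messages` are set.
    from helm.common.request import Request

    ok = Request(model="openai/gpt-4o", prompt="Say hi.")
    ambiguous = Request(
        model="openai/gpt-4o",
        prompt="Say hi.",
        messages=[{"role": "user", "content": "Say hi."}],
    )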
helm/clients/reka_client.py ADDED
@@ -0,0 +1,189 @@
+ # mypy: check_untyped_defs = False
+ import requests
+ from typing import Any, Dict, List, Optional, TypedDict
+
+ from helm.proxy.retry import NonRetriableException
+ from helm.common.cache import CacheConfig
+ from helm.common.media_object import TEXT_TYPE
+ from helm.common.request import wrap_request_time, Request, RequestResult, GeneratedOutput
+ from helm.common.hierarchical_logger import hlog
+ from helm.common.optional_dependencies import handle_module_not_found_error
+ from helm.tokenizers.tokenizer import Tokenizer
+ from .client import CachingClient, truncate_and_tokenize_response_text
+
+ try:
+ import reka
+ except ModuleNotFoundError as e:
+ handle_module_not_found_error(e, ["reka-api"])
+
+
+ class RekaAIRequest(TypedDict):
+ """Data passed between make_request and _send_request. Used as the cache key."""
+
+ model_name: str
+ conversation_history: List[Dict[str, str]]
+ request_output_len: int
+ temperature: float
+ runtime_top_p: float
+ random_seed: Optional[int]
+ stop_words: Optional[List[str]]
+ presence_penalty: float
+ frequency_penalty: float
+
+
+ class RekaClient(CachingClient):
+ REKA_CHAT_ROLE_MAPPING: Dict[str, str] = {
+ "user": "human",
+ "assistant": "model",
+ }
+
+ def __init__(
+ self,
+ tokenizer: Tokenizer,
+ tokenizer_name: str,
+ cache_config: CacheConfig,
+ api_key: Optional[str] = None,
+ ):
+ super().__init__(cache_config=cache_config)
+ self.tokenizer = tokenizer
+ self.tokenizer_name = tokenizer_name
+ self.client = reka
+ self.client.API_KEY = api_key
+
+ def _is_reka_model_engine(self, model_engine: str) -> bool:
+ if (
+ model_engine.startswith("reka-edge")
+ or model_engine.startswith("reka-flash")
+ or model_engine.startswith("reka-core")
+ ):
+ return True
+ else:
+ return False
+
+ def _get_model_for_request(self, request: Request) -> str:
+ return request.model_engine
+
+ def _get_random_seed(self, request: Request, completion_index: int) -> Optional[int]:
+ if request.random is None and completion_index == 0:
+ return None
+
+ # Treat the user's request.random as an integer for the random seed.
+ try:
+ request_random_seed = int(request.random) if request.random is not None else 0
+ except ValueError:
+ raise NonRetriableException("RekaAIClient only supports integer values for request.random")
+
+ # A large prime is used so that the resulting values are unlikely to collide
+ # with request.random values chosen by the user.
+ fixed_large_prime = 1911011
+ completion_index_random_seed = completion_index * fixed_large_prime
+
+ return request_random_seed + completion_index_random_seed
+
+ def _convert_messages_to_reka_chat_history(self, messages: List[Dict[str, Any]]):
+ chat_history = []
+ num_images: int = 0
+ for chat_turn, message in enumerate(messages):
+ role = message["role"]
+ content = message["content"]
+ current_chat_history: Dict[str, Any] = {
+ "type": self.REKA_CHAT_ROLE_MAPPING[role],
+ "text": "", # text placeholder
+ "media_url": None,
+ }
+ for item in content:
+ if item["type"] == "image_url":
+ if chat_turn == 0 and num_images == 0:
+ current_chat_history["media_url"] = item["image_url"]["url"]
+ num_images += 1
+ else:
+ raise ValueError(
+ f"Only the first message can contain one image. Found image input "
+ f"in message {chat_turn + 1}"
+ )
+ elif item["type"] == "text":
+ current_chat_history["text"] = item["text"]
+ else:
+ raise ValueError(f"Unrecognized message type {item['type']}")
+ chat_history.append(current_chat_history)
+ return chat_history
+
+ def make_request(self, request: Request) -> RequestResult:
+ completions: List[GeneratedOutput] = []
+ messages: Optional[List[Dict[str, Any]]] = request.messages
+ reka_chat_history: List[Dict[str, Any]]
+ if messages is not None:
+ # Checks that all messages have a role and some content
+ for message in messages:
+ if not message.get("role") or not message.get("content"):
+ raise ValueError("All messages must have a role and content")
+ # Checks that the last role is "user"
+ if messages[-1]["role"] != "user":
+ raise ValueError("Last message must have role 'user'")
+ if request.prompt != "":
+ hlog("WARNING: Since message is set, prompt will be ignored")
+ reka_chat_history = self._convert_messages_to_reka_chat_history(messages)
+ else:
+ current_chat_history: Dict[str, Any] = {
+ "type": "human",
+ "text": "",
+ "media_url": None,
+ }
+ if request.multimodal_prompt is not None:
+ for media_object in request.multimodal_prompt.media_objects:
+ if media_object.is_type("image") and media_object.location:
+ from helm.common.images_utils import encode_base64
+
+ base64_image: str = encode_base64(media_object.location)
+ current_chat_history["media_url"] = f"data:image/jpeg;base64,{base64_image}"
+ elif media_object.is_type(TEXT_TYPE):
+ if media_object.text is None:
+ raise ValueError("MediaObject of text type has missing text field value")
+ current_chat_history["text"] = media_object.text
+ else:
+ raise ValueError(f"Unrecognized MediaObject type {media_object.type}")
+
+ else:
+ current_chat_history["text"] = request.prompt
+ reka_chat_history = [current_chat_history]
+
+ # `num_completions` is not supported, so instead make `num_completions` separate requests.
+ for completion_index in range(request.num_completions):
+ try:
+ raw_request: RekaAIRequest = {
+ "model_name": self._get_model_for_request(request),
+ "conversation_history": reka_chat_history, # we only use chat_history as the input
+ "request_output_len": request.max_tokens,
+ "temperature": request.temperature,
+ "random_seed": self._get_random_seed(request, completion_index),
+ "stop_words": request.stop_sequences or None, # API doesn't like empty list
+ "runtime_top_p": request.top_p,
+ "presence_penalty": request.presence_penalty,
+ "frequency_penalty": request.frequency_penalty,
+ }
+
+ def do_it() -> Dict[str, Any]:
+ return self.client.chat(**raw_request)
+
+ response, cached = self.cache.get(raw_request, wrap_request_time(do_it))
+ except (requests.exceptions.RequestException, AssertionError) as e:
+ error: str = f"RekaClient error: {e}"
+ return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
+
+ response_message: Dict[str, Any] = response
+ assert response_message["type"] == "model"
+ response_text: str = response_message["text"]
+
+ # The Reka API doesn't support echo. If `echo_prompt` is true, combine the prompt and completion.
+ text: str = request.prompt + response_text if request.echo_prompt else response_text
+ completion = truncate_and_tokenize_response_text(text, request, self.tokenizer, self.tokenizer_name)
+ completions.append(completion)
+
+ return RequestResult(
+ success=True,
+ cached=cached,
+ request_time=response["request_time"],
+ request_datetime=response.get("request_datetime"),
+ completions=completions,
+ embedding=[],
+ )
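To make the message conversion in _convert_messages_to_reka_chat_history concrete, here is an illustrative sketch of the OpenAI-style input it accepts and the Reka conversation_history it would produce; the values are placeholders, not part of the release:

    # Illustrative only: input/output shape of _convert_messages_to_reka_chat_history.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}},
                {"type": "text", "text": "What is shown in this image?"},
            ],
        },
        {"role": "assistant", "content": [{"type": "text", "text": "A cat sitting on a chair."}]},
        {"role": "user", "content": [{"type": "text", "text": "What color is it?"}]},
    ]

    # Expected Reka conversation_history (roles mapped user -> human, assistant -> model;
    # only the first turn may carry an image):
    # [
    #     {"type": "human", "text": "What is shown in this image?", "media_url": "data:image/jpeg;base64,..."},
    #     {"type": "model", "text": "A cat sitting on a chair.", "media_url": None},
    #     {"type": "human", "text": "What color is it?", "media_url": None},
    # ]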