rapidata 2.36.2__py3-none-any.whl → 2.38.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of rapidata might be problematic.
Files changed (65)
  1. rapidata/__init__.py +3 -4
  2. rapidata/rapidata_client/__init__.py +1 -4
  3. rapidata/rapidata_client/api/{rapidata_exception.py → rapidata_api_client.py} +119 -2
  4. rapidata/rapidata_client/benchmark/leaderboard/rapidata_leaderboard.py +88 -46
  5. rapidata/rapidata_client/benchmark/participant/_participant.py +26 -9
  6. rapidata/rapidata_client/benchmark/rapidata_benchmark.py +274 -205
  7. rapidata/rapidata_client/benchmark/rapidata_benchmark_manager.py +98 -76
  8. rapidata/rapidata_client/config/__init__.py +3 -0
  9. rapidata/rapidata_client/config/logger.py +135 -0
  10. rapidata/rapidata_client/config/logging_config.py +58 -0
  11. rapidata/rapidata_client/config/managed_print.py +6 -0
  12. rapidata/rapidata_client/config/order_config.py +14 -0
  13. rapidata/rapidata_client/config/rapidata_config.py +14 -9
  14. rapidata/rapidata_client/config/tracer.py +130 -0
  15. rapidata/rapidata_client/config/upload_config.py +14 -0
  16. rapidata/rapidata_client/datapoints/_datapoint.py +1 -1
  17. rapidata/rapidata_client/datapoints/assets/_media_asset.py +1 -1
  18. rapidata/rapidata_client/datapoints/assets/_sessions.py +2 -2
  19. rapidata/rapidata_client/demographic/demographic_manager.py +16 -14
  20. rapidata/rapidata_client/filter/_base_filter.py +11 -5
  21. rapidata/rapidata_client/filter/age_filter.py +9 -3
  22. rapidata/rapidata_client/filter/and_filter.py +20 -5
  23. rapidata/rapidata_client/filter/campaign_filter.py +7 -1
  24. rapidata/rapidata_client/filter/country_filter.py +8 -2
  25. rapidata/rapidata_client/filter/custom_filter.py +9 -3
  26. rapidata/rapidata_client/filter/gender_filter.py +9 -3
  27. rapidata/rapidata_client/filter/language_filter.py +12 -5
  28. rapidata/rapidata_client/filter/new_user_filter.py +3 -4
  29. rapidata/rapidata_client/filter/not_filter.py +17 -5
  30. rapidata/rapidata_client/filter/or_filter.py +20 -5
  31. rapidata/rapidata_client/filter/response_count_filter.py +6 -0
  32. rapidata/rapidata_client/filter/user_score_filter.py +17 -5
  33. rapidata/rapidata_client/order/_rapidata_dataset.py +45 -17
  34. rapidata/rapidata_client/order/_rapidata_order_builder.py +19 -13
  35. rapidata/rapidata_client/order/rapidata_order.py +60 -48
  36. rapidata/rapidata_client/order/rapidata_order_manager.py +239 -195
  37. rapidata/rapidata_client/order/rapidata_results.py +71 -57
  38. rapidata/rapidata_client/rapidata_client.py +36 -23
  39. rapidata/rapidata_client/selection/_base_selection.py +6 -0
  40. rapidata/rapidata_client/selection/static_selection.py +5 -10
  41. rapidata/rapidata_client/settings/_rapidata_setting.py +8 -0
  42. rapidata/rapidata_client/settings/alert_on_fast_response.py +8 -5
  43. rapidata/rapidata_client/settings/free_text_minimum_characters.py +9 -4
  44. rapidata/rapidata_client/validation/rapidata_validation_set.py +20 -16
  45. rapidata/rapidata_client/validation/rapids/rapids.py +7 -1
  46. rapidata/rapidata_client/validation/validation_set_manager.py +285 -268
  47. rapidata/rapidata_client/workflow/_base_workflow.py +6 -1
  48. rapidata/rapidata_client/workflow/_classify_workflow.py +6 -0
  49. rapidata/rapidata_client/workflow/_compare_workflow.py +6 -0
  50. rapidata/rapidata_client/workflow/_draw_workflow.py +6 -0
  51. rapidata/rapidata_client/workflow/_evaluation_workflow.py +6 -0
  52. rapidata/rapidata_client/workflow/_free_text_workflow.py +6 -0
  53. rapidata/rapidata_client/workflow/_locate_workflow.py +6 -0
  54. rapidata/rapidata_client/workflow/_ranking_workflow.py +12 -0
  55. rapidata/rapidata_client/workflow/_select_words_workflow.py +6 -0
  56. rapidata/rapidata_client/workflow/_timestamp_workflow.py +6 -0
  57. rapidata/service/credential_manager.py +1 -1
  58. rapidata/service/openapi_service.py +2 -2
  59. {rapidata-2.36.2.dist-info → rapidata-2.38.0.dist-info}/METADATA +4 -1
  60. {rapidata-2.36.2.dist-info → rapidata-2.38.0.dist-info}/RECORD +62 -59
  61. rapidata/rapidata_client/logging/__init__.py +0 -2
  62. rapidata/rapidata_client/logging/logger.py +0 -122
  63. rapidata/rapidata_client/logging/output_manager.py +0 -20
  64. {rapidata-2.36.2.dist-info → rapidata-2.38.0.dist-info}/LICENSE +0 -0
  65. {rapidata-2.36.2.dist-info → rapidata-2.38.0.dist-info}/WHEEL +0 -0
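
The headline change in this file list is the removal of the rapidata.rapidata_client.logging module (entries 61-63) in favor of a new rapidata.rapidata_client.config package (entries 8-15) that bundles the logger with a tracer and a managed_print helper. A minimal sketch of the new import surface, inferred from the diff below and assuming rapidata 2.38.0 is installed; the span name and messages here are illustrative only, not from the SDK:

# 2.36.2 exposed the logger from the now-removed module:
#   from rapidata.rapidata_client.logging import logger
# 2.38.0 moves it into the config package and adds tracing
# (import path confirmed by the diff below):
from rapidata.rapidata_client.config import logger, managed_print, tracer

# Public SDK methods are now wrapped in spans, used as context managers:
with tracer.start_as_current_span("example_operation"):
    logger.info("structured log message for benchmark %s", "benchmark-id")
    managed_print("user-facing output now goes through managed_print")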
rapidata/rapidata_client/benchmark/rapidata_benchmark.py

@@ -1,38 +1,41 @@
 import re
+import urllib.parse
+import webbrowser
+from colorama import Fore
 from typing import Literal, Optional, Sequence
-from rapidata.api_client.models.root_filter import RootFilter
-from rapidata.api_client.models.filter import Filter
-from rapidata.api_client.models.query_model import QueryModel
-from rapidata.api_client.models.page_info import PageInfo
-from rapidata.api_client.models.create_leaderboard_model import CreateLeaderboardModel
+
+from rapidata.api_client.models.and_user_filter_model_filters_inner import (
+    AndUserFilterModelFiltersInner,
+)
 from rapidata.api_client.models.create_benchmark_participant_model import (
     CreateBenchmarkParticipantModel,
 )
+from rapidata.api_client.models.create_leaderboard_model import CreateLeaderboardModel
+from rapidata.api_client.models.filter import Filter
+from rapidata.api_client.models.filter_operator import FilterOperator
+from rapidata.api_client.models.file_asset_model import FileAssetModel
+from rapidata.api_client.models.query_model import QueryModel
+from rapidata.api_client.models.page_info import PageInfo
+from rapidata.api_client.models.root_filter import RootFilter
+from rapidata.api_client.models.source_url_metadata_model import SourceUrlMetadataModel
 from rapidata.api_client.models.submit_prompt_model import SubmitPromptModel
 from rapidata.api_client.models.submit_prompt_model_prompt_asset import (
     SubmitPromptModelPromptAsset,
 )
 from rapidata.api_client.models.url_asset_input import UrlAssetInput
-from rapidata.api_client.models.file_asset_model import FileAssetModel
-from rapidata.api_client.models.source_url_metadata_model import SourceUrlMetadataModel
-from rapidata.api_client.models.and_user_filter_model_filters_inner import (
-    AndUserFilterModelFiltersInner,
-)
-from rapidata.api_client.models.filter_operator import FilterOperator
-
-from rapidata.rapidata_client.benchmark.participant._participant import (
-    BenchmarkParticipant,
-)
-from rapidata.rapidata_client.logging import logger
-from rapidata.service.openapi_service import OpenAPIService
 
+from rapidata.rapidata_client.benchmark._detail_mapper import DetailMapper
 from rapidata.rapidata_client.benchmark.leaderboard.rapidata_leaderboard import (
     RapidataLeaderboard,
 )
+from rapidata.rapidata_client.benchmark.participant._participant import (
+    BenchmarkParticipant,
+)
 from rapidata.rapidata_client.datapoints.assets import MediaAsset
-from rapidata.rapidata_client.benchmark._detail_mapper import DetailMapper
 from rapidata.rapidata_client.filter import RapidataFilter
+from rapidata.rapidata_client.config import logger, managed_print, tracer
 from rapidata.rapidata_client.settings import RapidataSetting
+from rapidata.service.openapi_service import OpenAPIService
 
 
 class RapidataBenchmark:
@@ -56,6 +59,9 @@ class RapidataBenchmark:
         self.__leaderboards: list[RapidataLeaderboard] = []
         self.__identifiers: list[str] = []
         self.__tags: list[list[str]] = []
+        self.__benchmark_page: str = (
+            f"https://app.{self.__openapi_service.environment}/mri/benchmarks/{self.id}"
+        )
 
     def __instantiate_prompts(self) -> None:
         current_page = 1
@@ -99,98 +105,104 @@ class RapidataBenchmark:
 
     @property
     def identifiers(self) -> list[str]:
-        if not self.__identifiers:
-            self.__instantiate_prompts()
+        with tracer.start_as_current_span("RapidataBenchmark.identifiers"):
+            if not self.__identifiers:
+                self.__instantiate_prompts()
 
-        return self.__identifiers
+            return self.__identifiers
 
     @property
     def prompts(self) -> list[str | None]:
         """
         Returns the prompts that are registered for the leaderboard.
         """
-        if not self.__prompts:
-            self.__instantiate_prompts()
+        with tracer.start_as_current_span("RapidataBenchmark.prompts"):
+            if not self.__prompts:
+                self.__instantiate_prompts()
 
-        return self.__prompts
+            return self.__prompts
 
     @property
    def prompt_assets(self) -> list[str | None]:
         """
         Returns the prompt assets that are registered for the benchmark.
         """
-        if not self.__prompt_assets:
-            self.__instantiate_prompts()
+        with tracer.start_as_current_span("RapidataBenchmark.prompt_assets"):
+            if not self.__prompt_assets:
+                self.__instantiate_prompts()
 
-        return self.__prompt_assets
+            return self.__prompt_assets
 
     @property
     def tags(self) -> list[list[str]]:
         """
         Returns the tags that are registered for the benchmark.
         """
-        if not self.__tags:
-            self.__instantiate_prompts()
+        with tracer.start_as_current_span("RapidataBenchmark.tags"):
+            if not self.__tags:
+                self.__instantiate_prompts()
 
-        return self.__tags
+            return self.__tags
 
     @property
     def leaderboards(self) -> list[RapidataLeaderboard]:
         """
         Returns the leaderboards that are registered for the benchmark.
         """
-        if not self.__leaderboards:
-            current_page = 1
-            total_pages = None
-
-            while True:
-                leaderboards_result = (
-                    self.__openapi_service.leaderboard_api.leaderboards_get(
-                        request=QueryModel(
-                            filter=RootFilter(
-                                filters=[
-                                    Filter(
-                                        field="BenchmarkId",
-                                        operator=FilterOperator.EQ,
-                                        value=self.id,
-                                    )
-                                ]
-                            ),
-                            page=PageInfo(index=current_page, size=100),
+        with tracer.start_as_current_span("RapidataBenchmark.leaderboards"):
+            if not self.__leaderboards:
+                current_page = 1
+                total_pages = None
+
+                while True:
+                    leaderboards_result = (
+                        self.__openapi_service.leaderboard_api.leaderboards_get(
+                            request=QueryModel(
+                                filter=RootFilter(
+                                    filters=[
+                                        Filter(
+                                            field="BenchmarkId",
+                                            operator=FilterOperator.EQ,
+                                            value=self.id,
+                                        )
+                                    ]
+                                ),
+                                page=PageInfo(index=current_page, size=100),
+                            )
                         )
                     )
-                )
-
-                if leaderboards_result.total_pages is None:
-                    raise ValueError(
-                        "An error occurred while fetching leaderboards: total_pages is None"
-                    )
 
-                total_pages = leaderboards_result.total_pages
-
-                self.__leaderboards.extend(
-                    [
-                        RapidataLeaderboard(
-                            leaderboard.name,
-                            leaderboard.instruction,
-                            leaderboard.show_prompt,
-                            leaderboard.show_prompt_asset,
-                            leaderboard.is_inversed,
-                            leaderboard.response_budget,
-                            leaderboard.min_responses,
-                            leaderboard.id,
-                            self.__openapi_service,
+                    if leaderboards_result.total_pages is None:
+                        raise ValueError(
+                            "An error occurred while fetching leaderboards: total_pages is None"
                         )
-                        for leaderboard in leaderboards_result.items
-                    ]
-                )
 
-                if current_page >= total_pages:
-                    break
+                    total_pages = leaderboards_result.total_pages
+
+                    self.__leaderboards.extend(
+                        [
+                            RapidataLeaderboard(
+                                leaderboard.name,
+                                leaderboard.instruction,
+                                leaderboard.show_prompt,
+                                leaderboard.show_prompt_asset,
+                                leaderboard.is_inversed,
+                                leaderboard.response_budget,
+                                leaderboard.min_responses,
+                                self.id,
+                                leaderboard.id,
+                                self.__openapi_service,
+                            )
+                            for leaderboard in leaderboards_result.items
+                        ]
+                    )
 
-                current_page += 1
+                    if current_page >= total_pages:
+                        break
 
-        return self.__leaderboards
+                    current_page += 1
+
+            return self.__leaderboards
 
     def add_prompt(
         self,
@@ -208,53 +220,66 @@ class RapidataBenchmark:
             asset: The asset that will be used to evaluate the model. Provided as a link to the asset.
             tags: The tags can be used to filter the leaderboard results. They will NOT be shown to the users.
         """
-        if tags is None:
-            tags = []
-
-        if not isinstance(identifier, str):
-            raise ValueError("Identifier must be a string.")
+        with tracer.start_as_current_span("RapidataBenchmark.add_prompt"):
+            if tags is None:
+                tags = []
 
-        if prompt is None and asset is None:
-            raise ValueError("Prompt or asset must be provided.")
+            if not isinstance(identifier, str):
+                raise ValueError("Identifier must be a string.")
 
-        if prompt is not None and not isinstance(prompt, str):
-            raise ValueError("Prompt must be a string.")
+            if prompt is None and asset is None:
+                raise ValueError("Prompt or asset must be provided.")
 
-        if asset is not None and not isinstance(asset, str):
-            raise ValueError("Asset must be a string. That is the link to the asset.")
+            if prompt is not None and not isinstance(prompt, str):
+                raise ValueError("Prompt must be a string.")
 
-        if identifier in self.identifiers:
-            raise ValueError("Identifier already exists in the benchmark.")
-
-        if asset is not None and not re.match(r"^https?://", asset):
-            raise ValueError("Asset must be a link to the asset.")
+            if asset is not None and not isinstance(asset, str):
+                raise ValueError(
+                    "Asset must be a string. That is the link to the asset."
+                )
 
-        if tags is not None and (
-            not isinstance(tags, list) or not all(isinstance(tag, str) for tag in tags)
-        ):
-            raise ValueError("Tags must be a list of strings.")
+            if identifier in self.identifiers:
+                raise ValueError("Identifier already exists in the benchmark.")
+
+            if asset is not None and not re.match(r"^https?://", asset):
+                raise ValueError("Asset must be a link to the asset.")
+
+            if tags is not None and (
+                not isinstance(tags, list)
+                or not all(isinstance(tag, str) for tag in tags)
+            ):
+                raise ValueError("Tags must be a list of strings.")
+
+            logger.info(
+                "Adding identifier %s with prompt %s, asset %s and tags %s to benchmark %s",
+                identifier,
+                prompt,
+                asset,
+                tags,
+                self.id,
+            )
 
-        self.__identifiers.append(identifier)
+            self.__identifiers.append(identifier)
 
-        self.__tags.append(tags)
-        self.__prompts.append(prompt)
-        self.__prompt_assets.append(asset)
+            self.__tags.append(tags)
+            self.__prompts.append(prompt)
+            self.__prompt_assets.append(asset)
 
-        self.__openapi_service.benchmark_api.benchmark_benchmark_id_prompt_post(
-            benchmark_id=self.id,
-            submit_prompt_model=SubmitPromptModel(
-                identifier=identifier,
-                prompt=prompt,
-                promptAsset=(
-                    SubmitPromptModelPromptAsset(
-                        UrlAssetInput(_t="UrlAssetInput", url=asset)
-                    )
-                    if asset is not None
-                    else None
+            self.__openapi_service.benchmark_api.benchmark_benchmark_id_prompt_post(
+                benchmark_id=self.id,
+                submit_prompt_model=SubmitPromptModel(
+                    identifier=identifier,
+                    prompt=prompt,
+                    promptAsset=(
+                        SubmitPromptModelPromptAsset(
+                            UrlAssetInput(_t="UrlAssetInput", url=asset)
+                        )
+                        if asset is not None
+                        else None
+                    ),
+                    tags=tags,
                 ),
-            tags=tags,
-        ),
-    )
+            )
 
     def create_leaderboard(
         self,
@@ -284,54 +309,74 @@
             filters: The filters that should be applied to the leaderboard. Will determine who can solve answer in the leaderboard. (default: [])
             settings: The settings that should be applied to the leaderboard. Will determine the behavior of the tasks on the leaderboard. (default: [])
         """
-        if not isinstance(min_responses_per_matchup, int):
-            raise ValueError("Min responses per matchup must be an integer")
-
-        if min_responses_per_matchup < 3:
-            raise ValueError("Min responses per matchup must be at least 3")
-
-        leaderboard_result = self.__openapi_service.leaderboard_api.leaderboard_post(
-            create_leaderboard_model=CreateLeaderboardModel(
-                benchmarkId=self.id,
-                name=name,
-                instruction=instruction,
-                showPrompt=show_prompt,
-                showPromptAsset=show_prompt_asset,
-                isInversed=inverse_ranking,
-                minResponses=min_responses_per_matchup,
-                responseBudget=DetailMapper.get_budget(level_of_detail),
-                validationSetId=validation_set_id,
-                filters=(
-                    [
-                        AndUserFilterModelFiltersInner(filter._to_model())
-                        for filter in filters
-                    ]
-                    if filters
-                    else None
-                ),
-                featureFlags=(
-                    [setting._to_feature_flag() for setting in settings]
-                    if settings
-                    else None
-                ),
+        with tracer.start_as_current_span("create_leaderboard"):
+            if not isinstance(min_responses_per_matchup, int):
+                raise ValueError("Min responses per matchup must be an integer")
+
+            if min_responses_per_matchup < 3:
+                raise ValueError("Min responses per matchup must be at least 3")
+
+            logger.info(
+                "Creating leaderboard %s with instruction %s, show_prompt %s, show_prompt_asset %s, inverse_ranking %s, level_of_detail %s, min_responses_per_matchup %s, validation_set_id %s, filters %s, settings %s",
+                name,
+                instruction,
+                show_prompt,
+                show_prompt_asset,
+                inverse_ranking,
+                level_of_detail,
+                min_responses_per_matchup,
+                validation_set_id,
+                filters,
+                settings,
             )
-        )
 
-        assert (
-            leaderboard_result.benchmark_id == self.id
-        ), "The leaderboard was not created for the correct benchmark."
-
-        return RapidataLeaderboard(
-            name,
-            instruction,
-            show_prompt,
-            show_prompt_asset,
-            inverse_ranking,
-            leaderboard_result.response_budget,
-            min_responses_per_matchup,
-            leaderboard_result.id,
-            self.__openapi_service,
-        )
+            leaderboard_result = (
+                self.__openapi_service.leaderboard_api.leaderboard_post(
+                    create_leaderboard_model=CreateLeaderboardModel(
+                        benchmarkId=self.id,
+                        name=name,
+                        instruction=instruction,
+                        showPrompt=show_prompt,
+                        showPromptAsset=show_prompt_asset,
+                        isInversed=inverse_ranking,
+                        minResponses=min_responses_per_matchup,
+                        responseBudget=DetailMapper.get_budget(level_of_detail),
+                        validationSetId=validation_set_id,
+                        filters=(
+                            [
+                                AndUserFilterModelFiltersInner(filter._to_model())
+                                for filter in filters
+                            ]
+                            if filters
+                            else None
+                        ),
+                        featureFlags=(
+                            [setting._to_feature_flag() for setting in settings]
+                            if settings
+                            else None
+                        ),
+                    )
+                )
+            )
+
+            assert (
+                leaderboard_result.benchmark_id == self.id
+            ), "The leaderboard was not created for the correct benchmark."
+
+            logger.info("Leaderboard created with id %s", leaderboard_result.id)
+
+            return RapidataLeaderboard(
+                name,
+                instruction,
+                show_prompt,
+                show_prompt_asset,
+                inverse_ranking,
+                leaderboard_result.response_budget,
+                min_responses_per_matchup,
+                self.id,
+                leaderboard_result.id,
+                self.__openapi_service,
+            )
 
     def evaluate_model(
         self, name: str, media: list[str], identifiers: list[str]
@@ -345,65 +390,89 @@
            identifiers: The identifiers that correspond to the media. The order of the identifiers must match the order of the media.
                The identifiers that are used must be registered for the benchmark. To see the registered identifiers, use the identifiers property.
         """
-        if not media:
-            raise ValueError("Media must be a non-empty list of strings")
+        with tracer.start_as_current_span("evaluate_model"):
+            if not media:
+                raise ValueError("Media must be a non-empty list of strings")
+
+            if len(media) != len(identifiers):
+                raise ValueError("Media and identifiers must have the same length")
+
+            if not all(identifier in self.identifiers for identifier in identifiers):
+                raise ValueError(
+                    "All identifiers must be in the registered identifiers list. To see the registered identifiers, use the identifiers property.\
+                    \nTo see the prompts that are associated with the identifiers, use the prompts property."
+                )
 
-        if len(media) != len(identifiers):
-            raise ValueError("Media and identifiers must have the same length")
+            # happens before the creation of the participant to ensure all media paths are valid
+            assets: list[MediaAsset] = []
+            for media_path in media:
+                assets.append(MediaAsset(media_path))
 
-        if not all(identifier in self.identifiers for identifier in identifiers):
-            raise ValueError(
-                "All identifiers must be in the registered identifiers list. To see the registered identifiers, use the identifiers property.\
-                \nTo see the prompts that are associated with the identifiers, use the prompts property."
+            participant_result = self.__openapi_service.benchmark_api.benchmark_benchmark_id_participants_post(
+                benchmark_id=self.id,
+                create_benchmark_participant_model=CreateBenchmarkParticipantModel(
+                    name=name,
+                ),
            )
 
-        # happens before the creation of the participant to ensure all media paths are valid
-        assets: list[MediaAsset] = []
-        for media_path in media:
-            assets.append(MediaAsset(media_path))
+            logger.info(f"Participant created: {participant_result.participant_id}")
 
-        participant_result = self.__openapi_service.benchmark_api.benchmark_benchmark_id_participants_post(
-            benchmark_id=self.id,
-            create_benchmark_participant_model=CreateBenchmarkParticipantModel(
-                name=name,
-            ),
-        )
+            participant = BenchmarkParticipant(
+                name, participant_result.participant_id, self.__openapi_service
+            )
 
-        logger.info(f"Participant created: {participant_result.participant_id}")
+            with tracer.start_as_current_span("upload_media_for_participant"):
+                logger.info(
+                    f"Uploading {len(assets)} media assets to participant {participant.id}"
+                )
 
-        participant = BenchmarkParticipant(
-            name, participant_result.participant_id, self.__openapi_service
-        )
+                successful_uploads, failed_uploads = participant.upload_media(
+                    assets,
+                    identifiers,
+                )
 
-        successful_uploads, failed_uploads = participant.upload_media(
-            assets,
-            identifiers,
-        )
+                total_uploads = len(assets)
+                success_rate = (
+                    (len(successful_uploads) / total_uploads * 100)
+                    if total_uploads > 0
+                    else 0
+                )
+                logger.info(
+                    f"Upload complete: {len(successful_uploads)} successful, {len(failed_uploads)} failed ({success_rate:.1f}% success rate)"
+                )
 
-        total_uploads = len(assets)
-        success_rate = (
-            (len(successful_uploads) / total_uploads * 100) if total_uploads > 0 else 0
-        )
-        logger.info(
-            f"Upload complete: {len(successful_uploads)} successful, {len(failed_uploads)} failed ({success_rate:.1f}% success rate)"
-        )
+                if failed_uploads:
+                    logger.error(
+                        f"Failed uploads for media: {[asset.path for asset in failed_uploads]}"
+                    )
+                    logger.warning(
+                        "Some uploads failed. The model evaluation may be incomplete."
+                    )
 
-        if failed_uploads:
-            logger.error(
-                f"Failed uploads for media: {[asset.path for asset in failed_uploads]}"
-            )
-            logger.warning(
-                "Some uploads failed. The model evaluation may be incomplete."
-            )
+                if len(successful_uploads) == 0:
+                    raise RuntimeError(
+                        "No uploads were successful. The model evaluation will not be completed."
+                    )
 
-        if len(successful_uploads) == 0:
-            raise RuntimeError(
-                "No uploads were successful. The model evaluation will not be completed."
+            self.__openapi_service.participant_api.participants_participant_id_submit_post(
+                participant_id=participant_result.participant_id
             )
 
-        self.__openapi_service.participant_api.participants_participant_id_submit_post(
-            participant_id=participant_result.participant_id
-        )
+    def view(self) -> None:
+        """
+        Views the benchmark.
+        """
+        logger.info("Opening benchmark page in browser...")
+        could_open_browser = webbrowser.open(self.__benchmark_page)
+        if not could_open_browser:
+            encoded_url = urllib.parse.quote(
+                self.__benchmark_page, safe="%/:=&?~#+!$,;'@()*[]"
+            )
+            managed_print(
+                Fore.RED
+                + f"Please open this URL in your browser: '{encoded_url}'"
+                + Fore.RESET
+            )
 
     def __str__(self) -> str:
         return f"RapidataBenchmark(name={self.name}, id={self.id})"