alchemist-nrel 0.2.1__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. alchemist_core/__init__.py +14 -7
  2. alchemist_core/acquisition/botorch_acquisition.py +15 -6
  3. alchemist_core/audit_log.py +594 -0
  4. alchemist_core/data/experiment_manager.py +76 -5
  5. alchemist_core/models/botorch_model.py +6 -4
  6. alchemist_core/models/sklearn_model.py +74 -8
  7. alchemist_core/session.py +788 -39
  8. alchemist_core/utils/doe.py +200 -0
  9. alchemist_nrel-0.3.1.dist-info/METADATA +185 -0
  10. alchemist_nrel-0.3.1.dist-info/RECORD +66 -0
  11. {alchemist_nrel-0.2.1.dist-info → alchemist_nrel-0.3.1.dist-info}/entry_points.txt +1 -0
  12. api/example_client.py +7 -2
  13. api/main.py +21 -4
  14. api/models/requests.py +95 -1
  15. api/models/responses.py +167 -0
  16. api/routers/acquisition.py +25 -0
  17. api/routers/experiments.py +134 -6
  18. api/routers/sessions.py +438 -10
  19. api/routers/visualizations.py +10 -5
  20. api/routers/websocket.py +132 -0
  21. api/run_api.py +56 -0
  22. api/services/session_store.py +285 -54
  23. api/static/NEW_ICON.ico +0 -0
  24. api/static/NEW_ICON.png +0 -0
  25. api/static/NEW_LOGO_DARK.png +0 -0
  26. api/static/NEW_LOGO_LIGHT.png +0 -0
  27. api/static/assets/api-vcoXEqyq.js +1 -0
  28. api/static/assets/index-DWfIKU9j.js +4094 -0
  29. api/static/assets/index-sMIa_1hV.css +1 -0
  30. api/static/index.html +14 -0
  31. api/static/vite.svg +1 -0
  32. ui/gpr_panel.py +7 -2
  33. ui/notifications.py +197 -10
  34. ui/ui.py +1117 -68
  35. ui/variables_setup.py +47 -2
  36. ui/visualizations.py +60 -3
  37. alchemist_core/models/ax_model.py +0 -159
  38. alchemist_nrel-0.2.1.dist-info/METADATA +0 -206
  39. alchemist_nrel-0.2.1.dist-info/RECORD +0 -54
  40. {alchemist_nrel-0.2.1.dist-info → alchemist_nrel-0.3.1.dist-info}/WHEEL +0 -0
  41. {alchemist_nrel-0.2.1.dist-info → alchemist_nrel-0.3.1.dist-info}/licenses/LICENSE +0 -0
  42. {alchemist_nrel-0.2.1.dist-info → alchemist_nrel-0.3.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,594 @@
1
+ """
2
+ Audit Log - Append-only logging for reproducible optimization workflows.
3
+
4
+ This module provides structured logging of optimization decisions to ensure
5
+ research reproducibility and traceability. The audit log captures:
6
+ - Experimental data lock-ins
7
+ - Model training decisions
8
+ - Acquisition function choices
9
+
10
+ Users can explore freely without spamming the log; only explicit "lock-in"
11
+ actions create audit entries.
12
+ """
13
+
14
+ from typing import List, Dict, Any, Optional, Union
15
+ from datetime import datetime
16
+ from dataclasses import dataclass, asdict, field
17
+ import hashlib
18
+ import json
19
+ import uuid
20
+ import pandas as pd
21
+
22
+
23
@dataclass
class SessionMetadata:
    """Descriptive metadata attached to an optimization session.

    Carries the identifying and organizational information for a session
    so it can be listed, searched, and displayed in user interfaces.
    """

    # Unique session identifier (UUID string).
    session_id: str
    # Human-readable session name.
    name: str
    # ISO-8601 timestamp of session creation.
    created_at: str
    # ISO-8601 timestamp of the most recent modification.
    last_modified: str
    # Optional free-form description of the session.
    description: str = ""
    # Optional author name.
    author: str = ""
    # Optional organizational tags.
    tags: List[str] = field(default_factory=list)

    @staticmethod
    def create(name: str = "Untitled Session", description: str = "",
               tags: Optional[List[str]] = None) -> 'SessionMetadata':
        """Build fresh metadata with a new UUID and current timestamps.

        Args:
            name: Human-readable session name
            description: Optional free-form description
            tags: Optional organizational tags

        Returns:
            A newly populated SessionMetadata
        """
        stamp = datetime.now().isoformat()
        return SessionMetadata(
            session_id=str(uuid.uuid4()),
            name=name,
            created_at=stamp,
            last_modified=stamp,
            description=description,
            author="",
            tags=tags or [],
        )

    def update_modified(self):
        """Stamp last_modified with the current time."""
        self.last_modified = datetime.now().isoformat()

    def to_dict(self) -> Dict[str, Any]:
        """Serialize all fields to a plain dictionary."""
        return asdict(self)

    @staticmethod
    def from_dict(data: Dict[str, Any]) -> 'SessionMetadata':
        """Reconstruct metadata from a dictionary produced by to_dict()."""
        return SessionMetadata(**data)
82
+
83
+
84
@dataclass
class AuditEntry:
    """One immutable record in the optimization audit trail."""

    # ISO-8601 timestamp of entry creation.
    timestamp: str
    # Decision category: 'data_locked', 'model_locked', or 'acquisition_locked'.
    entry_type: str
    # Full snapshot of the parameters behind the decision.
    parameters: Dict[str, Any]
    # Short SHA256 digest of the parameters, for reproducibility checks.
    hash: str
    # Optional free-form user notes.
    notes: str = ""

    @staticmethod
    def create(entry_type: str, parameters: Dict[str, Any],
               notes: str = "") -> 'AuditEntry':
        """Build an entry, stamping the current time and a parameter digest.

        Args:
            entry_type: Decision category ('data_locked', 'model_locked',
                'acquisition_locked')
            parameters: Parameters to record
            notes: Optional free-form user notes

        Returns:
            The populated AuditEntry
        """
        # Deterministic digest: canonical JSON (sorted keys) of the
        # parameters, truncated to 16 hex characters.
        canonical = json.dumps(parameters, sort_keys=True, default=str)
        digest = hashlib.sha256(canonical.encode()).hexdigest()[:16]
        return AuditEntry(
            timestamp=datetime.now().isoformat(),
            entry_type=entry_type,
            parameters=parameters,
            hash=digest,
            notes=notes,
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize all fields to a plain dictionary."""
        return asdict(self)

    @staticmethod
    def from_dict(data: Dict[str, Any]) -> 'AuditEntry':
        """Reconstruct an entry from a dictionary produced by to_dict()."""
        return AuditEntry(**data)
139
+
140
+
141
class AuditLog:
    """
    Append-only audit log for optimization decisions.

    This class maintains a complete, immutable history of optimization decisions
    to ensure reproducibility and traceability. Only explicit "lock-in" actions
    add entries, preventing log spam from exploration activities.

    The audit log is structured with:
    - Search space definition (set once)
    - Experimental data table (updated with each lock)
    - Optimization iterations (model + acquisition per iteration)
    """

    def __init__(self):
        """Initialize empty audit log."""
        self.entries: List[AuditEntry] = []
        self.search_space_definition: Optional[Dict[str, Any]] = None
        self.experiment_data: Optional['pd.DataFrame'] = None

    def set_search_space(self, variables: List[Dict[str, Any]]):
        """
        Set the search space definition (should only be called once).

        Later calls are ignored so the recorded search space cannot be
        rewritten after the campaign has started.

        Args:
            variables: List of variable definitions
        """
        if self.search_space_definition is None:
            self.search_space_definition = {'variables': variables}

    def lock_data(self, experiment_data: 'pd.DataFrame', notes: str = "",
                  extra_parameters: Optional[Dict[str, Any]] = None) -> AuditEntry:
        """
        Lock in experimental data snapshot.

        Args:
            experiment_data: DataFrame with all experimental data including Iteration and Reason
            notes: Optional user notes
            extra_parameters: Optional extra keys merged into the entry's
                parameters (e.g. initial design method/count)

        Returns:
            Created AuditEntry
        """
        # Store the experiment data snapshot for markdown/export; copy so later
        # caller-side mutation cannot alter the locked snapshot.
        self.experiment_data = experiment_data.copy()

        # Create hash of data for verification
        data_str = experiment_data.to_json()
        data_hash = hashlib.sha256(data_str.encode()).hexdigest()[:16]

        params: Dict[str, Any] = {
            'n_experiments': len(experiment_data),
            'data_hash': data_hash
        }

        # Merge any extra parameters (e.g., initial design method/count)
        if extra_parameters:
            params.update(extra_parameters)

        entry = AuditEntry.create(
            entry_type='data_locked',
            parameters=params,
            notes=notes
        )
        self.entries.append(entry)
        return entry

    def lock_model(self, backend: str, kernel: str,
                   hyperparameters: Dict[str, Any],
                   cv_metrics: Optional[Dict[str, float]] = None,
                   iteration: Optional[int] = None,
                   notes: str = "") -> AuditEntry:
        """
        Lock in trained model configuration.

        Args:
            backend: Model backend ('sklearn', 'botorch')
            kernel: Kernel type
            hyperparameters: Learned hyperparameters
            cv_metrics: Cross-validation metrics (optional)
            iteration: Iteration number (optional)
            notes: Optional user notes

        Returns:
            Created AuditEntry
        """
        params = {
            'backend': backend,
            'kernel': kernel,
            'hyperparameters': hyperparameters
        }
        # Only record optional fields when supplied, keeping entries compact.
        if cv_metrics is not None:
            params['cv_metrics'] = cv_metrics
        if iteration is not None:
            params['iteration'] = iteration

        entry = AuditEntry.create(
            entry_type='model_locked',
            parameters=params,
            notes=notes
        )
        self.entries.append(entry)
        return entry

    def lock_acquisition(self, strategy: str, parameters: Dict[str, Any],
                         suggestions: List[Dict[str, Any]],
                         iteration: Optional[int] = None,
                         notes: str = "") -> AuditEntry:
        """
        Lock in acquisition function decision.

        Args:
            strategy: Acquisition strategy name
            parameters: Acquisition function parameters
            suggestions: Suggested next experiments
            iteration: Iteration number (optional)
            notes: Optional user notes

        Returns:
            Created AuditEntry
        """
        params = {
            'strategy': strategy,
            'parameters': parameters,
            'suggestions': suggestions
        }
        if iteration is not None:
            params['iteration'] = iteration

        entry = AuditEntry.create(
            entry_type='acquisition_locked',
            parameters=params,
            notes=notes
        )
        self.entries.append(entry)
        return entry

    def get_entries(self, entry_type: Optional[str] = None) -> List[AuditEntry]:
        """
        Get audit entries, optionally filtered by type.

        Args:
            entry_type: Optional filter ('data_locked', 'model_locked', 'acquisition_locked')

        Returns:
            List of AuditEntry objects (a copy, so callers cannot mutate the log)
        """
        if entry_type is None:
            return self.entries.copy()
        return [e for e in self.entries if e.entry_type == entry_type]

    def get_latest(self, entry_type: str) -> Optional[AuditEntry]:
        """
        Get most recent entry of specified type.

        Args:
            entry_type: Entry type to find

        Returns:
            Latest AuditEntry or None if not found
        """
        entries = self.get_entries(entry_type)
        return entries[-1] if entries else None

    def clear(self):
        """
        Clear all entries (use with caution - breaks immutability contract).

        This should only be used when starting a completely new optimization
        campaign within the same session.
        """
        self.entries = []

    def to_dict(self) -> Dict[str, Any]:
        """
        Export audit log to dictionary format.

        Returns:
            Dictionary with 'entries' plus, when set, 'search_space' and
            'experiment_data' (as a list of records)
        """
        # NOTE: fixed return annotation — this method returns a dict, not a list.
        result = {
            'entries': [entry.to_dict() for entry in self.entries]
        }

        if self.search_space_definition is not None:
            result['search_space'] = self.search_space_definition

        if self.experiment_data is not None:
            result['experiment_data'] = self.experiment_data.to_dict(orient='records')

        return result

    def from_dict(self, data: Union[List[Dict[str, Any]], Dict[str, Any]]):
        """
        Import audit log from dictionary format.

        Args:
            data: Dictionary with entries (and optionally search_space and experiment_data)
                or legacy list of entry dictionaries
        """
        # Handle legacy format (list of entries)
        if isinstance(data, list):
            self.entries = [AuditEntry.from_dict(entry) for entry in data]
            return

        # New format (dict with entries, search_space, experiment_data)
        if 'entries' in data:
            self.entries = [AuditEntry.from_dict(entry) for entry in data['entries']]

        if 'search_space' in data:
            self.search_space_definition = data['search_space']

        if 'experiment_data' in data:
            self.experiment_data = pd.DataFrame(data['experiment_data'])

    def to_markdown(self, session_metadata: Optional[Dict[str, Any]] = None) -> str:
        """
        Export audit log to markdown format for publications.

        Args:
            session_metadata: Optional dictionary of session metadata (name, description, tags, created_at, etc.)

        Returns:
            Markdown-formatted audit trail with session metadata, search space, data table, and iterations
        """
        lines = ["# Optimization Audit Trail\n"]

        # If session metadata provided, include a small metadata section
        if session_metadata:
            lines.append("## Session Metadata\n")
            name = session_metadata.get('name', 'Untitled Session')
            created = session_metadata.get('created_at', '')
            last_mod = session_metadata.get('last_modified', '')
            description = session_metadata.get('description', '')
            tags = session_metadata.get('tags', [])

            lines.append(f"- **Name**: {name}")
            if created:
                lines.append(f"- **Created At**: {created}")
            if last_mod:
                lines.append(f"- **Last Modified**: {last_mod}")
            if description:
                lines.append(f"- **Description**: {description}")
            if tags:
                # Tags may arrive as a sequence or a single string.
                if isinstance(tags, (list, tuple)):
                    tags_str = ', '.join(map(str, tags))
                else:
                    tags_str = str(tags)
                lines.append(f"- **Tags**: {tags_str}")

            lines.append("")

        # Section 1: Search Space Definition
        if self.search_space_definition:
            lines.append("## Search Space Definition\n")
            for var in self.search_space_definition['variables']:
                var_type = var['type']
                name = var['name']

                if var_type in ['real', 'integer']:
                    lines.append(f"- **{name}** ({var_type}): [{var.get('min', 'N/A')}, {var.get('max', 'N/A')}]")
                else:  # categorical
                    values = ', '.join(map(str, var.get('values', [])))
                    lines.append(f"- **{name}** (categorical): {{{values}}}")
            lines.append("")

        # Section 2: Experimental Data Table
        if self.experiment_data is not None and len(self.experiment_data) > 0:
            lines.append("## Experimental Data\n")

            # Generate markdown table
            df = self.experiment_data.copy()

            # Reorder columns: Iteration, Reason, then variables, then Output
            col_order = []
            if 'Iteration' in df.columns:
                col_order.append('Iteration')
            if 'Reason' in df.columns:
                col_order.append('Reason')

            # Add variable columns (exclude metadata columns)
            metadata_cols = {'Iteration', 'Reason', 'Output', 'Noise'}
            var_cols = [col for col in df.columns if col not in metadata_cols]
            col_order.extend(var_cols)

            # Add Output
            if 'Output' in df.columns:
                col_order.append('Output')

            # Reorder DataFrame
            df = df[[col for col in col_order if col in df.columns]]

            # Create markdown table
            lines.append("| " + " | ".join(df.columns) + " |")
            lines.append("|" + "|".join(['---'] * len(df.columns)) + "|")

            for _, row in df.iterrows():
                row_vals = []
                for val in row:
                    # Fixed-width float formatting keeps the table readable.
                    if isinstance(val, float):
                        row_vals.append(f"{val:.4f}")
                    else:
                        row_vals.append(str(val))
                lines.append("| " + " | ".join(row_vals) + " |")

            lines.append("")

        # Section 3: Optimization Iterations
        if len(self.entries) > 0:
            lines.append("## Optimization Iterations\n")

            # Group entries by iteration and track timestamps
            iterations: Dict[Union[int, str], Dict[str, Any]] = {}

            for entry in self.entries:
                # Prefer explicit iteration in parameters when available
                iteration = entry.parameters.get('iteration', None)

                # Special-case data_locked entries that include initial-design metadata
                if entry.entry_type == 'data_locked' and 'initial_design_method' in entry.parameters:
                    iteration = 0

                if iteration is None:
                    iteration_key = 'N/A'
                else:
                    iteration_key = iteration

                if iteration_key not in iterations:
                    iterations[iteration_key] = {
                        'model': None,
                        'acquisition': None,
                        'data': None,
                        'timestamp': entry.timestamp
                    }

                if entry.entry_type == 'model_locked':
                    iterations[iteration_key]['model'] = entry
                elif entry.entry_type == 'acquisition_locked':
                    iterations[iteration_key]['acquisition'] = entry
                elif entry.entry_type == 'data_locked':
                    iterations[iteration_key]['data'] = entry

            # Sort iterations: numeric iteration keys first (ascending), then 'N/A'
            def sort_key(item):
                iter_num, data = item
                is_na = (iter_num == 'N/A')
                # Primary: whether N/A (False comes before True), secondary: iteration number or large sentinel
                num_key = iter_num if isinstance(iter_num, int) else 999999
                # Use the stored timestamp as tie-breaker
                return (is_na, num_key, data.get('timestamp', ''))

            # Output each iteration (skip N/A entries if they have no data)
            for iter_num, iter_data in sorted(iterations.items(), key=sort_key):
                # Skip N/A iteration if it has no model or acquisition
                if iter_num == 'N/A' and not iter_data['model'] and not iter_data['acquisition']:
                    continue

                lines.append(f"### Iteration {iter_num}\n")

                # Model information
                if iter_data.get('model'):
                    entry = iter_data['model']
                    params = entry.parameters

                    lines.append(f"**Timestamp**: {entry.timestamp}")
                    lines.append("")

                    # Build kernel string with nu parameter if Matern
                    kernel = params.get('kernel', 'N/A')
                    kernel_str = f"{kernel} kernel"
                    hyperparams = params.get('hyperparameters', {})

                    # Accept either key used by different backends for Matern ν.
                    # (The former extra fallback re-read the same 'matern_nu'
                    # key and could never produce a different value.)
                    if kernel == 'Matern':
                        matern_nu = hyperparams.get('matern_nu') or hyperparams.get('nu')
                        if matern_nu is not None:
                            kernel_str = f"{kernel} kernel (ν={matern_nu})"

                    lines.append(f"**Model**: {params.get('backend', 'N/A')}, {kernel_str}")
                    lines.append("")

                    if 'cv_metrics' in params and params['cv_metrics']:
                        metrics = params['cv_metrics']
                        r2 = metrics.get('r2', 0)
                        rmse = metrics.get('rmse', 0)
                        lines.append(f"**Metrics**: R²={r2:.4f}, RMSE={rmse:.4f}")
                    else:
                        lines.append("**Metrics**: Not available")

                    # Display input/output scaling if provided in hyperparameters
                    input_scale = hyperparams.get('input_scaling') or hyperparams.get('input_transform_type')
                    output_scale = hyperparams.get('output_scaling') or hyperparams.get('output_transform_type')
                    if input_scale is not None or output_scale is not None:
                        lines.append("")
                        lines.append(f"**Input Scaling**: {input_scale if input_scale is not None else 'none'}")
                        lines.append(f"**Output Scaling**: {output_scale if output_scale is not None else 'none'}")

                    if entry.notes:
                        lines.append("")
                        lines.append(f"**Notes**: {entry.notes}")

                    lines.append("")

                # Acquisition information
                if iter_data.get('acquisition'):
                    entry = iter_data['acquisition']
                    params = entry.parameters

                    lines.append(f"**Acquisition**: {params.get('strategy', 'N/A')}")
                    lines.append("")

                    if 'parameters' in params and params['parameters']:
                        acq_params = params['parameters']
                        param_str = ', '.join([f"{k}={v}" for k, v in acq_params.items()])
                        lines.append(f"**Parameters**: {param_str}")
                        lines.append("")

                    if 'suggestions' in params and params['suggestions']:
                        suggestions = params['suggestions']
                        lines.append(f"**Suggested Next**: {suggestions}")

                    if entry.notes:
                        lines.append("")
                        lines.append(f"**Notes**: {entry.notes}")

                    lines.append("")

                # Data information (e.g., initial design)
                if iter_data.get('data'):
                    entry = iter_data['data']
                    params = entry.parameters
                    # If initial design metadata present, print it clearly
                    method = params.get('initial_design_method')
                    n_points = params.get('initial_design_n_points')
                    if method:
                        lines.append(f"**Initial Design**: {method} ({n_points if n_points is not None else params.get('n_experiments', 'N/A')} points)")
                        lines.append("")
                    # Optionally include notes for data lock
                    if entry.notes:
                        lines.append(f"**Notes**: {entry.notes}")
                        lines.append("")

        return "\n".join(lines)

    def __len__(self) -> int:
        """Return number of entries."""
        return len(self.entries)

    def __repr__(self) -> str:
        """String representation."""
        return f"AuditLog({len(self.entries)} entries)"