mlrun 1.7.0rc9__py3-none-any.whl → 1.7.0rc11__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Warning: this version of mlrun has been flagged as potentially problematic.
Files changed (65)
  1. mlrun/__init__.py +1 -0
  2. mlrun/artifacts/model.py +29 -25
  3. mlrun/common/schemas/__init__.py +1 -0
  4. mlrun/common/schemas/alert.py +122 -0
  5. mlrun/common/schemas/auth.py +4 -0
  6. mlrun/common/schemas/client_spec.py +1 -0
  7. mlrun/common/schemas/model_monitoring/constants.py +3 -1
  8. mlrun/config.py +8 -4
  9. mlrun/datastore/base.py +6 -5
  10. mlrun/datastore/sources.py +9 -4
  11. mlrun/datastore/targets.py +11 -3
  12. mlrun/datastore/v3io.py +27 -50
  13. mlrun/db/base.py +44 -2
  14. mlrun/db/httpdb.py +192 -20
  15. mlrun/db/nopdb.py +36 -1
  16. mlrun/execution.py +21 -14
  17. mlrun/feature_store/api.py +6 -3
  18. mlrun/feature_store/feature_set.py +39 -23
  19. mlrun/feature_store/feature_vector.py +2 -1
  20. mlrun/feature_store/steps.py +30 -19
  21. mlrun/features.py +4 -13
  22. mlrun/frameworks/auto_mlrun/auto_mlrun.py +2 -2
  23. mlrun/frameworks/lgbm/__init__.py +1 -1
  24. mlrun/frameworks/lgbm/callbacks/callback.py +2 -4
  25. mlrun/frameworks/lgbm/model_handler.py +1 -1
  26. mlrun/frameworks/pytorch/__init__.py +2 -2
  27. mlrun/frameworks/sklearn/__init__.py +1 -1
  28. mlrun/frameworks/tf_keras/__init__.py +1 -1
  29. mlrun/frameworks/xgboost/__init__.py +1 -1
  30. mlrun/model.py +2 -2
  31. mlrun/model_monitoring/application.py +11 -2
  32. mlrun/model_monitoring/applications/histogram_data_drift.py +3 -3
  33. mlrun/model_monitoring/controller.py +2 -3
  34. mlrun/model_monitoring/stream_processing.py +0 -1
  35. mlrun/model_monitoring/writer.py +32 -0
  36. mlrun/package/packagers_manager.py +1 -0
  37. mlrun/platforms/__init__.py +1 -1
  38. mlrun/platforms/other.py +1 -1
  39. mlrun/projects/operations.py +11 -4
  40. mlrun/projects/project.py +148 -52
  41. mlrun/run.py +72 -40
  42. mlrun/runtimes/mpijob/abstract.py +8 -8
  43. mlrun/runtimes/nuclio/function.py +9 -5
  44. mlrun/runtimes/nuclio/serving.py +9 -8
  45. mlrun/runtimes/pod.py +3 -3
  46. mlrun/secrets.py +6 -2
  47. mlrun/serving/routers.py +3 -1
  48. mlrun/serving/states.py +12 -33
  49. mlrun/serving/v2_serving.py +4 -4
  50. mlrun/utils/helpers.py +1 -1
  51. mlrun/utils/notifications/notification/base.py +12 -0
  52. mlrun/utils/notifications/notification/console.py +2 -0
  53. mlrun/utils/notifications/notification/git.py +3 -1
  54. mlrun/utils/notifications/notification/ipython.py +2 -0
  55. mlrun/utils/notifications/notification/slack.py +41 -13
  56. mlrun/utils/notifications/notification/webhook.py +11 -1
  57. mlrun/utils/retryer.py +2 -2
  58. mlrun/utils/version/version.json +2 -2
  59. {mlrun-1.7.0rc9.dist-info → mlrun-1.7.0rc11.dist-info}/METADATA +1 -1
  60. {mlrun-1.7.0rc9.dist-info → mlrun-1.7.0rc11.dist-info}/RECORD +64 -64
  61. mlrun/datastore/helpers.py +0 -18
  62. {mlrun-1.7.0rc9.dist-info → mlrun-1.7.0rc11.dist-info}/LICENSE +0 -0
  63. {mlrun-1.7.0rc9.dist-info → mlrun-1.7.0rc11.dist-info}/WHEEL +0 -0
  64. {mlrun-1.7.0rc9.dist-info → mlrun-1.7.0rc11.dist-info}/entry_points.txt +0 -0
  65. {mlrun-1.7.0rc9.dist-info → mlrun-1.7.0rc11.dist-info}/top_level.txt +0 -0
mlrun/feature_store/steps.py CHANGED
@@ -162,13 +162,19 @@ class MapValues(StepToDict, MLRunStep):
     example::

         # replace the value "U" with '0' in the age column
-        graph.to(MapValues(mapping={'age': {'U': '0'}}, with_original_features=True))
+        graph.to(MapValues(mapping={"age": {"U": "0"}}, with_original_features=True))

         # replace integers, example
-        graph.to(MapValues(mapping={'not': {0: 1, 1: 0}}))
+        graph.to(MapValues(mapping={"not": {0: 1, 1: 0}}))

         # replace by range, use -inf and inf for extended range
-        graph.to(MapValues(mapping={'numbers': {'ranges': {'negative': [-inf, 0], 'positive': [0, inf]}}}))
+        graph.to(
+            MapValues(
+                mapping={
+                    "numbers": {"ranges": {"negative": [-inf, 0], "positive": [0, inf]}}
+                }
+            )
+        )

     :param mapping: a dict with entry per column and the associated old/new values map
     :param with_original_features: set to True to keep the original features
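
The ranged form shown above buckets a numeric column into named labels. A minimal end-to-end sketch of the new call style, assuming mlrun and pandas are installed (the feature-set name, entity, and sample data below are illustrative, not from the diff)::

    from math import inf

    import pandas as pd

    import mlrun.feature_store as fstore
    from mlrun.feature_store.steps import MapValues

    # illustrative input: an entity column plus one numeric column to bucket
    df = pd.DataFrame({"id": [1, 2, 3], "numbers": [-5, 0, 7]})

    bucket_set = fstore.FeatureSet(
        "bucket-demo", entities=[fstore.Entity("id")], engine="pandas"
    )
    bucket_set.graph.to(
        MapValues(
            mapping={"numbers": {"ranges": {"negative": [-inf, 0], "positive": [0, inf]}}}
        )
    )

    # ingest runs the graph; "numbers" comes back as its bucket label
    print(bucket_set.ingest(df))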
@@ -424,8 +430,10 @@ class OneHotEncoder(StepToDict, MLRunStep):

     example::

-        mapping = {'category': ['food', 'health', 'transportation'],
-                   'gender': ['male', 'female']}
+        mapping = {
+            "category": ["food", "health", "transportation"],
+            "gender": ["male", "female"],
+        }
         graph.to(OneHotEncoder(mapping=one_hot_encoder_mapping))

     :param mapping: a dict of per column categories (to map to binary fields)
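
Note that the docstring example, both before and after the reflow, defines `mapping` but passes `one_hot_encoder_mapping`. A consistent, runnable sketch under the same assumptions as above (names and data are illustrative)::

    import pandas as pd

    import mlrun.feature_store as fstore
    from mlrun.feature_store.steps import OneHotEncoder

    df = pd.DataFrame({"id": [1, 2], "gender": ["male", "female"]})

    one_hot_encoder_mapping = {"gender": ["male", "female"]}

    encoded_set = fstore.FeatureSet(
        "onehot-demo", entities=[fstore.Entity("id")], engine="pandas"
    )
    encoded_set.graph.to(OneHotEncoder(mapping=one_hot_encoder_mapping))

    # each listed category becomes a binary column, e.g. gender_male / gender_female
    print(encoded_set.ingest(df))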
@@ -542,10 +550,12 @@ class DateExtractor(StepToDict, MLRunStep):

         # (taken from the fraud-detection end-to-end feature store demo)
         # Define the Transactions FeatureSet
-        transaction_set = fstore.FeatureSet("transactions",
-                                            entities=[fstore.Entity("source")],
-                                            timestamp_key='timestamp',
-                                            description="transactions feature set")
+        transaction_set = fstore.FeatureSet(
+            "transactions",
+            entities=[fstore.Entity("source")],
+            timestamp_key="timestamp",
+            description="transactions feature set",
+        )

         # Get FeatureSet computation graph
         transaction_graph = transaction_set.graph
@@ -553,11 +563,11 @@ class DateExtractor(StepToDict, MLRunStep):
         # Add the custom `DateExtractor` step
         # to the computation graph
         transaction_graph.to(
-            class_name='DateExtractor',
-            name='Extract Dates',
-            parts = ['hour', 'day_of_week'],
-            timestamp_col = 'timestamp',
-        )
+            class_name="DateExtractor",
+            name="Extract Dates",
+            parts=["hour", "day_of_week"],
+            timestamp_col="timestamp",
+        )

     :param parts: list of pandas style date-time parts you want to extract.
     :param timestamp_col: The name of the column containing the timestamps to extract from,
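
The `parts` values are pandas-style date-time attributes (see the :param description above), so each extracted column holds what the pandas `.dt` accessor would produce for that part; a quick plain-pandas sketch::

    import pandas as pd

    ts = pd.Series(pd.to_datetime(["2024-01-15 09:30:00", "2024-01-16 22:05:00"]))

    # the "hour" and "day_of_week" parts used above yield these per-row values
    print(ts.dt.hour.tolist())  # [9, 22]
    print(ts.dt.day_of_week.tolist())  # [0, 1]  (Monday == 0)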
@@ -694,11 +704,12 @@ class DropFeatures(StepToDict, MLRunStep):

     example::

-        feature_set = fstore.FeatureSet("fs-new",
-                                        entities=[fstore.Entity("id")],
-                                        description="feature set",
-                                        engine="pandas",
-                                        )
+        feature_set = fstore.FeatureSet(
+            "fs-new",
+            entities=[fstore.Entity("id")],
+            description="feature set",
+            engine="pandas",
+        )
         # Pre-processing graph steps
         feature_set.graph.to(DropFeatures(features=["age"]))
         df_pandas = feature_set.ingest(data)
mlrun/features.py CHANGED
@@ -238,10 +238,7 @@ class Validator(ModelObj):
        from mlrun.features import Validator

        # Add validator to the feature 'bid' with check type
-       quotes_set["bid"].validator = Validator(
-           check_type=True,
-           severity="info"
-       )
+       quotes_set["bid"].validator = Validator(check_type=True, severity="info")

    :param check_type: check feature type e.g. True, False
    :param severity: severity name e.g. info, warning, etc.
@@ -280,10 +277,7 @@ class MinMaxValidator(Validator):

        # Add validator to the feature 'bid', where valid
        # minimal value is 52
-       quotes_set["bid"].validator = MinMaxValidator(
-           min=52,
-           severity="info"
-       )
+       quotes_set["bid"].validator = MinMaxValidator(min=52, severity="info")

    :param check_type: check feature type e.g. True, False
    :param severity: severity name e.g. info, warning, etc.
@@ -344,9 +338,7 @@ class MinMaxLenValidator(Validator):
        # Add length validator to the feature 'ticker', where valid
        # minimal length is 1 and maximal length is 10
        quotes_set["ticker"].validator = MinMaxLenValidator(
-           min=1,
-           max=10,
-           severity="info"
+           min=1, max=10, severity="info"
        )

    :param check_type: check feature type e.g. True, False
@@ -408,8 +400,7 @@ class RegexValidator(Validator):
        # expression '(\b[A-Za-z]{1}[0-9]{7}\b)' where valid values are
        # e.g. A1234567, z9874563, etc.
        quotes_set["name"].validator = RegexValidator(
-           regex=r"(\b[A-Za-z]{1}[0-9]{7}\b)",
-           severity="info"
+           regex=r"(\b[A-Za-z]{1}[0-9]{7}\b)", severity="info"
        )

    :param check_type: check feature type e.g. True, False
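
All four validator hunks are the same quote-and-layout reflow; behavior is unchanged. For reference, attaching the reformatted validators to a feature set, assuming a `quotes_set` feature set with `bid` and `ticker` features as in the docstrings::

    from mlrun.features import MinMaxLenValidator, MinMaxValidator

    # numeric range check on "bid": values under 52 are reported at "info" severity
    quotes_set["bid"].validator = MinMaxValidator(min=52, severity="info")

    # string length check on "ticker": valid lengths are 1..10 characters
    quotes_set["ticker"].validator = MinMaxLenValidator(min=1, max=10, severity="info")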
mlrun/frameworks/auto_mlrun/auto_mlrun.py CHANGED
@@ -363,7 +363,7 @@ class AutoMLRun:

            {
                "/.../custom_model.py": "MyModel",
-               "/.../custom_objects.py": ["object1", "object2"]
+               "/.../custom_objects.py": ["object1", "object2"],
            }

        All the paths will be accessed from the given 'custom_objects_directory',
@@ -464,7 +464,7 @@ class AutoMLRun:

            {
                "/.../custom_model.py": "MyModel",
-               "/.../custom_objects.py": ["object1", "object2"]
+               "/.../custom_objects.py": ["object1", "object2"],
            }

        All the paths will be accessed from the given 'custom_objects_directory',
mlrun/frameworks/lgbm/__init__.py CHANGED
@@ -241,7 +241,7 @@ def apply_mlrun(

            {
                "/.../custom_model.py": "MyModel",
-               "/.../custom_objects.py": ["object1", "object2"]
+               "/.../custom_objects.py": ["object1", "object2"],
            }

        All the paths will be accessed from the given 'custom_objects_directory', meaning
mlrun/frameworks/lgbm/callbacks/callback.py CHANGED
@@ -63,11 +63,9 @@ class Callback(ABC):
            def on_train_end(self):
                print("{self.name}: Done training!")

+
        apply_mlrun()
-        lgb.train(
-            ...,
-            callbacks=[ExampleCallback(name="Example")]
-        )
+        lgb.train(..., callbacks=[ExampleCallback(name="Example")])
    """

    def __init__(self, order: int = 10, before_iteration: bool = False):
mlrun/frameworks/lgbm/model_handler.py CHANGED
@@ -103,7 +103,7 @@ class LGBMModelHandler(MLModelHandler):

            {
                "/.../custom_model.py": "MyModel",
-               "/.../custom_objects.py": ["object1", "object2"]
+               "/.../custom_objects.py": ["object1", "object2"],
            }

        All the paths will be accessed from the given 'custom_objects_directory',
mlrun/frameworks/pytorch/__init__.py CHANGED
@@ -112,7 +112,7 @@ def train(

            {
                "/.../custom_optimizer.py": "optimizer",
-               "/.../custom_layers.py": ["layer1", "layer2"]
+               "/.../custom_layers.py": ["layer1", "layer2"],
            }

        All the paths will be accessed from the given 'custom_objects_directory',
@@ -264,7 +264,7 @@ def evaluate(

            {
                "/.../custom_optimizer.py": "optimizer",
-               "/.../custom_layers.py": ["layer1", "layer2"]
+               "/.../custom_layers.py": ["layer1", "layer2"],
            }

        All the paths will be accessed from the given 'custom_objects_directory', meaning
mlrun/frameworks/sklearn/__init__.py CHANGED
@@ -92,7 +92,7 @@ def apply_mlrun(

            {
                "/.../custom_model.py": "MyModel",
-               "/.../custom_objects.py": ["object1", "object2"]
+               "/.../custom_objects.py": ["object1", "object2"],
            }

        All the paths will be accessed from the given 'custom_objects_directory', meaning
mlrun/frameworks/tf_keras/__init__.py CHANGED
@@ -85,7 +85,7 @@ def apply_mlrun(

            {
                "/.../custom_optimizer.py": "optimizer",
-               "/.../custom_layers.py": ["layer1", "layer2"]
+               "/.../custom_layers.py": ["layer1", "layer2"],
            }

        All the paths will be accessed from the given 'custom_objects_directory',
mlrun/frameworks/xgboost/__init__.py CHANGED
@@ -90,7 +90,7 @@ def apply_mlrun(

            {
                "/.../custom_model.py": "MyModel",
-               "/.../custom_objects.py": ["object1", "object2"]
+               "/.../custom_objects.py": ["object1", "object2"],
            }

        All the paths will be accessed from the given 'custom_objects_directory', meaning
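
The framework hunks above all touch the same docstring fragment: the `custom_objects` mapping from source files to the objects defined in them (only a trailing comma was added). A minimal sketch of such a mapping; the file names echo the docstrings' own placeholders, with the elided "/.../" prefixes left out::

    # each file maps to one object name, or to a list of names defined in it;
    # all paths are resolved relative to the given custom_objects_directory
    custom_objects = {
        "custom_model.py": "MyModel",
        "custom_objects.py": ["object1", "object2"],
    }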
mlrun/model.py CHANGED
@@ -931,7 +931,7 @@ class RunSpec(ModelObj):

        >>> run_spec.inputs = {
        ...     "my_input": "...",
-       ...     "my_hinted_input : pandas.DataFrame": "..."
+       ...     "my_hinted_input : pandas.DataFrame": "...",
        ... }

    :param inputs: The inputs to set.
@@ -1275,7 +1275,7 @@ class RunTemplate(ModelObj):

        example::

-            grid_params = {"p1": [2,4,1], "p2": [10,20]}
+            grid_params = {"p1": [2, 4, 1], "p2": [10, 20]}
            task = mlrun.new_task("grid-search")
            task.with_hyper_params(grid_params, selector="max.accuracy")
    """
mlrun/model_monitoring/application.py CHANGED
@@ -93,7 +93,11 @@ class ModelMonitoringApplicationBase(StepToDict, ABC):
                endpoint_id: str,
                output_stream_uri: str,
            ) -> ModelMonitoringApplicationResult:
-               self.context.log_artifact(TableArtifact("sample_df_stats", df=self.dict_to_histogram(sample_df_stats)))
+               self.context.log_artifact(
+                   TableArtifact(
+                       "sample_df_stats", df=self.dict_to_histogram(sample_df_stats)
+                   )
+               )
                return ModelMonitoringApplicationResult(
                    name="data_drift_test",
                    value=0.5,
@@ -101,6 +105,7 @@ class ModelMonitoringApplicationBase(StepToDict, ABC):
                    status=mm_constant.ResultStatusApp.detected,
                )

+
        # mlrun: end-code
    """

@@ -203,7 +208,11 @@ class ModelMonitoringApplicationBase(StepToDict, ABC):
            json.loads(event[mm_constant.ApplicationEvent.FEATURE_STATS]),
            ParquetTarget(
                path=event[mm_constant.ApplicationEvent.SAMPLE_PARQUET_PATH]
-           ).as_df(start_time=start_time, end_time=end_time, time_column="timestamp"),
+           ).as_df(
+               start_time=start_time,
+               end_time=end_time,
+               time_column=mm_constant.FeatureSetFeatures.time_stamp(),
+           ),
            start_time,
            end_time,
            pd.Timestamp(event[mm_constant.ApplicationEvent.LAST_REQUEST]),
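
The hunk above swaps the hardcoded "timestamp" column name for the shared `mm_constant.FeatureSetFeatures.time_stamp()` constant; the read itself is unchanged. For reference, a standalone sketch of the time-windowed Parquet read it performs (the path and window bounds are illustrative)::

    import pandas as pd

    from mlrun.datastore.targets import ParquetTarget

    # read only the rows whose time column falls inside the monitoring window
    window_df = ParquetTarget(path="v3io:///projects/demo/monitoring.parquet").as_df(
        start_time=pd.Timestamp("2024-01-01"),
        end_time=pd.Timestamp("2024-01-02"),
        time_column="timestamp",
    )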
mlrun/model_monitoring/applications/histogram_data_drift.py CHANGED
@@ -22,8 +22,8 @@ import mlrun.artifacts
 import mlrun.common.model_monitoring.helpers
 import mlrun.model_monitoring.features_drift_table as mm_drift_table
 from mlrun.common.schemas.model_monitoring.constants import (
-    MLRUN_HISTOGRAM_DATA_DRIFT_APP_NAME,
     EventFieldType,
+    HistogramDataDriftApplicationConstants,
     ResultKindApp,
     ResultStatusApp,
 )
@@ -94,7 +94,7 @@ class HistogramDataDriftApplication(ModelMonitoringApplicationBase):
    and the status is returned.
    """

-   NAME: Final[str] = MLRUN_HISTOGRAM_DATA_DRIFT_APP_NAME
+   NAME: Final[str] = HistogramDataDriftApplicationConstants.NAME
    METRIC_KIND: Final[ResultKindApp] = ResultKindApp.data_drift

    _REQUIRED_METRICS = {HellingerDistance, TotalVarianceDistance}
@@ -148,7 +148,7 @@ class HistogramDataDriftApplication(ModelMonitoringApplicationBase):
        status = self._value_classifier.value_to_status(value)
        results.append(
            ModelMonitoringApplicationResult(
-               name="general_drift",
+               name=HistogramDataDriftApplicationConstants.GENERAL_RESULT_NAME,
                value=value,
                kind=self.METRIC_KIND,
                status=status,
mlrun/model_monitoring/controller.py CHANGED
@@ -354,7 +354,7 @@ class MonitoringApplicationController:
                    app.status.state == "ready"
                    # workaround for the default app, as its `status.state` is `None`
                    or app.metadata.name
-                   == mm_constants.MLRUN_HISTOGRAM_DATA_DRIFT_APP_NAME
+                   == mm_constants.HistogramDataDriftApplicationConstants.NAME
                )
            }
        )
@@ -502,8 +502,7 @@ class MonitoringApplicationController:

        # Get the current stats:
        current_stats = calculate_inputs_statistics(
-           sample_set_statistics=feature_stats,
-           inputs=df,
+           sample_set_statistics=feature_stats, inputs=df
        )

        cls._push_to_applications(
mlrun/model_monitoring/stream_processing.py CHANGED
@@ -352,7 +352,6 @@ class EventStreamProcessor:
            rate="10/m",
            time_col=EventFieldType.TIMESTAMP,
            container=self.tsdb_container,
-           access_key=self.v3io_access_key,
            v3io_frames=self.v3io_framesd,
            infer_columns_from_data=True,
            index_cols=[
mlrun/model_monitoring/writer.py CHANGED
@@ -23,6 +23,7 @@ from v3io_frames.errors import Error as V3IOFramesError
 from v3io_frames.frames_pb2 import IGNORE

 import mlrun.common.model_monitoring
+import mlrun.common.schemas.alert as alert_constants
 import mlrun.model_monitoring
 import mlrun.model_monitoring.db.stores
 import mlrun.utils.v3io_clients
@@ -171,6 +172,29 @@ class ModelMonitoringWriter(StepToDict):
            event=event,
        )

+    @staticmethod
+    def _generate_event_on_drift(
+        uid: str, drift_status: str, drift_value: float, project_name: str
+    ):
+        if (
+            drift_status == ResultStatusApp.detected
+            or drift_status == ResultStatusApp.potential_detection
+        ):
+            entity = {
+                "kind": alert_constants.EventEntityKind.MODEL,
+                "project": project_name,
+                "id": uid,
+            }
+            event_kind = (
+                alert_constants.EventKind.DRIFT_DETECTED
+                if drift_status == ResultStatusApp.detected
+                else alert_constants.EventKind.DRIFT_SUSPECTED
+            )
+            event_data = mlrun.common.schemas.Event(
+                kind=event_kind, entity=entity, value=drift_value
+            )
+            mlrun.get_run_db().generate_event(event_kind, event_data)
+
    @staticmethod
    def _reconstruct_event(event: _RawEvent) -> _AppResultEvent:
        """
@@ -201,4 +225,12 @@ class ModelMonitoringWriter(StepToDict):
        self._update_tsdb(event)
        self._update_kv_db(event)
        _Notifier(event=event, notification_pusher=self._custom_notifier).notify()
+
+        if mlrun.mlconf.alerts.mode == mlrun.common.schemas.alert.AlertsModes.enabled:
+            self._generate_event_on_drift(
+                event[WriterEvent.ENDPOINT_ID],
+                event[WriterEvent.RESULT_STATUS],
+                event[WriterEvent.RESULT_VALUE],
+                self.project,
+            )
        logger.info("Completed event DB writes")
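
The two writer hunks are the most substantive change in this batch: when `mlconf.alerts.mode` is enabled, every written result whose status is `detected` or `potential_detection` now also emits a `DRIFT_DETECTED` or `DRIFT_SUSPECTED` event through the run DB. A dependency-free sketch of the status-to-event mapping; the enum names come from the diff, but the numeric values and the standalone framing are assumptions::

    from enum import IntEnum
    from typing import Optional


    class ResultStatusApp(IntEnum):
        # names taken from the diff; numeric values are assumed
        no_detection = 0
        potential_detection = 1
        detected = 2


    def drift_event_kind(status: ResultStatusApp) -> Optional[str]:
        """Mirror the writer's branching: detected -> DRIFT_DETECTED,
        potential_detection -> DRIFT_SUSPECTED, anything else -> no event."""
        if status is ResultStatusApp.detected:
            return "DRIFT_DETECTED"
        if status is ResultStatusApp.potential_detection:
            return "DRIFT_SUSPECTED"
        return None


    assert drift_event_kind(ResultStatusApp.detected) == "DRIFT_DETECTED"
    assert drift_event_kind(ResultStatusApp.no_detection) is None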
mlrun/package/packagers_manager.py CHANGED
@@ -92,6 +92,7 @@ class PackagersManager:
        from mlrun import Packager
        from x import XPackager

+
        class YPackager(Packager):
            pass

mlrun/platforms/__init__.py CHANGED
@@ -48,7 +48,7 @@ def watch_stream(

    example::

-       watch_stream('v3io:///users/admin/mystream')
+       watch_stream("v3io:///users/admin/mystream")

    :param url: stream url
    :param shard_ids: range or list of shard IDs
mlrun/platforms/other.py CHANGED
@@ -33,7 +33,7 @@ def mount_pvc(pvc_name=None, volume_name="pipeline", volume_mount_path="/mnt/pip
    Usage::

        train = train_op(...)
-       train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline'))
+       train.apply(mount_pvc("claim-name", "pipeline", "/mnt/pipeline"))
    """
    if "MLRUN_PVC_MOUNT" in os.environ:
        mount = os.environ.get("MLRUN_PVC_MOUNT")
mlrun/projects/operations.py CHANGED
@@ -95,8 +95,11 @@ def run_function(
        MODEL_CLASS = "sklearn.ensemble.RandomForestClassifier"
        DATA_PATH = "s3://bigdata/data.parquet"
        function = mlrun.import_function("hub://auto-trainer")
-       run1 = run_function(function, params={"label_columns": LABELS, "model_class": MODEL_CLASS},
-                           inputs={"dataset": DATA_PATH})
+       run1 = run_function(
+           function,
+           params={"label_columns": LABELS, "model_class": MODEL_CLASS},
+           inputs={"dataset": DATA_PATH},
+       )

    example (use with project)::

@@ -115,8 +118,12 @@ def run_function(
        @dsl.pipeline(name="test pipeline", description="test")
        def my_pipe(url=""):
            run1 = run_function("loaddata", params={"url": url}, outputs=["data"])
-           run2 = run_function("train", params={"label_columns": LABELS, "model_class": MODEL_CLASS},
-                               inputs={"dataset": run1.outputs["data"]})
+           run2 = run_function(
+               "train",
+               params={"label_columns": LABELS, "model_class": MODEL_CLASS},
+               inputs={"dataset": run1.outputs["data"]},
+           )
+

        project.run(workflow_handler=my_pipe, arguments={"param1": 7})