uipath 2.1.52__py3-none-any.whl → 2.1.54__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. uipath/_cli/_evals/{_evaluators/_evaluator_factory.py → _evaluator_factory.py} +24 -23
  2. uipath/_cli/_evals/_models/_evaluation_set.py +23 -18
  3. uipath/_cli/_evals/_models/_evaluator_base_params.py +16 -0
  4. uipath/_cli/_evals/_models/_output.py +85 -0
  5. uipath/_cli/_evals/_runtime.py +102 -10
  6. uipath/_cli/_runtime/_contracts.py +11 -2
  7. uipath/_cli/_utils/_eval_set.py +1 -1
  8. uipath/_cli/_utils/_studio_project.py +30 -29
  9. uipath/_cli/cli_eval.py +46 -61
  10. uipath/eval/evaluators/__init__.py +15 -0
  11. uipath/eval/evaluators/base_evaluator.py +88 -0
  12. uipath/eval/evaluators/deterministic_evaluator_base.py +53 -0
  13. uipath/eval/evaluators/exact_match_evaluator.py +37 -0
  14. uipath/{_cli/_evals/_evaluators/_json_similarity_evaluator.py → eval/evaluators/json_similarity_evaluator.py} +23 -40
  15. uipath/eval/evaluators/llm_as_judge_evaluator.py +137 -0
  16. uipath/eval/evaluators/trajectory_evaluator.py +36 -0
  17. uipath/eval/models/__init__.py +19 -0
  18. uipath/{_cli/_evals/_models/_evaluators.py → eval/models/models.py} +67 -43
  19. {uipath-2.1.52.dist-info → uipath-2.1.54.dist-info}/METADATA +1 -1
  20. {uipath-2.1.52.dist-info → uipath-2.1.54.dist-info}/RECORD +23 -23
  21. uipath/_cli/_evals/_evaluators/__init__.py +0 -22
  22. uipath/_cli/_evals/_evaluators/_deterministic_evaluator_base.py +0 -46
  23. uipath/_cli/_evals/_evaluators/_evaluator_base.py +0 -124
  24. uipath/_cli/_evals/_evaluators/_exact_match_evaluator.py +0 -40
  25. uipath/_cli/_evals/_evaluators/_llm_as_judge_evaluator.py +0 -183
  26. uipath/_cli/_evals/_evaluators/_trajectory_evaluator.py +0 -48
  27. uipath/_cli/_evals/_models/__init__.py +0 -18
  28. uipath/_cli/_evals/_models/_agent_execution_output.py +0 -14
  29. uipath/_cli/_evals/progress_reporter.py +0 -304
  30. {uipath-2.1.52.dist-info → uipath-2.1.54.dist-info}/WHEEL +0 -0
  31. {uipath-2.1.52.dist-info → uipath-2.1.54.dist-info}/entry_points.txt +0 -0
  32. {uipath-2.1.52.dist-info → uipath-2.1.54.dist-info}/licenses/LICENSE +0 -0
uipath/_cli/_evals/{_evaluators/_evaluator_factory.py → _evaluator_factory.py}

@@ -1,18 +1,21 @@
 from typing import Any, Dict

-from .._models import EvaluatorCategory, EvaluatorType
-from ._evaluator_base import EvaluatorBase, EvaluatorBaseParams
-from ._exact_match_evaluator import ExactMatchEvaluator
-from ._json_similarity_evaluator import JsonSimilarityEvaluator
-from ._llm_as_judge_evaluator import LlmAsAJudgeEvaluator
-from ._trajectory_evaluator import TrajectoryEvaluator
+from uipath._cli._evals._models._evaluator_base_params import EvaluatorBaseParams
+from uipath.eval.evaluators import (
+    BaseEvaluator,
+    ExactMatchEvaluator,
+    JsonSimilarityEvaluator,
+    LlmAsAJudgeEvaluator,
+    TrajectoryEvaluator,
+)
+from uipath.eval.models.models import EvaluatorCategory, EvaluatorType


 class EvaluatorFactory:
     """Factory class for creating evaluator instances based on configuration."""

-    @staticmethod
-    def create_evaluator(data: Dict[str, Any]) -> EvaluatorBase:
+    @classmethod
+    def create_evaluator(cls, data: Dict[str, Any]) -> BaseEvaluator[Any]:
         """Create an evaluator instance from configuration data.

         Args:
@@ -25,13 +28,15 @@ class EvaluatorFactory:
             ValueError: If category is unknown or required fields are missing
         """
         # Extract common fields
-        evaluator_id = data.get("id")
-        if not evaluator_id:
+        name = data.get("name", "")
+        if not name:
+            raise ValueError("Evaluator configuration must include 'name' field")
+        id = data.get("id", "")
+        if not id:
             raise ValueError("Evaluator configuration must include 'id' field")

         category = EvaluatorCategory.from_int(data.get("category"))
         evaluator_type = EvaluatorType.from_int(data.get("type", EvaluatorType.Unknown))
-        name = data.get("name", "")
         description = data.get("description", "")
         created_at = data.get("createdAt", "")
         updated_at = data.get("updatedAt", "")
@@ -39,7 +44,7 @@ class EvaluatorFactory:

         # Create base parameters
         base_params = EvaluatorBaseParams(
-            evaluator_id=evaluator_id,
+            id=id,
             category=category,
             evaluator_type=evaluator_type,
             name=name,
@@ -49,7 +54,6 @@ class EvaluatorFactory:
             target_output_key=target_output_key,
         )

-        # Create evaluator based on category
         match category:
             case EvaluatorCategory.Deterministic:
                 if evaluator_type == evaluator_type.Equals:
@@ -80,9 +84,8 @@
         base_params: EvaluatorBaseParams, data: Dict[str, Any]
     ) -> ExactMatchEvaluator:
         """Create a deterministic evaluator."""
-        return ExactMatchEvaluator.from_params(
-            base_params,
-            target_output_key=data.get("targetOutputKey", "*"),
+        return ExactMatchEvaluator(
+            **base_params.model_dump(),
         )

     @staticmethod
@@ -90,9 +93,8 @@
         base_params: EvaluatorBaseParams, data: Dict[str, Any]
     ) -> JsonSimilarityEvaluator:
         """Create a deterministic evaluator."""
-        return JsonSimilarityEvaluator.from_params(
-            base_params,
-            target_output_key=data.get("targetOutputKey", "*"),
+        return JsonSimilarityEvaluator(
+            **base_params.model_dump(),
         )

     @staticmethod
@@ -112,16 +114,15 @@
                 "'same-as-agent' model option is not supported by coded agents evaluations. Please select a specific model for the evaluator."
             )

-        return LlmAsAJudgeEvaluator.from_params(
-            base_params,
+        return LlmAsAJudgeEvaluator(
+            **base_params.model_dump(),
             prompt=prompt,
             model=model,
-            target_output_key=data.get("targetOutputKey", "*"),
         )

     @staticmethod
     def _create_trajectory_evaluator(
         base_params: EvaluatorBaseParams, data: Dict[str, Any]
-    ) -> TrajectoryEvaluator:
+    ) -> TrajectoryEvaluator[Any]:
         """Create a trajectory evaluator."""
         raise NotImplementedError()
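For orientation, a minimal sketch of how the relocated factory might be driven by an evaluator JSON definition. The key names mirror what `create_evaluator` reads above; the integer values for "category" and "type" and the concrete field values are assumptions for illustration, not a documented contract.

```python
from uipath._cli._evals._evaluator_factory import EvaluatorFactory

# Hypothetical evaluator configuration. Keys follow what create_evaluator reads
# ("id", "name", "category", "type", "description", "createdAt", "updatedAt",
# "targetOutputKey"); the numeric enum values are assumed.
config = {
    "id": "exact-match-1",
    "name": "Exact match",
    "category": 1,  # assumed to resolve to EvaluatorCategory.Deterministic
    "type": 2,      # assumed to resolve to EvaluatorType.Equals
    "description": "Compare agent output to the expected output verbatim.",
    "createdAt": "2025-01-01T00:00:00Z",
    "updatedAt": "2025-01-01T00:00:00Z",
    "targetOutputKey": "*",
}

evaluator = EvaluatorFactory.create_evaluator(config)  # -> a BaseEvaluator[Any] subclass
```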
uipath/_cli/_evals/_models/_evaluation_set.py

@@ -1,40 +1,45 @@
 from enum import IntEnum
 from typing import Any, Dict, List

-from pydantic import BaseModel, Field
+from pydantic import BaseModel, ConfigDict, Field
+from pydantic.alias_generators import to_camel


 class EvaluationItem(BaseModel):
     """Individual evaluation item within an evaluation set."""

+    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
+
     id: str
     name: str
     inputs: Dict[str, Any]
-    expectedOutput: Dict[str, Any]
-    expectedAgentBehavior: str = ""
-    simulationInstructions: str = ""
-    simulateInput: bool = False
-    inputGenerationInstructions: str = ""
-    simulateTools: bool = False
-    toolsToSimulate: List[str] = Field(default_factory=list)
-    evalSetId: str
-    createdAt: str
-    updatedAt: str
+    expected_output: Dict[str, Any]
+    expected_agent_behavior: str = ""
+    simulation_instructions: str = ""
+    simulate_input: bool = False
+    input_generation_instructions: str = ""
+    simulate_tools: bool = False
+    tools_to_simulate: List[str] = Field(default_factory=list)
+    eval_set_id: str
+    created_at: str
+    updated_at: str


 class EvaluationSet(BaseModel):
     """Complete evaluation set model."""

+    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
+
     id: str
-    fileName: str
-    evaluatorRefs: List[str] = Field(default_factory=list)
+    file_name: str
+    evaluator_refs: List[str] = Field(default_factory=list)
     evaluations: List[EvaluationItem] = Field(default_factory=list)
     name: str
-    batchSize: int = 10
-    timeoutMinutes: int = 20
-    modelSettings: List[Dict[str, Any]] = Field(default_factory=list)
-    createdAt: str
-    updatedAt: str
+    batch_size: int = 10
+    timeout_minutes: int = 20
+    model_settings: List[Dict[str, Any]] = Field(default_factory=list)
+    created_at: str
+    updated_at: str

     def extract_selected_evals(self, eval_ids) -> None:
         selected_evals: list[EvaluationItem] = []
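The field renames above are not a wire-format change: with `alias_generator=to_camel` plus `populate_by_name=True`, the models keep accepting the camelCase keys stored in the eval-set JSON while exposing snake_case attributes in Python. A self-contained sketch of the same pattern (a toy model, not the real `EvaluationSet`, which has more required fields):

```python
from typing import List

from pydantic import BaseModel, ConfigDict, Field
from pydantic.alias_generators import to_camel


class ToyEvalSet(BaseModel):
    # Same config as EvaluationItem/EvaluationSet above: camelCase aliases on the
    # wire, snake_case attribute names inside the code base.
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

    file_name: str
    batch_size: int = 10
    evaluator_refs: List[str] = Field(default_factory=list)


# Parses the camelCase payload that the JSON files use...
s = ToyEvalSet.model_validate(
    {"fileName": "default.json", "batchSize": 5, "evaluatorRefs": ["e1"]}
)
assert s.batch_size == 5

# ...while populate_by_name=True still allows snake_case construction in code,
# and by_alias=True round-trips back to camelCase.
assert ToyEvalSet(file_name="x.json").model_dump(by_alias=True)["fileName"] == "x.json"
```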
uipath/_cli/_evals/_models/_evaluator_base_params.py

@@ -0,0 +1,16 @@
+from pydantic import BaseModel
+
+from uipath.eval.models.models import EvaluatorCategory, EvaluatorType
+
+
+class EvaluatorBaseParams(BaseModel):
+    """Parameters for initializing the base evaluator."""
+
+    id: str
+    category: EvaluatorCategory
+    evaluator_type: EvaluatorType
+    name: str
+    description: str
+    created_at: str
+    updated_at: str
+    target_output_key: str
uipath/_cli/_evals/_models/_output.py

@@ -0,0 +1,85 @@
+from typing import List, Optional
+
+from opentelemetry.sdk.trace import ReadableSpan
+from pydantic import BaseModel, ConfigDict, model_serializer
+from pydantic.alias_generators import to_camel
+
+from uipath._cli._runtime._contracts import UiPathRuntimeResult
+from uipath.eval.models.models import EvaluationResult, ScoreType
+
+
+class UiPathEvalRunExecutionOutput(BaseModel):
+    """Result of a single agent response."""
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    execution_time: float
+    spans: list[ReadableSpan]
+    result: UiPathRuntimeResult
+
+
+class EvaluationResultDto(BaseModel):
+    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
+
+    score: float
+    details: Optional[str] = None
+    evaluation_time: Optional[float] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, serializer, info):
+        data = serializer(self)
+        if self.details is None and isinstance(data, dict):
+            data.pop("details", None)
+        return data
+
+    @classmethod
+    def from_evaluation_result(
+        cls, evaluation_result: EvaluationResult
+    ) -> "EvaluationResultDto":
+        score_type = evaluation_result.score_type
+        score: float
+        if score_type == ScoreType.BOOLEAN:
+            score = 100 if evaluation_result.score else 0
+        elif score_type == ScoreType.ERROR:
+            score = 0
+        else:
+            score = evaluation_result.score
+
+        return cls(
+            score=score,
+            details=evaluation_result.details,
+            evaluation_time=evaluation_result.evaluation_time,
+        )
+
+
+class EvaluationRunResultDto(BaseModel):
+    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
+
+    evaluator_name: str
+    result: EvaluationResultDto
+
+
+class EvaluationRunResult(BaseModel):
+    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
+
+    evaluation_name: str
+    evaluation_run_results: List[EvaluationRunResultDto]
+
+
+class UiPathEvalOutput(BaseModel):
+    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
+
+    evaluation_set_name: str
+    score: float
+    evaluation_set_results: List[EvaluationRunResult]
+
+    def compute_average_score(self) -> None:
+        total_score = 0.0
+        total_count = 0
+
+        for evaluation_set_result in self.evaluation_set_results:
+            for evaluation_run_result in evaluation_set_result.evaluation_run_results:
+                total_score += evaluation_run_result.result.score
+                total_count += 1
+
+        self.score = total_score / total_count if total_count > 0 else 0.0
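As a rough illustration of what these DTOs emit, the following standalone model copies the `EvaluationResultDto` serialization pattern from the hunk above (camelCase aliases plus a wrap serializer that drops `details` when it is unset); it avoids the uipath imports so it can run on its own.

```python
from typing import Optional

from pydantic import BaseModel, ConfigDict, model_serializer
from pydantic.alias_generators import to_camel


class ResultDtoSketch(BaseModel):
    """Standalone copy of the EvaluationResultDto serialization behaviour."""

    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

    score: float
    details: Optional[str] = None
    evaluation_time: Optional[float] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, serializer, info):
        data = serializer(self)
        # Omit "details" entirely when it was never set, instead of emitting null.
        if self.details is None and isinstance(data, dict):
            data.pop("details", None)
        return data


print(ResultDtoSketch(score=100, evaluation_time=0.2).model_dump(by_alias=True))
# {'score': 100.0, 'evaluationTime': 0.2}
print(ResultDtoSketch(score=0, details="mismatch").model_dump(by_alias=True))
# {'score': 0.0, 'details': 'mismatch', 'evaluationTime': None}
```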
uipath/_cli/_evals/_runtime.py

@@ -1,10 +1,15 @@
+import json
 from collections import defaultdict
+from pathlib import Path
 from time import time
-from typing import Dict, Generic, List, Optional, Sequence, TypeVar
+from typing import Any, Dict, Generic, List, Optional, Sequence, TypeVar

 from opentelemetry.sdk.trace import ReadableSpan
 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult

+from ...eval.evaluators import BaseEvaluator
+from ...eval.models import EvaluationResult
+from ...eval.models.models import AgentExecution
 from .._runtime._contracts import (
     UiPathBaseRuntime,
     UiPathRuntimeContext,
@@ -13,8 +18,15 @@ from .._runtime._contracts import (
     UiPathRuntimeStatus,
 )
 from .._utils._eval_set import EvalHelpers
-from ._models import EvaluationItem
-from ._models._agent_execution_output import UiPathEvalRunExecutionOutput
+from ._evaluator_factory import EvaluatorFactory
+from ._models._evaluation_set import EvaluationItem, EvaluationSet
+from ._models._output import (
+    EvaluationResultDto,
+    EvaluationRunResult,
+    EvaluationRunResultDto,
+    UiPathEvalOutput,
+    UiPathEvalRunExecutionOutput,
+)

 T = TypeVar("T", bound=UiPathBaseRuntime)
 C = TypeVar("C", bound=UiPathRuntimeContext)
@@ -86,15 +98,36 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
         evaluation_set = EvalHelpers.load_eval_set(
             self.context.eval_set, self.context.eval_ids
         )
-        execution_output_list: list[UiPathEvalRunExecutionOutput] = []
+        evaluators = self._load_evaluators(evaluation_set)
+        results = UiPathEvalOutput(
+            evaluation_set_name=evaluation_set.name, score=0, evaluation_set_results=[]
+        )
         for eval_item in evaluation_set.evaluations:
-            execution_output = await self.execute_runtime(eval_item)
-            execution_output_list.append(execution_output)
-
+            evaluation_run_results = EvaluationRunResult(
+                evaluation_name=eval_item.name, evaluation_run_results=[]
+            )
+
+            results.evaluation_set_results.append(evaluation_run_results)
+            agent_execution_output = await self.execute_runtime(eval_item)
+            # we run each evaluator on the agent_output
+            for evaluator in evaluators:
+                evaluation_result = await self.run_evaluator(
+                    evaluator=evaluator,
+                    execution_output=agent_execution_output,
+                    eval_item=eval_item,
+                )
+                evaluation_run_results.evaluation_run_results.append(
+                    EvaluationRunResultDto(
+                        evaluator_name=evaluator.name,
+                        result=EvaluationResultDto.from_evaluation_result(
+                            evaluation_result
+                        ),
+                    )
+                )
+
+        results.compute_average_score()
         self.context.result = UiPathRuntimeResult(
-            output={
-                "results": execution_output_list,
-            },
+            output={**results.model_dump(by_alias=True)},
             status=UiPathRuntimeStatus.SUCCESSFUL,
         )

@@ -128,6 +161,65 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
             result=result,
         )

+    async def run_evaluator(
+        self,
+        evaluator: BaseEvaluator[Any],
+        execution_output: UiPathEvalRunExecutionOutput,
+        eval_item: EvaluationItem,
+    ) -> EvaluationResult:
+        agent_execution = AgentExecution(
+            agent_input=eval_item.inputs,
+            agent_output=execution_output.result.output or {},
+            agent_trace=execution_output.spans,
+        )
+
+        result = await evaluator.evaluate(
+            agent_execution=agent_execution,
+            # at the moment evaluation_criteria is always the expected output
+            evaluation_criteria=eval_item.expected_output,
+        )
+
+        return result
+
+    def _load_evaluators(
+        self, evaluation_set: EvaluationSet
+    ) -> List[BaseEvaluator[Any]]:
+        """Load evaluators referenced by the evaluation set."""
+        evaluators = []
+        evaluators_dir = Path(self.context.eval_set).parent.parent / "evaluators"  # type: ignore
+        evaluator_refs = set(evaluation_set.evaluator_refs)
+        found_evaluator_ids = set()
+
+        for file in evaluators_dir.glob("*.json"):
+            try:
+                with open(file, "r", encoding="utf-8") as f:
+                    data = json.load(f)
+            except json.JSONDecodeError as e:
+                raise ValueError(
+                    f"Invalid JSON in evaluator file '{file}': {str(e)}. "
+                    f"Please check the file for syntax errors."
+                ) from e
+
+            try:
+                evaluator_id = data.get("id")
+                if evaluator_id in evaluator_refs:
+                    evaluator = EvaluatorFactory.create_evaluator(data)
+                    evaluators.append(evaluator)
+                    found_evaluator_ids.add(evaluator_id)
+            except Exception as e:
+                raise ValueError(
+                    f"Failed to create evaluator from file '{file}': {str(e)}. "
+                    f"Please verify the evaluator configuration."
+                ) from e
+
+        missing_evaluators = evaluator_refs - found_evaluator_ids
+        if missing_evaluators:
+            raise ValueError(
+                f"Could not find the following evaluators: {missing_evaluators}"
+            )
+
+        return evaluators
+
     async def cleanup(self) -> None:
         """Cleanup runtime resources."""
         pass
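The new `_load_evaluators` locates evaluator definitions relative to the eval-set file (`Path(context.eval_set).parent.parent / "evaluators"`), which implies a layout roughly like the sketch below. Only the `evaluators` sibling directory and the `*.json` glob come from the diff; the concrete folder and file names are assumptions for illustration.

```python
from pathlib import Path

# Assumed project layout (names are illustrative):
#
#   evals/
#   ├── eval-sets/
#   │   └── default.json      <- context.eval_set points at an eval-set file
#   └── evaluators/
#       ├── exact-match.json  <- matched against evaluation_set.evaluator_refs by "id"
#       └── llm-judge.json
eval_set_path = Path("evals/eval-sets/default.json")
evaluators_dir = eval_set_path.parent.parent / "evaluators"
print(evaluators_dir)  # evals/evaluators
```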
uipath/_cli/_runtime/_contracts.py

@@ -125,8 +125,12 @@ class UiPathRuntimeResult(BaseModel):

     def to_dict(self) -> Dict[str, Any]:
         """Convert to dictionary format for output."""
+        output_data = self.output or {}
+        if isinstance(self.output, BaseModel):
+            output_data = self.output.model_dump()
+
         result = {
-            "output": self.output or {},
+            "output": output_data,
             "status": self.status,
         }

@@ -595,7 +599,12 @@ class UiPathBaseRuntime(ABC):
         # Write the execution output to file if requested
         if self.context.execution_output_file:
             with open(self.context.execution_output_file, "w") as f:
-                json.dump(execution_result.output or {}, f, indent=2, default=str)
+                if isinstance(execution_result.output, BaseModel):
+                    f.write(execution_result.output.model_dump())
+                else:
+                    json.dump(
+                        execution_result.output or {}, f, indent=2, default=str
+                    )

         # Don't suppress exceptions
         return False
uipath/_cli/_utils/_eval_set.py

@@ -4,7 +4,7 @@ from typing import List, Optional

 import click

-from uipath._cli._evals._models import EvaluationSet
+from uipath._cli._evals._models._evaluation_set import EvaluationSet
 from uipath._cli._utils._console import ConsoleLogger

 console = ConsoleLogger()
uipath/_cli/_utils/_studio_project.py

@@ -1,6 +1,7 @@
 import json
 import os
 from functools import wraps
+from pathlib import PurePath
 from typing import Any, Callable, List, Optional, Union

 from pydantic import BaseModel, ConfigDict, Field, field_validator
@@ -103,7 +104,7 @@ class ProjectFolder(BaseModel):
         return v


-class ProjectStructure(BaseModel):
+class ProjectStructure(ProjectFolder):
     """Model representing the complete file structure of a UiPath project.

     Attributes:
@@ -114,34 +115,7 @@ class ProjectStructure(BaseModel):
         folder_type: The type of the root folder (optional)
     """

-    model_config = ConfigDict(
-        validate_by_name=True,
-        validate_by_alias=True,
-        use_enum_values=True,
-        arbitrary_types_allowed=True,
-        extra="allow",
-    )
-
-    id: Optional[str] = Field(default=None, alias="id")
-    name: Optional[str] = Field(default=None, alias="name")
-    folders: List[ProjectFolder] = Field(default_factory=list)
-    files: List[ProjectFile] = Field(default_factory=list)
-    folder_type: Optional[str] = Field(default=None, alias="folderType")
-
-    @field_validator("folder_type", mode="before")
-    @classmethod
-    def convert_folder_type(cls, v: Union[str, int, None]) -> Optional[str]:
-        """Convert numeric folder type to string.
-
-        Args:
-            v: The value to convert
-
-        Returns:
-            Optional[str]: The converted value or None
-        """
-        if isinstance(v, int):
-            return str(v)
-        return v
+    pass


 class LockInfo(BaseModel):
@@ -174,6 +148,33 @@ def get_folder_by_name(
     return None


+def resolve_path(
+    folder: ProjectFolder,
+    path: PurePath,
+) -> ProjectFile | ProjectFolder:
+    """Resolve a path relative to the folder.
+
+    Args:
+        folder: Project folder
+        path: Path relative to the folder
+
+    Returns: The resolved folder or file. If resolution fails, an assertion is raised.
+    """
+    root = path.parts
+    while len(root) > 1:
+        child = next(
+            (folder for folder in folder.folders if folder.name == root[0]), None
+        )
+        assert child, "Path not found."
+        folder = child
+        root = root[1:]
+    file = next((f for f in folder.files if f.name == root[0]), None)
+    child = next((folder for folder in folder.folders if folder.name == root[0]), None)
+    resolved = file or child
+    assert resolved, "Path not found."
+    return resolved
+
+
 class AddedResource(BaseModel):
     """Represents a new file to be added during a structural migration."""