agentensor 0.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,12 @@
1
+ [bumpversion]
2
+ current_version = 0.0.1
3
+ commit = True
4
+ tag = True
5
+
6
+ [bumpversion:file:pyproject.toml]
7
+ search = version = "{current_version}"
8
+ replace = version = "{new_version}"
9
+
10
+ [bumpversion:file:uv.lock]
11
+ search = version = "{current_version}"
12
+ replace = version = "{new_version}"
@@ -0,0 +1,50 @@
1
+ name: After CI
2
+
3
+ on:
4
+ workflow_run:
5
+ workflows: [CI]
6
+ types: [completed]
7
+
8
+ permissions:
9
+ statuses: write
10
+ pull-requests: write
11
+
12
+ jobs:
13
+ smokeshow:
14
+ runs-on: ubuntu-latest
15
+
16
+ steps:
17
+ - uses: astral-sh/setup-uv@v5
18
+ with:
19
+ enable-cache: true
20
+ python-version: '3.12'
21
+
22
+ - uses: dawidd6/action-download-artifact@v6
23
+ with:
24
+ workflow: ci.yml
25
+ name: '(diff-)?coverage-html.*'
26
+ name_is_regexp: true
27
+ commit: ${{ github.event.workflow_run.head_sha }}
28
+ allow_forks: true
29
+ workflow_conclusion: completed
30
+ if_no_artifact_found: warn
31
+
32
+ - run: uvx smokeshow upload coverage-html
33
+ if: hashFiles('coverage-html/*.html') != ''
34
+ env:
35
+ SMOKESHOW_GITHUB_STATUS_DESCRIPTION: Coverage {coverage-percentage}
36
+ SMOKESHOW_GITHUB_COVERAGE_THRESHOLD: 50
37
+ SMOKESHOW_GITHUB_CONTEXT: coverage
38
+ SMOKESHOW_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
39
+ SMOKESHOW_GITHUB_PR_HEAD_SHA: ${{ github.event.workflow_run.head_sha }}
40
+ SMOKESHOW_AUTH_KEY: ${{ secrets.SMOKESHOW_AUTH_KEY }}
41
+
42
+ - run: uvx smokeshow upload diff-coverage-html
43
+ if: hashFiles('diff-coverage-html/*.html') != ''
44
+ env:
45
+ SMOKESHOW_GITHUB_STATUS_DESCRIPTION: Diff coverage {coverage-percentage}
46
+ SMOKESHOW_GITHUB_COVERAGE_THRESHOLD: 95
47
+ SMOKESHOW_GITHUB_CONTEXT: diff-coverage
48
+ SMOKESHOW_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
49
+ SMOKESHOW_GITHUB_PR_HEAD_SHA: ${{ github.event.workflow_run.head_sha }}
50
+ SMOKESHOW_AUTH_KEY: ${{ secrets.SMOKESHOW_AUTH_KEY }}
@@ -0,0 +1,103 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ tags:
8
+ - 'v*'
9
+ pull_request:
10
+ branches:
11
+ - main
12
+ types:
13
+ - opened
14
+ - synchronize
15
+ - ready_for_review
16
+
17
+ permissions:
18
+ id-token: write # Required for PyPI trusted publishing
19
+ contents: read
20
+
21
+ jobs:
22
+ lint:
23
+ if: github.event.pull_request.draft == false
24
+ runs-on: ubuntu-latest
25
+
26
+ strategy:
27
+ fail-fast: false
28
+
29
+ steps:
30
+ - name: Checkout repository
31
+ uses: actions/checkout@v4
32
+
33
+ - name: Setup uv
34
+ uses: astral-sh/setup-uv@v5
35
+ with:
36
+ enable-cache: true
37
+
38
+ - name: Install dependencies
39
+ run: uv sync --all-groups
40
+
41
+ - name: Check style against standards
42
+ run: uv run make lint
43
+
44
+ coverage:
45
+ if: github.event.pull_request.draft == false
46
+ runs-on: ubuntu-latest
47
+
48
+ strategy:
49
+ fail-fast: false
50
+
51
+ steps:
52
+ - name: Checkout repository
53
+ uses: actions/checkout@v4
54
+ with:
55
+ fetch-depth: 0
56
+
57
+ - name: Setup uv
58
+ uses: astral-sh/setup-uv@v5
59
+ with:
60
+ enable-cache: true
61
+
62
+ - name: Install dependencies
63
+ run: uv sync --all-groups
64
+
65
+ - name: Run unit tests with coverage
66
+ run: |
67
+ uv run coverage run -m pytest
68
+ uv run coverage html --show-contexts --title "Coverage for ${{ github.sha }}"
69
+ uv run coverage xml
70
+ uv run diff-cover coverage.xml --html-report index.html
71
+
72
+ - name: Store coverage html
73
+ uses: actions/upload-artifact@v4
74
+ with:
75
+ name: coverage-html
76
+ path: htmlcov
77
+ include-hidden-files: true
78
+
79
+ - name: Store diff coverage html
80
+ uses: actions/upload-artifact@v4
81
+ with:
82
+ name: diff-coverage-html
83
+ path: index.html
84
+
85
+ - name: Check coverage
86
+ run: |
87
+ uv run coverage report --fail-under 95
88
+ uv run diff-cover coverage.xml --fail-under 100
89
+
90
+ build-and-release:
91
+ needs: [lint, coverage]
92
+ if: "success() && startsWith(github.ref, 'refs/tags/')"
93
+ runs-on: ubuntu-latest
94
+
95
+ steps:
96
+ - uses: actions/checkout@v4
97
+ - uses: yezz123/setup-uv@v4
98
+
99
+ - name: Build project
100
+ run: uv build
101
+
102
+ - name: Publish to PyPI
103
+ uses: pypa/gh-action-pypi-publish@release/v1
@@ -0,0 +1,164 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ xunit-result.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+ cover/
54
+
55
+ # Translations
56
+ *.mo
57
+ *.pot
58
+
59
+ # Django stuff:
60
+ *.log
61
+ local_settings.py
62
+ db.sqlite3
63
+ db.sqlite3-journal
64
+
65
+ # Flask stuff:
66
+ instance/
67
+ .webassets-cache
68
+
69
+ # Scrapy stuff:
70
+ .scrapy
71
+
72
+ # Sphinx documentation
73
+ docs/_build/
74
+
75
+ # PyBuilder
76
+ .pybuilder/
77
+ target/
78
+
79
+ # Jupyter Notebook
80
+ .ipynb_checkpoints
81
+
82
+ # IPython
83
+ profile_default/
84
+ ipython_config.py
85
+
86
+ # pyenv
87
+ # For a library or package, you might want to ignore these files since the code is
88
+ # intended to run in multiple environments; otherwise, check them in:
89
+ # .python-version
90
+
91
+ # pipenv
92
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
94
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
95
+ # install all needed dependencies.
96
+ #Pipfile.lock
97
+
98
+ # poetry
99
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
100
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
101
+ # commonly ignored for libraries.
102
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
103
+ #poetry.lock
104
+
105
+ # pdm
106
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
107
+ #pdm.lock
108
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
109
+ # in version control.
110
+ # https://pdm.fming.dev/#use-with-ide
111
+ .pdm.toml
112
+
113
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
114
+ __pypackages__/
115
+
116
+ # Celery stuff
117
+ celerybeat-schedule
118
+ celerybeat.pid
119
+
120
+ # SageMath parsed files
121
+ *.sage.py
122
+
123
+ # Environments
124
+ .env
125
+ .venv
126
+ env/
127
+ venv/
128
+ ENV/
129
+ env.bak/
130
+ venv.bak/
131
+
132
+ # Spyder project settings
133
+ .spyderproject
134
+ .spyproject
135
+
136
+ # Rope project settings
137
+ .ropeproject
138
+
139
+ # mkdocs documentation
140
+ /site
141
+
142
+ # mypy
143
+ .mypy_cache/
144
+ .dmypy.json
145
+ dmypy.json
146
+
147
+ # Pyre type checker
148
+ .pyre/
149
+
150
+ # pytype static type analyzer
151
+ .pytype/
152
+
153
+ # Cython debug symbols
154
+ cython_debug/
155
+
156
+ # PyCharm
157
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
158
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
159
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
160
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
161
+ #.idea/
162
+
163
+ # MacOS
164
+ **/.DS_Store
@@ -0,0 +1,26 @@
1
+ repos:
2
+ - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v4.5.0
4
+ hooks:
5
+ - id: trailing-whitespace
6
+ - id: end-of-file-fixer
7
+ - id: check-yaml
8
+ - id: check-json
9
+ - id: check-case-conflict
10
+ - id: check-docstring-first
11
+ - id: check-builtin-literals
12
+ - id: check-ast
13
+ - id: check-merge-conflict
14
+
15
+ - repo: https://github.com/charliermarsh/ruff-pre-commit
16
+ rev: 'v0.4.6'
17
+ hooks:
18
+ - id: ruff
19
+ args: [--fix]
20
+ - id: ruff-format
21
+
22
+ - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
23
+ rev: v2.13.0
24
+ hooks:
25
+ - id: pretty-format-toml
26
+ args: [--autofix]
@@ -0,0 +1 @@
1
+ 3.12.6
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Shaojie Jiang
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,15 @@
1
+ lint:
2
+ ruff check .
3
+ mypy agentensor examples
4
+ ruff format . --check
5
+
6
+ format:
7
+ ruff format .
8
+ ruff check . --select I001 --fix
9
+ ruff check . --select F401 --fix
10
+
11
+ test:
12
+ pytest --cov --cov-report term-missing tests/
13
+
14
+ doc:
15
+ mkdocs serve --dev-addr=0.0.0.0:8080
@@ -0,0 +1,40 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentensor
3
+ Version: 0.0.1
4
+ Summary: Add your description here
5
+ License: MIT License
6
+
7
+ Copyright (c) 2025 Shaojie Jiang
8
+
9
+ Permission is hereby granted, free of charge, to any person obtaining a copy
10
+ of this software and associated documentation files (the "Software"), to deal
11
+ in the Software without restriction, including without limitation the rights
12
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13
+ copies of the Software, and to permit persons to whom the Software is
14
+ furnished to do so, subject to the following conditions:
15
+
16
+ The above copyright notice and this permission notice shall be included in all
17
+ copies or substantial portions of the Software.
18
+
19
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25
+ SOFTWARE.
26
+ License-File: LICENSE
27
+ Requires-Python: >=3.12
28
+ Requires-Dist: logfire>=3.14.0
29
+ Requires-Dist: pydantic-ai>=0.0.55
30
+ Description-Content-Type: text/markdown
31
+
32
+ # AgenTensor
33
+
34
+ [![CI](https://github.com/ShaojieJiang/agentensor/actions/workflows/ci.yml/badge.svg?event=push)](https://github.com/ShaojieJiang/agentensor/actions/workflows/ci.yml?query=branch%3Amain)
35
+ [![Coverage](https://coverage-badge.samuelcolvin.workers.dev/ShaojieJiang/agentensor.svg)](https://coverage-badge.samuelcolvin.workers.dev/redirect/ShaojieJiang/agentensor)
36
+ [![PyPI](https://img.shields.io/pypi/v/agentensor.svg)](https://pypi.python.org/pypi/agentensor)
37
+
38
+ ## TODO
39
+
40
+ - [ ] Add parameter saving
@@ -0,0 +1,9 @@
1
+ # AgenTensor
2
+
3
+ [![CI](https://github.com/ShaojieJiang/agentensor/actions/workflows/ci.yml/badge.svg?event=push)](https://github.com/ShaojieJiang/agentensor/actions/workflows/ci.yml?query=branch%3Amain)
4
+ [![Coverage](https://coverage-badge.samuelcolvin.workers.dev/ShaojieJiang/agentensor.svg)](https://coverage-badge.samuelcolvin.workers.dev/redirect/ShaojieJiang/agentensor)
5
+ [![PyPI](https://img.shields.io/pypi/v/agentensor.svg)](https://pypi.python.org/pypi/agentensor)
6
+
7
+ ## TODO
8
+
9
+ - [ ] Add parameter saving
@@ -0,0 +1 @@
1
+ """Example module."""
@@ -0,0 +1,44 @@
1
+ """Loss functions."""
2
+
3
+ from dataclasses import dataclass
4
+ from typing import Any
5
+ from pydantic_ai import models
6
+ from pydantic_evals.evaluators import EvaluationReason, Evaluator, EvaluatorContext
7
+ from pydantic_evals.evaluators.llm_as_a_judge import judge_input_output, judge_output
8
+ from agentensor.tensor import TextTensor
9
+
10
+
11
@dataclass
class LLMTensorJudge(Evaluator[TextTensor, TextTensor, Any]):
    """Grade a ``TextTensor`` output against a rubric using an LLM judge.

    Adapted from pydantic_evals.evaluators.common.LLMJudge.
    """

    # Natural-language grading criteria handed to the judge model.
    rubric: str
    # Judge model; ``None`` defers to the pydantic-evals default.
    model: models.Model | models.KnownModelName | None = None
    # When True, the case input is shown to the judge alongside the output.
    include_input: bool = True

    async def evaluate(
        self,
        ctx: EvaluatorContext[TextTensor, TextTensor, Any],
    ) -> EvaluationReason:
        """Judge the output (optionally together with the input) and report the verdict."""
        if self.include_input:
            verdict = await judge_input_output(
                ctx.inputs.text, ctx.output.text, self.rubric, self.model
            )
        else:
            verdict = await judge_output(ctx.output.text, self.rubric, self.model)
        return EvaluationReason(value=verdict.pass_, reason=verdict.reason)

    def build_serialization_arguments(self) -> dict[str, Any]:
        """Build serialization arguments, flattening a concrete model to ``system:name``."""
        arguments = super().build_serialization_arguments()
        model = arguments.get("model")
        if model and isinstance(model, models.Model):
            arguments["model"] = f"{model.system}:{model.model_name}"
        return arguments
@@ -0,0 +1,26 @@
1
+ """Module class."""
2
+
3
+ from dataclasses import dataclass
4
+ from pydantic_graph.nodes import BaseNode, DepsT, NodeRunEndT, StateT
5
+ from agentensor.tensor import TextTensor
6
+
7
+
8
@dataclass
class ModuleState:
    """State carried through the graph run.

    Holds the input tensor handed to the graph's start node.
    """

    # The tensor the graph run starts from.
    input: TextTensor
13
+
14
+
15
class AgentModule(BaseNode[StateT, DepsT, NodeRunEndT]):
    """Graph node whose trainable ``TextTensor`` class attributes are discoverable."""

    @classmethod
    def get_params(cls) -> list[TextTensor]:
        """Collect every trainable ``TextTensor`` declared anywhere in the class MRO."""
        return [
            attribute
            for klass in cls.__mro__
            for attribute in klass.__dict__.values()
            if isinstance(attribute, TextTensor) and attribute.requires_grad
        ]
@@ -0,0 +1,39 @@
1
+ """Optimizer module."""
2
+
3
+ from pydantic_ai import Agent
4
+ from pydantic_graph import Graph
5
+ from agentensor.module import AgentModule
6
+ from agentensor.tensor import TextTensor
7
+
8
+
9
class Optimizer:
    """Textual-gradient optimizer that rewrites trainable prompts from feedback."""

    def __init__(self, graph: Graph) -> None:
        """Initialize the optimizer.

        Args:
            graph: The graph whose ``AgentModule`` node classes may hold
                trainable ``TextTensor`` parameters.
        """
        # Filter to AgentModule subclasses *before* calling get_params():
        # in the original comprehension the `if` sat in the innermost loop,
        # so get_params() was called on every node class and any
        # non-AgentModule node raised AttributeError.
        self.params: list[TextTensor] = [
            param
            for node in graph.get_nodes()
            if issubclass(node, AgentModule)
            for param in node.get_params()  # type: ignore[attr-defined]
        ]
        self.agent: Agent = Agent(
            model="openai:gpt-4o-mini",
            system_prompt="Rewrite the system prompt given the feedback.",
        )

    def step(self) -> None:
        """Apply the accumulated textual gradient to every parameter that has one."""
        for param in self.params:
            if not param.text_grad:
                continue  # no feedback for this parameter this round
            param.text = self.optimize(param.text, param.text_grad)

    def zero_grad(self) -> None:
        """Clear the textual gradients of all parameters."""
        for param in self.params:
            param.text_grad = ""

    def optimize(self, text: str, grad: str) -> str:
        """Ask the rewriting agent to improve ``text`` given feedback ``grad``."""
        return self.agent.run_sync(f"Feedback: {grad}\nText: {text}").data
@@ -0,0 +1,53 @@
1
+ """Example module."""
2
+
3
+ from __future__ import annotations
4
+ from pydantic_ai import Agent
5
+
6
+
7
class TextTensor:
    """A piece of text that can receive and propagate textual feedback."""

    def __init__(
        self,
        text: str,
        parents: list[TextTensor] | None = None,
        requires_grad: bool = False,
    ) -> None:
        """Create a text tensor.

        Args:
            text: The wrapped text value.
            parents: Upstream tensors this one was derived from, if any.
            requires_grad: Whether feedback should be stored on this tensor
                during backward().
        """
        self.text = text
        self.requires_grad = requires_grad
        self.text_grad = ""  # accumulated feedback; empty string means "no gradient"
        # Agent used to translate output-level feedback into input-level feedback.
        self.agent = Agent(
            model="openai:gpt-4o-mini", system_prompt="Answer the user's question."
        )
        self.parents: list[TextTensor] = parents or []

    def backward(self, grad: str = "") -> None:
        """Backward pass for the TextTensor.

        Args:
            grad (str, optional): The gradient to backpropagate. Defaults to "".
        """
        if not grad:
            return
        if self.requires_grad:
            self.text_grad = grad
        # Propagate feedback upstream, skipping frozen parents.
        for parent in self.parents:
            if parent.requires_grad:
                parent.backward(self.calc_grad(parent.text, self.text, grad))

    def calc_grad(self, input_text: str, output_text: str, grad: str) -> str:
        """Ask the agent how the input should change to improve the output."""
        return self.agent.run_sync(
            f"Here is the input: \n\n>{input_text}\n\nI got this "
            f"output: \n\n>{output_text}\n\nHere is the feedback: \n\n"
            f">{grad}\n\nHow should I improve the input to get a "
            f"better output?"
        ).data

    def __str__(self) -> str:
        """Return the wrapped text."""
        return self.text
@@ -0,0 +1,66 @@
1
+ """Trainer."""
2
+
3
+ from typing import Any
4
+ from pydantic_evals import Dataset
5
+ from pydantic_graph import Graph
6
+ from agentensor.module import AgentModule, ModuleState
7
+ from agentensor.optim import Optimizer
8
+ from agentensor.tensor import TextTensor
9
+
10
+
11
class Trainer:
    """Runs evaluate → backward → optimize epochs over an agent graph."""

    def __init__(
        self,
        graph: Graph[ModuleState, None, TextTensor],
        start_node: type[AgentModule],
        dataset: Dataset[TextTensor, TextTensor, Any],
        optimizer: Optimizer,
        epochs: int,
        stop_threshold: float = 0.95,
    ):
        """Initialize the trainer.

        Args:
            graph: The agent graph to run for each case.
            start_node: Node class the graph run starts from.
            dataset: Evaluation cases and their evaluators.
            optimizer: Optimizer that rewrites trainable parameters.
            epochs: Maximum number of training epochs.
            stop_threshold: Average assertion score at which to stop early.
        """
        self.graph = graph
        self.start_node = start_node
        self.dataset = dataset
        self.optimizer = optimizer
        self.epochs = epochs
        self.stop_threshold = stop_threshold

    async def step(self, x: TextTensor) -> TextTensor:
        """Run the graph once on input ``x`` and return its output tensor."""
        initial_state = ModuleState(input=x)
        run_result = await self.graph.run(self.start_node(), state=initial_state)  # type: ignore[arg-type]
        return run_result.output

    def train(self) -> None:
        """Train until ``epochs`` are exhausted or performance reaches the threshold."""
        for epoch in range(self.epochs):
            report = self.dataset.evaluate_sync(self.step)
            report.print(
                include_input=True, include_output=True, include_durations=True
            )

            # Backpropagate the reasons of every failed assertion.
            for case in report.cases:
                losses = []
                for failed in (a for a in case.assertions.values() if not a.value):
                    assert failed.reason
                    losses.append(failed.reason)
                if losses:
                    case.output.backward(" ".join(losses))

            self.optimizer.step()
            self.optimizer.zero_grad()

            print(f"Epoch {epoch + 1}")
            for param in self.optimizer.params:
                print(param.text)  # pragma: no cover
            print()
            performance = report.averages().assertions
            assert performance is not None
            if performance >= self.stop_threshold:
                print("Optimization complete.")
                break