zenml-nightly 0.83.1.dev20250709__py3-none-any.whl → 0.83.1.dev20250710__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. zenml/VERSION +1 -1
  2. zenml/cli/login.py +141 -18
  3. zenml/cli/project.py +8 -6
  4. zenml/cli/utils.py +63 -16
  5. zenml/client.py +4 -1
  6. zenml/config/compiler.py +1 -0
  7. zenml/config/retry_config.py +5 -3
  8. zenml/config/step_configurations.py +7 -1
  9. zenml/console.py +4 -1
  10. zenml/constants.py +0 -1
  11. zenml/enums.py +13 -4
  12. zenml/integrations/kubernetes/flavors/kubernetes_orchestrator_flavor.py +58 -4
  13. zenml/integrations/kubernetes/orchestrators/kube_utils.py +172 -0
  14. zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator.py +37 -23
  15. zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator_entrypoint.py +92 -22
  16. zenml/integrations/kubernetes/orchestrators/manifest_utils.py +59 -0
  17. zenml/logger.py +6 -4
  18. zenml/login/web_login.py +13 -6
  19. zenml/models/v2/core/model_version.py +9 -1
  20. zenml/models/v2/core/pipeline_run.py +1 -0
  21. zenml/models/v2/core/step_run.py +35 -1
  22. zenml/orchestrators/base_orchestrator.py +63 -8
  23. zenml/orchestrators/dag_runner.py +3 -1
  24. zenml/orchestrators/publish_utils.py +4 -1
  25. zenml/orchestrators/step_launcher.py +77 -139
  26. zenml/orchestrators/step_run_utils.py +16 -0
  27. zenml/orchestrators/step_runner.py +1 -4
  28. zenml/pipelines/pipeline_decorator.py +6 -1
  29. zenml/pipelines/pipeline_definition.py +7 -0
  30. zenml/zen_server/auth.py +0 -1
  31. zenml/zen_stores/migrations/versions/360fa84718bf_step_run_versioning.py +64 -0
  32. zenml/zen_stores/migrations/versions/85289fea86ff_adding_source_to_logs.py +1 -1
  33. zenml/zen_stores/schemas/pipeline_deployment_schemas.py +21 -0
  34. zenml/zen_stores/schemas/pipeline_run_schemas.py +31 -2
  35. zenml/zen_stores/schemas/step_run_schemas.py +41 -17
  36. zenml/zen_stores/sql_zen_store.py +152 -32
  37. zenml/zen_stores/template_utils.py +29 -9
  38. zenml_nightly-0.83.1.dev20250710.dist-info/METADATA +499 -0
  39. {zenml_nightly-0.83.1.dev20250709.dist-info → zenml_nightly-0.83.1.dev20250710.dist-info}/RECORD +42 -41
  40. zenml_nightly-0.83.1.dev20250709.dist-info/METADATA +0 -538
  41. {zenml_nightly-0.83.1.dev20250709.dist-info → zenml_nightly-0.83.1.dev20250710.dist-info}/LICENSE +0 -0
  42. {zenml_nightly-0.83.1.dev20250709.dist-info → zenml_nightly-0.83.1.dev20250710.dist-info}/WHEEL +0 -0
  43. {zenml_nightly-0.83.1.dev20250709.dist-info → zenml_nightly-0.83.1.dev20250710.dist-info}/entry_points.txt +0 -0
@@ -213,33 +213,53 @@ def generate_config_schema(
 
     all_steps: Dict[str, Any] = {}
     all_steps_required = False
-    for name, step in deployment.to_model(
+    for step_name, step in deployment.to_model(
         include_metadata=True
     ).step_configurations.items():
         step_fields = generic_step_fields.copy()
         if step.config.parameters:
-            parameter_fields: Dict[str, Any] = {
-                name: (Any, FieldInfo(default=...))
-                for name in step.config.parameters
-            }
+            parameter_fields: Dict[str, Any] = {}
+
+            for parameter_name in step.config.parameters:
+                # Pydantic doesn't allow field names to start with an underscore
+                sanitized_parameter_name = parameter_name.lstrip("_")
+                while sanitized_parameter_name in parameter_fields:
+                    sanitized_parameter_name = sanitized_parameter_name + "_"
+
+                parameter_fields[sanitized_parameter_name] = (
+                    Any,
+                    FieldInfo(default=..., validation_alias=parameter_name),
+                )
+
             parameters_class = create_model(
-                f"{name}_parameters", **parameter_fields
+                f"{step_name}_parameters", **parameter_fields
             )
             step_fields["parameters"] = (
                 parameters_class,
                 FieldInfo(default=...),
            )
 
-        step_model = create_model(name, **step_fields)
+        step_model = create_model(step_name, **step_fields)
+
+        # Pydantic doesn't allow field names to start with an underscore
+        sanitized_step_name = step_name.lstrip("_")
+        while sanitized_step_name in all_steps:
+            sanitized_step_name = sanitized_step_name + "_"
 
         if step.config.parameters:
             # This step has required parameters -> we make this attribute
             # required and also the parent attribute so these parameters must
             # always be included
            all_steps_required = True
-            all_steps[name] = (step_model, FieldInfo(default=...))
+            all_steps[sanitized_step_name] = (
+                step_model,
+                FieldInfo(default=..., validation_alias=step_name),
+            )
         else:
-            all_steps[name] = (Optional[step_model], FieldInfo(default=None))
+            all_steps[sanitized_step_name] = (
+                Optional[step_model],
+                FieldInfo(default=None, validation_alias=step_name),
+            )
 
     all_steps_model = create_model("Steps", **all_steps)
 
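The hunk above (in `generate_config_schema`) stops using raw step and parameter names as Pydantic field names: because Pydantic rejects field names with a leading underscore, the names are sanitized and the original name is mapped back via `validation_alias`. A minimal standalone sketch of that pattern, assuming Pydantic v2 (which the package metadata below pins to >=2.0) and a hypothetical `_threshold` parameter name not taken from this diff:

```python
# Illustrative sketch only: a sanitized field name plus validation_alias lets a
# dynamically created Pydantic model accept input keyed by the original,
# underscore-prefixed parameter name.
from typing import Any

from pydantic import create_model
from pydantic.fields import FieldInfo

parameter_name = "_threshold"  # hypothetical step parameter name
sanitized_parameter_name = parameter_name.lstrip("_")

Params = create_model(
    "my_step_parameters",
    **{
        sanitized_parameter_name: (
            Any,
            FieldInfo(default=..., validation_alias=parameter_name),
        )
    },
)

# Validation uses the original (aliased) name; the model exposes the sanitized field.
params = Params.model_validate({"_threshold": 0.5})
print(params.threshold)  # 0.5
```

Under the new schema, config input can still be keyed by `_threshold`, while the generated model field itself carries the sanitized name.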
@@ -0,0 +1,499 @@
+ Metadata-Version: 2.3
+ Name: zenml-nightly
+ Version: 0.83.1.dev20250710
+ Summary: ZenML: Write production-ready ML code.
+ License: Apache-2.0
+ Keywords: machine learning,production,pipeline,mlops,devops
+ Author: ZenML GmbH
+ Author-email: info@zenml.io
+ Requires-Python: >=3.9,<3.13
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Intended Audience :: System Administrators
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Topic :: System :: Distributed Computing
+ Classifier: Typing :: Typed
+ Provides-Extra: adlfs
+ Provides-Extra: azureml
+ Provides-Extra: connectors-aws
+ Provides-Extra: connectors-azure
+ Provides-Extra: connectors-gcp
+ Provides-Extra: connectors-kubernetes
+ Provides-Extra: dev
+ Provides-Extra: gcsfs
+ Provides-Extra: s3fs
+ Provides-Extra: sagemaker
+ Provides-Extra: secrets-aws
+ Provides-Extra: secrets-azure
+ Provides-Extra: secrets-gcp
+ Provides-Extra: secrets-hashicorp
+ Provides-Extra: server
+ Provides-Extra: templates
+ Provides-Extra: terraform
+ Provides-Extra: vertex
+ Requires-Dist: Jinja2 ; extra == "server"
+ Requires-Dist: adlfs (>=2021.10.0) ; extra == "adlfs"
+ Requires-Dist: alembic (>=1.8.1,<=1.15.2)
+ Requires-Dist: aws-profile-manager (>=0.5.0) ; extra == "connectors-aws"
+ Requires-Dist: azure-ai-ml (==1.23.1) ; extra == "azureml"
+ Requires-Dist: azure-identity (>=1.4.0) ; extra == "secrets-azure" or extra == "connectors-azure"
+ Requires-Dist: azure-keyvault-secrets (>=4.0.0) ; extra == "secrets-azure"
+ Requires-Dist: azure-mgmt-containerregistry (>=10.0.0) ; extra == "connectors-azure"
+ Requires-Dist: azure-mgmt-containerservice (>=20.0.0) ; extra == "connectors-azure"
+ Requires-Dist: azure-mgmt-resource (>=21.0.0) ; extra == "connectors-azure"
+ Requires-Dist: azure-mgmt-storage (>=20.0.0) ; extra == "connectors-azure"
+ Requires-Dist: azure-storage-blob (>=12.0.0) ; extra == "connectors-azure"
+ Requires-Dist: bandit (>=1.7.5,<2.0.0) ; extra == "dev"
+ Requires-Dist: bcrypt (==4.0.1)
+ Requires-Dist: boto3 (>=1.16.0) ; extra == "secrets-aws" or extra == "connectors-aws"
+ Requires-Dist: click (>=8.0.1,<8.1.8)
+ Requires-Dist: cloudpickle (>=2.0.0,<3)
+ Requires-Dist: copier (>=8.1.0) ; extra == "templates"
+ Requires-Dist: coverage[toml] (>=5.5,<6.0) ; extra == "dev"
+ Requires-Dist: darglint (>=1.8.1,<2.0.0) ; extra == "dev"
+ Requires-Dist: distro (>=1.6.0,<2.0.0)
+ Requires-Dist: docker (>=7.1.0,<7.2.0)
+ Requires-Dist: fastapi (>=0.100,<=0.115.8) ; extra == "server"
+ Requires-Dist: gcsfs (>=2022.11.0) ; extra == "gcsfs"
+ Requires-Dist: gitpython (>=3.1.18,<4.0.0)
+ Requires-Dist: google-cloud-aiplatform (>=1.34.0) ; extra == "vertex"
+ Requires-Dist: google-cloud-artifact-registry (>=1.11.3) ; extra == "connectors-gcp"
+ Requires-Dist: google-cloud-container (>=2.21.0) ; extra == "connectors-gcp"
+ Requires-Dist: google-cloud-pipeline-components (>=2.19.0) ; extra == "vertex"
+ Requires-Dist: google-cloud-secret-manager (>=2.12.5) ; extra == "secrets-gcp"
+ Requires-Dist: google-cloud-storage (>=2.9.0) ; extra == "connectors-gcp"
+ Requires-Dist: hvac (>=0.11.2) ; extra == "secrets-hashicorp"
+ Requires-Dist: hypothesis (>=6.43.1,<7.0.0) ; extra == "dev"
+ Requires-Dist: importlib_metadata (<=7.0.0) ; python_version < "3.10"
+ Requires-Dist: ipinfo (>=4.4.3) ; extra == "server"
+ Requires-Dist: itsdangerous (>=2.2.0,<2.3.0) ; extra == "server"
+ Requires-Dist: jinja2-time (>=0.2.0,<0.3.0) ; extra == "templates"
+ Requires-Dist: kfp (>=2.6.0) ; extra == "vertex"
+ Requires-Dist: kubernetes (>=18.20.0) ; extra == "connectors-kubernetes" or extra == "connectors-aws" or extra == "connectors-gcp" or extra == "connectors-azure"
+ Requires-Dist: maison (<2.0) ; extra == "dev"
+ Requires-Dist: mike (>=1.1.2,<2.0.0) ; extra == "dev"
+ Requires-Dist: mkdocs (>=1.6.1,<2.0.0) ; extra == "dev"
+ Requires-Dist: mkdocs-autorefs (>=1.4.0,<2.0.0) ; extra == "dev"
+ Requires-Dist: mkdocs-awesome-pages-plugin (>=2.10.1,<3.0.0) ; extra == "dev"
+ Requires-Dist: mkdocs-material (==9.6.8) ; extra == "dev"
+ Requires-Dist: mkdocstrings[python] (>=0.28.1,<0.29.0) ; extra == "dev"
+ Requires-Dist: mypy (==1.7.1) ; extra == "dev"
+ Requires-Dist: orjson (>=3.10.0,<3.11.0) ; extra == "server"
+ Requires-Dist: packaging (>=24.1)
+ Requires-Dist: passlib[bcrypt] (>=1.7.4,<1.8.0)
+ Requires-Dist: psutil (>=5.0.0)
+ Requires-Dist: pydantic (>=2.0,<2.11.2)
+ Requires-Dist: pydantic-settings
+ Requires-Dist: pyjwt[crypto] (==2.7.*) ; extra == "server"
+ Requires-Dist: pyment (>=0.3.3,<0.4.0) ; extra == "dev"
+ Requires-Dist: pymysql (>=1.1.1,<1.2.0)
+ Requires-Dist: pytest (>=7.4.0,<8.0.0) ; extra == "dev"
+ Requires-Dist: pytest-clarity (>=1.0.1,<2.0.0) ; extra == "dev"
+ Requires-Dist: pytest-instafail (>=0.5.0) ; extra == "dev"
+ Requires-Dist: pytest-mock (>=3.6.1,<4.0.0) ; extra == "dev"
+ Requires-Dist: pytest-randomly (>=3.10.1,<4.0.0) ; extra == "dev"
+ Requires-Dist: pytest-rerunfailures (>=13.0) ; extra == "dev"
+ Requires-Dist: pytest-split (>=0.8.1,<0.9.0) ; extra == "dev"
+ Requires-Dist: python-dateutil (>=2.8.1,<3.0.0)
+ Requires-Dist: python-multipart (>=0.0.9,<0.1.0) ; extra == "server"
+ Requires-Dist: pyyaml (>=6.0.1)
+ Requires-Dist: pyyaml-include (<2.0) ; extra == "templates"
+ Requires-Dist: requests (>=2.27.11,<3.0.0) ; extra == "connectors-azure"
+ Requires-Dist: rich[jupyter] (>=12.0.0)
+ Requires-Dist: ruff (>=0.1.7) ; extra == "templates" or extra == "dev"
+ Requires-Dist: s3fs (>=2022.11.0,!=2025.3.1) ; extra == "s3fs"
+ Requires-Dist: sagemaker (>=2.237.3) ; extra == "sagemaker"
+ Requires-Dist: secure (>=0.3.0,<0.4.0) ; extra == "server"
+ Requires-Dist: setuptools (>=70.0.0)
+ Requires-Dist: sqlalchemy (>=2.0.0,<3.0.0)
+ Requires-Dist: sqlalchemy_utils
+ Requires-Dist: sqlmodel (==0.0.18)
+ Requires-Dist: tldextract (>=5.1.0,<5.2.0) ; extra == "server"
+ Requires-Dist: tox (>=3.24.3,<4.0.0) ; extra == "dev"
+ Requires-Dist: types-Markdown (>=3.3.6,<4.0.0) ; extra == "dev"
+ Requires-Dist: types-Pillow (>=9.2.1,<10.0.0) ; extra == "dev"
+ Requires-Dist: types-PyMySQL (>=1.0.4,<2.0.0) ; extra == "dev"
+ Requires-Dist: types-PyYAML (>=6.0.0,<7.0.0) ; extra == "dev"
+ Requires-Dist: types-certifi (>=2021.10.8.0,<2022.0.0.0) ; extra == "dev"
+ Requires-Dist: types-croniter (>=1.0.2,<2.0.0) ; extra == "dev"
+ Requires-Dist: types-futures (>=3.3.1,<4.0.0) ; extra == "dev"
+ Requires-Dist: types-paramiko (>=3.4.0) ; extra == "dev"
+ Requires-Dist: types-passlib (>=1.7.7,<2.0.0) ; extra == "dev"
+ Requires-Dist: types-protobuf (>=3.18.0,<4.0.0) ; extra == "dev"
+ Requires-Dist: types-psutil (>=5.8.13,<6.0.0) ; extra == "dev"
+ Requires-Dist: types-python-dateutil (>=2.8.2,<3.0.0) ; extra == "dev"
+ Requires-Dist: types-python-slugify (>=5.0.2,<6.0.0) ; extra == "dev"
+ Requires-Dist: types-redis (>=4.1.19,<5.0.0) ; extra == "dev"
+ Requires-Dist: types-requests (>=2.27.11,<3.0.0) ; extra == "dev"
+ Requires-Dist: types-setuptools (>=57.4.2,<58.0.0) ; extra == "dev"
+ Requires-Dist: types-six (>=1.16.2,<2.0.0) ; extra == "dev"
+ Requires-Dist: types-termcolor (>=1.1.2,<2.0.0) ; extra == "dev"
+ Requires-Dist: typing-extensions (>=3.7.4) ; extra == "dev"
+ Requires-Dist: uvicorn[standard] (>=0.17.5) ; extra == "server"
+ Requires-Dist: yamlfix (>=1.16.0,<2.0.0) ; extra == "dev"
+ Project-URL: Documentation, https://docs.zenml.io
+ Project-URL: Homepage, https://zenml.io
+ Project-URL: Repository, https://github.com/zenml-io/zenml
+ Description-Content-Type: text/markdown
+
+ <div align="center">
+ <img referrerpolicy="no-referrer-when-downgrade" src="https://static.scarf.sh/a.png?x-pxid=0fcbab94-8fbe-4a38-93e8-c2348450a42e" />
+ <h1 align="center">MLOps for Reliable AI - From Classical ML to Agents</h1>
+ <h3 align="center">Your unified toolkit for shipping everything from decision trees to complex AI agents, built on the MLOps principles you already trust.</h3>
+ </div>
+
+ <div align="center">
+
+ <!-- PROJECT LOGO -->
+ <br />
+ <a href="https://zenml.io">
+ <img alt="ZenML Logo" src="docs/book/.gitbook/assets/header.png" alt="ZenML Logo">
+ </a>
+ <br />
+
+ [![PyPi][pypi-shield]][pypi-url]
+ [![PyPi][pypiversion-shield]][pypi-url]
+ [![PyPi][downloads-shield]][downloads-url]
+ [![Contributors][contributors-shield]][contributors-url]
+ [![License][license-shield]][license-url]
+
+ </div>
+
+ <!-- MARKDOWN LINKS & IMAGES -->
+ [pypi-shield]: https://img.shields.io/pypi/pyversions/zenml?color=281158
+ [pypi-url]: https://pypi.org/project/zenml/
+ [pypiversion-shield]: https://img.shields.io/pypi/v/zenml?color=361776
+ [downloads-shield]: https://img.shields.io/pypi/dm/zenml?color=431D93
+ [downloads-url]: https://pypi.org/project/zenml/
+ [contributors-shield]: https://img.shields.io/github/contributors/zenml-io/zenml?color=7A3EF4
+ [contributors-url]: https://github.com/zenml-io/zenml/graphs/contributors
+ [license-shield]: https://img.shields.io/github/license/zenml-io/zenml?color=9565F6
+ [license-url]: https://github.com/zenml-io/zenml/blob/main/LICENSE
+
+ <div align="center">
+ <p>
+ <a href="https://zenml.io/features">Features</a> •
+ <a href="https://zenml.io/roadmap">Roadmap</a> •
+ <a href="https://github.com/zenml-io/zenml/issues">Report Bug</a> •
+ <a href="https://zenml.io/pro">Sign up for ZenML Pro</a> •
+ <a href="https://www.zenml.io/blog">Blog</a> •
+ <a href="https://zenml.io/podcast">Podcast</a>
+ <br />
+ <br />
+ 🎉 For the latest release, see the <a href="https://github.com/zenml-io/zenml/releases">release notes</a>.
+ </p>
+ </div>
+
+ ---
+
+ ## 🚨 The Problem: MLOps Works for Models, But What About AI?
+
+ ![No MLOps for modern AI](docs/book/.gitbook/assets/readme_problem.png)
+
+ You're an ML engineer. You've perfected deploying scikit-learn models and wrangling TensorFlow jobs. Your MLOps stack is dialed in. But now, you're being asked to build and ship AI agents, and suddenly your trusted toolkit is starting to crack.
+
+ - **The Adaptation Struggle:** Your MLOps habits—rigorous testing, versioning, CI/CD—don’t map cleanly onto agent development. How do you version a prompt? How do you regression test a non-deterministic system? The tools that gave you confidence for models now create friction for agents.
+
+ - **The Divided Stack:** To cope, teams are building a second, parallel stack just for LLM-based systems. Now you’re maintaining two sets of tools, two deployment pipelines, and two mental models. Your classical models live in one world, your agents in another. It's expensive, complex, and slows everyone down.
+
+ - **The Broken Feedback Loop:** Getting an agent from your local environment to production is a slow, painful journey. By the time you get feedback on performance, cost, or quality, the requirements have already changed. Iteration is a guessing game, not a data-driven process.
+
+ ## 💡 The Solution: One Framework for your Entire AI Stack
+
+ Stop maintaining two separate worlds. ZenML is a unified MLOps framework that extends the battle-tested principles you rely on for classical ML to the new world of AI agents. It’s one platform to develop, evaluate, and deploy your entire AI portfolio.
+
+ ```python
+ # Morning: Your sklearn pipeline is still versioned and reproducible.
+ train_and_deploy_classifier()
+
+ # Afternoon: Your new agent evaluation pipeline uses the same logic.
+ evaluate_and_deploy_agent()
+
+ # Same platform. Same principles. New possibilities.
+ ```
+
+ With ZenML, you're not replacing your knowledge; you're extending it. Use the pipelines and practices you already know to version, test, deploy, and monitor everything from classic models to the most advanced agents.
+
+ ## 💻 See It In Action: Multi-Agent Architecture Comparison
+
+ **The Challenge:** Your team built three different customer service agents. Which one should go to production? With ZenML, you can build a reproducible pipeline to test them on real data and make a data-driven decision.
+
+ ```python
+ from zenml import pipeline, step
+ import pandas as pd
+
+ @step
+ def load_real_conversations() -> pd.DataFrame:
+     """Load actual customer queries from a feature store."""
+     return load_from_feature_store("customer_queries_sample_1k")
+
+ @step
+ def run_architecture_comparison(queries: pd.DataFrame) -> dict:
+     """Test three different agent architectures on the same data."""
+     architectures = {
+         "single_agent": SingleAgentRAG(),
+         "multi_specialist": MultiSpecialistAgents(),
+         "hierarchical": HierarchicalAgentTeam()
+     }
+
+     results = {}
+     for name, agent in architectures.items():
+         # ZenML automatically versions the agent's code, prompts, and tools
+         results[name] = agent.batch_process(queries)
+     return results
+
+ @step
+ def evaluate_and_decide(results: dict) -> str:
+     """Evaluate results and generate a recommendation report."""
+     # Compare architectures on quality, cost, latency, etc.
+     evaluation_df = evaluate_results(results)
+
+     # Generate a rich report comparing the architectures
+     report = create_comparison_report(evaluation_df)
+
+     # Automatically tag the winning architecture for a staging deployment
+     winner = evaluation_df.sort_values("overall_score").iloc[0]
+     tag_for_staging(winner["architecture_name"])
+
+     return report
+
+ @pipeline
+ def compare_agent_architectures():
+     """Your new Friday afternoon ritual: data-driven agent decisions."""
+     queries = load_real_conversations()
+     results = run_architecture_comparison(queries)
+     report = evaluate_and_decide(results)
+
+ if __name__ == "__main__":
+     # Run locally, compare results in the ZenML dashboard
+     compare_agent_architectures()
+ ```
+
+ **The Result:** A clear winner is selected based on data, not opinions. You have full lineage from the test data and agent versions to the final report and deployment decision.
+
+ ## 🔄 The AI Development Lifecycle with ZenML
+
+ ### From Chaos to Process
+
+ ![Development lifecycle](docs/book/.gitbook/assets/readme_development_lifecycle.png)
+
+ <details>
+ <summary><b>Click to see your new, structured workflow</b></summary>
+
+ ### Your New Workflow
+
+ **Monday: Quick Prototype**
+ ```python
+ # Start with a local script, just like always
+ agent = LangGraphAgent(prompt="You are a helpful assistant...")
+ response = agent.chat("Help me with my order")
+ ```
+
+ **Tuesday: Make it a Pipeline**
+ ```python
+ # Wrap your code in a ZenML step to make it reproducible
+ @step
+ def customer_service_agent(query: str) -> str:
+     return agent.chat(query)
+ ```
+
+ **Wednesday: Add Evaluation**
+ ```python
+ # Test on real data, not toy examples
+ @pipeline
+ def eval_pipeline():
+     test_data = load_production_samples()
+     responses = customer_service_agent.map(test_data)
+     scores = evaluate_responses(responses)
+     track_experiment(scores)
+ ```
+
+ **Thursday: Compare Architectures**
+ ```python
+ # Make data-driven architecture decisions
+ results = compare_architectures(
+     baseline="current_prod",
+     challenger="new_multiagent_v2"
+ )
+ ```
+
+ **Friday: Ship with Confidence**
+ ```python
+ # Deploy the new agent with the same command you use for ML models
+ zenml stack deploy agent-prod --model="customer_service:challenger"
+ ```
+ </details>
+
+ ## 🚀 Get Started (5 minutes)
+
+ ### For ML Engineers Ready to Tame AI
+
+ ```bash
+ # You know this drill
+ pip install "zenml[llm]" # Includes LangChain, LlamaIndex integrations
+
+ # Initialize (your ML pipelines still work!)
+ zenml init
+
+ # Pull our agent evaluation template
+ zenml init --template agent-evaluation-starter
+ ```
+
+ ### Your First AI Pipeline
+
+ ```python
+ # look_familiar.py
+ from zenml import pipeline, step
+
+ @step
+ def run_my_agent(test_queries: list[str]) -> list[str]:
+     """Your existing agent code, now with MLOps superpowers."""
+     # Use ANY framework - LangGraph, CrewAI, raw OpenAI
+     agent = YourExistingAgent()
+
+     # Automatic versioning of prompts, tools, code, and configs
+     return [agent.run(q) for q in test_queries]
+
+ @step
+ def evaluate_responses(queries: list[str], responses: list[str]) -> dict:
+     """LLM judges + your custom business metrics."""
+     quality = llm_judge(queries, responses)
+     latency = measure_response_times()
+     costs = calculate_token_usage()
+
+     return {
+         "quality": quality.mean(),
+         "p95_latency": latency.quantile(0.95),
+         "cost_per_query": costs.mean()
+     }
+
+ @pipeline
+ def my_first_agent_pipeline():
+     # Look ma, no YAML!
+     queries = ["How do I return an item?", "What's your refund policy?"]
+     responses = run_my_agent(queries)
+     metrics = evaluate_responses(queries, responses)
+
+     # Metrics are auto-logged, versioned, and comparable in the dashboard
+     return metrics
+
+ if __name__ == "__main__":
+     my_first_agent_pipeline()
+     print("Check your dashboard: http://localhost:8080")
+ ```
+
+ ## 📚 Learn More
+
+ ### 🖼️ Getting Started Resources
+
+ The best way to learn about ZenML is through our comprehensive documentation and tutorials:
+
+ - **[Starter Guide](https://docs.zenml.io/user-guides/starter-guide)** - From zero to production in 30 minutes
+ - **[LLMOps Guide](https://docs.zenml.io/user-guides/llmops-guide)** - Specific patterns for LLM applications
+ - **[SDK Reference](https://sdkdocs.zenml.io/)** - Complete API documentation
+
+ For visual learners, start with this 11-minute introduction:
+
+ [![Introductory Youtube Video](docs/book/.gitbook/assets/readme_youtube_thumbnail.png)](https://www.youtube.com/watch?v=wEVwIkDvUPs)
+
+ ### 📖 Production Examples
+
+ 1. **[E2E Batch Inference](examples/e2e/)** - Complete MLOps pipeline with feature engineering
+ 2. **[LLM RAG Pipeline](https://github.com/zenml-io/zenml-projects/tree/main/llm-complete-guide)** - Production RAG with evaluation loops
+ 3. **[Agentic Workflow (Deep Research)](https://github.com/zenml-io/zenml-projects/tree/main/deep_research)** - Orchestrate your agents with ZenML
+ 4. **[Fine-tuning Pipeline](https://github.com/zenml-io/zenml-projects/tree/main/gamesense)** - Fine-tune and deploy LLMs
+
+ ### 🏢 Deployment Options
+
+ **For Teams:**
+ - **[Self-hosted](https://docs.zenml.io/getting-started/deploying-zenml)** - Deploy on your infrastructure with Helm/Docker
+ - **[ZenML Pro](https://cloud.zenml.io/?utm_source=readme)** - Managed service with enterprise support (free trial)
+
+ **Infrastructure Requirements:**
+ - Kubernetes cluster (or local Docker)
+ - Object storage (S3/GCS/Azure)
+ - PostgreSQL database
+ - _[Complete requirements](https://docs.zenml.io/getting-started/deploying-zenml/deploy-with-helm)_
+
+ ### 🎓 Books & Resources
+
+ <div align="center">
+ <a href="https://www.amazon.com/LLM-Engineers-Handbook-engineering-production/dp/1836200072">
+ <img src="docs/book/.gitbook/assets/llm_engineering_handbook_cover.jpg" alt="LLM Engineer's Handbook Cover" width="200"/>
+ </a>
+ <a href="https://www.amazon.com/-/en/Andrew-McMahon/dp/1837631964">
+ <img src="docs/book/.gitbook/assets/ml_engineering_with_python.jpg" alt="Machine Learning Engineering with Python Cover" width="200"/>
+ </a>
+ </div>
+
+ ZenML is featured in these comprehensive guides to production AI systems.
+
+ ## 🤝 Join ML Engineers Building the Future of AI
+
+ **You're Not Alone:**
+ - 💬 [Slack Community](https://zenml.io/slack) - 3000+ ML engineers building with ZenML.
+ - 🐛 [GitHub Issues](https://github.com/zenml-io/zenml/issues) - Bug reports and feature requests.
+ - 📧 [Enterprise Support](https://zenml.io/pro) - SLAs, dedicated support, professional services.
+
+ **Real Engineers, Real Stories:**
+ > "Same platform for our sklearn models and our RAG pipeline. DevOps loves us now."
+ > - ML Platform Lead, European Bank
+
+ > "We went from 'YOLO prompt updates' to proper evaluation pipelines. Game changer."
+ > - Senior ML Engineer, Fortune 500 Retailer
+
+ > "Finally, I can explain to my PM why agent v2 is actually worse than v1. With data!"
+ > - Staff Engineer, Series B Startup
+
+ **Contribute:**
+ - 🌟 [Star us on GitHub](https://github.com/zenml-io/zenml/stargazers) - Help others discover ZenML
+ - 🤝 [Contributing Guide](CONTRIBUTING.md) - Start with [`good-first-issue`](https://github.com/issues?q=is%3Aopen+is%3Aissue+archived%3Afalse+user%3Azenml-io+label%3A%22good+first+issue%22)
+ - 💻 [Write Integrations](https://docs.zenml.io/how-to/stack-deployment/implement-a-custom-integration) - Add your favorite tools
+
+ **Stay Updated:**
+ - 🗺 [Public Roadmap](https://zenml.io/roadmap) - See what's coming next
+ - 📰 [Blog](https://zenml.io/blog) - Best practices and case studies
+ - 🎙 [Podcast](https://zenml.io/podcast) - Interviews with ML practitioners
+
+ ## ❓ FAQs from ML Engineers Like You
+
+ **Q: "Do I need to rewrite my agents or models to use ZenML?"**
+ A: No. Wrap your existing code in a `@step`. Keep using Scikit-Learn, PyTorch, LangGraph, LlamaIndex, or raw API calls. ZenML orchestrates your tools, it doesn't replace them.
+
+ **Q: "How is this different from LangSmith/Langfuse?"**
+ A: They provide excellent observability for LLM applications. We orchestrate the **full MLOps lifecycle for your entire AI stack**. With ZenML, you manage both your classical ML models and your AI agents in one unified framework, from development and evaluation all the way to production deployment.
+
+ **Q: "Can I use my existing MLflow/W&B setup?"**
+ A: Yes! We integrate with both. Your experiments, our pipelines.
+
+ **Q: "Is this just MLflow with extra steps?"**
+ A: No. MLflow tracks experiments. We orchestrate the entire development process – from training and evaluation to deployment and monitoring – for both models and agents.
+
+ **Q: "What about cost? I can't afford another platform."**
+ A: ZenML's open-source version is free forever. You likely already have the required infrastructure (like a Kubernetes cluster and object storage). We just help you make better use of it for MLOps.
+
+ ### 🛠 VS Code Extension
+
+ Manage pipelines directly from your editor:
+
+ <details>
+ <summary>🖥️ VS Code Extension in Action!</summary>
+ <div align="center">
+ <img width="60%" src="docs/book/.gitbook/assets/zenml-extension-shortened.gif" alt="ZenML Extension">
+ </div>
+ </details>
+
+ Install from [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=ZenML.zenml-vscode).
+
+ ## 📜 License
+
+ ZenML is distributed under the terms of the Apache License Version 2.0. See
+ [LICENSE](LICENSE) for details.