zenml-nightly 0.84.0.dev20250713__py3-none-any.whl → 0.84.0.dev20250715__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zenml/VERSION CHANGED
@@ -1 +1 @@
-0.84.0.dev20250713
+0.84.0.dev20250715
zenml/cli/base.py CHANGED
@@ -92,7 +92,7 @@ ZENML_PROJECT_TEMPLATES = dict(
     ),
     llm_finetuning=ZenMLProjectTemplateLocation(
         github_url="zenml-io/template-llm-finetuning",
-        github_tag="2024.11.28",  # Make sure it is aligned with .github/workflows/update-templates-to-examples.yml
+        github_tag="2025.07.14",  # Make sure it is aligned with .github/workflows/update-templates-to-examples.yml
     ),
 )
 
zenml_nightly-0.84.0.dev20250715.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: zenml-nightly
-Version: 0.84.0.dev20250713
+Version: 0.84.0.dev20250715
 Summary: ZenML: Write production-ready ML code.
 License: Apache-2.0
 Keywords: machine learning,production,pipeline,mlops,devops
@@ -144,20 +144,17 @@ Project-URL: Homepage, https://zenml.io
 Project-URL: Repository, https://github.com/zenml-io/zenml
 Description-Content-Type: text/markdown
 
-<div align="center">
-  <img referrerpolicy="no-referrer-when-downgrade" src="https://static.scarf.sh/a.png?x-pxid=0fcbab94-8fbe-4a38-93e8-c2348450a42e" />
-  <h1 align="center">MLOps for Reliable AI - From Classical ML to Agents</h1>
-  <h3 align="center">Your unified toolkit for shipping everything from decision trees to complex AI agents, built on the MLOps principles you already trust.</h3>
-</div>
-
 <div align="center">
 
   <!-- PROJECT LOGO -->
   <br />
   <a href="https://zenml.io">
-    <img alt="ZenML Logo" src="docs/book/.gitbook/assets/header.png" alt="ZenML Logo">
+    <img src="docs/book/.gitbook/assets/header.png" alt="ZenML Header">
   </a>
   <br />
+  <div align="center">
+    <h3 align="center">Your unified toolkit for shipping everything from decision trees to complex AI agents, built on the MLOps principles you already trust.</h3>
+  </div>
 
   [![PyPi][pypi-shield]][pypi-url]
   [![PyPi][pypiversion-shield]][pypi-url]
@@ -224,172 +221,193 @@ With ZenML, you're not replacing your knowledge; you're extending it. Use the pi
 
 ## 💻 See It In Action: Multi-Agent Architecture Comparison
 
-**The Challenge:** Your team built three different customer service agents. Which one should go to production? With ZenML, you can build a reproducible pipeline to test them on real data and make a data-driven decision.
+**The Challenge:** Your team built three different customer service agents. Which one should go to production? With ZenML, you can build a reproducible pipeline to test them on real data and make a data-driven decision, with full observability via LangGraph, LiteLLM & Langfuse.
+
+https://github.com/user-attachments/assets/edeb314c-fe07-41ba-b083-cd9ab11db4a7
 
 ```python
 from zenml import pipeline, step
+from zenml.types import HTMLString
 import pandas as pd
 
 @step
 def load_real_conversations() -> pd.DataFrame:
-    """Load actual customer queries from a feature store."""
-    return load_from_feature_store("customer_queries_sample_1k")
+    """Load customer service queries for testing."""
+    return load_customer_queries()
+
+@step
+def train_intent_classifier(queries: pd.DataFrame):
+    """Train a scikit-learn classifier alongside your agents."""
+    return train_sklearn_pipeline(queries)
+
+@step
+def load_prompts() -> dict:
+    """Load prompts as versioned ZenML artifacts."""
+    return load_agent_prompts_from_files()
 
 @step
-def run_architecture_comparison(queries: pd.DataFrame) -> dict:
+def run_architecture_comparison(queries: pd.DataFrame, classifier, prompts: dict) -> tuple:
     """Test three different agent architectures on the same data."""
     architectures = {
-        "single_agent": SingleAgentRAG(),
-        "multi_specialist": MultiSpecialistAgents(),
-        "hierarchical": HierarchicalAgentTeam()
+        "single_agent": SingleAgentRAG(prompts),
+        "multi_specialist": MultiSpecialistAgents(prompts),
+        "langgraph_workflow": LangGraphAgent(prompts)  # Real LangGraph implementation!
     }
 
-    results = {}
-    for name, agent in architectures.items():
-        # ZenML automatically versions the agent's code, prompts, and tools
-        results[name] = agent.batch_process(queries)
-    return results
+    # ZenML automatically versions agent code, prompts, and configurations
+    # LiteLLM provides unified access to 100+ LLM providers
+    # LangGraph orchestrates a multi-agent graph
+    # Langfuse tracks costs, performance, and traces for full observability
+    results = test_all_architectures(queries, architectures)
+    mermaid_diagram = generate_langgraph_visualization()
+
+    return results, mermaid_diagram
 
 @step
-def evaluate_and_decide(results: dict) -> str:
-    """Evaluate results and generate a recommendation report."""
-    # Compare architectures on quality, cost, latency, etc.
-    evaluation_df = evaluate_results(results)
-
-    # Generate a rich report comparing the architectures
-    report = create_comparison_report(evaluation_df)
-
-    # Automatically tag the winning architecture for a staging deployment
-    winner = evaluation_df.sort_values("overall_score").iloc[0]
-    tag_for_staging(winner["architecture_name"])
-
-    return report
+def evaluate_and_decide(queries: pd.DataFrame, results: dict) -> HTMLString:
+    """Generate a beautiful HTML report with winner selection."""
+    return create_styled_comparison_report(results)
 
 @pipeline
 def compare_agent_architectures():
-    """Your new Friday afternoon ritual: data-driven agent decisions."""
+    """Data-driven agent architecture decisions with full MLOps tracking."""
     queries = load_real_conversations()
-    results = run_architecture_comparison(queries)
-    report = evaluate_and_decide(results)
+    prompts = load_prompts()  # Prompts as versioned artifacts
+    classifier = train_intent_classifier(queries)
+    results, viz = run_architecture_comparison(queries, classifier, prompts)
+    report = evaluate_and_decide(queries, results)
 
 if __name__ == "__main__":
-    # Run locally, compare results in the ZenML dashboard
     compare_agent_architectures()
+    # 🎯 Rich visualizations automatically appear in ZenML dashboard
 ```
 
-**The Result:** A clear winner is selected based on data, not opinions. You have full lineage from the test data and agent versions to the final report and deployment decision.
-
-## 🔄 The AI Development Lifecycle with ZenML
+**🚀 [See the complete working example →](examples/agent_comparison/)**
 
-### From Chaos to Process
+**The Result:** A clear winner is selected based on data, not opinions. You have full lineage from the test data and agent versions to the final report and deployment decision.
 
 ![Development lifecycle](docs/book/.gitbook/assets/readme_development_lifecycle.png)
 
-<details>
-<summary><b>Click to see your new, structured workflow</b></summary>
-
-### Your New Workflow
-
-**Monday: Quick Prototype**
-```python
-# Start with a local script, just like always
-agent = LangGraphAgent(prompt="You are a helpful assistant...")
-response = agent.chat("Help me with my order")
-```
+## 🚀 Get Started (5 minutes)
 
-**Tuesday: Make it a Pipeline**
-```python
-# Wrap your code in a ZenML step to make it reproducible
-@step
-def customer_service_agent(query: str) -> str:
-    return agent.chat(query)
-```
+### 🏗️ Architecture Overview
 
-**Wednesday: Add Evaluation**
-```python
-# Test on real data, not toy examples
-@pipeline
-def eval_pipeline():
-    test_data = load_production_samples()
-    responses = customer_service_agent.map(test_data)
-    scores = evaluate_responses(responses)
-    track_experiment(scores)
-```
+ZenML uses a **client-server architecture** with an integrated web dashboard ([zenml-io/zenml-dashboard](https://github.com/zenml-io/zenml-dashboard)) for pipeline visualization and management:
 
-**Thursday: Compare Architectures**
-```python
-# Make data-driven architecture decisions
-results = compare_architectures(
-    baseline="current_prod",
-    challenger="new_multiagent_v2"
-)
-```
-
-**Friday: Ship with Confidence**
-```python
-# Deploy the new agent with the same command you use for ML models
-python agent_deployment.py --env=prod --model="customer_service:challenger"
-```
-</details>
-
-## 🚀 Get Started (5 minutes)
-
-### For ML Engineers Ready to Tame AI
+- **Local Development**: `pip install "zenml[server]"` - runs both client and server locally
+- **Production**: Deploy server separately, connect with `pip install zenml` + `zenml login <server-url>`
 
 ```bash
-# You know this drill
-pip install zenml  # Includes LangChain, LlamaIndex integrations
-zenml integration install langchain llamaindex
+# Install ZenML with server capabilities
+pip install "zenml[server]"
 
-# Initialize (your ML pipelines still work!)
+# Install required dependencies
+pip install scikit-learn openai numpy
+
+# Initialize your ZenML repository
 zenml init
 
-# Pull our agent evaluation template
-zenml init --template agent-evaluation-starter
+# Start local server or connect to a remote one
+zenml login
+
+# Set OpenAI API key (optional)
+export OPENAI_API_KEY=sk-svv....
 ```
 
-### Your First AI Pipeline
+### Your First Pipeline (2 minutes)
 
 ```python
-# look_familiar.py
+# simple_pipeline.py
 from zenml import pipeline, step
+from sklearn.ensemble import RandomForestClassifier
+from sklearn.datasets import make_classification
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import accuracy_score
+from typing import Tuple
+from typing_extensions import Annotated
+import numpy as np
 
 @step
-def run_my_agent(test_queries: list[str]) -> list[str]:
-    """Your existing agent code, now with MLOps superpowers."""
-    # Use ANY framework - LangGraph, CrewAI, raw OpenAI
-    agent = YourExistingAgent()
-
-    # Automatic versioning of prompts, tools, code, and configs
-    return [agent.run(q) for q in test_queries]
+def create_dataset() -> Tuple[
+    Annotated[np.ndarray, "X_train"],
+    Annotated[np.ndarray, "X_test"],
+    Annotated[np.ndarray, "y_train"],
+    Annotated[np.ndarray, "y_test"]
+]:
+    """Generate a simple classification dataset."""
+    X, y = make_classification(n_samples=100, n_features=4, n_classes=2, random_state=42)
+    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
+    return X_train, X_test, y_train, y_test
 
 @step
-def evaluate_responses(queries: list[str], responses: list[str]) -> dict:
-    """LLM judges + your custom business metrics."""
-    quality = llm_judge(queries, responses)
-    latency = measure_response_times()
-    costs = calculate_token_usage()
-
-    return {
-        "quality": quality.mean(),
-        "p95_latency": latency.quantile(0.95),
-        "cost_per_query": costs.mean()
-    }
+def train_model(X_train: np.ndarray, y_train: np.ndarray) -> RandomForestClassifier:
+    """Train a simple sklearn model."""
+    model = RandomForestClassifier(n_estimators=10, random_state=42)
+    model.fit(X_train, y_train)
+    return model
+
+@step
+def evaluate_model(model: RandomForestClassifier, X_test: np.ndarray, y_test: np.ndarray) -> float:
+    """Evaluate the model accuracy."""
+    predictions = model.predict(X_test)
+    return accuracy_score(y_test, predictions)
+
+@step
+def generate_summary(accuracy: float) -> str:
+    """Use OpenAI to generate a model summary."""
+    import openai
+
+    client = openai.OpenAI()  # Set OPENAI_API_KEY environment variable
+    response = client.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[{
+            "role": "user",
+            "content": f"Write a brief summary of an ML model with {accuracy:.2%} accuracy."
+        }],
+        max_tokens=50
+    )
+    return response.choices[0].message.content
 
 @pipeline
-def my_first_agent_pipeline():
-    # Look ma, no YAML!
-    queries = ["How do I return an item?", "What's your refund policy?"]
-    responses = run_my_agent(queries)
-    metrics = evaluate_responses(queries, responses)
-
-    # Metrics are auto-logged, versioned, and comparable in the dashboard
-    return metrics
+def simple_ml_pipeline():
+    """A simple pipeline combining sklearn and OpenAI."""
+    X_train, X_test, y_train, y_test = create_dataset()
+    model = train_model(X_train, y_train)
+    accuracy = evaluate_model(model, X_test, y_test)
+    try:
+        import openai  # noqa: F401
+        generate_summary(accuracy)
+    except ImportError:
+        print("OpenAI is not installed. Skipping summary generation.")
+
 
 if __name__ == "__main__":
-    my_first_agent_pipeline()
-    print("Check your dashboard: http://localhost:8080")
+    result = simple_ml_pipeline()
+```
+
+Run it:
+```bash
+export OPENAI_API_KEY="your-api-key-here"
+python simple_pipeline.py
+```
+
+## 🗣️ Chat With Your Pipelines: ZenML MCP Server
+
+Stop clicking through dashboards to understand your ML workflows. The **[ZenML MCP Server](https://github.com/zenml-io/mcp-zenml)** lets you query your pipelines, analyze runs, and trigger deployments using natural language through Claude Desktop, Cursor, or any MCP-compatible client.
+
 ```
+💬 "Which pipeline runs failed this week and why?"
+📊 "Show me accuracy metrics for all my customer churn models"
+🚀 "Trigger the latest fraud detection pipeline with production data"
+```
+
+**Quick Setup:**
+1. Download the `.dxt` file from [zenml-io/mcp-zenml](https://github.com/zenml-io/mcp-zenml)
+2. Drag it into Claude Desktop settings
+3. Add your ZenML server URL and API key
+4. Start chatting with your ML infrastructure
+
+The MCP (Model Context Protocol) integration transforms your ZenML metadata into conversational insights, making pipeline debugging and analysis as easy as asking a question. Perfect for teams who want to democratize access to ML operations without requiring dashboard expertise.
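Under the hood, these questions resolve to ordinary ZenML metadata queries. A minimal sketch of the first one using ZenML's Python `Client` (the exact filters the MCP server applies are an assumption here):

```python
# Sketch: roughly the metadata lookup behind "Which pipeline runs failed?"
# Assumes you are connected to a ZenML server (e.g. via `zenml login`).
from zenml.client import Client

client = Client()

# Page through recent runs and keep the failed ones.
recent = client.list_pipeline_runs(size=50, sort_by="desc:created")
for run in recent.items:
    if run.status == "failed":
        print(f"{run.name}: {run.status}")
```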
 
 ## 📚 Learn More
 
@@ -399,7 +417,7 @@ The best way to learn about ZenML is through our comprehensive documentation and
 
 - **[Starter Guide](https://docs.zenml.io/user-guides/starter-guide)** - From zero to production in 30 minutes
 - **[LLMOps Guide](https://docs.zenml.io/user-guides/llmops-guide)** - Specific patterns for LLM applications
-- **[SDK Reference](https://sdkdocs.zenml.io/)** - Complete API documentation
+- **[SDK Reference](https://sdkdocs.zenml.io/)** - Complete SDK reference
 
 For visual learners, start with this 11-minute introduction:
 
@@ -407,10 +425,11 @@ For visual learners, start with this 11-minute introduction:
 
 ### 📖 Production Examples
 
-1. **[E2E Batch Inference](examples/e2e/)** - Complete MLOps pipeline with feature engineering
-2. **[LLM RAG Pipeline](https://github.com/zenml-io/zenml-projects/tree/main/llm-complete-guide)** - Production RAG with evaluation loops
-3. **[Agentic Workflow (Deep Research)](https://github.com/zenml-io/zenml-projects/tree/main/deep_research)** - Orchestrate your agents with ZenML
-4. **[Fine-tuning Pipeline](https://github.com/zenml-io/zenml-projects/tree/main/gamesense)** - Fine-tune and deploy LLMs
+1. **[Agent Architecture Comparison](examples/agent_comparison/)** - Compare AI agents with LangGraph workflows, LiteLLM integration, and automatic visualizations via custom materializers
+2. **[E2E Batch Inference](examples/e2e/)** - Complete MLOps pipeline with feature engineering
+3. **[LLM RAG Pipeline](https://github.com/zenml-io/zenml-projects/tree/main/llm-complete-guide)** - Production RAG with evaluation loops
+4. **[Agentic Workflow (Deep Research)](https://github.com/zenml-io/zenml-projects/tree/main/deep_research)** - Orchestrate your agents with ZenML
+5. **[Fine-tuning Pipeline](https://github.com/zenml-io/zenml-projects/tree/main/gamesense)** - Fine-tune and deploy LLMs
 
 ### 🏢 Deployment Options
 
@@ -419,9 +438,9 @@ For visual learners, start with this 11-minute introduction:
 - **[ZenML Pro](https://cloud.zenml.io/?utm_source=readme)** - Managed service with enterprise support (free trial)
 
 **Infrastructure Requirements:**
-- Kubernetes cluster (or local Docker)
+- Docker (or Kubernetes for production)
 - Object storage (S3/GCS/Azure)
-- PostgreSQL database
+- MySQL-compatible database (MySQL 8.0+ or MariaDB)
 - _[Complete requirements](https://docs.zenml.io/getting-started/deploying-zenml/deploy-with-helm)_
 
 ### 🎓 Books & Resources
@@ -447,23 +466,32 @@ ZenML is featured in these comprehensive guides to production AI systems.
 **Stay Updated:**
 - 🗺 [Public Roadmap](https://zenml.io/roadmap) - See what's coming next
 - 📰 [Blog](https://zenml.io/blog) - Best practices and case studies
-- 🎙 [Podcast](https://zenml.io/podcast) - Interviews with ML practitioners
+- 💬 [Slack](https://zenml.io/slack) - Talk with AI practitioners
 
 ## ❓ FAQs from ML Engineers Like You
 
 **Q: "Do I need to rewrite my agents or models to use ZenML?"**
+
 A: No. Wrap your existing code in a `@step`. Keep using `scikit-learn`, PyTorch, LangGraph, LlamaIndex, or raw API calls. ZenML orchestrates your tools, it doesn't replace them.
 
 **Q: "How is this different from LangSmith/Langfuse?"**
+
 A: They provide excellent observability for LLM applications. We orchestrate the **full MLOps lifecycle for your entire AI stack**. With ZenML, you manage both your classical ML models and your AI agents in one unified framework, from development and evaluation all the way to production deployment.
 
 **Q: "Can I use my existing MLflow/W&B setup?"**
-A: Yes! We integrate with both. Your experiments, our pipelines.
+
+A: Yes! ZenML integrates with both [MLflow](https://docs.zenml.io/stacks/experiment-trackers/mlflow) and [Weights & Biases](https://docs.zenml.io/stacks/experiment-trackers/wandb). Your experiments, our pipelines.
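As a sketch of what this looks like in practice (the component name `mlflow_tracker` below is an assumption; use whatever name your stack's experiment tracker is registered under):

```python
# Sketch: using an existing MLflow setup from a ZenML step. Assumes a stack
# whose experiment tracker component is registered as "mlflow_tracker".
import mlflow
import numpy as np
from sklearn.linear_model import LogisticRegression
from zenml import step

@step(experiment_tracker="mlflow_tracker")
def train_with_tracking(X: np.ndarray, y: np.ndarray) -> LogisticRegression:
    """ZenML opens the MLflow run; log metrics exactly as you do today."""
    model = LogisticRegression().fit(X, y)
    mlflow.log_metric("train_accuracy", model.score(X, y))
    return model
```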
 
 **Q: "Is this just MLflow with extra steps?"**
+
 A: No. MLflow tracks experiments. We orchestrate the entire development process – from training and evaluation to deployment and monitoring – for both models and agents.
 
+**Q: "How do I configure ZenML with Kubernetes?"**
+
+A: ZenML integrates with Kubernetes through the native Kubernetes orchestrator, Kubeflow, and other K8s-based orchestrators. See our [Kubernetes orchestrator guide](https://docs.zenml.io/stacks/orchestrators/kubernetes) and [Kubeflow guide](https://docs.zenml.io/stacks/orchestrators/kubeflow), plus [deployment documentation](https://docs.zenml.io/getting-started/deploying-zenml/deploy-with-helm).
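A rough sketch of targeting the Kubernetes orchestrator from a pipeline; the component names, settings key, and resource values below are illustrative assumptions, so check the linked guides for the full options:

```python
# Sketch: running a pipeline on the Kubernetes orchestrator, assuming a stack
# registered with one, e.g.:
#   zenml orchestrator register k8s_orchestrator --flavor=kubernetes
#   zenml stack register k8s_stack -o k8s_orchestrator -a <artifact_store> --set
from zenml import pipeline
from zenml.integrations.kubernetes.flavors import KubernetesOrchestratorSettings

k8s_settings = KubernetesOrchestratorSettings(
    pod_settings={
        # Illustrative resource requests for each step's pod
        "resources": {"requests": {"cpu": "1", "memory": "2Gi"}},
    },
)

@pipeline(settings={"orchestrator.kubernetes": k8s_settings})
def train_on_kubernetes():
    ...
```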
+
 **Q: "What about cost? I can't afford another platform."**
+
 A: ZenML's open-source version is free forever. You likely already have the required infrastructure (like a Kubernetes cluster and object storage). We just help you make better use of it for MLOps.
 
 ### 🛠 VS Code Extension
zenml_nightly-0.84.0.dev20250715.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
 zenml/README.md,sha256=827dekbOWAs1BpW7VF1a4d7EbwPbjwccX-2zdXBENZo,1777
-zenml/VERSION,sha256=pOBmyBiMZi8XhXV265mHje-NdA4WQlAzLA1N0WGNZqo,19
+zenml/VERSION,sha256=qFJ1roBcwmgzUH8Fqj7IiRujTYkq1OMfBINk9yLyCnM,19
 zenml/__init__.py,sha256=r7JUg2SVDf_dPhS7iU6vudKusEqK4ics7_jFMZhq0o4,2731
 zenml/actions/__init__.py,sha256=mrt6wPo73iKRxK754_NqsGyJ3buW7RnVeIGXr1xEw8Y,681
 zenml/actions/base_action.py,sha256=UcaHev6BTuLDwuswnyaPjdA8AgUqB5xPZ-lRtuvf2FU,25553
@@ -30,7 +30,7 @@ zenml/cli/__init__.py,sha256=Q-DTKGMxUCyN8aAchAvjoWJeZ3f55Hnt4FpagiNHSbU,75655
 zenml/cli/annotator.py,sha256=JRR7_TJOWKyiKGv1kwSjG1Ay6RBWPVgm0X-D0uSBlyE,6976
 zenml/cli/artifact.py,sha256=7lsAS52DroBTFkFWxkyb-lIDOGP5jPL_Se_RDG_2jgg,9564
 zenml/cli/authorized_device.py,sha256=_1PzE3BM2SmwtuzRliEMStvbBRKWQmg_lbwCRtn8dBg,4324
-zenml/cli/base.py,sha256=oppsoYAgZFPZjw17Bp6U2ufhLQpGAL14Ha_xl8EFXo4,28414
+zenml/cli/base.py,sha256=dxSs5cMv-G9qmcAj9y6hOstItGBe0quF9do7D56KwXU,28414
 zenml/cli/cli.py,sha256=Pnq468IZ4oqzluA_gZ5PsrdnSPEyHcasIH-xI1_8Y_Q,5454
 zenml/cli/code_repository.py,sha256=6T3Hgv0vxNGhZ4Lb5TDw8t0Ihzv0qQS6ojFoflQ2de8,9446
 zenml/cli/config.py,sha256=UI_j0a_zRgEUd2q0zuOi4UgbjiCYjMJ_Y9iSg-wi8Oo,2768
@@ -1347,8 +1347,8 @@ zenml/zen_stores/secrets_stores/sql_secrets_store.py,sha256=LPFW757WCJLP1S8vrvjs
 zenml/zen_stores/sql_zen_store.py,sha256=cXcKBBRKiwM2GtwHzmJb6tiN5NhGWo9n8SWnL1b4_WE,491150
 zenml/zen_stores/template_utils.py,sha256=iCXrXpqzVTY7roqop4Eh9J7DmLW6PQeILZexmw_l3b8,10074
 zenml/zen_stores/zen_store_interface.py,sha256=weiSULdI9AsbCE10a5TcwtybX-BJs9hKhjPJnTapWv4,93023
-zenml_nightly-0.84.0.dev20250713.dist-info/LICENSE,sha256=wbnfEnXnafPbqwANHkV6LUsPKOtdpsd-SNw37rogLtc,11359
-zenml_nightly-0.84.0.dev20250713.dist-info/METADATA,sha256=K66xeC3bc0a_tyWAWbxHeUKUYTYPWUIvX1w8rdyDMc0,21699
-zenml_nightly-0.84.0.dev20250713.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-zenml_nightly-0.84.0.dev20250713.dist-info/entry_points.txt,sha256=QK3ETQE0YswAM2mWypNMOv8TLtr7EjnqAFq1br_jEFE,43
-zenml_nightly-0.84.0.dev20250713.dist-info/RECORD,,
+zenml_nightly-0.84.0.dev20250715.dist-info/LICENSE,sha256=wbnfEnXnafPbqwANHkV6LUsPKOtdpsd-SNw37rogLtc,11359
+zenml_nightly-0.84.0.dev20250715.dist-info/METADATA,sha256=WXHKOfTStxHVpYRJ9SsPCRiXyfoJWXH5UvlsCGSLo7k,24296
+zenml_nightly-0.84.0.dev20250715.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+zenml_nightly-0.84.0.dev20250715.dist-info/entry_points.txt,sha256=QK3ETQE0YswAM2mWypNMOv8TLtr7EjnqAFq1br_jEFE,43
+zenml_nightly-0.84.0.dev20250715.dist-info/RECORD,,