adaptive-iteration 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,47 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ dist/
9
+ *.egg-info/
10
+ .eggs/
11
+ *.egg
12
+
13
+ # Virtual environments
14
+ .venv/
15
+ venv/
16
+ env/
17
+ ENV/
18
+
19
+ # Pytest
20
+ .pytest_cache/
21
+ .coverage
22
+ htmlcov/
23
+
24
+ # Data / ledger files
25
+ data/
26
+ *.json
27
+ *.tsv
28
+ *.csv
29
+
30
+ # Secrets
31
+ .env
32
+ *.env
33
+ secrets/
34
+ *-key
35
+ *-token
36
+ *_key
37
+ *_token
38
+
39
+ # macOS
40
+ .DS_Store
41
+ .AppleDouble
42
+
43
+ # IDE
44
+ .idea/
45
+ .vscode/
46
+ *.swp
47
+ *.swo
@@ -0,0 +1,165 @@
1
+ Metadata-Version: 2.4
2
+ Name: adaptive-iteration
3
+ Version: 0.1.0
4
+ Summary: Domain-agnostic adaptive experimentation framework: experiment → measure → learn → challenge.
5
+ Project-URL: Homepage, https://github.com/imaknas/adaptive-iteration
6
+ Project-URL: Repository, https://github.com/imaknas/adaptive-iteration
7
+ License: MIT
8
+ Requires-Python: >=3.10
9
+ Requires-Dist: openai>=1.0.0
10
+ Description-Content-Type: text/markdown
11
+
12
+ # adaptive_iteration
13
+
14
+ **Domain-agnostic adaptive experimentation framework.**
15
+
16
+ A lightweight Python framework that abstracts the **experiment → measure → learn → challenge**
17
+ cycle into reusable components. Bring your own domain; the framework handles the rest.
18
+
19
+ ---
20
+
21
+ ## What It Is
22
+
23
+ ```
24
+ adaptive_iteration/
25
+ ├── core/ # pure Python stdlib, zero domain deps
26
+ │ ├── experiment.py # Experiment / Variant dataclasses + ExperimentState
27
+ │ ├── ledger.py # JSON append-only results ledger
28
+ │ ├── analyzer.py # top/bottom performer detection, variance per dimension
29
+ │ ├── hypothesis.py # HypothesisEngine: ledger + analysis → LLM → Experiment candidates
30
+ │ └── config.py # AdaptiveConfig: JSON config + winner hints
31
+ └── adapters/
32
+ ├── base.py # DomainAdapter ABC (3 methods to implement)
33
+ └── short_video.py # Example adapter: YouTube + Instagram (simulated data)
34
+ ```
35
+
36
+ ---
37
+
38
+ ## Installation
39
+
40
+ ```bash
41
+ # with uv (recommended)
42
+ uv add adaptive-iteration
43
+
44
+ # with pip
45
+ pip install adaptive-iteration
46
+ ```
47
+
48
+ Requires Python 3.10+. The only runtime dependency is `openai` (used only in
49
+ `HypothesisEngine`; the rest of `core/` is stdlib-only).
50
+
51
+ ---
52
+
53
+ ## Quick Start
54
+
55
+ ```python
56
+ import os
57
+ from pathlib import Path
58
+ from adaptive_iteration.core.ledger import Ledger
59
+ from adaptive_iteration.core.analyzer import Analyzer
60
+ from adaptive_iteration.core.hypothesis import HypothesisEngine
61
+ from adaptive_iteration.core.config import AdaptiveConfig
62
+
63
+ # 1. Load / create config
64
+ cfg = AdaptiveConfig(Path("data/adaptive_config.json"))
65
+ cfg.update({"domain": "my_domain", "primary_metric": "conversion_rate"})
66
+
67
+ # 2. Open ledger
68
+ ledger = Ledger(Path("data/adaptive_ledger.json"))
69
+
70
+ # 3. Record experiment results
71
+ ledger.append(
72
+ domain="my_domain",
73
+ experiment_id="exp001",
74
+ variable="cta_style",
75
+ variant="soft",
76
+ metric_values={"conversion_rate": 4.2, "bounce_rate": 31.0},
77
+ winner=True,
78
+ )
79
+
80
+ # 4. Analyse
81
+ analyzer = Analyzer(ledger, primary_metric="conversion_rate")
82
+ analysis = analyzer.analyze(domain="my_domain")
83
+ print(f"Top performers: {[p['variant'] for p in analysis.top_performers]}")
84
+ print(f"Winning patterns: {analysis.winning_patterns}")
85
+
86
+ # 5. Generate next hypotheses (requires OPENAI_API_KEY)
87
+ engine = HypothesisEngine(
88
+ ledger=ledger,
89
+ api_key=os.environ["OPENAI_API_KEY"],
90
+ )
91
+ candidates = engine.generate(domain="my_domain", analysis=analysis)
92
+ for exp in candidates:
93
+ print(f" [{exp.tier}] {exp.variable}: {exp.description}")
94
+ ```
95
+
96
+ ---
97
+
98
+ ## Integrating a New Domain
99
+
100
+ Subclass `DomainAdapter` and implement three methods:
101
+
102
+ ```python
103
+ from adaptive_iteration.adapters.base import DomainAdapter
104
+
105
+ class MyDomainAdapter(DomainAdapter):
106
+
107
+ def collect_metrics(self, item_ids: list[str]) -> list[dict]:
108
+ """Pull raw metrics from your external system for each item_id."""
109
+ results = []
110
+ for item_id in item_ids:
111
+ raw = my_api.get_metrics(item_id)
112
+ results.append({"id": item_id, **raw})
113
+ return results
114
+
115
+ def get_signals(self, metrics: list[dict]) -> dict:
116
+ """Normalise to framework signals."""
117
+ primary = sum(m["conversion_rate"] for m in metrics) / len(metrics)
118
+ return {
119
+ "primary_metric": primary,
120
+ "secondary_metrics": {
121
+ "bounce_rate": sum(m["bounce_rate"] for m in metrics) / len(metrics),
122
+ },
123
+ }
124
+
125
+ def format_context(self, top_items, bottom_items) -> str:
126
+ lines = ["Top performers:"]
127
+ for item in top_items:
128
+ lines.append(f" [{item['id']}] conversion_rate={item.get('conversion_rate')}")
129
+ lines.append("Bottom performers:")
130
+ for item in bottom_items:
131
+ lines.append(f" [{item['id']}] conversion_rate={item.get('conversion_rate')}")
132
+ return "\n".join(lines)
133
+ ```
134
+
135
+ See `adapters/short_video.py` for a complete reference implementation.
136
+
137
+ ---
138
+
139
+ ## Experiment Modes
140
+
141
+ | Mode | When to use | Description |
142
+ |------|-------------|-------------|
143
+ | `interleaved` | Tier 2 | Rotate variants across different items; maximises volume |
144
+ | `paired` | Tier 1 | Generate two variants for the same item; cleaner causal inference |
145
+
146
+ Tier 1 experiments (high-impact variables) should use `paired` mode to eliminate
147
+ confounding factors introduced by item-level differences.
148
+
149
+ ---
150
+
151
+ ## Import Verification
152
+
153
+ ```bash
154
+ python3 -c "from adaptive_iteration.core.experiment import Experiment; print('ok')"
155
+ ```
156
+
157
+ ---
158
+
159
+ ## Design Principles
160
+
161
+ 1. `core/` has **zero external deps** — pure Python stdlib only (`openai` in `hypothesis.py`
162
+ is lazy-imported and optional until you call `HypothesisEngine`).
163
+ 2. `DomainAdapter` is the **only** layer that touches external systems.
164
+ 3. `Ledger` is the **single source of truth** — append-only JSON, no database required.
165
+ 4. `HypothesisEngine` uses **structured JSON output** prompting so parsing is deterministic.
@@ -0,0 +1,154 @@
1
+ # adaptive_iteration
2
+
3
+ **Domain-agnostic adaptive experimentation framework.**
4
+
5
+ A lightweight Python framework that abstracts the **experiment → measure → learn → challenge**
6
+ cycle into reusable components. Bring your own domain; the framework handles the rest.
7
+
8
+ ---
9
+
10
+ ## What It Is
11
+
12
+ ```
13
+ adaptive_iteration/
14
+ ├── core/ # pure Python stdlib, zero domain deps
15
+ │ ├── experiment.py # Experiment / Variant dataclasses + ExperimentState
16
+ │ ├── ledger.py # JSON append-only results ledger
17
+ │ ├── analyzer.py # top/bottom performer detection, variance per dimension
18
+ │ ├── hypothesis.py # HypothesisEngine: ledger + analysis → LLM → Experiment candidates
19
+ │ └── config.py # AdaptiveConfig: JSON config + winner hints
20
+ └── adapters/
21
+ ├── base.py # DomainAdapter ABC (3 methods to implement)
22
+ └── short_video.py # Example adapter: YouTube + Instagram (simulated data)
23
+ ```
24
+
25
+ ---
26
+
27
+ ## Installation
28
+
29
+ ```bash
30
+ # with uv (recommended)
31
+ uv add adaptive-iteration
32
+
33
+ # with pip
34
+ pip install adaptive-iteration
35
+ ```
36
+
37
+ Requires Python 3.10+. The only runtime dependency is `openai` (used only in
38
+ `HypothesisEngine`; the rest of `core/` is stdlib-only).
39
+
40
+ ---
41
+
42
+ ## Quick Start
43
+
44
+ ```python
45
+ import os
46
+ from pathlib import Path
47
+ from adaptive_iteration.core.ledger import Ledger
48
+ from adaptive_iteration.core.analyzer import Analyzer
49
+ from adaptive_iteration.core.hypothesis import HypothesisEngine
50
+ from adaptive_iteration.core.config import AdaptiveConfig
51
+
52
+ # 1. Load / create config
53
+ cfg = AdaptiveConfig(Path("data/adaptive_config.json"))
54
+ cfg.update({"domain": "my_domain", "primary_metric": "conversion_rate"})
55
+
56
+ # 2. Open ledger
57
+ ledger = Ledger(Path("data/adaptive_ledger.json"))
58
+
59
+ # 3. Record experiment results
60
+ ledger.append(
61
+ domain="my_domain",
62
+ experiment_id="exp001",
63
+ variable="cta_style",
64
+ variant="soft",
65
+ metric_values={"conversion_rate": 4.2, "bounce_rate": 31.0},
66
+ winner=True,
67
+ )
68
+
69
+ # 4. Analyse
70
+ analyzer = Analyzer(ledger, primary_metric="conversion_rate")
71
+ analysis = analyzer.analyze(domain="my_domain")
72
+ print(f"Top performers: {[p['variant'] for p in analysis.top_performers]}")
73
+ print(f"Winning patterns: {analysis.winning_patterns}")
74
+
75
+ # 5. Generate next hypotheses (requires OPENAI_API_KEY)
76
+ engine = HypothesisEngine(
77
+ ledger=ledger,
78
+ api_key=os.environ["OPENAI_API_KEY"],
79
+ )
80
+ candidates = engine.generate(domain="my_domain", analysis=analysis)
81
+ for exp in candidates:
82
+ print(f" [{exp.tier}] {exp.variable}: {exp.description}")
83
+ ```
84
+
85
+ ---
86
+
87
+ ## Integrating a New Domain
88
+
89
+ Subclass `DomainAdapter` and implement three methods:
90
+
91
+ ```python
92
+ from adaptive_iteration.adapters.base import DomainAdapter
93
+
94
+ class MyDomainAdapter(DomainAdapter):
95
+
96
+ def collect_metrics(self, item_ids: list[str]) -> list[dict]:
97
+ """Pull raw metrics from your external system for each item_id."""
98
+ results = []
99
+ for item_id in item_ids:
100
+ raw = my_api.get_metrics(item_id)
101
+ results.append({"id": item_id, **raw})
102
+ return results
103
+
104
+ def get_signals(self, metrics: list[dict]) -> dict:
105
+ """Normalise to framework signals."""
106
+ primary = sum(m["conversion_rate"] for m in metrics) / len(metrics)
107
+ return {
108
+ "primary_metric": primary,
109
+ "secondary_metrics": {
110
+ "bounce_rate": sum(m["bounce_rate"] for m in metrics) / len(metrics),
111
+ },
112
+ }
113
+
114
+ def format_context(self, top_items, bottom_items) -> str:
115
+ lines = ["Top performers:"]
116
+ for item in top_items:
117
+ lines.append(f" [{item['id']}] conversion_rate={item.get('conversion_rate')}")
118
+ lines.append("Bottom performers:")
119
+ for item in bottom_items:
120
+ lines.append(f" [{item['id']}] conversion_rate={item.get('conversion_rate')}")
121
+ return "\n".join(lines)
122
+ ```
123
+
124
+ See `adapters/short_video.py` for a complete reference implementation.
125
+
126
+ ---
127
+
128
+ ## Experiment Modes
129
+
130
+ | Mode | When to use | Description |
131
+ |------|-------------|-------------|
132
+ | `interleaved` | Tier 2 | Rotate variants across different items; maximises volume |
133
+ | `paired` | Tier 1 | Generate two variants for the same item; cleaner causal inference |
134
+
135
+ Tier 1 experiments (high-impact variables) should use `paired` mode to eliminate
136
+ confounding factors introduced by item-level differences.
137
+
138
+ ---
139
+
140
+ ## Import Verification
141
+
142
+ ```bash
143
+ python3 -c "from adaptive_iteration.core.experiment import Experiment; print('ok')"
144
+ ```
145
+
146
+ ---
147
+
148
+ ## Design Principles
149
+
150
+ 1. `core/` has **zero external deps** — pure Python stdlib only (`openai` in `hypothesis.py`
151
+ is lazy-imported and optional until you call `HypothesisEngine`).
152
+ 2. `DomainAdapter` is the **only** layer that touches external systems.
153
+ 3. `Ledger` is the **single source of truth** — append-only JSON, no database required.
154
+ 4. `HypothesisEngine` uses **structured JSON output** prompting so parsing is deterministic.
@@ -0,0 +1,12 @@
1
+ """adaptive_iteration — Domain-agnostic adaptive experiment framework.
2
+
3
+ Import paths:
4
+ from adaptive_iteration.core.experiment import Experiment, Variant, ExperimentState
5
+ from adaptive_iteration.core.ledger import Ledger
6
+ from adaptive_iteration.core.analyzer import Analyzer
7
+ from adaptive_iteration.core.hypothesis import HypothesisEngine
8
+ from adaptive_iteration.core.config import AdaptiveConfig
9
+ from adaptive_iteration.adapters.base import DomainAdapter
10
+ from adaptive_iteration.adapters.short_video import ShortVideoAdapter
11
+ """
12
+ __version__ = "0.1.0"
@@ -0,0 +1,5 @@
1
+ # adaptive_iteration.adapters — DomainAdapter interface + example implementations
2
+ from .base import DomainAdapter
3
+ from .short_video import ShortVideoAdapter
4
+
5
+ __all__ = ["DomainAdapter", "ShortVideoAdapter"]
@@ -0,0 +1,81 @@
1
+ """adapters/base.py — DomainAdapter ABC.
2
+
3
+ Every domain (short video, blog, email, ...) must implement these three methods.
4
+ core/ never imports from here; HypothesisEngine receives the outputs as plain dicts/strings.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from abc import ABC, abstractmethod
9
+ from typing import Any
10
+
11
+
12
class DomainAdapter(ABC):
    """Abstract bridge between adaptive_iteration.core and one external system.

    Concrete domains (short video, blog, email, ...) subclass this and
    implement the three abstract methods below. core/ never imports this
    class; HypothesisEngine consumes the outputs as plain dicts / strings.

    Required methods
    ----------------
    collect_metrics(item_ids)
        Fetch raw metric data for the given item IDs. Returns one dict per
        item; each dict must carry the item's ID under some consistent key
        (e.g. "id").
    get_signals(metrics)
        Reduce raw metrics to framework-friendly signals:
        {"primary_metric": float, "secondary_metrics": dict}.
    format_context(top_items, bottom_items)
        Render a human-readable summary of top vs bottom performers; the
        text is injected verbatim into the HypothesisEngine prompt.
    """

    # ── Required interface ─────────────────────────────────────────────────────

    @abstractmethod
    def collect_metrics(self, item_ids: list[str]) -> list[dict[str, Any]]:
        """Pull raw metrics for *item_ids* from the external system.

        Parameters
        ----------
        item_ids : list of platform-specific IDs (video IDs, post IDs, …)

        Returns
        -------
        list[dict]
            One dict per item. The shape is domain-specific but must be
            consistent with what get_signals() expects.
        """
        ...

    @abstractmethod
    def get_signals(self, metrics: list[dict[str, Any]]) -> dict[str, Any]:
        """Normalise raw metrics into the standard signals dict.

        Expected output schema::

            {
                "primary_metric": <float>,            # main ranking KPI
                "secondary_metrics": {<str>: <float>, ...},
            }
        """
        ...

    @abstractmethod
    def format_context(
        self,
        top_items: list[dict[str, Any]],
        bottom_items: list[dict[str, Any]],
    ) -> str:
        """Return a text block contrasting top and bottom performers.

        The returned string is used verbatim in the HypothesisEngine
        LLM prompt.
        """
        ...

    # ── Optional convenience ───────────────────────────────────────────────────

    def describe(self) -> str:
        """Short human-readable label for this adapter (for logs/docs)."""
        return type(self).__name__
@@ -0,0 +1,178 @@
1
+ """adapters/short_video.py — Example ShortVideoAdapter.
2
+
3
+ This is a reference implementation showing how to build a DomainAdapter
4
+ for short-form video platforms (e.g. YouTube Shorts, Instagram Reels).
5
+
6
+ In a real deployment, replace `collect_metrics` and `get_signals` with
7
+ calls to your actual analytics backend.
8
+
9
+ Usage
10
+ -----
11
+ from adaptive_iteration.adapters.short_video import ShortVideoAdapter
12
+
13
+ adapter = ShortVideoAdapter(platform="youtube")
14
+ metrics = adapter.collect_metrics(["video_001", "video_002"])
15
+ signals = adapter.get_signals(metrics)
16
+ """
17
+ from __future__ import annotations
18
+
19
+ import random
20
+ from typing import Any
21
+
22
+ from .base import DomainAdapter
23
+
24
+
25
class ShortVideoAdapter(DomainAdapter):
    """Reference DomainAdapter for short-form video platforms.

    Two platform targets are supported: "youtube" and "instagram".
    Metric data here is simulated via the `random` module — swap in real
    analytics-API calls for a production deployment.

    Parameters
    ----------
    platform : "youtube" or "instagram"
    """

    SUPPORTED_PLATFORMS = ("youtube", "instagram")

    def __init__(self, platform: str) -> None:
        if platform not in self.SUPPORTED_PLATFORMS:
            raise ValueError(
                f"Unsupported platform {platform!r}. "
                f"Choose from {self.SUPPORTED_PLATFORMS}."
            )
        self.platform = platform

    # ── DomainAdapter interface ────────────────────────────────────────────────

    def collect_metrics(self, item_ids: list[str]) -> list[dict[str, Any]]:
        """Return (simulated) metrics, one dict per item ID.

        Replace the fake builders with real API calls to your analytics
        provider; the simulated shape mirrors what get_signals() expects.
        """
        builder = (
            self._fake_yt_metrics
            if self.platform == "youtube"
            else self._fake_ig_metrics
        )
        return [builder(item_id) for item_id in item_ids]

    def get_signals(self, metrics: list[dict[str, Any]]) -> dict[str, Any]:
        """Normalise raw metrics into framework signals.

        YouTube   → primary_metric = mean avg_view_pct (0–100)
        Instagram → primary_metric = engagement_score
                    = (saves×3 + shares×2 + comments×2 + likes) / reach × 100
        """
        if not metrics:
            return {"primary_metric": 0.0, "secondary_metrics": {}}
        handler = self._yt_signals if self.platform == "youtube" else self._ig_signals
        return handler(metrics)

    def format_context(
        self,
        top_items: list[dict[str, Any]],
        bottom_items: list[dict[str, Any]],
    ) -> str:
        """Render the top/bottom performer summary used in the LLM prompt."""
        lines = [f"Platform: {self.platform}", "\nTop performers:"]
        lines.extend(self._fmt(item) for item in top_items)
        lines.append("\nBottom performers:")
        lines.extend(self._fmt(item) for item in bottom_items)
        return "\n".join(lines)

    def describe(self) -> str:
        """Adapter label including the configured platform."""
        return f"ShortVideoAdapter(platform={self.platform!r})"

    # ── Internal ───────────────────────────────────────────────────────────────

    @staticmethod
    def _fake_yt_metrics(item_id: str) -> dict[str, Any]:
        # Simulated YouTube analytics row for one video.
        return {
            "id": item_id,
            "views": random.randint(500, 50_000),
            "avg_view_pct": round(random.uniform(30, 90), 1),
            "avg_view_sec": round(random.uniform(15, 75), 1),
            "like_rate": round(random.uniform(0.5, 8.0), 2),
            "subscribers_gained": random.randint(0, 50),
        }

    @staticmethod
    def _fake_ig_metrics(item_id: str) -> dict[str, Any]:
        # Simulated Instagram analytics row for one reel.
        reach = random.randint(200, 20_000)
        likes = random.randint(10, int(reach * 0.15))
        return {
            "id": item_id,
            "reach": reach,
            "likes": likes,
            "saves": random.randint(0, int(reach * 0.05)),
            "shares": random.randint(0, int(reach * 0.03)),
            "comments": random.randint(0, int(reach * 0.02)),
            "avg_watch_time_ms": random.randint(3_000, 30_000),
            "total_interactions": likes + random.randint(5, 100),
        }

    def _yt_signals(self, metrics: list[dict]) -> dict[str, Any]:
        # Primary KPI averages only rows that actually report avg_view_pct;
        # secondary metrics average over every row (missing values count as 0).
        usable = [m for m in metrics if m.get("avg_view_pct") is not None]
        if not usable:
            return {"primary_metric": 0.0, "secondary_metrics": {}}
        total = len(metrics)

        def _mean(key: str, digits: int) -> float:
            return round(sum(m.get(key, 0) for m in metrics) / total, digits)

        return {
            "primary_metric": round(
                sum(m["avg_view_pct"] for m in usable) / len(usable), 2
            ),
            "secondary_metrics": {
                "views": _mean("views", 1),
                "like_rate": _mean("like_rate", 3),
                "avg_view_sec": _mean("avg_view_sec", 1),
                "subscribers_gained": _mean("subscribers_gained", 2),
            },
        }

    def _ig_signals(self, metrics: list[dict]) -> dict[str, Any]:
        # Primary KPI averages only rows with positive reach; secondary
        # metrics average over every row (missing values count as 0).
        usable = [m for m in metrics if m.get("reach", 0) > 0]
        if not usable:
            return {"primary_metric": 0.0, "secondary_metrics": {}}
        total = len(metrics)

        def _engagement(m: dict) -> float:
            # Weighted interactions per unit reach, as a percentage.
            weighted = (
                m.get("saves", 0) * 3
                + m.get("shares", 0) * 2
                + m.get("comments", 0) * 2
                + m.get("likes", 0)
            )
            return weighted / m.get("reach", 1) * 100

        like_rates = [
            m.get("likes", 0) / max(m.get("reach", 1), 1) * 100 for m in metrics
        ]
        return {
            "primary_metric": round(
                sum(_engagement(m) for m in usable) / len(usable), 4
            ),
            "secondary_metrics": {
                "reach": round(sum(m.get("reach", 0) for m in metrics) / total, 1),
                "like_rate": round(sum(like_rates) / total, 3),
                "avg_watch_time_s": round(
                    sum(m.get("avg_watch_time_ms", 0) for m in metrics) / total / 1000, 2
                ),
                "shares": round(sum(m.get("shares", 0) for m in metrics) / total, 2),
                "saved": round(sum(m.get("saves", 0) for m in metrics) / total, 2),
                "total_interactions": round(
                    sum(m.get("total_interactions", 0) for m in metrics) / total, 2
                ),
            },
        }

    def _fmt(self, item: dict) -> str:
        """Return one summary line for *item* in platform-specific form."""
        item_id = item.get("id", "?")
        if self.platform == "youtube":
            return (
                f" [{item_id}] "
                f"avg_view_pct={item.get('avg_view_pct')}% "
                f"views={item.get('views')} "
                f"like_rate={item.get('like_rate')}%"
            )
        reach = item.get("reach", 0)
        likes = item.get("likes", 0)
        like_pct = round(likes / reach * 100, 2) if reach > 0 else 0.0
        return (
            f" [{item_id}] "
            f"like_rate={like_pct}% "
            f"reach={reach} "
            f"avg_watch={item.get('avg_watch_time_ms', 0) // 1000}s"
        )
@@ -0,0 +1,12 @@
1
+ """adaptive_iteration — Domain-agnostic adaptive experiment framework.
2
+
3
+ Import paths:
4
+ from adaptive_iteration.core.experiment import Experiment, Variant, ExperimentState
5
+ from adaptive_iteration.core.ledger import Ledger
6
+ from adaptive_iteration.core.analyzer import Analyzer
7
+ from adaptive_iteration.core.hypothesis import HypothesisEngine
8
+ from adaptive_iteration.core.config import AdaptiveConfig
9
+ from adaptive_iteration.adapters.base import DomainAdapter
10
+ from adaptive_iteration.adapters.short_video import ShortVideoAdapter
11
+ """
12
+ __version__ = "0.1.0"
@@ -0,0 +1,5 @@
1
+ # adaptive_iteration.adapters — DomainAdapter interface + example implementations
2
+ from .base import DomainAdapter
3
+ from .short_video import ShortVideoAdapter
4
+
5
+ __all__ = ["DomainAdapter", "ShortVideoAdapter"]