rnow-0.3.1.tar.gz

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. rnow-0.3.1/LICENSE +21 -0
  2. rnow-0.3.1/PKG-INFO +147 -0
  3. rnow-0.3.1/README.md +117 -0
  4. rnow-0.3.1/pyproject.toml +81 -0
  5. rnow-0.3.1/rnow/__init__.py +5 -0
  6. rnow-0.3.1/rnow/__main__.py +7 -0
  7. rnow-0.3.1/rnow/cli/__init__.py +6 -0
  8. rnow-0.3.1/rnow/cli/auth.py +67 -0
  9. rnow-0.3.1/rnow/cli/blob.py +98 -0
  10. rnow-0.3.1/rnow/cli/commands.py +2329 -0
  11. rnow-0.3.1/rnow/cli/common.py +28 -0
  12. rnow-0.3.1/rnow/cli/cube.py +255 -0
  13. rnow-0.3.1/rnow/cli/main.py +49 -0
  14. rnow-0.3.1/rnow/cli/test.py +712 -0
  15. rnow-0.3.1/rnow/cli/token_count.py +295 -0
  16. rnow-0.3.1/rnow/core/__init__.py +33 -0
  17. rnow-0.3.1/rnow/core/reward.py +333 -0
  18. rnow-0.3.1/rnow/core/tool.py +494 -0
  19. rnow-0.3.1/rnow/models.py +330 -0
  20. rnow-0.3.1/rnow/templates/deepseek-aha/config.yml +26 -0
  21. rnow-0.3.1/rnow/templates/deepseek-aha/rewards.py +36 -0
  22. rnow-0.3.1/rnow/templates/deepseek-aha/train.jsonl +1000 -0
  23. rnow-0.3.1/rnow/templates/mcp-tavily/config.yml +29 -0
  24. rnow-0.3.1/rnow/templates/mcp-tavily/requirements.txt +1 -0
  25. rnow-0.3.1/rnow/templates/mcp-tavily/rewards.py +25 -0
  26. rnow-0.3.1/rnow/templates/mcp-tavily/train.jsonl +500 -0
  27. rnow-0.3.1/rnow/templates/new/config.yml +26 -0
  28. rnow-0.3.1/rnow/templates/new/requirements.txt +1 -0
  29. rnow-0.3.1/rnow/templates/new/rewards.py +0 -0
  30. rnow-0.3.1/rnow/templates/new/train.jsonl +0 -0
  31. rnow-0.3.1/rnow/templates/rl-nextjs/config.yml +27 -0
  32. rnow-0.3.1/rnow/templates/rl-nextjs/requirements.txt +2 -0
  33. rnow-0.3.1/rnow/templates/rl-nextjs/rewards.py +446 -0
  34. rnow-0.3.1/rnow/templates/rl-nextjs/train.jsonl +1000 -0
  35. rnow-0.3.1/rnow/templates/rl-single/config.yml +27 -0
  36. rnow-0.3.1/rnow/templates/rl-single/requirements.txt +1 -0
  37. rnow-0.3.1/rnow/templates/rl-single/rewards.py +14 -0
  38. rnow-0.3.1/rnow/templates/rl-single/train.jsonl +92 -0
  39. rnow-0.3.1/rnow/templates/rl-tools/config.yml +27 -0
  40. rnow-0.3.1/rnow/templates/rl-tools/env.py +38 -0
  41. rnow-0.3.1/rnow/templates/rl-tools/requirements.txt +3 -0
  42. rnow-0.3.1/rnow/templates/rl-tools/rewards.py +25 -0
  43. rnow-0.3.1/rnow/templates/rl-tools/train.jsonl +500 -0
  44. rnow-0.3.1/rnow/templates/sft/config.yml +20 -0
  45. rnow-0.3.1/rnow/templates/sft/train.jsonl +100 -0
  46. rnow-0.3.1/rnow/templates/tutorial-reward/config.yml +27 -0
  47. rnow-0.3.1/rnow/templates/tutorial-reward/requirements.txt +1 -0
  48. rnow-0.3.1/rnow/templates/tutorial-reward/rewards.py +15 -0
  49. rnow-0.3.1/rnow/templates/tutorial-reward/train.jsonl +92 -0
  50. rnow-0.3.1/rnow/templates/tutorial-tool/config.yml +27 -0
  51. rnow-0.3.1/rnow/templates/tutorial-tool/env.py +7 -0
  52. rnow-0.3.1/rnow/templates/tutorial-tool/requirements.txt +3 -0
  53. rnow-0.3.1/rnow/templates/tutorial-tool/rewards.py +7 -0
  54. rnow-0.3.1/rnow/templates/tutorial-tool/train.jsonl +1266 -0
  55. rnow-0.3.1/rnow.egg-info/PKG-INFO +147 -0
  56. rnow-0.3.1/rnow.egg-info/SOURCES.txt +59 -0
  57. rnow-0.3.1/rnow.egg-info/dependency_links.txt +1 -0
  58. rnow-0.3.1/rnow.egg-info/entry_points.txt +2 -0
  59. rnow-0.3.1/rnow.egg-info/requires.txt +25 -0
  60. rnow-0.3.1/rnow.egg-info/top_level.txt +1 -0
  61. rnow-0.3.1/setup.cfg +4 -0
rnow-0.3.1/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 ReinforceNow
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
rnow-0.3.1/PKG-INFO ADDED
@@ -0,0 +1,147 @@
+ Metadata-Version: 2.4
+ Name: rnow
+ Version: 0.3.1
+ Summary: ReinforceNow CLI - Reinforcement Learning platform command-line interface
+ Requires-Python: <3.15,>=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: click>=8.0.0
+ Requires-Dist: requests>=2.25.0
+ Requires-Dist: httpx>=0.24.0
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: pyyaml>=5.4.0
+ Requires-Dist: packaging>=21.0
+ Requires-Dist: prompt_toolkit>=3.0.0
+ Requires-Dist: tokenizers>=0.15.0
+ Requires-Dist: openai-harmony>=0.0.8
+ Provides-Extra: test
+ Requires-Dist: tinker-cookbook>=0.1.0; extra == "test"
+ Provides-Extra: api
+ Requires-Dist: fastapi>=0.68.0; extra == "api"
+ Requires-Dist: uvicorn>=0.15.0; extra == "api"
+ Provides-Extra: mcp
+ Requires-Dist: fastmcp>=0.1.0; extra == "mcp"
+ Provides-Extra: all
+ Requires-Dist: tinker-cookbook>=0.1.0; extra == "all"
+ Requires-Dist: fastapi>=0.68.0; extra == "all"
+ Requires-Dist: uvicorn>=0.15.0; extra == "all"
+ Requires-Dist: fastmcp>=0.1.0; extra == "all"
+ Dynamic: license-file
+
+ <div align="center">
+   <img
+     alt="ReinforceNow CLI"
+     src="./assets/header.png"
+     width="100%"
+   >
+   <br><br>
+
+ [![PyPI version](https://img.shields.io/pypi/v/rnow?color=blue)](https://pypi.org/project/rnow/)
+ [![Docs](https://img.shields.io/badge/docs-reinforcenow.ai-blue)](https://reinforcenow.ai/docs)
+ [![Follow on X](https://img.shields.io/badge/Follow_on_X-@reinforcenow-black?labelColor=white)](https://x.com/reinforcenow)
+ [![MIT License](https://img.shields.io/badge/license-MIT-green)](./LICENSE)
+
+ </div>
+
+ # Documentation
+
+ See the [documentation](https://www.reinforcenow.ai/docs/getting-started/quickstart) for a technical overview of the platform and [train your first agent](https://www.reinforcenow.ai/docs/getting-started/first-agent).
+
+ # Quick Start
+
+ ### 1. Install uv (Python package manager)
+
+ ```bash
+ # macOS/Linux:
+ $ curl -LsSf https://astral.sh/uv/install.sh | sh
+
+ # Windows:
+ PS> powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
+ ```
+
+ ### 2. Install ReinforceNow
+
+ ```bash
+ uv init && uv venv --python 3.11
+ source .venv/bin/activate # Windows: .\.venv\Scripts\Activate.ps1
+ uv pip install rnow
+ ```
+
+ ### 3. Authenticate
+
+ ```bash
+ rnow login
+ ```
+
+ ### 4. Create & Run Your First Project
+
+ ```bash
+ rnow init --template sft
+ rnow run
+ ```
+
+ That's it! Your training run will start on ReinforceNow's infrastructure. Monitor progress in the [dashboard](https://reinforcenow.ai/home).
+
+ ![ReinforceNow Graph](./assets/reinforcenow-graph.png)
+
+ # Core Concepts
+
+ Go from raw data to a reliable AI agent in production. ReinforceNow gives you the flexibility to define:
+
+ ### 1. Reward Functions
+
+ Define how your model should be evaluated using the `@reward` decorator:
+
+ ```python
+ from rnow.core import reward, RewardArgs
+
+ @reward
+ async def accuracy(args: RewardArgs, messages: list) -> float:
+     """Check if the model's answer matches ground truth."""
+     response = messages[-1]["content"]
+     expected = args.metadata["answer"]
+     return 1.0 if expected in response else 0.0
+ ```
+
+ → [Write your first reward function](https://www.reinforcenow.ai/docs/getting-started/first-reward)
+
+ ### 2. Tools (for Agents)
+
+ Give your model the ability to call functions during training:
+
+ ```python
+ from rnow.core import tool
+
+ @tool
+ def search(query: str, max_results: int = 5) -> dict:
+     """Search the web for information."""
+     # Your implementation here
+     return {"results": [...]}
+ ```
+
+ → [Train an agent with custom tools](https://www.reinforcenow.ai/docs/getting-started/first-agent)
+
+ ### 3. Training Data
+
+ Create a `train.jsonl` file with your prompts and reward assignments:
+
+ ```json
+ {"messages": [{"role": "user", "content": "Balance the equation: Fe + O2 → Fe2O3"}], "rewards": ["accuracy"], "metadata": {"answer": "4Fe + 3O2 → 2Fe2O3"}}
+ {"messages": [{"role": "user", "content": "Balance the equation: H2 + O2 → H2O"}], "rewards": ["accuracy"], "metadata": {"answer": "2H2 + O2 → 2H2O"}}
+ {"messages": [{"role": "user", "content": "Balance the equation: N2 + H2 → NH3"}], "rewards": ["accuracy"], "metadata": {"answer": "N2 + 3H2 → 2NH3"}}
+ ```
+
+ → [Learn about training data format](https://www.reinforcenow.ai/docs/cli-reference/train-data)
+
+ # Contributing
+
+ We welcome contributions! ❤️ Please open an issue to discuss your ideas before submitting a PR.
+
+ <br>
+ <div align="center">
+   <img
+     alt="ReinforceNow"
+     src="./assets/footer.png"
+     width="100%"
+   >
+ </div>
rnow-0.3.1/README.md ADDED
@@ -0,0 +1,117 @@
+ <div align="center">
+   <img
+     alt="ReinforceNow CLI"
+     src="./assets/header.png"
+     width="100%"
+   >
+   <br><br>
+
+ [![PyPI version](https://img.shields.io/pypi/v/rnow?color=blue)](https://pypi.org/project/rnow/)
+ [![Docs](https://img.shields.io/badge/docs-reinforcenow.ai-blue)](https://reinforcenow.ai/docs)
+ [![Follow on X](https://img.shields.io/badge/Follow_on_X-@reinforcenow-black?labelColor=white)](https://x.com/reinforcenow)
+ [![MIT License](https://img.shields.io/badge/license-MIT-green)](./LICENSE)
+
+ </div>
+
+ # Documentation
+
+ See the [documentation](https://www.reinforcenow.ai/docs/getting-started/quickstart) for a technical overview of the platform and [train your first agent](https://www.reinforcenow.ai/docs/getting-started/first-agent).
+
+ # Quick Start
+
+ ### 1. Install uv (Python package manager)
+
+ ```bash
+ # macOS/Linux:
+ $ curl -LsSf https://astral.sh/uv/install.sh | sh
+
+ # Windows:
+ PS> powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
+ ```
+
+ ### 2. Install ReinforceNow
+
+ ```bash
+ uv init && uv venv --python 3.11
+ source .venv/bin/activate # Windows: .\.venv\Scripts\Activate.ps1
+ uv pip install rnow
+ ```
+
+ ### 3. Authenticate
+
+ ```bash
+ rnow login
+ ```
+
+ ### 4. Create & Run Your First Project
+
+ ```bash
+ rnow init --template sft
+ rnow run
+ ```
+
+ That's it! Your training run will start on ReinforceNow's infrastructure. Monitor progress in the [dashboard](https://reinforcenow.ai/home).
+
+ ![ReinforceNow Graph](./assets/reinforcenow-graph.png)
+
+ # Core Concepts
+
+ Go from raw data to a reliable AI agent in production. ReinforceNow gives you the flexibility to define:
+
+ ### 1. Reward Functions
+
+ Define how your model should be evaluated using the `@reward` decorator:
+
+ ```python
+ from rnow.core import reward, RewardArgs
+
+ @reward
+ async def accuracy(args: RewardArgs, messages: list) -> float:
+     """Check if the model's answer matches ground truth."""
+     response = messages[-1]["content"]
+     expected = args.metadata["answer"]
+     return 1.0 if expected in response else 0.0
+ ```
+
+ → [Write your first reward function](https://www.reinforcenow.ai/docs/getting-started/first-reward)
+
+ ### 2. Tools (for Agents)
+
+ Give your model the ability to call functions during training:
+
+ ```python
+ from rnow.core import tool
+
+ @tool
+ def search(query: str, max_results: int = 5) -> dict:
+     """Search the web for information."""
+     # Your implementation here
+     return {"results": [...]}
+ ```
+
+ → [Train an agent with custom tools](https://www.reinforcenow.ai/docs/getting-started/first-agent)
+
+ ### 3. Training Data
+
+ Create a `train.jsonl` file with your prompts and reward assignments:
+
+ ```json
+ {"messages": [{"role": "user", "content": "Balance the equation: Fe + O2 → Fe2O3"}], "rewards": ["accuracy"], "metadata": {"answer": "4Fe + 3O2 → 2Fe2O3"}}
+ {"messages": [{"role": "user", "content": "Balance the equation: H2 + O2 → H2O"}], "rewards": ["accuracy"], "metadata": {"answer": "2H2 + O2 → 2H2O"}}
+ {"messages": [{"role": "user", "content": "Balance the equation: N2 + H2 → NH3"}], "rewards": ["accuracy"], "metadata": {"answer": "N2 + 3H2 → 2NH3"}}
+ ```
+
+ → [Learn about training data format](https://www.reinforcenow.ai/docs/cli-reference/train-data)
+
+ # Contributing
+
+ We welcome contributions! ❤️ Please open an issue to discuss your ideas before submitting a PR.
+
+ <br>
+ <div align="center">
+   <img
+     alt="ReinforceNow"
+     src="./assets/footer.png"
+     width="100%"
+   >
+ </div>
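A minimal sanity-check sketch tying the README's `accuracy` reward to one line of the example `train.jsonl`. This is not part of the package: the reward body is copied from the README but declared without the `@reward` decorator, `SimpleNamespace` is a hypothetical stand-in for `RewardArgs` (only the `.metadata` attribute the reward uses is mocked), and the assistant reply is invented for illustration.

```python
# Hedged, standalone sketch: check one train.jsonl row against the README's reward logic.
import asyncio
import json
from types import SimpleNamespace


async def accuracy(args, messages: list) -> float:
    """Check if the model's answer matches ground truth (body copied from the README)."""
    response = messages[-1]["content"]
    expected = args.metadata["answer"]
    return 1.0 if expected in response else 0.0


# One row in the README's train.jsonl format.
row = json.loads(
    '{"messages": [{"role": "user", "content": "Balance the equation: H2 + O2 → H2O"}], '
    '"rewards": ["accuracy"], "metadata": {"answer": "2H2 + O2 → 2H2O"}}'
)

# Pretend the model replied with the correct balanced equation.
rollout = row["messages"] + [{"role": "assistant", "content": "2H2 + O2 → 2H2O"}]
args = SimpleNamespace(metadata=row["metadata"])  # hypothetical stand-in for RewardArgs

print(asyncio.run(accuracy(args, rollout)))  # 1.0
```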
rnow-0.3.1/pyproject.toml ADDED
@@ -0,0 +1,81 @@
+ [build-system]
+ requires = ["setuptools>=61.0", "wheel"]
+ build-backend = "setuptools.build_meta"
+
+ [tool.setuptools.packages.find]
+ where = ["."]
+ include = ["rnow*"]
+
+ [tool.setuptools.package-data]
+ rnow = ["templates/**/*"]
+
+ [project]
+ name = "rnow"
+ version = "0.3.1"
+ description = "ReinforceNow CLI - Reinforcement Learning platform command-line interface"
+ readme = "README.md"
+ requires-python = ">=3.10,<3.15"
+ dependencies = [
+     # Core CLI deps - lightweight, cross-platform
+     "click>=8.0.0",
+     "requests>=2.25.0",
+     "httpx>=0.24.0",
+     "pydantic>=2.0.0",
+     "pyyaml>=5.4.0",
+     "packaging>=21.0",
+     "prompt_toolkit>=3.0.0",
+     # Tokenizers for accurate token counting
+     "tokenizers>=0.15.0",
+     "openai-harmony>=0.0.8",
+ ]
+
+ [project.optional-dependencies]
+ # Local testing with ML inference (requires torch)
+ test = ["tinker-cookbook>=0.1.0"]
+ # API server mode
+ api = ["fastapi>=0.68.0", "uvicorn>=0.15.0"]
+ # MCP server support (for fetching tool schemas)
+ mcp = ["fastmcp>=0.1.0"]
+ # All optional features
+ all = ["tinker-cookbook>=0.1.0", "fastapi>=0.68.0", "uvicorn>=0.15.0", "fastmcp>=0.1.0"]
+
+ [project.scripts]
+ rnow = "rnow.cli.main:main"
+
+ # =============================================================================
+ # Ruff configuration (linting + formatting)
+ # =============================================================================
+ [tool.ruff]
+ line-length = 100
+ target-version = "py310"
+
+ [tool.ruff.lint]
+ select = [
+     "E",   # pycodestyle errors
+     "F",   # pyflakes
+     "I",   # isort
+     "W",   # pycodestyle warnings
+     "UP",  # pyupgrade
+     "B",   # flake8-bugbear
+     "SIM", # flake8-simplify
+ ]
+ ignore = [
+     "E501",   # line too long (formatter handles)
+     "E402",   # imports not at top
+     "E722",   # bare except
+     "B008",   # function call in default arg
+     "B904",   # raise from err
+     "SIM115", # context manager for open
+ ]
+
+ [tool.ruff.lint.isort]
+ known-first-party = ["rnow"]
+
+ # =============================================================================
+ # Mypy configuration (type checking)
+ # =============================================================================
+ [tool.mypy]
+ python_version = "3.10"
+ warn_return_any = true
+ warn_unused_ignores = true
+ ignore_missing_imports = true
rnow-0.3.1/rnow/__init__.py ADDED
@@ -0,0 +1,5 @@
+ """
+ ReinforceNow CLI - Command-line interface for ReinforceNow RLHF platform.
+ """
+
+ __version__ = "0.8.2"
rnow-0.3.1/rnow/__main__.py ADDED
@@ -0,0 +1,7 @@
+ #!/usr/bin/env python
+ """Entry point for running rnow as a module."""
+
+ from rnow.cli.main import main
+
+ if __name__ == "__main__":
+     main()
rnow-0.3.1/rnow/cli/__init__.py ADDED
@@ -0,0 +1,6 @@
+ # reinforcenow/cli/__init__.py
+ # CLI package exports
+
+ from rnow.cli.main import cli
+
+ __all__ = ["cli"]
rnow-0.3.1/rnow/cli/auth.py ADDED
@@ -0,0 +1,67 @@
+ # reinforcenow/cli/auth.py
+
+ import json
+ from pathlib import Path
+
+ import click
+
+ # Simple home directory paths
+ DATA_DIR = Path.home() / ".reinforcenow"
+ CREDS_FILE = DATA_DIR / "credentials.json"
+ CONFIG_FILE = DATA_DIR / "config.json"
+
+
+ def is_authenticated() -> bool:
+     """Check if authenticated."""
+     try:
+         with open(CREDS_FILE) as f:
+             return "api_key" in json.load(f)
+     except (FileNotFoundError, json.JSONDecodeError, KeyError):
+         return False
+
+
+ def get_auth_headers() -> dict[str, str]:
+     """Get auth headers."""
+     try:
+         with open(CREDS_FILE) as f:
+             creds = json.load(f)
+         return {
+             "Content-Type": "application/json",
+             "Authorization": f"Bearer {creds['api_key']}",
+         }
+     except (FileNotFoundError, json.JSONDecodeError, KeyError):
+         raise click.ClickException("Not authenticated. Run 'reinforcenow login'")
+
+
+ def get_active_org_from_config() -> str | None:
+     """Get active organization."""
+     try:
+         with open(CONFIG_FILE) as f:
+             return json.load(f).get("active_organization_id")
+     except (FileNotFoundError, json.JSONDecodeError):
+         return None
+
+
+ def set_active_organization(org_id: str) -> None:
+     """Set active organization."""
+     DATA_DIR.mkdir(parents=True, exist_ok=True)
+
+     try:
+         with open(CONFIG_FILE) as f:
+             config = json.load(f)
+     except (FileNotFoundError, json.JSONDecodeError):
+         config = {}
+
+     config["active_organization_id"] = org_id
+
+     with open(CONFIG_FILE, "w") as f:
+         json.dump(config, f, indent=2)
+
+
+ def logout() -> None:
+     """Remove credentials."""
+     if CREDS_FILE.exists():
+         CREDS_FILE.unlink()
+         click.echo("✓ Logged out")
+     else:
+         click.echo("Not logged in")
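The helpers above only read and write `~/.reinforcenow/credentials.json` and `config.json`. As a hedged aside, a minimal read-only usage sketch, assuming `rnow` is installed and `rnow login` has already written the credentials file:

```python
# Hedged usage sketch of rnow/cli/auth.py as shown above; it only reads the files
# that `rnow login` creates under ~/.reinforcenow/ and never writes them.
from rnow.cli.auth import get_active_org_from_config, get_auth_headers, is_authenticated

if not is_authenticated():
    # credentials.json is missing, malformed, or has no "api_key" field
    raise SystemExit("Run `rnow login` first.")

headers = get_auth_headers()           # {"Content-Type": ..., "Authorization": "Bearer <api_key>"}
org_id = get_active_org_from_config()  # None until set_active_organization() has been called

print(headers["Content-Type"], org_id)
```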
rnow-0.3.1/rnow/cli/blob.py ADDED
@@ -0,0 +1,98 @@
+ # reinforcenow/cli/blob.py
+ """Vercel Blob upload support for large files."""
+
+ from pathlib import Path
+
+ import requests
+
+ from rnow.cli import auth
+
+ # Size threshold for blob uploads (4MB to stay under 4.5MB limit)
+ MAX_INLINE_BYTES = 4 * 1024 * 1024
+
+ BLOB_API_URL = "https://blob.vercel-storage.com"
+ BLOB_API_VERSION = "7"
+
+
+ def request_blob_client_token(base_url: str, pathname: str) -> str:
+     """
+     Request a client upload token from the backend.
+     This token allows direct upload to Vercel Blob.
+     """
+     headers = auth.get_auth_headers()
+     headers["Content-Type"] = "application/json"
+
+     payload = {
+         "type": "blob.generate-client-token",
+         "payload": {
+             "pathname": pathname,
+             "callbackUrl": f"{base_url}/dataset/upload",
+         },
+     }
+
+     resp = requests.post(
+         f"{base_url}/dataset/upload",
+         headers=headers,
+         json=payload,
+         timeout=30,
+     )
+     resp.raise_for_status()
+     data = resp.json()
+
+     if data.get("type") != "blob.generate-client-token":
+         raise RuntimeError(f"Unexpected response from blob token endpoint: {data}")
+
+     client_token = data.get("clientToken")
+     if not client_token:
+         raise RuntimeError("No clientToken returned from blob token endpoint")
+
+     return client_token
+
+
+ def upload_file_to_blob(base_url: str, local_path: Path, blob_pathname: str) -> dict:
+     """
+     Upload a file directly to Vercel Blob using a client token.
+     Returns the blob JSON (contains url, pathname, etc).
+     """
+     client_token = request_blob_client_token(base_url, blob_pathname)
+
+     url = f"{BLOB_API_URL}/{blob_pathname.lstrip('/')}"
+     headers = {
+         "Authorization": f"Bearer {client_token}",
+         "x-api-version": BLOB_API_VERSION,
+         "x-content-type": "application/jsonl",
+     }
+
+     with open(local_path, "rb") as f:
+         resp = requests.put(url, headers=headers, data=f, timeout=300)
+
+     resp.raise_for_status()
+     return resp.json()
+
+
+ def maybe_upload_to_blob(
+     base_url: str,
+     file_path: Path,
+     dataset_id: str,
+ ) -> tuple[str | None, dict | None]:
+     """
+     Check if file needs blob upload and handle it.
+
+     Returns:
+         (inline_contents, blob_info)
+         - If small: inline_contents is file content, blob_info is None
+         - If large: inline_contents is None, blob_info has url/pathname
+     """
+     size = file_path.stat().st_size
+
+     if size <= MAX_INLINE_BYTES:
+         # Small file - return contents for inline upload
+         return None, None
+
+     # Large file - upload to blob
+     import uuid
+
+     blob_pathname = f"datasets/{dataset_id}/{uuid.uuid4().hex[:8]}-{file_path.name}"
+
+     blob = upload_file_to_blob(base_url, file_path, blob_pathname)
+     return None, blob
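blob.py implements a two-step flow: POST to `{base_url}/dataset/upload` for a client token, then PUT the file straight to Vercel Blob. Note that, as written, the small-file branch of `maybe_upload_to_blob` returns `(None, None)` rather than the file contents its docstring describes, so the caller apparently reads small files and sends them inline itself. A hedged usage sketch of the module as shown above; the backend URL, file name, and dataset id are placeholders, not values from this package:

```python
# Hedged usage sketch of rnow/cli/blob.py. Requires a prior `rnow login`, since
# request_blob_client_token() sends the auth headers to {base_url}/dataset/upload.
from pathlib import Path

from rnow.cli.blob import MAX_INLINE_BYTES, maybe_upload_to_blob

base_url = "https://example-backend.invalid/api"  # placeholder backend URL
train_file = Path("train.jsonl")                  # placeholder local dataset file

contents, blob = maybe_upload_to_blob(base_url, train_file, dataset_id="ds_example")

if blob is not None:
    # Large file: already PUT directly to Vercel Blob using a client token.
    print("uploaded to", blob.get("url"))
else:
    # Small file (<= MAX_INLINE_BYTES): both return values are None as written,
    # so read the file here and submit it inline.
    inline = train_file.read_text()
    print(f"send {len(inline)} chars inline (threshold {MAX_INLINE_BYTES} bytes)")
```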