rnow-0.2.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. rnow/__init__.py +5 -0
  2. rnow/__main__.py +7 -0
  3. rnow/cli/__init__.py +6 -0
  4. rnow/cli/auth.py +67 -0
  5. rnow/cli/blob.py +98 -0
  6. rnow/cli/commands.py +2311 -0
  7. rnow/cli/common.py +28 -0
  8. rnow/cli/cube.py +255 -0
  9. rnow/cli/main.py +49 -0
  10. rnow/cli/test.py +728 -0
  11. rnow/cli/token_count.py +295 -0
  12. rnow/core/__init__.py +33 -0
  13. rnow/core/reward.py +333 -0
  14. rnow/core/tool.py +494 -0
  15. rnow/models.py +295 -0
  16. rnow/templates/deepseek-aha/config.yml +26 -0
  17. rnow/templates/deepseek-aha/rewards.py +36 -0
  18. rnow/templates/deepseek-aha/train.jsonl +1000 -0
  19. rnow/templates/mcp-tavily/config.yml +29 -0
  20. rnow/templates/mcp-tavily/requirements.txt +1 -0
  21. rnow/templates/mcp-tavily/rewards.py +25 -0
  22. rnow/templates/mcp-tavily/train.jsonl +500 -0
  23. rnow/templates/new/config.yml +26 -0
  24. rnow/templates/new/requirements.txt +1 -0
  25. rnow/templates/new/rewards.py +0 -0
  26. rnow/templates/new/train.jsonl +0 -0
  27. rnow/templates/rl-nextjs/config.yml +27 -0
  28. rnow/templates/rl-nextjs/requirements.txt +2 -0
  29. rnow/templates/rl-nextjs/rewards.py +446 -0
  30. rnow/templates/rl-nextjs/train.jsonl +1000 -0
  31. rnow/templates/rl-single/config.yml +27 -0
  32. rnow/templates/rl-single/requirements.txt +1 -0
  33. rnow/templates/rl-single/rewards.py +14 -0
  34. rnow/templates/rl-single/train.jsonl +1000 -0
  35. rnow/templates/rl-tools/config.yml +27 -0
  36. rnow/templates/rl-tools/env.py +38 -0
  37. rnow/templates/rl-tools/requirements.txt +3 -0
  38. rnow/templates/rl-tools/rewards.py +25 -0
  39. rnow/templates/rl-tools/train.jsonl +500 -0
  40. rnow/templates/sft/config.yml +20 -0
  41. rnow/templates/sft/train.jsonl +100 -0
  42. rnow/templates/tutorial-reward/config.yml +27 -0
  43. rnow/templates/tutorial-reward/requirements.txt +1 -0
  44. rnow/templates/tutorial-reward/rewards.py +15 -0
  45. rnow/templates/tutorial-reward/train.jsonl +1000 -0
  46. rnow/templates/tutorial-tool/config.yml +27 -0
  47. rnow/templates/tutorial-tool/env.py +7 -0
  48. rnow/templates/tutorial-tool/requirements.txt +3 -0
  49. rnow/templates/tutorial-tool/rewards.py +7 -0
  50. rnow/templates/tutorial-tool/train.jsonl +1266 -0
  51. rnow-0.2.4.dist-info/METADATA +135 -0
  52. rnow-0.2.4.dist-info/RECORD +56 -0
  53. rnow-0.2.4.dist-info/WHEEL +5 -0
  54. rnow-0.2.4.dist-info/entry_points.txt +2 -0
  55. rnow-0.2.4.dist-info/licenses/LICENSE +21 -0
  56. rnow-0.2.4.dist-info/top_level.txt +1 -0
rnow/templates/mcp-tavily/config.yml
@@ -0,0 +1,29 @@
+ project_id: ""
+ project_name: "SimpleQA - Factual QA Agent"
+ dataset_id: ""
+ dataset_name: "simpleqa"
+ dataset_type: rl
+ organization_id: ""
+ data:
+   train_file: train.jsonl
+   batch_size: 32
+   group_size: 16
+ model:
+   path: Qwen/Qwen3-8B
+   qlora_rank: 32
+   name: "SimpleQA Agent"
+   description: "Multi-turn RL model trained on SimpleQA factual questions using Tavily MCP"
+ algorithm:
+   loss_fn: ppo
+   adv_estimator: grpo
+   kl_penalty_coef: 0.01
+ rollout:
+   max_turns: 3
+   max_tokens: 4096
+   termination_policy: last_tool
+   mcp_url: "https://mcp.tavily.com/mcp/?tavilyApiKey=YOUR_TAVILY_API_KEY"
+   max_tool_response_chars: 4000
+ trainer:
+   num_epochs: 4
+   learning_rate: 0.0001
+   save_step: 63
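The reward in rewards.py below compares the model's "Final Answer:" line against args.metadata["expected_answer"], so each train.jsonl record presumably pairs a question with that metadata field. A minimal sketch of preparing one such record, assuming a messages-plus-metadata layout; only the expected_answer key is taken from rewards.py, and the surrounding field names are illustrative guesses, not the package's actual schema:

import json

# Hypothetical record layout; only "expected_answer" is confirmed by rewards.py.
record = {
    "messages": [
        {"role": "system", "content": "Answer the question. End with 'Final Answer: <answer>'."},
        {"role": "user", "content": "In what year was the Eiffel Tower completed?"},
    ],
    "metadata": {"expected_answer": "1889"},
}

# Append the record as one JSON line to the training file.
with open("train.jsonl", "a", encoding="utf-8") as f:
    f.write(json.dumps(record) + "\n")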
rnow/templates/mcp-tavily/requirements.txt
@@ -0,0 +1 @@
+ jellyfish>=1.0.0
rnow/templates/mcp-tavily/rewards.py
@@ -0,0 +1,25 @@
+ import re
+
+ import jellyfish
+
+ from rnow.core import RewardArgs, reward
+
+
+ @reward
+ def accuracy(args: RewardArgs, messages: list) -> float:
+     """Check if the final answer matches the expected answer."""
+     response = messages[-1].get("content", "")
+     expected = args.metadata.get("expected_answer", "").strip().lower()
+
+     # Extract content after "Final Answer:"
+     match = re.search(r"Final Answer:\s*(.+?)(?:\n|$)", response, re.IGNORECASE)
+     if not match:
+         return 0.0
+
+     answer = match.group(1).strip().lower()
+
+     # Use Jaro-Winkler similarity (1.0 = exact match, 0.0 = no similarity)
+     similarity = jellyfish.jaro_winkler_similarity(answer, expected)
+
+     # Require high similarity (>0.9) to count as correct
+     return 1.0 if similarity > 0.9 else 0.0
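To get a feel for how strict the > 0.9 Jaro-Winkler cutoff is, here is a small standalone check (not part of the package) using the same jellyfish call; the sample strings and the behavior noted in the comments are illustrative:

import jellyfish

pairs = [
    ("paris", "paris"),   # exact match -> similarity 1.0, reward 1.0
    ("pariss", "paris"),  # one-character typo -> stays above 0.9, reward 1.0
    ("london", "paris"),  # wrong answer -> far below 0.9, reward 0.0
]

for answer, expected in pairs:
    sim = jellyfish.jaro_winkler_similarity(answer, expected)
    print(f"{answer!r} vs {expected!r}: {sim:.3f} -> reward {1.0 if sim > 0.9 else 0.0}")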