openadapt-ml 0.1.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. openadapt_ml/baselines/__init__.py +121 -0
  2. openadapt_ml/baselines/adapter.py +185 -0
  3. openadapt_ml/baselines/cli.py +314 -0
  4. openadapt_ml/baselines/config.py +448 -0
  5. openadapt_ml/baselines/parser.py +922 -0
  6. openadapt_ml/baselines/prompts.py +787 -0
  7. openadapt_ml/benchmarks/__init__.py +13 -107
  8. openadapt_ml/benchmarks/agent.py +297 -374
  9. openadapt_ml/benchmarks/azure.py +62 -24
  10. openadapt_ml/benchmarks/azure_ops_tracker.py +521 -0
  11. openadapt_ml/benchmarks/cli.py +1874 -751
  12. openadapt_ml/benchmarks/trace_export.py +631 -0
  13. openadapt_ml/benchmarks/viewer.py +1236 -0
  14. openadapt_ml/benchmarks/vm_monitor.py +1111 -0
  15. openadapt_ml/benchmarks/waa_deploy/Dockerfile +216 -0
  16. openadapt_ml/benchmarks/waa_deploy/__init__.py +10 -0
  17. openadapt_ml/benchmarks/waa_deploy/api_agent.py +540 -0
  18. openadapt_ml/benchmarks/waa_deploy/start_waa_server.bat +53 -0
  19. openadapt_ml/cloud/azure_inference.py +3 -5
  20. openadapt_ml/cloud/lambda_labs.py +722 -307
  21. openadapt_ml/cloud/local.py +3194 -89
  22. openadapt_ml/cloud/ssh_tunnel.py +595 -0
  23. openadapt_ml/datasets/next_action.py +125 -96
  24. openadapt_ml/evals/grounding.py +32 -9
  25. openadapt_ml/evals/plot_eval_metrics.py +15 -13
  26. openadapt_ml/evals/trajectory_matching.py +120 -57
  27. openadapt_ml/experiments/demo_prompt/__init__.py +19 -0
  28. openadapt_ml/experiments/demo_prompt/format_demo.py +236 -0
  29. openadapt_ml/experiments/demo_prompt/results/experiment_20251231_002125.json +83 -0
  30. openadapt_ml/experiments/demo_prompt/results/experiment_n30_20251231_165958.json +1100 -0
  31. openadapt_ml/experiments/demo_prompt/results/multistep_20251231_025051.json +182 -0
  32. openadapt_ml/experiments/demo_prompt/run_experiment.py +541 -0
  33. openadapt_ml/experiments/representation_shootout/__init__.py +70 -0
  34. openadapt_ml/experiments/representation_shootout/conditions.py +708 -0
  35. openadapt_ml/experiments/representation_shootout/config.py +390 -0
  36. openadapt_ml/experiments/representation_shootout/evaluator.py +659 -0
  37. openadapt_ml/experiments/representation_shootout/runner.py +687 -0
  38. openadapt_ml/experiments/waa_demo/__init__.py +10 -0
  39. openadapt_ml/experiments/waa_demo/demos.py +357 -0
  40. openadapt_ml/experiments/waa_demo/runner.py +732 -0
  41. openadapt_ml/experiments/waa_demo/tasks.py +151 -0
  42. openadapt_ml/export/__init__.py +9 -0
  43. openadapt_ml/export/__main__.py +6 -0
  44. openadapt_ml/export/cli.py +89 -0
  45. openadapt_ml/export/parquet.py +277 -0
  46. openadapt_ml/grounding/detector.py +18 -14
  47. openadapt_ml/ingest/__init__.py +11 -10
  48. openadapt_ml/ingest/capture.py +97 -86
  49. openadapt_ml/ingest/loader.py +120 -69
  50. openadapt_ml/ingest/synthetic.py +344 -193
  51. openadapt_ml/models/api_adapter.py +14 -4
  52. openadapt_ml/models/base_adapter.py +10 -2
  53. openadapt_ml/models/providers/__init__.py +288 -0
  54. openadapt_ml/models/providers/anthropic.py +266 -0
  55. openadapt_ml/models/providers/base.py +299 -0
  56. openadapt_ml/models/providers/google.py +376 -0
  57. openadapt_ml/models/providers/openai.py +342 -0
  58. openadapt_ml/models/qwen_vl.py +46 -19
  59. openadapt_ml/perception/__init__.py +35 -0
  60. openadapt_ml/perception/integration.py +399 -0
  61. openadapt_ml/retrieval/README.md +226 -0
  62. openadapt_ml/retrieval/USAGE.md +391 -0
  63. openadapt_ml/retrieval/__init__.py +91 -0
  64. openadapt_ml/retrieval/demo_retriever.py +843 -0
  65. openadapt_ml/retrieval/embeddings.py +630 -0
  66. openadapt_ml/retrieval/index.py +194 -0
  67. openadapt_ml/retrieval/retriever.py +162 -0
  68. openadapt_ml/runtime/__init__.py +50 -0
  69. openadapt_ml/runtime/policy.py +27 -14
  70. openadapt_ml/runtime/safety_gate.py +471 -0
  71. openadapt_ml/schema/__init__.py +113 -0
  72. openadapt_ml/schema/converters.py +588 -0
  73. openadapt_ml/schema/episode.py +470 -0
  74. openadapt_ml/scripts/capture_screenshots.py +530 -0
  75. openadapt_ml/scripts/compare.py +102 -61
  76. openadapt_ml/scripts/demo_policy.py +4 -1
  77. openadapt_ml/scripts/eval_policy.py +19 -14
  78. openadapt_ml/scripts/make_gif.py +1 -1
  79. openadapt_ml/scripts/prepare_synthetic.py +16 -17
  80. openadapt_ml/scripts/train.py +98 -75
  81. openadapt_ml/segmentation/README.md +920 -0
  82. openadapt_ml/segmentation/__init__.py +97 -0
  83. openadapt_ml/segmentation/adapters/__init__.py +5 -0
  84. openadapt_ml/segmentation/adapters/capture_adapter.py +420 -0
  85. openadapt_ml/segmentation/annotator.py +610 -0
  86. openadapt_ml/segmentation/cache.py +290 -0
  87. openadapt_ml/segmentation/cli.py +674 -0
  88. openadapt_ml/segmentation/deduplicator.py +656 -0
  89. openadapt_ml/segmentation/frame_describer.py +788 -0
  90. openadapt_ml/segmentation/pipeline.py +340 -0
  91. openadapt_ml/segmentation/schemas.py +622 -0
  92. openadapt_ml/segmentation/segment_extractor.py +634 -0
  93. openadapt_ml/training/azure_ops_viewer.py +1097 -0
  94. openadapt_ml/training/benchmark_viewer.py +3255 -19
  95. openadapt_ml/training/shared_ui.py +7 -7
  96. openadapt_ml/training/stub_provider.py +57 -35
  97. openadapt_ml/training/trainer.py +255 -441
  98. openadapt_ml/training/trl_trainer.py +403 -0
  99. openadapt_ml/training/viewer.py +323 -108
  100. openadapt_ml/training/viewer_components.py +180 -0
  101. {openadapt_ml-0.1.0.dist-info → openadapt_ml-0.2.1.dist-info}/METADATA +312 -69
  102. openadapt_ml-0.2.1.dist-info/RECORD +116 -0
  103. openadapt_ml/benchmarks/base.py +0 -366
  104. openadapt_ml/benchmarks/data_collection.py +0 -432
  105. openadapt_ml/benchmarks/runner.py +0 -381
  106. openadapt_ml/benchmarks/waa.py +0 -704
  107. openadapt_ml/schemas/__init__.py +0 -53
  108. openadapt_ml/schemas/sessions.py +0 -122
  109. openadapt_ml/schemas/validation.py +0 -252
  110. openadapt_ml-0.1.0.dist-info/RECORD +0 -55
  111. {openadapt_ml-0.1.0.dist-info → openadapt_ml-0.2.1.dist-info}/WHEEL +0 -0
  112. {openadapt_ml-0.1.0.dist-info → openadapt_ml-0.2.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,180 @@
1
+ """Adapter module for openadapt-viewer components.
2
+
3
+ This module provides wrapper functions that adapt openadapt-viewer components
4
+ for openadapt-ml specific use cases, particularly for training visualization.
5
+
6
+ Migration Approach:
7
+ ------------------
8
+ Phase 1 (Foundation): Create this adapter module to establish patterns
9
+ Phase 2 (Integration): Gradually migrate viewer.py to use these adapters
10
+ Phase 3 (Consolidation): Remove duplicate code from viewer.py
11
+ Phase 4 (Completion): Full dependency on openadapt-viewer
12
+
13
+ Design Principles:
14
+ -----------------
15
+ 1. Each function wraps openadapt-viewer components with ML-specific context
16
+ 2. Functions accept openadapt-ml data structures (TrainingState, predictions, etc.)
17
+ 3. No breaking changes to existing viewer.py code
18
+ 4. Can be incrementally adopted in future phases
19
+ """
20
+
21
+ from __future__ import annotations
22
+
23
+ from pathlib import Path
24
+ from typing import Any
25
+
26
+ # Import openadapt-viewer components
27
+ from openadapt_viewer.components import (
28
+ screenshot_display as _screenshot_display,
29
+ playback_controls as _playback_controls,
30
+ metrics_grid as _metrics_grid,
31
+ badge as _badge,
32
+ )
33
+
34
+
35
def screenshot_with_predictions(
    screenshot_path: str | Path,
    human_action: dict[str, Any] | None = None,
    predicted_action: dict[str, Any] | None = None,
    step_number: int | None = None,
    show_difference: bool = True,
) -> str:
    """Generate screenshot display with human and AI action overlays.

    Translates openadapt-ml action dicts into overlay descriptors and
    delegates rendering to openadapt-viewer's ``screenshot_display``.

    Args:
        screenshot_path: Path to the screenshot image.
        human_action: Ground-truth action dict (keys ``type``, ``x``, ``y``).
        predicted_action: Model-predicted action dict (same keys).
        step_number: Optional step index; rendered as the caption.
        show_difference: Accepted for interface compatibility; not used here.

    Returns:
        The HTML string produced by the underlying viewer component.
    """
    # (action, short label, variant, marker color) for each overlay source;
    # human overlay is listed first so it renders beneath the prediction.
    sources = (
        (human_action, "H", "human", "#34d399"),
        (predicted_action, "AI", "predicted", "#00d4aa"),
    )
    overlays = [
        {
            "type": action.get("type", "click"),
            "x": action.get("x", 0),
            "y": action.get("y", 0),
            "label": label,
            "variant": variant,
            "color": color,
        }
        for action, label, variant, color in sources
        if action
    ]

    return _screenshot_display(
        image_path=str(screenshot_path),
        overlays=overlays,
        caption=None if step_number is None else f"Step {step_number}",
    )
76
+
77
+
78
def training_metrics(
    epoch: int | None = None,
    loss: float | None = None,
    accuracy: float | None = None,
    elapsed_time: float | None = None,
    learning_rate: float | None = None,
    **additional_metrics: Any,
) -> str:
    """Generate metrics grid for training statistics.

    Each provided value becomes one cell in the grid; ``None`` values are
    skipped. Extra keyword arguments are rendered with title-cased labels.

    Args:
        epoch: Current training epoch.
        loss: Training loss; colored green (<0.1), yellow (<0.5), else red.
        accuracy: Accuracy in [0, 1]; colored green (>0.9), yellow (>0.7),
            else red.
        elapsed_time: Elapsed wall-clock seconds (assumed non-negative).
        learning_rate: Current learning rate, shown in scientific notation.
        **additional_metrics: Arbitrary extra label/value pairs.

    Returns:
        The HTML string produced by the metrics-grid viewer component.
    """
    metrics: list[dict[str, Any]] = []

    if epoch is not None:
        metrics.append({"label": "Epoch", "value": epoch})

    if loss is not None:
        if loss < 0.1:
            loss_color = "success"
        elif loss < 0.5:
            loss_color = "warning"
        else:
            loss_color = "error"
        metrics.append({"label": "Loss", "value": f"{loss:.4f}", "color": loss_color})

    if accuracy is not None:
        if accuracy > 0.9:
            accuracy_color = "success"
        elif accuracy > 0.7:
            accuracy_color = "warning"
        else:
            accuracy_color = "error"
        metrics.append(
            {"label": "Accuracy", "value": f"{accuracy:.2%}", "color": accuracy_color}
        )

    if elapsed_time is not None:
        # Break whole seconds into h/m/s (fractional part is dropped).
        remaining_minutes, seconds = divmod(int(elapsed_time), 60)
        hours, minutes = divmod(remaining_minutes, 60)
        metrics.append({"label": "Elapsed", "value": f"{hours}h {minutes}m {seconds}s"})

    if learning_rate is not None:
        metrics.append({"label": "LR", "value": f"{learning_rate:.2e}"})

    for name, metric_value in additional_metrics.items():
        metrics.append(
            {"label": name.replace("_", " ").title(), "value": str(metric_value)}
        )

    return _metrics_grid(metrics)
119
+
120
+
121
def playback_controls(
    step_count: int,
    initial_step: int = 0,
) -> str:
    """Generate playback controls for the step-by-step viewer.

    Thin pass-through to openadapt-viewer's ``playback_controls`` component,
    kept so ML-side callers depend only on this adapter module.

    Args:
        step_count: Total number of steps available for playback.
        initial_step: Step to select when the controls first render.

    Returns:
        The HTML string produced by the viewer component.
    """
    options = {"step_count": step_count, "initial_step": initial_step}
    return _playback_controls(**options)
130
+
131
+
132
def correctness_badge(is_correct: bool, show_label: bool = True) -> str:
    """Generate a badge indicating prediction correctness.

    Args:
        is_correct: Whether the prediction matched the ground truth.
        show_label: If True, show the word ("Correct"/"Incorrect");
            otherwise show a compact check/cross mark.

    Returns:
        The HTML string produced by the badge viewer component.
    """
    color = "success" if is_correct else "error"
    if show_label:
        text = "Correct" if is_correct else "Incorrect"
    else:
        text = "✓" if is_correct else "✗"
    return _badge(text=text, color=color)
142
+
143
+
144
def generate_comparison_summary(
    total_steps: int,
    correct_steps: int,
    model_name: str | None = None,
) -> str:
    """Generate a summary card for model comparison results.

    Args:
        total_steps: Total number of evaluated steps.
        correct_steps: Number of steps the model predicted correctly.
        model_name: Optional model identifier, shown first in the grid.

    Returns:
        The HTML string produced by the metrics-grid viewer component.
    """
    # Guard against empty runs so an evaluation with zero steps renders 0%.
    accuracy = correct_steps / total_steps if total_steps > 0 else 0
    incorrect_steps = total_steps - correct_steps

    # Use the same three-tier thresholds as training_metrics() so colors are
    # consistent across viewer surfaces (previously accuracy <= 0.7 was shown
    # as "warning" instead of "error").
    if accuracy > 0.9:
        accuracy_color = "success"
    elif accuracy > 0.7:
        accuracy_color = "warning"
    else:
        accuracy_color = "error"

    metrics = [
        {"label": "Total Steps", "value": total_steps},
        {"label": "Correct", "value": correct_steps, "color": "success"},
        {
            "label": "Incorrect",
            "value": incorrect_steps,
            # Muted when there is nothing wrong, red otherwise.
            "color": "error" if incorrect_steps > 0 else "muted",
        },
        {
            "label": "Accuracy",
            "value": f"{accuracy:.1%}",
            "color": accuracy_color,
        },
    ]

    if model_name:
        metrics.insert(0, {"label": "Model", "value": model_name})

    return _metrics_grid(metrics)
172
+
173
+
174
# Public adapter API: the wrapper functions intended for use by
# openadapt-ml training/visualization code.
__all__ = [
    "screenshot_with_predictions",
    "training_metrics",
    "playback_controls",
    "correctness_badge",
    "generate_comparison_summary",
]