hyperstudy 0.1.0__tar.gz → 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hyperstudy-0.2.0/.github/workflows/publish.yml +59 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/.github/workflows/sync-release-notes.yml +1 -1
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/.github/workflows/test.yml +1 -1
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/CHANGELOG.md +13 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/PKG-INFO +34 -12
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/README.md +32 -11
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/pyproject.toml +2 -1
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/src/hyperstudy/__init__.py +1 -1
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/src/hyperstudy/_http.py +5 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/src/hyperstudy/client.py +187 -1
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/conftest.py +25 -0
- hyperstudy-0.2.0/tests/fixtures/deployment_sessions_response.json +26 -0
- hyperstudy-0.2.0/tests/fixtures/deployment_single_response.json +20 -0
- hyperstudy-0.2.0/tests/fixtures/deployments_list_response.json +28 -0
- hyperstudy-0.2.0/tests/fixtures/pre_experiment_response.json +69 -0
- hyperstudy-0.2.0/tests/fixtures/warnings_response.json +19 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/test_client.py +170 -3
- hyperstudy-0.1.0/.github/workflows/publish.yml +0 -27
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/.gitignore +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/LICENSE +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/src/hyperstudy/_dataframe.py +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/src/hyperstudy/_display.py +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/src/hyperstudy/_pagination.py +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/src/hyperstudy/_types.py +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/src/hyperstudy/exceptions.py +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/src/hyperstudy/experiments.py +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/__init__.py +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/fixtures/error_401.json +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/fixtures/error_403.json +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/fixtures/events_response.json +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/fixtures/experiment_single_response.json +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/fixtures/experiments_list_response.json +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/fixtures/paginated_page1.json +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/fixtures/paginated_page2.json +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/test_dataframe.py +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/test_experiments.py +0 -0
- {hyperstudy-0.1.0 → hyperstudy-0.2.0}/tests/test_pagination.py +0 -0
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
name: Publish to PyPI
|
|
2
|
+
|
|
3
|
+
on:
|
|
4
|
+
release:
|
|
5
|
+
types: [published]
|
|
6
|
+
|
|
7
|
+
jobs:
|
|
8
|
+
test:
|
|
9
|
+
runs-on: ubuntu-latest
|
|
10
|
+
strategy:
|
|
11
|
+
matrix:
|
|
12
|
+
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
|
13
|
+
|
|
14
|
+
steps:
|
|
15
|
+
- uses: actions/checkout@v4
|
|
16
|
+
|
|
17
|
+
- uses: actions/setup-python@v5
|
|
18
|
+
with:
|
|
19
|
+
python-version: ${{ matrix.python-version }}
|
|
20
|
+
|
|
21
|
+
- name: Install dependencies
|
|
22
|
+
run: pip install -e ".[dev,polars]"
|
|
23
|
+
|
|
24
|
+
- name: Lint
|
|
25
|
+
run: ruff check src/
|
|
26
|
+
|
|
27
|
+
- name: Run tests
|
|
28
|
+
run: pytest --cov=hyperstudy --cov-report=term-missing -v
|
|
29
|
+
|
|
30
|
+
publish:
|
|
31
|
+
needs: test
|
|
32
|
+
runs-on: ubuntu-latest
|
|
33
|
+
permissions:
|
|
34
|
+
id-token: write
|
|
35
|
+
attestations: write
|
|
36
|
+
contents: read
|
|
37
|
+
|
|
38
|
+
steps:
|
|
39
|
+
- uses: actions/checkout@v4
|
|
40
|
+
|
|
41
|
+
- uses: actions/setup-python@v5
|
|
42
|
+
with:
|
|
43
|
+
python-version: "3.12"
|
|
44
|
+
|
|
45
|
+
- name: Install build tools
|
|
46
|
+
run: pip install build
|
|
47
|
+
|
|
48
|
+
- name: Build package
|
|
49
|
+
run: python -m build
|
|
50
|
+
|
|
51
|
+
- name: Generate Sigstore attestations
|
|
52
|
+
uses: actions/attest-build-provenance@v2
|
|
53
|
+
with:
|
|
54
|
+
subject-path: dist/*
|
|
55
|
+
|
|
56
|
+
- name: Publish to PyPI
|
|
57
|
+
uses: pypa/gh-action-pypi-publish@release/v1
|
|
58
|
+
with:
|
|
59
|
+
attestations: true
|
|
@@ -1,5 +1,18 @@
|
|
|
1
1
|
# Changelog
|
|
2
2
|
|
|
3
|
+
## v0.2.0
|
|
4
|
+
|
|
5
|
+
### Features
|
|
6
|
+
|
|
7
|
+
- Convenience methods for common event categories: `get_questionnaire`, `get_instructions`, `get_consent`
|
|
8
|
+
- Deployment management: `list_deployments`, `get_deployment`, `get_deployment_sessions`
|
|
9
|
+
- API warning surfacing: backend `_warnings` metadata now emitted via Python's `warnings` module
|
|
10
|
+
- `get_all_data` now includes `ratings_sparse`, `questionnaire`, `instructions`, and `consent`
|
|
11
|
+
|
|
12
|
+
### Breaking Changes
|
|
13
|
+
|
|
14
|
+
- `get_all_data` return keys changed: `"ratings"` split into `"ratings_continuous"` and `"ratings_sparse"`
|
|
15
|
+
|
|
3
16
|
## v0.1.0
|
|
4
17
|
|
|
5
18
|
Initial release.
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: hyperstudy
|
|
3
|
-
Version: 0.
|
|
3
|
+
Version: 0.2.0
|
|
4
4
|
Summary: Python SDK for the HyperStudy experiment platform API
|
|
5
5
|
Project-URL: Homepage, https://hyperstudy.io
|
|
6
6
|
Project-URL: Documentation, https://docs.hyperstudy.io/developers/python-sdk
|
|
@@ -17,6 +17,7 @@ Classifier: Programming Language :: Python :: 3.9
|
|
|
17
17
|
Classifier: Programming Language :: Python :: 3.10
|
|
18
18
|
Classifier: Programming Language :: Python :: 3.11
|
|
19
19
|
Classifier: Programming Language :: Python :: 3.12
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
20
21
|
Classifier: Topic :: Scientific/Engineering
|
|
21
22
|
Requires-Python: >=3.9
|
|
22
23
|
Requires-Dist: pandas>=1.5.0
|
|
@@ -70,15 +71,20 @@ events = hs.get_events("participant_id", scope="participant", room_id="room_id")
|
|
|
70
71
|
All data retrieval methods follow the same pattern:
|
|
71
72
|
|
|
72
73
|
```python
|
|
73
|
-
events
|
|
74
|
-
recordings
|
|
75
|
-
chat
|
|
76
|
-
videochat
|
|
77
|
-
sync
|
|
78
|
-
ratings
|
|
79
|
-
components
|
|
80
|
-
participants
|
|
81
|
-
rooms
|
|
74
|
+
events = hs.get_events("exp_id")
|
|
75
|
+
recordings = hs.get_recordings("exp_id")
|
|
76
|
+
chat = hs.get_chat("exp_id")
|
|
77
|
+
videochat = hs.get_videochat("exp_id")
|
|
78
|
+
sync = hs.get_sync("exp_id")
|
|
79
|
+
ratings = hs.get_ratings("exp_id", kind="continuous")
|
|
80
|
+
components = hs.get_components("exp_id")
|
|
81
|
+
participants = hs.get_participants("exp_id")
|
|
82
|
+
rooms = hs.get_rooms("exp_id")
|
|
83
|
+
|
|
84
|
+
# Convenience methods for common event categories
|
|
85
|
+
questionnaire = hs.get_questionnaire("exp_id")
|
|
86
|
+
instructions = hs.get_instructions("exp_id")
|
|
87
|
+
consent = hs.get_consent("exp_id")
|
|
82
88
|
```
|
|
83
89
|
|
|
84
90
|
### Output Formats
|
|
@@ -125,11 +131,27 @@ hs.update_experiment("exp_id", name="Updated Name")
|
|
|
125
131
|
hs.delete_experiment("exp_id")
|
|
126
132
|
```
|
|
127
133
|
|
|
134
|
+
## Deployments
|
|
135
|
+
|
|
136
|
+
```python
|
|
137
|
+
# List deployments
|
|
138
|
+
deployments = hs.list_deployments()
|
|
139
|
+
deployments = hs.list_deployments(experiment_id="exp_id", status="active")
|
|
140
|
+
|
|
141
|
+
# Get deployment details
|
|
142
|
+
dep = hs.get_deployment("deployment_id")
|
|
143
|
+
|
|
144
|
+
# List sessions/rooms for a deployment
|
|
145
|
+
sessions = hs.get_deployment_sessions("deployment_id")
|
|
146
|
+
```
|
|
147
|
+
|
|
128
148
|
## All Data for a Participant
|
|
129
149
|
|
|
130
150
|
```python
|
|
131
151
|
data = hs.get_all_data("participant_id", room_id="room_id")
|
|
132
|
-
# Returns:
|
|
152
|
+
# Returns dict with keys: events, recordings, chat, videochat, sync,
|
|
153
|
+
# ratings_continuous, ratings_sparse, components, questionnaire,
|
|
154
|
+
# instructions, consent
|
|
133
155
|
```
|
|
134
156
|
|
|
135
157
|
## API Key
|
|
@@ -143,7 +165,7 @@ Full documentation: [docs.hyperstudy.io/developers/python-sdk](https://docs.hype
|
|
|
143
165
|
## Development
|
|
144
166
|
|
|
145
167
|
```bash
|
|
146
|
-
git clone https://github.com/
|
|
168
|
+
git clone https://github.com/hyperstudyio/hyperstudy-pythonsdk.git
|
|
147
169
|
cd hyperstudy-pythonsdk
|
|
148
170
|
pip install -e ".[dev,polars]"
|
|
149
171
|
pytest --cov=hyperstudy
|
|
@@ -37,15 +37,20 @@ events = hs.get_events("participant_id", scope="participant", room_id="room_id")
|
|
|
37
37
|
All data retrieval methods follow the same pattern:
|
|
38
38
|
|
|
39
39
|
```python
|
|
40
|
-
events
|
|
41
|
-
recordings
|
|
42
|
-
chat
|
|
43
|
-
videochat
|
|
44
|
-
sync
|
|
45
|
-
ratings
|
|
46
|
-
components
|
|
47
|
-
participants
|
|
48
|
-
rooms
|
|
40
|
+
events = hs.get_events("exp_id")
|
|
41
|
+
recordings = hs.get_recordings("exp_id")
|
|
42
|
+
chat = hs.get_chat("exp_id")
|
|
43
|
+
videochat = hs.get_videochat("exp_id")
|
|
44
|
+
sync = hs.get_sync("exp_id")
|
|
45
|
+
ratings = hs.get_ratings("exp_id", kind="continuous")
|
|
46
|
+
components = hs.get_components("exp_id")
|
|
47
|
+
participants = hs.get_participants("exp_id")
|
|
48
|
+
rooms = hs.get_rooms("exp_id")
|
|
49
|
+
|
|
50
|
+
# Convenience methods for common event categories
|
|
51
|
+
questionnaire = hs.get_questionnaire("exp_id")
|
|
52
|
+
instructions = hs.get_instructions("exp_id")
|
|
53
|
+
consent = hs.get_consent("exp_id")
|
|
49
54
|
```
|
|
50
55
|
|
|
51
56
|
### Output Formats
|
|
@@ -92,11 +97,27 @@ hs.update_experiment("exp_id", name="Updated Name")
|
|
|
92
97
|
hs.delete_experiment("exp_id")
|
|
93
98
|
```
|
|
94
99
|
|
|
100
|
+
## Deployments
|
|
101
|
+
|
|
102
|
+
```python
|
|
103
|
+
# List deployments
|
|
104
|
+
deployments = hs.list_deployments()
|
|
105
|
+
deployments = hs.list_deployments(experiment_id="exp_id", status="active")
|
|
106
|
+
|
|
107
|
+
# Get deployment details
|
|
108
|
+
dep = hs.get_deployment("deployment_id")
|
|
109
|
+
|
|
110
|
+
# List sessions/rooms for a deployment
|
|
111
|
+
sessions = hs.get_deployment_sessions("deployment_id")
|
|
112
|
+
```
|
|
113
|
+
|
|
95
114
|
## All Data for a Participant
|
|
96
115
|
|
|
97
116
|
```python
|
|
98
117
|
data = hs.get_all_data("participant_id", room_id="room_id")
|
|
99
|
-
# Returns:
|
|
118
|
+
# Returns dict with keys: events, recordings, chat, videochat, sync,
|
|
119
|
+
# ratings_continuous, ratings_sparse, components, questionnaire,
|
|
120
|
+
# instructions, consent
|
|
100
121
|
```
|
|
101
122
|
|
|
102
123
|
## API Key
|
|
@@ -110,7 +131,7 @@ Full documentation: [docs.hyperstudy.io/developers/python-sdk](https://docs.hype
|
|
|
110
131
|
## Development
|
|
111
132
|
|
|
112
133
|
```bash
|
|
113
|
-
git clone https://github.com/
|
|
134
|
+
git clone https://github.com/hyperstudyio/hyperstudy-pythonsdk.git
|
|
114
135
|
cd hyperstudy-pythonsdk
|
|
115
136
|
pip install -e ".[dev,polars]"
|
|
116
137
|
pytest --cov=hyperstudy
|
|
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "hyperstudy"
|
|
7
|
-
version = "0.
|
|
7
|
+
version = "0.2.0"
|
|
8
8
|
description = "Python SDK for the HyperStudy experiment platform API"
|
|
9
9
|
readme = "README.md"
|
|
10
10
|
license = "MIT"
|
|
@@ -21,6 +21,7 @@ classifiers = [
|
|
|
21
21
|
"Programming Language :: Python :: 3.10",
|
|
22
22
|
"Programming Language :: Python :: 3.11",
|
|
23
23
|
"Programming Language :: Python :: 3.12",
|
|
24
|
+
"Programming Language :: Python :: 3.13",
|
|
24
25
|
"Topic :: Scientific/Engineering",
|
|
25
26
|
]
|
|
26
27
|
dependencies = [
|
|
@@ -3,6 +3,7 @@
|
|
|
3
3
|
from __future__ import annotations
|
|
4
4
|
|
|
5
5
|
import os
|
|
6
|
+
import warnings
|
|
6
7
|
from typing import Any
|
|
7
8
|
|
|
8
9
|
import requests
|
|
@@ -125,4 +126,8 @@ class HttpTransport:
|
|
|
125
126
|
status_code=resp.status_code,
|
|
126
127
|
)
|
|
127
128
|
|
|
129
|
+
# Surface API warnings (e.g. missing Firestore indexes)
|
|
130
|
+
for w in body.get("metadata", {}).get("_warnings", []):
|
|
131
|
+
warnings.warn(w, stacklevel=4)
|
|
132
|
+
|
|
128
133
|
return body
|
|
@@ -277,6 +277,151 @@ class HyperStudy(ExperimentMixin):
|
|
|
277
277
|
output=output, progress=progress,
|
|
278
278
|
)
|
|
279
279
|
|
|
280
|
+
# ------------------------------------------------------------------
|
|
281
|
+
# Convenience: category-filtered events
|
|
282
|
+
# ------------------------------------------------------------------
|
|
283
|
+
|
|
284
|
+
def get_questionnaire(
|
|
285
|
+
self,
|
|
286
|
+
scope_id: str,
|
|
287
|
+
*,
|
|
288
|
+
scope: str = "experiment",
|
|
289
|
+
room_id: str | None = None,
|
|
290
|
+
start_time: str | None = None,
|
|
291
|
+
end_time: str | None = None,
|
|
292
|
+
sort: str | None = None,
|
|
293
|
+
order: str | None = None,
|
|
294
|
+
limit: int | None = None,
|
|
295
|
+
offset: int = 0,
|
|
296
|
+
output: str = "pandas",
|
|
297
|
+
progress: bool = True,
|
|
298
|
+
):
|
|
299
|
+
"""Fetch questionnaire responses.
|
|
300
|
+
|
|
301
|
+
Convenience wrapper around :meth:`get_events` with
|
|
302
|
+
``category="questionnaire"``.
|
|
303
|
+
"""
|
|
304
|
+
return self._fetch_data(
|
|
305
|
+
"events", scope_id,
|
|
306
|
+
scope=scope, room_id=room_id,
|
|
307
|
+
start_time=start_time, end_time=end_time,
|
|
308
|
+
category="questionnaire", sort=sort, order=order,
|
|
309
|
+
limit=limit, offset=offset,
|
|
310
|
+
output=output, progress=progress,
|
|
311
|
+
)
|
|
312
|
+
|
|
313
|
+
def get_instructions(
|
|
314
|
+
self,
|
|
315
|
+
scope_id: str,
|
|
316
|
+
*,
|
|
317
|
+
scope: str = "experiment",
|
|
318
|
+
room_id: str | None = None,
|
|
319
|
+
start_time: str | None = None,
|
|
320
|
+
end_time: str | None = None,
|
|
321
|
+
sort: str | None = None,
|
|
322
|
+
order: str | None = None,
|
|
323
|
+
limit: int | None = None,
|
|
324
|
+
offset: int = 0,
|
|
325
|
+
output: str = "pandas",
|
|
326
|
+
progress: bool = True,
|
|
327
|
+
):
|
|
328
|
+
"""Fetch instruction / comprehension-check events.
|
|
329
|
+
|
|
330
|
+
Fetches ``pre_experiment`` events and filters to those whose
|
|
331
|
+
``eventType`` starts with ``"instructions."``.
|
|
332
|
+
"""
|
|
333
|
+
return self._fetch_and_filter(
|
|
334
|
+
"instructions.", scope_id,
|
|
335
|
+
scope=scope, room_id=room_id,
|
|
336
|
+
start_time=start_time, end_time=end_time,
|
|
337
|
+
sort=sort, order=order,
|
|
338
|
+
limit=limit, offset=offset,
|
|
339
|
+
output=output, progress=progress,
|
|
340
|
+
)
|
|
341
|
+
|
|
342
|
+
def get_consent(
|
|
343
|
+
self,
|
|
344
|
+
scope_id: str,
|
|
345
|
+
*,
|
|
346
|
+
scope: str = "experiment",
|
|
347
|
+
room_id: str | None = None,
|
|
348
|
+
start_time: str | None = None,
|
|
349
|
+
end_time: str | None = None,
|
|
350
|
+
sort: str | None = None,
|
|
351
|
+
order: str | None = None,
|
|
352
|
+
limit: int | None = None,
|
|
353
|
+
offset: int = 0,
|
|
354
|
+
output: str = "pandas",
|
|
355
|
+
progress: bool = True,
|
|
356
|
+
):
|
|
357
|
+
"""Fetch consent events.
|
|
358
|
+
|
|
359
|
+
Fetches ``pre_experiment`` events and filters to those whose
|
|
360
|
+
``eventType`` starts with ``"consent."``.
|
|
361
|
+
"""
|
|
362
|
+
return self._fetch_and_filter(
|
|
363
|
+
"consent.", scope_id,
|
|
364
|
+
scope=scope, room_id=room_id,
|
|
365
|
+
start_time=start_time, end_time=end_time,
|
|
366
|
+
sort=sort, order=order,
|
|
367
|
+
limit=limit, offset=offset,
|
|
368
|
+
output=output, progress=progress,
|
|
369
|
+
)
|
|
370
|
+
|
|
371
|
+
# ------------------------------------------------------------------
|
|
372
|
+
# Deployments
|
|
373
|
+
# ------------------------------------------------------------------
|
|
374
|
+
|
|
375
|
+
def list_deployments(
|
|
376
|
+
self,
|
|
377
|
+
*,
|
|
378
|
+
experiment_id: str | None = None,
|
|
379
|
+
status: str | None = None,
|
|
380
|
+
output: str = "pandas",
|
|
381
|
+
):
|
|
382
|
+
"""List deployments for the authenticated user.
|
|
383
|
+
|
|
384
|
+
Args:
|
|
385
|
+
experiment_id: Filter by experiment.
|
|
386
|
+
status: Filter by deployment status.
|
|
387
|
+
output: ``"pandas"`` (default), ``"polars"``, or ``"dict"``.
|
|
388
|
+
"""
|
|
389
|
+
params: dict[str, Any] = {}
|
|
390
|
+
if experiment_id:
|
|
391
|
+
params["experimentId"] = experiment_id
|
|
392
|
+
if status:
|
|
393
|
+
params["status"] = status
|
|
394
|
+
|
|
395
|
+
body = self._transport.get("deployments", params=params or None)
|
|
396
|
+
data = body.get("data", [])
|
|
397
|
+
return self._convert_output(data, output)
|
|
398
|
+
|
|
399
|
+
def get_deployment(self, deployment_id: str) -> dict[str, Any]:
|
|
400
|
+
"""Get deployment details.
|
|
401
|
+
|
|
402
|
+
Returns:
|
|
403
|
+
Deployment dict.
|
|
404
|
+
"""
|
|
405
|
+
body = self._transport.get(f"deployments/{deployment_id}")
|
|
406
|
+
data = body.get("data", [])
|
|
407
|
+
return data[0] if isinstance(data, list) and data else data
|
|
408
|
+
|
|
409
|
+
def get_deployment_sessions(
|
|
410
|
+
self,
|
|
411
|
+
deployment_id: str,
|
|
412
|
+
*,
|
|
413
|
+
output: str = "pandas",
|
|
414
|
+
):
|
|
415
|
+
"""List rooms/sessions for a deployment.
|
|
416
|
+
|
|
417
|
+
Args:
|
|
418
|
+
deployment_id: Deployment ID.
|
|
419
|
+
output: ``"pandas"`` (default), ``"polars"``, or ``"dict"``.
|
|
420
|
+
"""
|
|
421
|
+
body = self._transport.get(f"deployments/{deployment_id}/sessions")
|
|
422
|
+
data = body.get("data", [])
|
|
423
|
+
return self._convert_output(data, output)
|
|
424
|
+
|
|
280
425
|
# ------------------------------------------------------------------
|
|
281
426
|
# Convenience: all data for a participant
|
|
282
427
|
# ------------------------------------------------------------------
|
|
@@ -300,14 +445,55 @@ class HyperStudy(ExperimentMixin):
|
|
|
300
445
|
"chat": self.get_chat(participant_id, **common),
|
|
301
446
|
"videochat": self.get_videochat(participant_id, **common),
|
|
302
447
|
"sync": self.get_sync(participant_id, **common),
|
|
303
|
-
"
|
|
448
|
+
"ratings_continuous": self.get_ratings(participant_id, kind="continuous", **common),
|
|
449
|
+
"ratings_sparse": self.get_ratings(participant_id, kind="sparse", **common),
|
|
304
450
|
"components": self.get_components(participant_id, **common),
|
|
451
|
+
"questionnaire": self.get_questionnaire(participant_id, **common),
|
|
452
|
+
"instructions": self.get_instructions(participant_id, **common),
|
|
453
|
+
"consent": self.get_consent(participant_id, **common),
|
|
305
454
|
}
|
|
306
455
|
|
|
307
456
|
# ------------------------------------------------------------------
|
|
308
457
|
# Internal helpers
|
|
309
458
|
# ------------------------------------------------------------------
|
|
310
459
|
|
|
460
|
+
def _fetch_and_filter(
|
|
461
|
+
self,
|
|
462
|
+
event_type_prefix: str,
|
|
463
|
+
scope_id: str,
|
|
464
|
+
*,
|
|
465
|
+
scope: str = "experiment",
|
|
466
|
+
room_id: str | None = None,
|
|
467
|
+
start_time: str | None = None,
|
|
468
|
+
end_time: str | None = None,
|
|
469
|
+
sort: str | None = None,
|
|
470
|
+
order: str | None = None,
|
|
471
|
+
limit: int | None = None,
|
|
472
|
+
offset: int = 0,
|
|
473
|
+
output: str = "pandas",
|
|
474
|
+
progress: bool = True,
|
|
475
|
+
):
|
|
476
|
+
"""Fetch pre_experiment events and filter by eventType prefix.
|
|
477
|
+
|
|
478
|
+
Used by :meth:`get_instructions` and :meth:`get_consent` which
|
|
479
|
+
share the ``pre_experiment`` category but need client-side
|
|
480
|
+
filtering on the ``eventType`` field.
|
|
481
|
+
"""
|
|
482
|
+
# Always fetch as dicts so we can filter before conversion
|
|
483
|
+
raw = self._fetch_data(
|
|
484
|
+
"events", scope_id,
|
|
485
|
+
scope=scope, room_id=room_id,
|
|
486
|
+
start_time=start_time, end_time=end_time,
|
|
487
|
+
category="pre_experiment", sort=sort, order=order,
|
|
488
|
+
limit=limit, offset=offset,
|
|
489
|
+
output="dict", progress=progress,
|
|
490
|
+
)
|
|
491
|
+
filtered = [
|
|
492
|
+
e for e in raw
|
|
493
|
+
if e.get("eventType", "").startswith(event_type_prefix)
|
|
494
|
+
]
|
|
495
|
+
return self._convert_output(filtered, output)
|
|
496
|
+
|
|
311
497
|
def _fetch_data(
|
|
312
498
|
self,
|
|
313
499
|
data_type: str,
|
|
@@ -51,6 +51,31 @@ def paginated_page2():
|
|
|
51
51
|
return load_fixture("paginated_page2.json")
|
|
52
52
|
|
|
53
53
|
|
|
54
|
+
@pytest.fixture
|
|
55
|
+
def pre_experiment_response():
|
|
56
|
+
return load_fixture("pre_experiment_response.json")
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
@pytest.fixture
|
|
60
|
+
def deployments_list_response():
|
|
61
|
+
return load_fixture("deployments_list_response.json")
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
@pytest.fixture
|
|
65
|
+
def deployment_single_response():
|
|
66
|
+
return load_fixture("deployment_single_response.json")
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@pytest.fixture
|
|
70
|
+
def deployment_sessions_response():
|
|
71
|
+
return load_fixture("deployment_sessions_response.json")
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
@pytest.fixture
|
|
75
|
+
def warnings_response():
|
|
76
|
+
return load_fixture("warnings_response.json")
|
|
77
|
+
|
|
78
|
+
|
|
54
79
|
@pytest.fixture
|
|
55
80
|
def error_401():
|
|
56
81
|
return load_fixture("error_401.json")
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
{
|
|
2
|
+
"status": "success",
|
|
3
|
+
"metadata": {
|
|
4
|
+
"dataType": "sessions",
|
|
5
|
+
"scope": "deployment",
|
|
6
|
+
"scopeId": "dep_001"
|
|
7
|
+
},
|
|
8
|
+
"data": [
|
|
9
|
+
{
|
|
10
|
+
"id": "room_001",
|
|
11
|
+
"experimentId": "exp_abc123",
|
|
12
|
+
"deploymentId": "dep_001",
|
|
13
|
+
"status": "completed",
|
|
14
|
+
"participantCount": 2,
|
|
15
|
+
"createdAt": "2024-06-10T14:00:00.000Z"
|
|
16
|
+
},
|
|
17
|
+
{
|
|
18
|
+
"id": "room_002",
|
|
19
|
+
"experimentId": "exp_abc123",
|
|
20
|
+
"deploymentId": "dep_001",
|
|
21
|
+
"status": "completed",
|
|
22
|
+
"participantCount": 2,
|
|
23
|
+
"createdAt": "2024-06-11T14:00:00.000Z"
|
|
24
|
+
}
|
|
25
|
+
]
|
|
26
|
+
}
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
{
|
|
2
|
+
"status": "success",
|
|
3
|
+
"metadata": {
|
|
4
|
+
"dataType": "deployment",
|
|
5
|
+
"scope": "deployment",
|
|
6
|
+
"scopeId": "dep_001"
|
|
7
|
+
},
|
|
8
|
+
"data": {
|
|
9
|
+
"id": "dep_001",
|
|
10
|
+
"experimentId": "exp_abc123",
|
|
11
|
+
"name": "Pilot Study",
|
|
12
|
+
"status": "active",
|
|
13
|
+
"ownerId": "user_abc",
|
|
14
|
+
"ownerName": "Test User",
|
|
15
|
+
"participantCount": 24,
|
|
16
|
+
"maxParticipants": 100,
|
|
17
|
+
"createdAt": "2024-06-01T10:00:00.000Z",
|
|
18
|
+
"updatedAt": "2024-06-15T10:00:00.000Z"
|
|
19
|
+
}
|
|
20
|
+
}
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
{
|
|
2
|
+
"status": "success",
|
|
3
|
+
"metadata": {
|
|
4
|
+
"dataType": "deployments",
|
|
5
|
+
"scope": "user",
|
|
6
|
+
"scopeId": "user_abc"
|
|
7
|
+
},
|
|
8
|
+
"data": [
|
|
9
|
+
{
|
|
10
|
+
"id": "dep_001",
|
|
11
|
+
"experimentId": "exp_abc123",
|
|
12
|
+
"name": "Pilot Study",
|
|
13
|
+
"status": "active",
|
|
14
|
+
"ownerId": "user_abc",
|
|
15
|
+
"ownerName": "Test User",
|
|
16
|
+
"createdAt": "2024-06-01T10:00:00.000Z"
|
|
17
|
+
},
|
|
18
|
+
{
|
|
19
|
+
"id": "dep_002",
|
|
20
|
+
"experimentId": "exp_abc123",
|
|
21
|
+
"name": "Main Study",
|
|
22
|
+
"status": "completed",
|
|
23
|
+
"ownerId": "user_abc",
|
|
24
|
+
"ownerName": "Test User",
|
|
25
|
+
"createdAt": "2024-07-01T10:00:00.000Z"
|
|
26
|
+
}
|
|
27
|
+
]
|
|
28
|
+
}
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
{
|
|
2
|
+
"status": "success",
|
|
3
|
+
"metadata": {
|
|
4
|
+
"dataType": "events",
|
|
5
|
+
"scope": "experiment",
|
|
6
|
+
"scopeId": "exp_abc123",
|
|
7
|
+
"timestamp": "2024-06-15T10:00:00.000Z",
|
|
8
|
+
"query": {
|
|
9
|
+
"category": "pre_experiment",
|
|
10
|
+
"limit": 1000,
|
|
11
|
+
"offset": 0,
|
|
12
|
+
"sort": "timestamp",
|
|
13
|
+
"order": "asc"
|
|
14
|
+
},
|
|
15
|
+
"pagination": {
|
|
16
|
+
"total": 4,
|
|
17
|
+
"returned": 4,
|
|
18
|
+
"hasMore": false,
|
|
19
|
+
"limit": 1000,
|
|
20
|
+
"offset": 0
|
|
21
|
+
}
|
|
22
|
+
},
|
|
23
|
+
"data": [
|
|
24
|
+
{
|
|
25
|
+
"id": "evt_010",
|
|
26
|
+
"experimentId": "exp_abc123",
|
|
27
|
+
"roomId": "room_xyz",
|
|
28
|
+
"participantId": "user_1",
|
|
29
|
+
"onset": 0,
|
|
30
|
+
"timestamp": "2024-06-15T09:59:50.000Z",
|
|
31
|
+
"category": "pre_experiment",
|
|
32
|
+
"eventType": "consent.accepted",
|
|
33
|
+
"content": "agreed"
|
|
34
|
+
},
|
|
35
|
+
{
|
|
36
|
+
"id": "evt_011",
|
|
37
|
+
"experimentId": "exp_abc123",
|
|
38
|
+
"roomId": "room_xyz",
|
|
39
|
+
"participantId": "user_1",
|
|
40
|
+
"onset": 500,
|
|
41
|
+
"timestamp": "2024-06-15T09:59:50.500Z",
|
|
42
|
+
"category": "pre_experiment",
|
|
43
|
+
"eventType": "instructions.page_view",
|
|
44
|
+
"content": "page_1"
|
|
45
|
+
},
|
|
46
|
+
{
|
|
47
|
+
"id": "evt_012",
|
|
48
|
+
"experimentId": "exp_abc123",
|
|
49
|
+
"roomId": "room_xyz",
|
|
50
|
+
"participantId": "user_1",
|
|
51
|
+
"onset": 2000,
|
|
52
|
+
"timestamp": "2024-06-15T09:59:52.000Z",
|
|
53
|
+
"category": "pre_experiment",
|
|
54
|
+
"eventType": "instructions.comprehension_check",
|
|
55
|
+
"content": "correct"
|
|
56
|
+
},
|
|
57
|
+
{
|
|
58
|
+
"id": "evt_013",
|
|
59
|
+
"experimentId": "exp_abc123",
|
|
60
|
+
"roomId": "room_xyz",
|
|
61
|
+
"participantId": "user_2",
|
|
62
|
+
"onset": 0,
|
|
63
|
+
"timestamp": "2024-06-15T09:59:50.000Z",
|
|
64
|
+
"category": "pre_experiment",
|
|
65
|
+
"eventType": "consent.accepted",
|
|
66
|
+
"content": "agreed"
|
|
67
|
+
}
|
|
68
|
+
]
|
|
69
|
+
}
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
{
|
|
2
|
+
"status": "success",
|
|
3
|
+
"metadata": {
|
|
4
|
+
"dataType": "events",
|
|
5
|
+
"scope": "experiment",
|
|
6
|
+
"scopeId": "exp_abc123",
|
|
7
|
+
"pagination": {
|
|
8
|
+
"total": 0,
|
|
9
|
+
"returned": 0,
|
|
10
|
+
"hasMore": false,
|
|
11
|
+
"limit": 1000,
|
|
12
|
+
"offset": 0
|
|
13
|
+
},
|
|
14
|
+
"_warnings": [
|
|
15
|
+
"MISSING_INDEX: Query for experiment/exp_abc123 requires a composite Firestore index. Results may be incomplete."
|
|
16
|
+
]
|
|
17
|
+
},
|
|
18
|
+
"data": []
|
|
19
|
+
}
|
|
@@ -3,6 +3,7 @@
|
|
|
3
3
|
from __future__ import annotations
|
|
4
4
|
|
|
5
5
|
import os
|
|
6
|
+
import warnings
|
|
6
7
|
|
|
7
8
|
import pandas as pd
|
|
8
9
|
import pytest
|
|
@@ -200,22 +201,29 @@ def test_get_sync_with_aggregation(api_key, events_response):
|
|
|
200
201
|
|
|
201
202
|
|
|
202
203
|
@responses.activate
|
|
203
|
-
def test_get_all_data(api_key, events_response):
|
|
204
|
+
def test_get_all_data(api_key, events_response, pre_experiment_response):
|
|
204
205
|
"""get_all_data returns a dict of DataFrames."""
|
|
205
206
|
# Mock all data type endpoints for participant scope
|
|
206
207
|
for dtype in ("events", "recordings", "chat", "videochat", "sync",
|
|
207
|
-
"ratings/continuous", "components"):
|
|
208
|
+
"ratings/continuous", "ratings/sparse", "components"):
|
|
208
209
|
responses.get(
|
|
209
210
|
f"{BASE_URL}/data/{dtype}/participant/user_1",
|
|
210
211
|
json=events_response,
|
|
211
212
|
status=200,
|
|
212
213
|
)
|
|
214
|
+
# Questionnaire, instructions, consent all hit the events endpoint
|
|
215
|
+
# with different category params — responses matches by URL, so we
|
|
216
|
+
# need a single mock for the events endpoint that handles all calls.
|
|
217
|
+
# The events endpoint is already mocked above, so the category-filtered
|
|
218
|
+
# calls will also match it.
|
|
213
219
|
client = HyperStudy(api_key=api_key, base_url=BASE_URL)
|
|
214
220
|
result = client.get_all_data("user_1", room_id="room_xyz")
|
|
215
221
|
|
|
216
222
|
assert isinstance(result, dict)
|
|
217
223
|
assert set(result.keys()) == {
|
|
218
|
-
"events", "recordings", "chat", "videochat", "sync",
|
|
224
|
+
"events", "recordings", "chat", "videochat", "sync",
|
|
225
|
+
"ratings_continuous", "ratings_sparse", "components",
|
|
226
|
+
"questionnaire", "instructions", "consent",
|
|
219
227
|
}
|
|
220
228
|
for v in result.values():
|
|
221
229
|
assert isinstance(v, pd.DataFrame)
|
|
@@ -263,3 +271,162 @@ def test_invalid_scope_raises_value_error(api_key):
|
|
|
263
271
|
client = HyperStudy(api_key=api_key, base_url=BASE_URL)
|
|
264
272
|
with pytest.raises(ValueError, match="invalid"):
|
|
265
273
|
client.get_events("exp_abc123", scope="invalid", limit=100)
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
# ------------------------------------------------------------------
|
|
277
|
+
# Convenience methods — questionnaire, instructions, consent
|
|
278
|
+
# ------------------------------------------------------------------
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
@responses.activate
|
|
282
|
+
def test_get_questionnaire(api_key, events_response):
|
|
283
|
+
"""get_questionnaire passes category=questionnaire in the URL."""
|
|
284
|
+
responses.get(
|
|
285
|
+
f"{BASE_URL}/data/events/experiment/exp_abc123",
|
|
286
|
+
json=events_response,
|
|
287
|
+
status=200,
|
|
288
|
+
)
|
|
289
|
+
client = HyperStudy(api_key=api_key, base_url=BASE_URL)
|
|
290
|
+
df = client.get_questionnaire("exp_abc123", limit=1000)
|
|
291
|
+
assert isinstance(df, pd.DataFrame)
|
|
292
|
+
assert "category=questionnaire" in responses.calls[0].request.url
|
|
293
|
+
|
|
294
|
+
|
|
295
|
+
@responses.activate
|
|
296
|
+
def test_get_instructions_filters_by_event_type(api_key, pre_experiment_response):
|
|
297
|
+
"""get_instructions fetches pre_experiment events and filters to instructions."""
|
|
298
|
+
responses.get(
|
|
299
|
+
f"{BASE_URL}/data/events/experiment/exp_abc123",
|
|
300
|
+
json=pre_experiment_response,
|
|
301
|
+
status=200,
|
|
302
|
+
)
|
|
303
|
+
client = HyperStudy(api_key=api_key, base_url=BASE_URL)
|
|
304
|
+
df = client.get_instructions("exp_abc123", limit=1000)
|
|
305
|
+
|
|
306
|
+
assert isinstance(df, pd.DataFrame)
|
|
307
|
+
assert len(df) == 2 # 2 instruction events out of 4 pre_experiment
|
|
308
|
+
assert "category=pre_experiment" in responses.calls[0].request.url
|
|
309
|
+
assert all(et.startswith("instructions.") for et in df["eventType"])
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
@responses.activate
|
|
313
|
+
def test_get_consent_filters_by_event_type(api_key, pre_experiment_response):
|
|
314
|
+
"""get_consent fetches pre_experiment events and filters to consent."""
|
|
315
|
+
responses.get(
|
|
316
|
+
f"{BASE_URL}/data/events/experiment/exp_abc123",
|
|
317
|
+
json=pre_experiment_response,
|
|
318
|
+
status=200,
|
|
319
|
+
)
|
|
320
|
+
client = HyperStudy(api_key=api_key, base_url=BASE_URL)
|
|
321
|
+
df = client.get_consent("exp_abc123", limit=1000)
|
|
322
|
+
|
|
323
|
+
assert isinstance(df, pd.DataFrame)
|
|
324
|
+
assert len(df) == 2 # 2 consent events out of 4 pre_experiment
|
|
325
|
+
assert all(et.startswith("consent.") for et in df["eventType"])
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
@responses.activate
|
|
329
|
+
def test_get_instructions_dict_output(api_key, pre_experiment_response):
|
|
330
|
+
"""get_instructions with output='dict' returns filtered list."""
|
|
331
|
+
responses.get(
|
|
332
|
+
f"{BASE_URL}/data/events/experiment/exp_abc123",
|
|
333
|
+
json=pre_experiment_response,
|
|
334
|
+
status=200,
|
|
335
|
+
)
|
|
336
|
+
client = HyperStudy(api_key=api_key, base_url=BASE_URL)
|
|
337
|
+
data = client.get_instructions("exp_abc123", output="dict", limit=1000)
|
|
338
|
+
|
|
339
|
+
assert isinstance(data, list)
|
|
340
|
+
assert len(data) == 2
|
|
341
|
+
assert all(e["eventType"].startswith("instructions.") for e in data)
|
|
342
|
+
|
|
343
|
+
|
|
344
|
+
# ------------------------------------------------------------------
|
|
345
|
+
# Deployments
|
|
346
|
+
# ------------------------------------------------------------------
|
|
347
|
+
|
|
348
|
+
|
|
349
|
+
@responses.activate
def test_list_deployments(api_key, deployments_list_response):
    """list_deployments hits /deployments and returns a DataFrame."""
    responses.get(
        f"{BASE_URL}/deployments",
        json=deployments_list_response,
        status=200,
    )

    client = HyperStudy(api_key=api_key, base_url=BASE_URL)
    deployments = client.list_deployments()

    # Both fixture deployments come back as DataFrame rows.
    assert isinstance(deployments, pd.DataFrame)
    assert len(deployments) == 2
    assert "dep_001" in deployments["id"].values
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
@responses.activate
def test_list_deployments_with_filters(api_key, deployments_list_response):
    """list_deployments passes experiment_id and status as query params."""
    responses.get(
        f"{BASE_URL}/deployments",
        json=deployments_list_response,
        status=200,
    )

    client = HyperStudy(api_key=api_key, base_url=BASE_URL)
    client.list_deployments(experiment_id="exp_abc123", status="active")

    # Python-style kwargs must be translated to camelCase query params.
    called_url = responses.calls[0].request.url
    assert "experimentId=exp_abc123" in called_url
    assert "status=active" in called_url
|
|
379
|
+
|
|
380
|
+
|
|
381
|
+
@responses.activate
def test_get_deployment(api_key, deployment_single_response):
    """get_deployment returns a single deployment dict."""
    responses.get(
        f"{BASE_URL}/deployments/dep_001",
        json=deployment_single_response,
        status=200,
    )

    client = HyperStudy(api_key=api_key, base_url=BASE_URL)
    deployment = client.get_deployment("dep_001")

    # A single deployment is returned as a plain dict, not a DataFrame.
    assert isinstance(deployment, dict)
    assert deployment["id"] == "dep_001"
    assert deployment["name"] == "Pilot Study"
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
@responses.activate
def test_get_deployment_sessions(api_key, deployment_sessions_response):
    """get_deployment_sessions returns sessions as a DataFrame."""
    responses.get(
        f"{BASE_URL}/deployments/dep_001/sessions",
        json=deployment_sessions_response,
        status=200,
    )

    client = HyperStudy(api_key=api_key, base_url=BASE_URL)
    sessions = client.get_deployment_sessions("dep_001")

    # Both fixture sessions should be present as rows.
    assert isinstance(sessions, pd.DataFrame)
    assert len(sessions) == 2
    assert "room_001" in sessions["id"].values
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
# ------------------------------------------------------------------
|
|
414
|
+
# API warnings
|
|
415
|
+
# ------------------------------------------------------------------
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
@responses.activate
def test_api_warnings_surfaced(api_key, warnings_response):
    """API _warnings in metadata are surfaced via Python warnings."""
    responses.get(
        f"{BASE_URL}/data/events/experiment/exp_abc123",
        json=warnings_response,
        status=200,
    )

    client = HyperStudy(api_key=api_key, base_url=BASE_URL)
    with warnings.catch_warnings(record=True) as captured:
        # "always" ensures the warning is recorded even if already seen.
        warnings.simplefilter("always")
        client.get_events("exp_abc123", limit=1000)

    # Exactly one warning, carrying the API's warning code.
    assert len(captured) == 1
    assert "MISSING_INDEX" in str(captured[0].message)
|
|
@@ -1,27 +0,0 @@
|
|
|
1
|
-
name: Publish to PyPI
|
|
2
|
-
|
|
3
|
-
on:
|
|
4
|
-
release:
|
|
5
|
-
types: [published]
|
|
6
|
-
|
|
7
|
-
jobs:
|
|
8
|
-
publish:
|
|
9
|
-
runs-on: ubuntu-latest
|
|
10
|
-
permissions:
|
|
11
|
-
id-token: write
|
|
12
|
-
|
|
13
|
-
steps:
|
|
14
|
-
- uses: actions/checkout@v4
|
|
15
|
-
|
|
16
|
-
- uses: actions/setup-python@v5
|
|
17
|
-
with:
|
|
18
|
-
python-version: "3.12"
|
|
19
|
-
|
|
20
|
-
- name: Install build tools
|
|
21
|
-
run: pip install build
|
|
22
|
-
|
|
23
|
-
- name: Build package
|
|
24
|
-
run: python -m build
|
|
25
|
-
|
|
26
|
-
- name: Publish to PyPI
|
|
27
|
-
uses: pypa/gh-action-pypi-publish@release/v1
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|