arize-phoenix 11.38.0__py3-none-any.whl → 12.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of arize-phoenix might be problematic. Click here for more details.
- {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.2.0.dist-info}/METADATA +3 -3
- {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.2.0.dist-info}/RECORD +83 -58
- phoenix/config.py +1 -11
- phoenix/db/bulk_inserter.py +8 -0
- phoenix/db/facilitator.py +1 -1
- phoenix/db/helpers.py +202 -33
- phoenix/db/insertion/dataset.py +7 -0
- phoenix/db/insertion/document_annotation.py +1 -1
- phoenix/db/insertion/helpers.py +2 -2
- phoenix/db/insertion/session_annotation.py +176 -0
- phoenix/db/insertion/span_annotation.py +1 -1
- phoenix/db/insertion/trace_annotation.py +1 -1
- phoenix/db/insertion/types.py +29 -3
- phoenix/db/migrations/versions/01a8342c9cdf_add_user_id_on_datasets.py +40 -0
- phoenix/db/migrations/versions/0df286449799_add_session_annotations_table.py +105 -0
- phoenix/db/migrations/versions/272b66ff50f8_drop_single_indices.py +119 -0
- phoenix/db/migrations/versions/58228d933c91_dataset_labels.py +67 -0
- phoenix/db/migrations/versions/699f655af132_experiment_tags.py +57 -0
- phoenix/db/migrations/versions/735d3d93c33e_add_composite_indices.py +41 -0
- phoenix/db/migrations/versions/ab513d89518b_add_user_id_on_dataset_versions.py +40 -0
- phoenix/db/migrations/versions/d0690a79ea51_users_on_experiments.py +40 -0
- phoenix/db/migrations/versions/deb2c81c0bb2_dataset_splits.py +139 -0
- phoenix/db/migrations/versions/e76cbd66ffc3_add_experiments_dataset_examples.py +87 -0
- phoenix/db/models.py +306 -46
- phoenix/server/api/context.py +15 -2
- phoenix/server/api/dataloaders/__init__.py +8 -2
- phoenix/server/api/dataloaders/dataset_example_splits.py +40 -0
- phoenix/server/api/dataloaders/dataset_labels.py +36 -0
- phoenix/server/api/dataloaders/session_annotations_by_session.py +29 -0
- phoenix/server/api/dataloaders/table_fields.py +2 -2
- phoenix/server/api/dataloaders/trace_annotations_by_trace.py +27 -0
- phoenix/server/api/helpers/playground_clients.py +66 -35
- phoenix/server/api/helpers/playground_users.py +26 -0
- phoenix/server/api/input_types/{SpanAnnotationFilter.py → AnnotationFilter.py} +22 -14
- phoenix/server/api/input_types/CreateProjectSessionAnnotationInput.py +37 -0
- phoenix/server/api/input_types/UpdateAnnotationInput.py +34 -0
- phoenix/server/api/mutations/__init__.py +8 -0
- phoenix/server/api/mutations/chat_mutations.py +8 -3
- phoenix/server/api/mutations/dataset_label_mutations.py +291 -0
- phoenix/server/api/mutations/dataset_mutations.py +5 -0
- phoenix/server/api/mutations/dataset_split_mutations.py +423 -0
- phoenix/server/api/mutations/project_session_annotations_mutations.py +161 -0
- phoenix/server/api/queries.py +53 -0
- phoenix/server/api/routers/auth.py +5 -5
- phoenix/server/api/routers/oauth2.py +5 -23
- phoenix/server/api/routers/v1/__init__.py +2 -0
- phoenix/server/api/routers/v1/annotations.py +320 -0
- phoenix/server/api/routers/v1/datasets.py +5 -0
- phoenix/server/api/routers/v1/experiments.py +10 -3
- phoenix/server/api/routers/v1/sessions.py +111 -0
- phoenix/server/api/routers/v1/traces.py +1 -2
- phoenix/server/api/routers/v1/users.py +7 -0
- phoenix/server/api/subscriptions.py +5 -2
- phoenix/server/api/types/Dataset.py +8 -0
- phoenix/server/api/types/DatasetExample.py +18 -0
- phoenix/server/api/types/DatasetLabel.py +23 -0
- phoenix/server/api/types/DatasetSplit.py +32 -0
- phoenix/server/api/types/Experiment.py +0 -4
- phoenix/server/api/types/Project.py +16 -0
- phoenix/server/api/types/ProjectSession.py +88 -3
- phoenix/server/api/types/ProjectSessionAnnotation.py +68 -0
- phoenix/server/api/types/Prompt.py +18 -1
- phoenix/server/api/types/Span.py +5 -5
- phoenix/server/api/types/Trace.py +61 -0
- phoenix/server/app.py +13 -14
- phoenix/server/cost_tracking/model_cost_manifest.json +132 -2
- phoenix/server/dml_event.py +13 -0
- phoenix/server/static/.vite/manifest.json +39 -39
- phoenix/server/static/assets/{components-BQPHTBfv.js → components-BG6v0EM8.js} +705 -385
- phoenix/server/static/assets/{index-BL5BMgJU.js → index-CSVcULw1.js} +13 -13
- phoenix/server/static/assets/{pages-C0Y17J0T.js → pages-DgaM7kpM.js} +1356 -1155
- phoenix/server/static/assets/{vendor-BdjZxMii.js → vendor-BqTEkGQU.js} +183 -183
- phoenix/server/static/assets/{vendor-arizeai-CHYlS8jV.js → vendor-arizeai-DlOj0PQQ.js} +15 -24
- phoenix/server/static/assets/{vendor-codemirror-Di6t4HnH.js → vendor-codemirror-B2PHH5yZ.js} +3 -3
- phoenix/server/static/assets/{vendor-recharts-C9wCDYj3.js → vendor-recharts-CKsi4IjN.js} +1 -1
- phoenix/server/static/assets/{vendor-shiki-MNnmOotP.js → vendor-shiki-DN26BkKE.js} +1 -1
- phoenix/server/utils.py +74 -0
- phoenix/session/session.py +25 -5
- phoenix/version.py +1 -1
- phoenix/server/api/dataloaders/experiment_repetition_counts.py +0 -39
- {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.2.0.dist-info}/WHEEL +0 -0
- {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.2.0.dist-info}/entry_points.txt +0 -0
- {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.2.0.dist-info}/licenses/IP_NOTICE +0 -0
- {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.2.0.dist-info}/licenses/LICENSE +0 -0
|
"""add session annotations table

Creates the ``project_session_annotations`` table so that annotations
(label / score / explanation triples) can be attached to a project
session, mirroring the existing span/trace annotation tables.

Revision ID: 0df286449799
Revises: 735d3d93c33e
Create Date: 2025-08-06 11:27:01.479664

"""

from typing import Any, Sequence, Union

import sqlalchemy as sa
from alembic import op
from sqlalchemy import JSON
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.compiler import compiles

# revision identifiers, used by Alembic.
revision: str = "0df286449799"
down_revision: Union[str, None] = "735d3d93c33e"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


class JSONB(JSON):
    # Custom type whose only purpose is to emit the literal "JSONB" type
    # name on SQLite (see the @compiles hook below).
    # See https://docs.sqlalchemy.org/en/20/core/custom_types.html
    __visit_name__ = "JSONB"


@compiles(JSONB, "sqlite")
def _(*args: Any, **kwargs: Any) -> str:
    # Render the column type as "JSONB" in SQLite DDL.
    # See https://docs.sqlalchemy.org/en/20/core/custom_types.html
    return "JSONB"


# JSON column type: native JSONB on PostgreSQL, the JSONB shim above on
# SQLite, plain JSON elsewhere.
JSON_ = (
    JSON()
    .with_variant(
        postgresql.JSONB(),
        "postgresql",
    )
    .with_variant(
        JSONB(),
        "sqlite",
    )
)

# Integer PK/FK type: BIGINT on PostgreSQL, default INTEGER elsewhere
# (SQLite INTEGER PRIMARY KEY is already 64-bit).
_Integer = sa.Integer().with_variant(
    sa.BigInteger(),
    "postgresql",
)


def upgrade() -> None:
    op.create_table(
        "project_session_annotations",
        sa.Column("id", _Integer, primary_key=True),
        sa.Column(
            "project_session_id",
            _Integer,
            # Annotations are deleted along with their session.
            sa.ForeignKey("project_sessions.id", ondelete="CASCADE"),
            nullable=False,
            index=True,
        ),
        sa.Column("name", sa.String, nullable=False),
        # label/score/explanation are each optional; an annotation may
        # carry any subset of them.
        sa.Column("label", sa.String),
        sa.Column("score", sa.Float),
        sa.Column("explanation", sa.String),
        sa.Column("metadata", JSON_, nullable=False),
        sa.Column(
            "annotator_kind",
            sa.String,
            sa.CheckConstraint(
                "annotator_kind IN ('LLM', 'CODE', 'HUMAN')",
                name="valid_annotator_kind",
            ),
            nullable=False,
        ),
        sa.Column(
            "user_id",
            _Integer,
            # Keep the annotation if its author is deleted.
            sa.ForeignKey("users.id", ondelete="SET NULL"),
            nullable=True,
        ),
        # Empty-string default keeps the column NOT NULL while allowing
        # the unique constraint below to treat "no identifier" uniformly.
        sa.Column("identifier", sa.String, server_default="", nullable=False),
        sa.Column(
            "source",
            sa.String,
            sa.CheckConstraint("source IN ('API', 'APP')", name="valid_source"),
            nullable=False,
        ),
        sa.Column(
            "created_at", sa.TIMESTAMP(timezone=True), server_default=sa.func.now(), nullable=False
        ),
        sa.Column(
            "updated_at",
            sa.TIMESTAMP(timezone=True),
            server_default=sa.func.now(),
            onupdate=sa.func.now(),
            nullable=False,
        ),
        # At most one annotation per (name, session, identifier).
        sa.UniqueConstraint("name", "project_session_id", "identifier"),
    )


def downgrade() -> None:
    op.drop_table("project_session_annotations")
|
"""drop single-column indices

Drops single-column indices from traces, project_sessions,
experiment_runs, experiment_run_annotations, dataset_example_revisions,
and span_cost_details.  NOTE(review): presumably these are redundant
with (or superseded by) composite indices — confirm against the
follow-up migration that adds composite indices (735d3d93c33e).

Revision ID: 272b66ff50f8
Revises: a20694b15f82
Create Date: 2025-08-11 20:37:46.941940

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "272b66ff50f8"
down_revision: Union[str, None] = "a20694b15f82"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Every drop uses if_exists=True so the migration is idempotent and
    # tolerates databases where an index was never created.
    op.drop_index(
        "ix_traces_project_rowid",
        table_name="traces",
        if_exists=True,
    )
    op.drop_index(
        "ix_traces_start_time",
        table_name="traces",
        if_exists=True,
    )

    op.drop_index(
        "ix_project_sessions_project_id",
        table_name="project_sessions",
        if_exists=True,
    )
    op.drop_index(
        "ix_project_sessions_start_time",
        table_name="project_sessions",
        if_exists=True,
    )

    op.drop_index(
        "ix_experiment_runs_experiment_id",
        table_name="experiment_runs",
        if_exists=True,
    )
    op.drop_index(
        "ix_experiment_run_annotations_experiment_run_id",
        table_name="experiment_run_annotations",
        if_exists=True,
    )

    op.drop_index(
        "ix_dataset_example_revisions_dataset_example_id",
        table_name="dataset_example_revisions",
        if_exists=True,
    )

    op.drop_index(
        "ix_span_cost_details_span_cost_id",
        table_name="span_cost_details",
        if_exists=True,
    )


def downgrade() -> None:
    # Recreate each dropped index; if_not_exists=True mirrors the
    # idempotence of upgrade().
    op.create_index(
        "ix_traces_project_rowid",
        "traces",
        ["project_rowid"],
        if_not_exists=True,
    )
    op.create_index(
        "ix_traces_start_time",
        "traces",
        ["start_time"],
        if_not_exists=True,
    )

    op.create_index(
        "ix_project_sessions_project_id",
        "project_sessions",
        ["project_id"],
        if_not_exists=True,
    )
    op.create_index(
        "ix_project_sessions_start_time",
        "project_sessions",
        ["start_time"],
        if_not_exists=True,
    )

    op.create_index(
        "ix_experiment_runs_experiment_id",
        "experiment_runs",
        ["experiment_id"],
        if_not_exists=True,
    )
    op.create_index(
        "ix_experiment_run_annotations_experiment_run_id",
        "experiment_run_annotations",
        ["experiment_run_id"],
        if_not_exists=True,
    )

    op.create_index(
        "ix_dataset_example_revisions_dataset_example_id",
        "dataset_example_revisions",
        ["dataset_example_id"],
        if_not_exists=True,
    )

    op.create_index(
        "ix_span_cost_details_span_cost_id",
        "span_cost_details",
        ["span_cost_id"],
        if_not_exists=True,
    )
|
"""dataset_labels

Creates ``dataset_labels`` (a globally unique, colored, optionally
described label) and the ``datasets_dataset_labels`` crosswalk table
that attaches labels to datasets (many-to-many).

Revision ID: 58228d933c91
Revises: 699f655af132
Create Date: 2025-09-05 17:47:34.637329

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "58228d933c91"
down_revision: Union[str, None] = "699f655af132"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None

# Integer PK/FK type: BIGINT on PostgreSQL, default INTEGER elsewhere.
_Integer = sa.Integer().with_variant(
    sa.BigInteger(),
    "postgresql",
)


def upgrade() -> None:
    op.create_table(
        "dataset_labels",
        sa.Column("id", _Integer, primary_key=True),
        # Label names are globally unique.
        sa.Column("name", sa.String, nullable=False, unique=True),
        sa.Column("description", sa.String, nullable=True),
        sa.Column("color", sa.String, nullable=False),
        sa.Column(
            "user_id",
            _Integer,
            # Keep the label if its creator is deleted.
            sa.ForeignKey("users.id", ondelete="SET NULL"),
            nullable=True,
            index=True,
        ),
    )

    # Many-to-many crosswalk between datasets and labels; rows vanish
    # when either side is deleted (ondelete="CASCADE").
    op.create_table(
        "datasets_dataset_labels",
        sa.Column(
            "dataset_id",
            _Integer,
            sa.ForeignKey("datasets.id", ondelete="CASCADE"),
            nullable=False,
        ),
        sa.Column(
            "dataset_label_id",
            _Integer,
            sa.ForeignKey("dataset_labels.id", ondelete="CASCADE"),
            nullable=False,
            # index on the second element of the composite primary key
            # (the PK alone only serves lookups by its leading column)
            index=True,
        ),
        sa.PrimaryKeyConstraint(
            "dataset_id",
            "dataset_label_id",
        ),
    )


def downgrade() -> None:
    # Drop the crosswalk first: it holds FKs into dataset_labels.
    op.drop_table("datasets_dataset_labels")
    op.drop_table("dataset_labels")
|
"""experiment_tags

Creates the ``experiment_tags`` table: named tags on experiments,
scoped to a dataset (tag names are unique per dataset).

Revision ID: 699f655af132
Revises: d0690a79ea51
Create Date: 2025-09-05 13:14:22.676233

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "699f655af132"
down_revision: Union[str, None] = "d0690a79ea51"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None

# Integer PK/FK type: BIGINT on PostgreSQL, default INTEGER elsewhere.
_Integer = sa.Integer().with_variant(
    sa.BigInteger(),
    "postgresql",
)


def upgrade() -> None:
    op.create_table(
        "experiment_tags",
        sa.Column("id", _Integer, primary_key=True),
        sa.Column(
            "experiment_id",
            _Integer,
            # Tags are deleted along with their experiment.
            sa.ForeignKey("experiments.id", ondelete="CASCADE"),
            nullable=False,
            index=True,
        ),
        sa.Column(
            "dataset_id",
            _Integer,
            sa.ForeignKey("datasets.id", ondelete="CASCADE"),
            nullable=False,
        ),
        sa.Column(
            "user_id",
            _Integer,
            # Keep the tag if its creator is deleted.
            sa.ForeignKey("users.id", ondelete="SET NULL"),
            nullable=True,
            index=True,
        ),
        sa.Column("name", sa.String, nullable=False),
        sa.Column("description", sa.String, nullable=True),
        # A tag name may be used at most once within a dataset.
        sa.UniqueConstraint("dataset_id", "name"),
    )


def downgrade() -> None:
    op.drop_table("experiment_tags")
|
"""add composite indices to traces and project_sessions

Adds (parent_id, start_time DESC) composite indices to support listing
a project's traces/sessions ordered by recency.

Revision ID: 735d3d93c33e
Revises: 272b66ff50f8
Create Date: 2025-08-11 20:52:47.477712

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "735d3d93c33e"
down_revision: Union[str, None] = "272b66ff50f8"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Raw SQL so that both the DESC ordering on start_time and
    # IF NOT EXISTS are emitted verbatim on both backends.
    op.execute(
        "CREATE INDEX IF NOT EXISTS ix_traces_project_rowid_start_time "
        "ON traces (project_rowid, start_time DESC)"
    )
    op.execute(
        "CREATE INDEX IF NOT EXISTS ix_project_sessions_project_id_start_time "
        "ON project_sessions (project_id, start_time DESC)"
    )


def downgrade() -> None:
    # if_exists=True keeps the downgrade idempotent.
    op.drop_index(
        "ix_traces_project_rowid_start_time",
        table_name="traces",
        if_exists=True,
    )
    op.drop_index(
        "ix_project_sessions_project_id_start_time",
        table_name="project_sessions",
        if_exists=True,
    )
|
"""add user_id on dataset_versions

Adds a nullable ``user_id`` foreign key to ``dataset_versions`` so the
creating user can be recorded; the column is set to NULL if that user
is deleted.

Revision ID: ab513d89518b
Revises: 01a8342c9cdf
Create Date: 2025-09-26 11:00:06.961920

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "ab513d89518b"
down_revision: Union[str, None] = "01a8342c9cdf"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None

# Integer FK type: BIGINT on PostgreSQL, default INTEGER elsewhere.
_Integer = sa.Integer().with_variant(
    sa.BigInteger(),
    "postgresql",
)


def upgrade() -> None:
    # batch_alter_table performs the ALTER via table rebuild on SQLite,
    # which cannot add a foreign-key column with a plain ALTER TABLE.
    with op.batch_alter_table("dataset_versions") as batch_op:
        batch_op.add_column(
            sa.Column(
                "user_id",
                _Integer,
                sa.ForeignKey("users.id", ondelete="SET NULL"),
                nullable=True,
            ),
        )


def downgrade() -> None:
    with op.batch_alter_table("dataset_versions") as batch_op:
        batch_op.drop_column("user_id")
|
"""add user_id on experiments

Adds a nullable ``user_id`` foreign key to ``experiments`` so the
creating user can be recorded; the column is set to NULL if that user
is deleted.

Revision ID: d0690a79ea51
Revises: ab513d89518b
Create Date: 2025-08-26 19:12:47.849806

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "d0690a79ea51"
down_revision: Union[str, None] = "ab513d89518b"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None

# Integer FK type: BIGINT on PostgreSQL, default INTEGER elsewhere.
_Integer = sa.Integer().with_variant(
    sa.BigInteger(),
    "postgresql",
)


def upgrade() -> None:
    # batch_alter_table performs the ALTER via table rebuild on SQLite,
    # which cannot add a foreign-key column with a plain ALTER TABLE.
    with op.batch_alter_table("experiments") as batch_op:
        batch_op.add_column(
            sa.Column(
                "user_id",
                _Integer,
                sa.ForeignKey("users.id", ondelete="SET NULL"),
                nullable=True,
            ),
        )


def downgrade() -> None:
    with op.batch_alter_table("experiments") as batch_op:
        batch_op.drop_column("user_id")
|
"""dataset_splits

Creates ``dataset_splits`` plus two crosswalk tables:
``dataset_splits_dataset_examples`` (split <-> example membership) and
``experiments_dataset_splits`` (experiment <-> split membership).

Revision ID: deb2c81c0bb2
Revises: e76cbd66ffc3
Create Date: 2025-09-08 15:50:12.066217

"""

# NOTE(review): the "Revises" line above previously read 58228d933c91,
# which contradicted down_revision below; the docstring has been aligned
# with the code, which Alembic actually uses.

from typing import Any, Sequence, Union

import sqlalchemy as sa
from alembic import op
from sqlalchemy import JSON
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.compiler import compiles


class JSONB(JSON):
    # Custom type whose only purpose is to emit the literal "JSONB" type
    # name on SQLite (see the @compiles hook below).
    # See https://docs.sqlalchemy.org/en/20/core/custom_types.html
    __visit_name__ = "JSONB"


@compiles(JSONB, "sqlite")
def _(*args: Any, **kwargs: Any) -> str:
    # Render the column type as "JSONB" in SQLite DDL.
    # See https://docs.sqlalchemy.org/en/20/core/custom_types.html
    return "JSONB"


# JSON column type: native JSONB on PostgreSQL, the JSONB shim above on
# SQLite, plain JSON elsewhere.
JSON_ = (
    JSON()
    .with_variant(
        postgresql.JSONB(),
        "postgresql",
    )
    .with_variant(
        JSONB(),
        "sqlite",
    )
)

# Integer PK/FK type: BIGINT on PostgreSQL, default INTEGER elsewhere.
_Integer = sa.Integer().with_variant(
    sa.BigInteger(),
    "postgresql",
)


# revision identifiers, used by Alembic.
revision: str = "deb2c81c0bb2"
down_revision: Union[str, None] = "e76cbd66ffc3"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Create dataset_splits table
    op.create_table(
        "dataset_splits",
        sa.Column("id", _Integer, primary_key=True),
        sa.Column(
            "user_id",
            _Integer,
            # Keep the split if its creator is deleted.
            sa.ForeignKey("users.id", ondelete="SET NULL"),
            nullable=True,
            index=True,
        ),
        # Split names are globally unique.
        sa.Column("name", sa.String, nullable=False, unique=True),
        sa.Column("description", sa.String, nullable=True),
        sa.Column("color", sa.String, nullable=False),
        sa.Column("metadata", JSON_, nullable=False),
        sa.Column(
            "created_at",
            sa.TIMESTAMP(timezone=True),
            nullable=False,
            server_default=sa.func.now(),
        ),
        sa.Column(
            "updated_at",
            sa.TIMESTAMP(timezone=True),
            nullable=False,
            server_default=sa.func.now(),
            onupdate=sa.func.now(),
        ),
    )

    # Create crosswalk table: dataset_splits_dataset_examples
    op.create_table(
        "dataset_splits_dataset_examples",
        sa.Column(
            "dataset_split_id",
            _Integer,
            sa.ForeignKey("dataset_splits.id", ondelete="CASCADE"),
            nullable=False,
        ),
        sa.Column(
            "dataset_example_id",
            _Integer,
            sa.ForeignKey("dataset_examples.id", ondelete="CASCADE"),
            nullable=False,
            # index on the second element of the composite primary key
            index=True,
        ),
        sa.PrimaryKeyConstraint(
            "dataset_split_id",
            "dataset_example_id",
        ),
    )

    # Create experiments_dataset_splits table. The rationale of this table is to
    # gather examples for a specific dataset split for a specific experiment.
    # Select all dataset examples where examples belong to a dataset split and
    # examples belong to a experiment.

    op.create_table(
        "experiments_dataset_splits",
        sa.Column(
            "experiment_id",
            _Integer,
            sa.ForeignKey("experiments.id", ondelete="CASCADE"),
            nullable=False,
        ),
        sa.Column(
            "dataset_split_id",
            _Integer,
            sa.ForeignKey("dataset_splits.id", ondelete="CASCADE"),
            nullable=False,
            # index on the second element of the composite primary key
            index=True,
        ),
        sa.PrimaryKeyConstraint(
            "experiment_id",
            "dataset_split_id",
        ),
    )


def downgrade() -> None:
    # Drop the crosswalk tables first: they hold FKs into dataset_splits.
    op.drop_table("experiments_dataset_splits")
    op.drop_table("dataset_splits_dataset_examples")
    op.drop_table("dataset_splits")
@@ -0,0 +1,87 @@
|
|
|
1
|
+
"""add experiments_dataset_examples junction table
|
|
2
|
+
|
|
3
|
+
Revision ID: e76cbd66ffc3
|
|
4
|
+
Revises: deb2c81c0bb2
|
|
5
|
+
Create Date: 2025-09-23 12:33:13.554164
|
|
6
|
+
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from typing import Sequence, Union
|
|
10
|
+
|
|
11
|
+
import sqlalchemy as sa
|
|
12
|
+
from alembic import op
|
|
13
|
+
|
|
14
|
+
_Integer = sa.Integer().with_variant(
|
|
15
|
+
sa.BigInteger(),
|
|
16
|
+
"postgresql",
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
# revision identifiers, used by Alembic.
|
|
20
|
+
revision: str = "e76cbd66ffc3"
|
|
21
|
+
down_revision: Union[str, None] = "58228d933c91"
|
|
22
|
+
branch_labels: Union[str, Sequence[str], None] = None
|
|
23
|
+
depends_on: Union[str, Sequence[str], None] = None
|
|
24
|
+
|
|
25
|
+
BACKFILL = """\
|
|
26
|
+
INSERT INTO experiments_dataset_examples (
|
|
27
|
+
experiment_id,
|
|
28
|
+
dataset_example_id,
|
|
29
|
+
dataset_example_revision_id
|
|
30
|
+
)
|
|
31
|
+
SELECT
|
|
32
|
+
ranked.experiment_id,
|
|
33
|
+
ranked.dataset_example_id,
|
|
34
|
+
ranked.dataset_example_revision_id
|
|
35
|
+
FROM (
|
|
36
|
+
SELECT
|
|
37
|
+
e.id as experiment_id,
|
|
38
|
+
der.dataset_example_id,
|
|
39
|
+
der.id as dataset_example_revision_id,
|
|
40
|
+
der.revision_kind,
|
|
41
|
+
ROW_NUMBER() OVER (
|
|
42
|
+
PARTITION BY e.id, der.dataset_example_id
|
|
43
|
+
ORDER BY der.dataset_version_id DESC
|
|
44
|
+
) as rn
|
|
45
|
+
FROM experiments e
|
|
46
|
+
JOIN dataset_examples de ON de.dataset_id = e.dataset_id
|
|
47
|
+
JOIN dataset_example_revisions der ON der.dataset_example_id = de.id
|
|
48
|
+
WHERE der.dataset_version_id <= e.dataset_version_id
|
|
49
|
+
) ranked
|
|
50
|
+
WHERE ranked.rn = 1
|
|
51
|
+
AND ranked.revision_kind != 'DELETE'
|
|
52
|
+
"""
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def upgrade() -> None:
|
|
56
|
+
op.create_table(
|
|
57
|
+
"experiments_dataset_examples",
|
|
58
|
+
sa.Column(
|
|
59
|
+
"experiment_id",
|
|
60
|
+
_Integer,
|
|
61
|
+
sa.ForeignKey("experiments.id", ondelete="CASCADE"),
|
|
62
|
+
nullable=False,
|
|
63
|
+
),
|
|
64
|
+
sa.Column(
|
|
65
|
+
"dataset_example_id",
|
|
66
|
+
_Integer,
|
|
67
|
+
sa.ForeignKey("dataset_examples.id", ondelete="CASCADE"),
|
|
68
|
+
nullable=False,
|
|
69
|
+
index=True,
|
|
70
|
+
),
|
|
71
|
+
sa.Column(
|
|
72
|
+
"dataset_example_revision_id",
|
|
73
|
+
_Integer,
|
|
74
|
+
sa.ForeignKey("dataset_example_revisions.id", ondelete="CASCADE"),
|
|
75
|
+
nullable=False,
|
|
76
|
+
index=True,
|
|
77
|
+
),
|
|
78
|
+
sa.PrimaryKeyConstraint(
|
|
79
|
+
"experiment_id",
|
|
80
|
+
"dataset_example_id",
|
|
81
|
+
),
|
|
82
|
+
)
|
|
83
|
+
op.execute(BACKFILL)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def downgrade() -> None:
|
|
87
|
+
op.drop_table("experiments_dataset_examples")
|