outerproduct-http-types 0.1.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
outerproduct_http_types/__init__.py
@@ -0,0 +1,117 @@
+ """Shared HTTP-facing type definitions for OuterProduct services and SDKs."""
+
+ from .agentic_documents import (
+     AnswerType,
+     CreateDocumentUploadRequest,
+     CreateDocumentUploadResponse,
+     DocumentRef,
+     InduceSchemaJobResponse,
+     InduceSchemaRequest,
+     Question,
+     Schema,
+     SchemaResultResponse,
+     TabularizeJobResponse,
+     TabularizeRequest,
+     TabularizeResultResponse,
+ )
+ from .common import (
+     ErrorResponse,
+     FeatureImportance,
+     JobResponse,
+     JobStatus,
+     StatusResponse,
+ )
+ from .connectors import (
+     ConnectorResponse,
+     ConnectorType,
+     CreateConnectorRequest,
+     DeleteConnectorResponse,
+     ListConnectorsResponse,
+     ListTablesRequest,
+     ListTablesResponse,
+     ValidateConnectorResponse,
+ )
+ from .inference import (
+     ExplainRequest,
+     ExplainResponse,
+     FeatureConstraintSchema,
+     InterpretRequest,
+     InterpretResponse,
+     PredictAndExplainRequest,
+     PredictAndExplainResponse,
+     PredictRequest,
+     PredictResponse,
+     ScenarioCandidate,
+     ScenarioChange,
+     ScenarioRequest,
+     ScenarioResponse,
+     ScenarioResultItem,
+ )
+ from .reasoning import ReasoningFitRequest, ReasoningFitResponse
+ from .segment import ClusterPersonaSchema, SegmentRequest, SegmentResultResponse
+ from .trainer import (
+     HardwareSpec,
+     ModalHardwareSpec,
+     TrainerRunRequest,
+     TrainerRunResponse,
+ )
+ from .uploads import CreateUploadRequest, CreateUploadResponse
+ from .workers import WorkerJobRequest, WorkerJobResponse
+
+ __version__ = "0.1.0"
+
+ __all__ = [
+     "AnswerType",
+     "ClusterPersonaSchema",
+     "ConnectorResponse",
+     "ConnectorType",
+     "CreateConnectorRequest",
+     "CreateDocumentUploadRequest",
+     "CreateDocumentUploadResponse",
+     "CreateUploadRequest",
+     "CreateUploadResponse",
+     "DeleteConnectorResponse",
+     "DocumentRef",
+     "ErrorResponse",
+     "ExplainRequest",
+     "ExplainResponse",
+     "FeatureConstraintSchema",
+     "FeatureImportance",
+     "HardwareSpec",
+     "InduceSchemaJobResponse",
+     "InduceSchemaRequest",
+     "InterpretRequest",
+     "InterpretResponse",
+     "JobResponse",
+     "JobStatus",
+     "ListConnectorsResponse",
+     "ListTablesRequest",
+     "ListTablesResponse",
+     "ModalHardwareSpec",
+     "PredictAndExplainRequest",
+     "PredictAndExplainResponse",
+     "PredictRequest",
+     "PredictResponse",
+     "Question",
+     "ReasoningFitRequest",
+     "ReasoningFitResponse",
+     "ScenarioCandidate",
+     "ScenarioChange",
+     "ScenarioRequest",
+     "ScenarioResponse",
+     "ScenarioResultItem",
+     "Schema",
+     "SchemaResultResponse",
+     "SegmentRequest",
+     "SegmentResultResponse",
+     "StatusResponse",
+     "TabularizeJobResponse",
+     "TabularizeRequest",
+     "TabularizeResultResponse",
+     "TrainerRunRequest",
+     "TrainerRunResponse",
+     "ValidateConnectorResponse",
+     "WorkerJobRequest",
+     "WorkerJobResponse",
+     "__version__",
+ ]
outerproduct_http_types/agentic_documents.py
@@ -0,0 +1,239 @@
+ """Request/response schemas for the agentic-documents endpoints.
+
+ Wire format for ``outerproduct.agentic.documents``: per-file presigned
+ uploads, async ``induce_schema`` and ``tabularize`` jobs polled via the
+ shared ``GET /v1/models/{model_id}/status`` route, and result fetches
+ keyed by the same ``model_id``.
+
+ Endpoint summary:
+
+ - ``POST /v1/uploads/documents`` -> :class:`CreateDocumentUploadResponse`
+ - ``POST /v1/agentic/documents/induce_schema`` -> :class:`InduceSchemaJobResponse`
+ - ``GET /v1/agentic/documents/schemas/{model_id}`` -> :class:`SchemaResultResponse`
+ - ``POST /v1/agentic/documents/tabularize`` -> :class:`TabularizeJobResponse`
+ - ``GET /v1/agentic/documents/tables/{model_id}`` -> :class:`TabularizeResultResponse`
+ """
+
+ from enum import StrEnum
+ from typing import Any, Literal
+
+ from pydantic import BaseModel, Field, model_validator
+
+ from .common import JobResponse
+
+
+ class AnswerType(StrEnum):
+     """The legal answer shapes for a :class:`Question`."""
+
+     BOOLEAN = "boolean"
+     NUMBER = "number"
+     INTEGER = "integer"
+     ENUM = "enum"
+     MULTI_ENUM = "multi_enum"
+     DATE = "date"
+     DATE_RANGE = "date_range"
+     STRING = "string"
+     TEXT = "text"
+
+
+ _DOCUMENT_MEDIA_TYPES = Literal[
+     "application/pdf",
+     "image/png",
+     "image/jpeg",
+     "image/gif",
+     "image/webp",
+     "text/plain",
+ ]
+
+
+ class Question(BaseModel):
+     """One survey question in a :class:`Schema`."""
+
+     id: str = Field(
+         ...,
+         description="Snake-case identifier, unique within the schema.",
+         pattern=r"^[a-z][a-z0-9_]*$",
+     )
+     question: str = Field(..., description="Natural-language prompt the agent answers.")
+     answer_type: AnswerType
+     rationale: str | None = Field(
+         None, description="Why this question is on the schema; surfaced to the agent."
+     )
+     unit: str | None = Field(
+         None, description="Unit string for numeric answers (e.g. 'USD', 'kg')."
+     )
+     enum: list[str] | None = Field(
+         None,
+         description="Allowed values when answer_type is 'enum' or 'multi_enum'.",
+     )
+
+     @model_validator(mode="after")
+     def _check_enum(self):
+         if self.answer_type in (AnswerType.ENUM, AnswerType.MULTI_ENUM):
+             if not self.enum:
+                 raise ValueError(
+                     f"Question {self.id!r}: answer_type={self.answer_type.value} "
+                     "requires non-empty `enum`"
+                 )
+         elif self.enum is not None:
+             raise ValueError(
+                 f"Question {self.id!r}: `enum` is only valid for "
+                 "answer_type='enum' or 'multi_enum'"
+             )
+         return self
+
+
+ class Schema(BaseModel):
+     """A frozen list of :class:`Question` for one document type and one use case."""
+
+     skill: str = Field(..., description="Skill slug (e.g. 'invoice', 'bank-statement').")
+     use_case: str = Field(..., description="Use-case description that drove induction.")
+     questions: list[Question]
+     metadata: dict[str, Any] = Field(default_factory=dict)
+
+     @model_validator(mode="after")
+     def _check_unique_ids(self):
+         seen: set[str] = set()
+         for q in self.questions:
+             if q.id in seen:
+                 raise ValueError(f"Schema: duplicate question id {q.id!r}")
+             seen.add(q.id)
+         return self
+
+
+ # --------------------------------------------------------------------------- #
+ # Per-document upload                                                          #
+ # --------------------------------------------------------------------------- #
+
+
+ class CreateDocumentUploadRequest(BaseModel):
+     """``POST /v1/uploads/documents`` -- request a presigned URL for one file."""
+
+     document_id: str = Field(
+         ...,
+         description="Caller-chosen identifier; echoed back in subsequent requests.",
+     )
+     media_type: _DOCUMENT_MEDIA_TYPES = Field(
+         ...,
+         description="MIME type of the bytes you will PUT to the returned URL.",
+     )
+
+
+ class CreateDocumentUploadResponse(BaseModel):
+     document_id: str
+     upload_url: str
+     upload_key: str = Field(
+         ...,
+         description="Reference to use as DocumentRef.upload_key in induce/tabularize.",
+     )
+     media_type: _DOCUMENT_MEDIA_TYPES
+     expires_in: int = Field(..., description="Seconds until the upload URL expires.")
+
+
+ class DocumentRef(BaseModel):
+     """One uploaded document, by reference."""
+
+     document_id: str
+     upload_key: str
+     media_type: _DOCUMENT_MEDIA_TYPES
+
+
+ # --------------------------------------------------------------------------- #
+ # induce_schema                                                                #
+ # --------------------------------------------------------------------------- #
+
+
+ class InduceSchemaRequest(BaseModel):
+     """``POST /v1/agentic/documents/induce_schema`` -- async schema induction job.
+
+     Runs the agent over the supplied document sample to produce a frozen
+     :class:`Schema`. Submit, poll ``GET /v1/models/{model_id}/status`` until
+     ``status="completed"``, then fetch the result via
+     ``GET /v1/agentic/documents/schemas/{model_id}``.
+     """
+
+     documents: list[DocumentRef] = Field(
+         ...,
+         description="Sample of uploaded documents the agent inspects to draft the schema.",
+         min_length=1,
+     )
+     use_case: str = Field(
+         ...,
+         description="Free-text description of what the resulting features will be used for.",
+     )
+     skill: str = Field(
+         ...,
+         description="Skill slug; one of the values returned by GET /v1/agentic/documents/skills.",
+     )
+     model_id: str | None = Field(
+         None,
+         description="Custom model_id for the resulting schema; auto-generated if omitted.",
+     )
+
+
+ class InduceSchemaJobResponse(JobResponse):
+     """``POST /v1/agentic/documents/induce_schema`` -- async submission response."""
+
+
+ class SchemaResultResponse(BaseModel):
+     """``GET /v1/agentic/documents/schemas/{model_id}`` -- the produced schema."""
+
+     model_id: str
+     schema_: Schema = Field(..., alias="schema")
+
+     model_config = {"populate_by_name": True}
+
+
+ # --------------------------------------------------------------------------- #
+ # tabularize                                                                   #
+ # --------------------------------------------------------------------------- #
+
+
+ class TabularizeRequest(BaseModel):
+     """``POST /v1/agentic/documents/tabularize`` -- async tabularization job.
+
+     Extracts every uploaded document against ``schema`` and assembles a row
+     per document. Submit, poll status, then fetch the result via
+     ``GET /v1/agentic/documents/tables/{model_id}``.
+     """
+
+     documents: list[DocumentRef] = Field(..., min_length=1)
+     schema_: Schema = Field(..., alias="schema")
+     web_augmentation: bool = Field(
+         False,
+         description="If true, allow the agent to issue web searches to corroborate answers.",
+     )
+     concurrency: int = Field(
+         1,
+         ge=1,
+         description="Number of documents to extract in parallel on the worker.",
+     )
+     model_id: str | None = Field(
+         None,
+         description="Custom model_id for the resulting table; auto-generated if omitted.",
+     )
+
+     model_config = {"populate_by_name": True}
+
+
+ class TabularizeJobResponse(JobResponse):
+     """``POST /v1/agentic/documents/tabularize`` -- async submission response."""
+
+
+ class TabularizeResultResponse(BaseModel):
+     """``GET /v1/agentic/documents/tables/{model_id}`` -- the tabularized result.
+
+     The tabularized rows themselves live in object storage under the
+     returned ``model_id`` (same path used by trainer.run / reasoning.fit
+     in their pre-uploaded mode), so this response is metadata-only:
+     enough to construct a :class:`outerproduct.agentic.documents.DocumentDataset`
+     handle without materializing rows on the client.
+     """
+
+     model_id: str
+     schema_: Schema = Field(..., alias="schema")
+     document_ids: list[str]
+
+     model_config = {"populate_by_name": True}
+
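The enum gating in `Question._check_enum` and the `schema` wire alias are easiest to see in a short usage sketch. It uses only the models defined in this file; the `upload_key` value is a placeholder for whatever `POST /v1/uploads/documents` returned.

```python
# Sketch: exercising the Question/Schema validators and the `schema` alias.
# Nothing here touches the network; all literal values are illustrative.
from pydantic import ValidationError

from outerproduct_http_types import (
    AnswerType,
    DocumentRef,
    Question,
    Schema,
    TabularizeRequest,
)

currency = Question(
    id="invoice_currency",
    question="What currency is the invoice denominated in?",
    answer_type=AnswerType.ENUM,
    enum=["USD", "EUR", "GBP"],  # required because answer_type is 'enum'
)

try:
    Question(id="total_due", question="Total due?", answer_type=AnswerType.ENUM)
except ValidationError as exc:
    print(exc)  # rejected by _check_enum: enum/multi_enum needs non-empty `enum`

schema = Schema(skill="invoice", use_case="AP automation", questions=[currency])

request = TabularizeRequest(
    documents=[
        DocumentRef(
            document_id="inv-001",
            upload_key="uploads/inv-001",  # placeholder; returned by the upload endpoint
            media_type="application/pdf",
        )
    ],
    schema=schema,  # the wire name; populate_by_name also allows schema_=
)
payload = request.model_dump_json(by_alias=True)  # emits schema_ as "schema"
```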
outerproduct_http_types/common.py
@@ -0,0 +1,49 @@
+ """Shared types used across all endpoint schemas."""
+
+ from datetime import datetime
+ from enum import StrEnum
+ from typing import Any
+
+ from pydantic import BaseModel, Field
+
+
+ class JobStatus(StrEnum):
+     PENDING = "pending"
+     RUNNING = "running"
+     COMPLETED = "completed"
+     FAILED = "failed"
+
+
+ class ErrorResponse(BaseModel):
+     error: str
+     detail: str | None = None
+
+
+ class JobResponse(BaseModel):
+     """Base response returned by async job-submission endpoints."""
+
+     model_id: str = Field(description="Unique identifier for this model.")
+     status: JobStatus
+     message: str
+
+
+ class StatusResponse(BaseModel):
+     """Returned by GET /v1/models/{model_id}/status."""
+
+     model_id: str
+     job_type: str = Field(description="One of: trainer_run, reasoning_fit, segment.")
+     status: JobStatus
+     progress: dict[str, Any] | None = Field(
+         None, description='Progress info, e.g. {"step": 3, "total_steps": 5}'
+     )
+     error_message: str | None = None
+     created_at: datetime
+     updated_at: datetime
+
+
+ class FeatureImportance(BaseModel):
+     """A single feature's importance score with direction."""
+
+     feature_name: str
+     importance: float
+     direction: str | None = Field(None, description='"positive" or "negative"')
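Since every async flow in this package funnels through `JobStatus` and `StatusResponse`, a polling helper is the natural companion sketch. The base URL and bearer token are placeholders, not part of this package.

```python
# Sketch: polling the shared status route until a job settles.
import time

import requests

from outerproduct_http_types import JobStatus, StatusResponse

API = "https://api.example.com"  # placeholder base URL
HEADERS = {"Authorization": "Bearer <token>"}  # placeholder auth scheme


def wait_for_job(model_id: str, poll_s: float = 2.0) -> StatusResponse:
    """Poll GET /v1/models/{model_id}/status until it leaves pending/running."""
    while True:
        resp = requests.get(f"{API}/v1/models/{model_id}/status", headers=HEADERS)
        resp.raise_for_status()
        status = StatusResponse.model_validate_json(resp.text)
        if status.status in (JobStatus.COMPLETED, JobStatus.FAILED):
            return status
        time.sleep(poll_s)
```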
outerproduct_http_types/connectors.py
@@ -0,0 +1,115 @@
+ """Request/response schemas for connector CRUD endpoints.
+
+ Connectors represent registered references to external data sources
+ (S3 buckets, GCS buckets, Snowflake warehouses, Databricks workspaces,
+ BigQuery datasets); of these, :class:`ConnectorType` currently exposes
+ only ``s3``, ``snowflake``, and ``databricks``. Connectors are persisted
+ server-side with encrypted credentials and expose table listing and
+ validation endpoints.
+
+ Endpoints
+ ---------
+ - ``POST /v1/connectors`` → create a connector
+ - ``GET /v1/connectors`` → list connectors for the org
+ - ``GET /v1/connectors/{connector_id}`` → get a single connector
+ - ``GET /v1/connectors/by-name/{name}`` → look up by user-facing name
+ - ``DELETE /v1/connectors/{connector_id}`` → delete a connector
+ - ``POST /v1/connectors/{connector_id}/tables`` → list tables/paths
+ - ``POST /v1/connectors/{connector_id}/validate`` → test connectivity
+ """
+
+ from enum import StrEnum
+ from typing import Any
+
+ from pydantic import BaseModel, Field
+
+
+ class ConnectorType(StrEnum):
+     """Supported external data source types."""
+
+     S3 = "s3"
+     SNOWFLAKE = "snowflake"
+     DATABRICKS = "databricks"
+
+
+ class CreateConnectorRequest(BaseModel):
+     """POST /v1/connectors -- register a new external data source.
+
+     Credentials are encrypted at rest and never returned by GET endpoints.
+     """
+
+     name: str = Field(
+         ...,
+         description="Unique user-facing name for this connector within the org.",
+     )
+     connector_type: ConnectorType = Field(
+         ...,
+         description="The type of external data source.",
+     )
+     connection_params: dict[str, Any] = Field(
+         ...,
+         description=(
+             "Non-secret connection parameters. Contents vary by connector type: "
+             "S3/GCS use 'root_dir'; Snowflake uses 'account', 'warehouse', "
+             "'database', 'schema_name'; Databricks uses 'server_hostname', "
+             "'http_path', 'catalog', 'schema'; BigQuery uses 'project', "
+             "'dataset_id'."
+         ),
+     )
+     credentials: dict[str, str] | None = Field(
+         None,
+         description=(
+             "Secret credentials for the data source. Encrypted at rest, "
+             "never returned by GET. Omit for sources using IAM role-based auth "
+             "(e.g. S3/GCS)."
+         ),
+     )
+
+
+ class ConnectorResponse(BaseModel):
+     """Returned by create, get, and list endpoints.
+
+     Credentials are intentionally omitted — they are write-only.
+     """
+
+     connector_id: str = Field(..., description="Server-generated unique identifier.")
+     name: str = Field(..., description="User-facing name.")
+     connector_type: ConnectorType
+     connection_params: dict[str, Any]
+
+
+ class ListConnectorsResponse(BaseModel):
+     """GET /v1/connectors -- all connectors for the authenticated org."""
+
+     connectors: list[ConnectorResponse]
+
+
+ class DeleteConnectorResponse(BaseModel):
+     """DELETE /v1/connectors/{connector_id}."""
+
+     connector_id: str
+     deleted: bool
+
+
+ class ListTablesRequest(BaseModel):
+     """POST /v1/connectors/{connector_id}/tables -- list available tables.
+
+     Currently empty; extensible with filters (prefix, schema, limit) later.
+     """
+
+
+ class ListTablesResponse(BaseModel):
+     """Response for table listing."""
+
+     tables: list[str] = Field(
+         ...,
+         description="Table names (database connectors) or object paths (file connectors).",
+     )
+
+
+ class ValidateConnectorResponse(BaseModel):
+     """POST /v1/connectors/{connector_id}/validate -- test connectivity."""
+
+     valid: bool = Field(..., description="Whether the connection succeeded.")
+     error: str | None = Field(
+         None,
+         description="Human-readable error message when valid=False.",
+     )
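A registration sketch for Snowflake, following the `connection_params` conventions in the field description above. The account and warehouse values are illustrative, and the credential keys are an assumption; this file does not pin the accepted key names.

```python
# Sketch: registering a Snowflake connector. All values are placeholders.
from outerproduct_http_types import ConnectorType, CreateConnectorRequest

req = CreateConnectorRequest(
    name="warehouse-prod",
    connector_type=ConnectorType.SNOWFLAKE,
    connection_params={  # non-secret, per the description above
        "account": "acme-xy12345",
        "warehouse": "ANALYTICS_WH",
        "database": "ANALYTICS",
        "schema_name": "PUBLIC",
    },
    credentials={"user": "svc_outerproduct", "password": "<secret>"},  # assumed keys
)
# POST req.model_dump_json() to /v1/connectors; credentials are encrypted at
# rest and never echoed back in ConnectorResponse.
```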
outerproduct_http_types/inference.py
@@ -0,0 +1,206 @@
+ """Request/response schemas for inference endpoints."""
+
+ from typing import Any, Literal
+
+ from pydantic import BaseModel, ConfigDict, Field
+
+ # --- POST /v1/models/{model_id}/predict ---
+
+
+ class PredictRequest(BaseModel):
+     """POST /v1/models/{model_id}/predict -- Batch predictions."""
+
+     samples: list[list[float | str | bool | None]] = Field(
+         description="2D array, shape (n_samples, n_features). "
+         "Cells may be numeric, string (categorical), or bool."
+     )
+     feature_names: list[str] = Field(
+         description="Column names for features. Must match the model's training "
+         "schema in name and order."
+     )
+
+
+ class PredictResponse(BaseModel):
+     model_id: str
+     predictions: list[float]
+
+
+ # --- POST /v1/models/{model_id}/explain ---
+
+
+ class ExplainRequest(BaseModel):
+     """POST /v1/models/{model_id}/explain -- Batch local explanations."""
+
+     samples: list[list[float | str | bool | None]] = Field(
+         description="2D array, shape (n_samples, n_features). "
+         "Cells may be numeric, string (categorical), or bool."
+     )
+     feature_names: list[str] = Field(
+         description="Column names for features. Must match the model's training "
+         "schema in name and order."
+     )
+
+
+ class ExplainResponse(BaseModel):
+     """Batch-shaped explanation arrays. Dimension 0 is the batch.
+
+     `/explain` is purely an attribution endpoint — it does not return
+     predictions. Callers that need both predictions and attributions
+     should use `/predict_and_explain` instead.
+     """
+
+     model_id: str
+     attributions: list[list[float]]
+     feature_names: list[str]
+     ohe_feature_names: list[str] | None = Field(
+         default=None,
+         description=(
+             "OHE-expanded column labels aligned to attributions. Present when "
+             "the model has categorical features; the raw feature_names list has "
+             "fewer entries than the attributions array in that case."
+         ),
+     )
+
+
+ # --- POST /v1/models/{model_id}/predict_and_explain ---
+
+
+ class PredictAndExplainRequest(BaseModel):
+     """POST /v1/models/{model_id}/predict_and_explain -- Predict + explain in one call."""
+
+     samples: list[list[float | str | bool | None]] = Field(
+         description="2D array, shape (n_samples, n_features). "
+         "Cells may be numeric, string (categorical), or bool."
+     )
+     feature_names: list[str] = Field(
+         description="Column names for features. Must match the model's training "
+         "schema in name and order."
+     )
+     with_persona: bool = False
+     rule_kwargs: dict[str, Any] | None = Field(
+         default=None,
+         description=(
+             "Gates local-rule computation, forwarded to the rule-explanation "
+             "backend. Omitted/null → rules skipped. "
+             "{} → backend defaults. "
+             "Populated dict → use these kwargs (e.g. "
+             '{"selector": "lift_threshold", "lift_threshold": 0.9}).'
+         ),
+     )
+
+
+ class PredictAndExplainResponse(BaseModel):
+     """Batch-shaped predict + explain arrays. Dimension 0 is the batch."""
+
+     model_id: str
+     predictions: list[float]
+     attributions: list[list[float]]
+     persona: list[dict[str, Any] | None] | None = None
+     persona_cluster_id: list[int | None] | None = None
+     local_rules: list[dict[str, Any] | None] | None = Field(
+         default=None,
+         description=(
+             "Per-sample rule explanations when rule_kwargs was supplied. "
+             "Each entry has sample_prediction, global_mean, direction, "
+             "active_features, rules[]."
+         ),
+     )
+     feature_names: list[str]
+     ohe_feature_names: list[str] | None = Field(
+         default=None,
+         description="OHE-expanded column labels aligned to attributions.",
+     )
+
+
+ # --- POST /v1/models/{model_id}/interpret ---
+
+
+ class InterpretRequest(BaseModel):
+     """POST /v1/models/{model_id}/interpret -- Global feature importance."""
+
+     pass
+
+
+ class InterpretResponse(BaseModel):
+     model_id: str
+     global_drivers: list[float] = Field(description="Per-feature importance scores.")
+     feature_names: list[str] | None = None
+     ohe_feature_names: list[str] | None = Field(
+         default=None,
+         description=(
+             "OHE-expanded column labels aligned to global_drivers. Present when "
+             "the model has categorical features."
+         ),
+     )
+
+
+ # GET /v1/models/{model_id}/schema returns the SchemaManifest defined alongside
+ # the S3 sidecar wire schema in the API repo (app.schemas.messages); the HTTP
+ # response and the on-disk manifest share one definition there.
+
+
+ # --- POST /v1/models/{model_id}/scenario ---
+
+
+ class FeatureConstraintSchema(BaseModel):
+     """Per-feature restriction applied during counterfactual walks.
+
+     Mirrors cleartrace.FeatureConstraint. Continuous-only fields
+     (value_range, monotonic) and categorical-only fields (allowed_values)
+     are validated server-side against the model's feature groups.
+     """
+
+     immutable: bool = False
+     monotonic: Literal["increase", "decrease"] | None = None
+     value_range: tuple[float | None, float | None] | None = None
+     allowed_values: list[Any] | None = None
+
+
+ class ScenarioRequest(BaseModel):
+     """POST /v1/models/{model_id}/scenario -- Counterfactual search with constraints."""
+
+     queries: list[list[float | str | bool | None]] = Field(
+         description="2D array, shape (n_queries, n_features). "
+         "Cells may be numeric, string (categorical), or bool."
+     )
+     feature_names: list[str] = Field(
+         description="Column names for features. Must match the model's training "
+         "schema in name and order."
+     )
+     desired_class: int = 1
+     n_walks: int = 500
+     max_steps: int = 30
+     epsilon: float = 0.2
+     random_state: int | None = 42
+     constraints: dict[str, FeatureConstraintSchema] = Field(default_factory=dict)
+
+
+ class ScenarioChange(BaseModel):
+     """Single-feature diff between a query and one counterfactual row."""
+
+     model_config = ConfigDict(populate_by_name=True)
+
+     from_: Any = Field(alias="from")
+     to: Any
+     delta: float | None = None
+
+
+ class ScenarioCandidate(BaseModel):
+     row: dict[str, Any]
+     changes: dict[str, ScenarioChange]
+     n_changes: int
+
+
+ class ScenarioResultItem(BaseModel):
+     query_index: int
+     query: dict[str, Any]
+     baseline_prediction: float
+     already_at_desired_class: bool
+     scenarios: list[ScenarioCandidate]
+     scenario_count: int
+
+
+ class ScenarioResponse(BaseModel):
+     model_id: str
+     desired_class: int
+     results: list[ScenarioResultItem]
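The `rule_kwargs` tri-state and the constraint schema are the two subtle contracts in this file; a sketch with illustrative feature names:

```python
# Sketch: predict_and_explain with rules enabled, plus a constrained scenario.
from outerproduct_http_types import (
    FeatureConstraintSchema,
    PredictAndExplainRequest,
    ScenarioRequest,
)

samples = [[35, "owner", 52000.0], [29, "renter", 41000.0]]  # illustrative
names = ["age", "housing", "income"]

# rule_kwargs=None (the default) skips rules; {} requests backend defaults;
# a populated dict is forwarded to the rule-explanation backend as-is.
pe = PredictAndExplainRequest(
    samples=samples,
    feature_names=names,
    rule_kwargs={"selector": "lift_threshold", "lift_threshold": 0.9},
)

scenario = ScenarioRequest(
    queries=samples,
    feature_names=names,
    desired_class=1,
    constraints={
        "age": FeatureConstraintSchema(immutable=True),
        "income": FeatureConstraintSchema(
            monotonic="increase", value_range=(None, 250_000.0)
        ),
        "housing": FeatureConstraintSchema(allowed_values=["owner", "renter"]),
    },
)
```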
outerproduct_http_types/py.typed
@@ -0,0 +1 @@
+
outerproduct_http_types/reasoning.py
@@ -0,0 +1,163 @@
+ """Request schema for the /v1/reasoning/fit endpoint.
+
+ `/v1/reasoning/fit` is the wire equivalent of
+ `outerproduct_reasoning.reasoning.fit(...)`. The worker imports the real
+ `outerproduct_reasoning` package and calls `reasoning.fit(...)`, returning a
+ fitted `ReasoningModel` artifact. Both supervised training (no teacher) and
+ distillation (teacher_predict_url set) flow through the same request.
+ """
+
+ from typing import Any, Literal
+
+ from pydantic import BaseModel, Field, model_validator
+
+ from .common import JobResponse
+ from .trainer import HardwareSpec
+
+
+ class ReasoningFitRequest(BaseModel):
+     """POST /v1/reasoning/fit -- Fit a ReasoningModel.
+
+     Dataset delivery mirrors the other training endpoints:
+     * Inline: set `data` and `labels`. `label_column` is ignored.
+     * Pre-uploaded: set `data_uploaded=True`, supply `model_id` and
+       `label_column`. When `teacher_predict_url` is set, labels become
+       evaluation-only (the teacher provides the training target).
+     """
+
+     # --- dataset
+     data: list[list[float | str | bool | None]] | None = Field(
+         None,
+         description="2D feature matrix (n_samples, n_features). Omit when data_uploaded=True.",
+     )
+     labels: list[float] | None = Field(
+         None,
+         description="Target values. Required for supervised training. Optional "
+         "(eval-only) when teacher_predict_url is set.",
+     )
+     feature_names: list[str] | None = Field(
+         None,
+         description="Column names for inline features. Required when inline data "
+         "contains non-numeric values.",
+     )
+     feature_schema: dict[str, dict[str, Any]] | None = Field(
+         None,
+         description="Optional per-column schema for inline data.",
+     )
+     data_uploaded: bool = Field(
+         False,
+         description="If true, read the dataset already at "
+         "traces/{org_id}/{model_id}/training_data.{pkl|csv|parquet}.",
+     )
+     label_column: str | None = Field(
+         None,
+         description="Target column name in the uploaded table.",
+     )
+     model_id: str | None = Field(
+         None, description="Custom model ID; required when data_uploaded=True."
+     )
+
+     # --- connector-based data source
+     data_connector: bool = Field(
+         False,
+         description="If true, read the dataset from a registered connector. "
+         "Mutually exclusive with inline data and data_uploaded.",
+     )
+     connector_id: str | None = Field(
+         None,
+         description="Connector identifier. Required when data_connector=True.",
+     )
+     table_name: str | None = Field(
+         None,
+         description="Table or path within the connector. Required when data_connector=True.",
+     )
+
+     # --- reasoning.fit kwargs
+     model_types: list[str] | None = Field(
+         None,
+         description="Candidate model-family identifiers. reasoning.fit pins the "
+         "surrogate via force_model_type, so at most one entry is accepted.",
+     )
+     n_hyperopt_steps: int = 5
+     device: str | None = Field(None, description="'auto' | 'cuda' | 'cpu'.")
+     random_state: int = 42
+     task_type: Literal["regression", "binclass", "multiclass"] | None = Field(
+         None,
+         description="Learning task. Required for supervised fits; optional when "
+         "teacher_predict_url is set (the teacher pins the task).",
+     )
+     hardware: HardwareSpec | None = Field(
+         None,
+         description="Modal trainer hardware. Omit to use the API server's "
+         "default — currently ModalHardware with the API's pinned trainer "
+         "image. In-process execution is not supported at the API edge to "
+         "prevent the orchestrator from materializing X. Synth-gen and "
+         "finalize always run on the worker server (also out of process); "
+         "there is no caller-controlled toggle.",
+     )
+
+     # --- distillation
+     teacher_predict_url: str | None = Field(
+         None, description="If set, distil this teacher instead of training on labels."
+     )
+     teacher_predict_headers: dict[str, str] | None = Field(
+         None, description="Headers to send when calling teacher_predict_url."
+     )
+
+     @model_validator(mode="after")
+     def _check_dataset_source(self):
+         # Exactly one of three modes: inline, uploaded, or connector.
+         if self.data_connector and self.data_uploaded:
+             raise ValueError("data_connector and data_uploaded are mutually exclusive")
+         if self.data_connector and self.data is not None:
+             raise ValueError("data_connector and inline data are mutually exclusive")
+
+         if self.data_connector:
+             if not self.connector_id:
+                 raise ValueError("connector_id is required when data_connector=True")
+             if not self.table_name:
+                 raise ValueError("table_name is required when data_connector=True")
+             if not self.label_column and not self.teacher_predict_url:
+                 raise ValueError(
+                     "label_column is required when data_connector=True (unless "
+                     "teacher_predict_url is set)"
+                 )
+         elif self.data_uploaded:
+             if not self.model_id:
+                 raise ValueError("model_id is required when data_uploaded=True")
+             if not self.label_column and not self.teacher_predict_url:
+                 raise ValueError(
+                     "label_column is required when data_uploaded=True (unless "
+                     "teacher_predict_url is set)"
+                 )
+         else:
+             if self.data is None:
+                 raise ValueError(
+                     "inline data is required when neither data_uploaded nor "
+                     "data_connector is set"
+                 )
+             if self.labels is None and self.teacher_predict_url is None:
+                 raise ValueError(
+                     "labels is required for inline data (unless "
+                     "teacher_predict_url is set)"
+                 )
+         return self
+
+     @model_validator(mode="after")
+     def _check_model_types_length(self):
+         if self.model_types is not None and len(self.model_types) > 1:
+             raise ValueError(
+                 "reasoning.fit pins the surrogate via force_model_type, so "
+                 "model_types accepts at most one entry; pass a single value "
+                 "or omit."
+             )
+         return self
+
+     @model_validator(mode="after")
+     def _check_feature_names_for_mixed_types(self):
+         if self.data is not None and self.feature_names is None:
+             has_non_numeric = any(
+                 isinstance(val, (bool, str)) for row in self.data for val in row
+             )
+             if has_non_numeric:
+                 raise ValueError(
+                     "feature_names is required when data contains non-numeric values"
+                 )
+         return self
+
+
+ class ReasoningFitResponse(JobResponse):
+     """POST /v1/reasoning/fit -- async reasoning-fit job submission response."""
outerproduct_http_types/segment.py
@@ -0,0 +1,59 @@
+ """Request/response schemas for segmentation endpoints."""
+
+ from typing import Any
+
+ from pydantic import BaseModel, ConfigDict, Field
+
+
+ class ClusterPersonaSchema(BaseModel):
+     """One cluster's persona description as exposed over HTTP.
+
+     Mirrors the shape written into ``segments.json`` by the segment Lambda;
+     the API repo aliases that S3 wire type onto this HTTP type at the
+     response boundary.
+     """
+
+     model_config = ConfigDict(from_attributes=True)
+
+     cluster_id: int
+     persona_name: str
+     persona_description: str
+     stats: dict[str, Any]
+     differentiating_features: list[dict[str, Any]] | None = None
+
+
+ class SegmentRequest(BaseModel):
+     """POST /v1/models/{model_id}/segment -- Supervised segmentation (async)."""
+
+     data: list[list[float | str | bool | None]] | None = Field(
+         None,
+         description="Dataset to segment; uses training data if omitted. "
+         "Cells may be numeric, string (categorical), or bool.",
+     )
+     target_values: list[float] | None = None
+     feature_names: list[str] | None = None
+     min_clusters: int = 4
+     max_clusters: int | None = 10
+     n_search_steps: int = 50
+     use_agent: bool | None = None
+     kpi_field: str | None = None
+     problem_context: str | None = None
+
+
+ class SegmentResultResponse(BaseModel):
+     """GET /v1/models/{model_id}/segments -- Retrieve segmentation results.
+
+     Result fields are optional because pending/running/failed jobs return
+     only ``model_id`` and ``status``; they are populated once the segment
+     Lambda has uploaded ``segments.json`` and the SegmentsResult is
+     available.
+     """
+
+     model_id: str
+     status: str
+     n_clusters: int | None = None
+     cluster_ids: list[int] | None = None
+     resolution: float | None = None
+     quality: float | None = None
+     personas: list[ClusterPersonaSchema] | None = None
+     agent_score: float | None = None
+     agent_reasoning: str | None = None
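A sketch of why the result fields are optional: the same model validates both a pending payload and a completed one (payload values illustrative):

```python
# Sketch: SegmentResultResponse before and after the job completes.
from outerproduct_http_types import SegmentResultResponse

pending = SegmentResultResponse.model_validate(
    {"model_id": "m-123", "status": "running"}
)
assert pending.personas is None  # segments.json not uploaded yet

done = SegmentResultResponse.model_validate(
    {
        "model_id": "m-123",
        "status": "completed",
        "n_clusters": 1,
        "cluster_ids": [0],
        "personas": [
            {
                "cluster_id": 0,
                "persona_name": "Power users",
                "persona_description": "High engagement, low churn risk.",
                "stats": {"size": 412},
            }
        ],
    }
)
print(done.personas[0].persona_name)
```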
outerproduct_http_types/trainer.py
@@ -0,0 +1,197 @@
+ """Request schema for the /v1/trainer/run endpoint.
+
+ `/v1/trainer/run` runs the server-side `Trainer` (the replacement for
+ `ModelOrchestrator` in `outerproduct-trainer`). It compiles a model matrix
+ across one or more model families and runs HPO, optionally with k-fold CV,
+ ensembling, or a Pareto sweep over multiple metrics.
+ """
+
+ from typing import Any, Literal
+
+ from pydantic import BaseModel, Field, model_validator
+
+ from .common import JobResponse
+
+
+ class ModalHardwareSpec(BaseModel):
+     """Fan trials out to additional Modal containers.
+
+     The only supported hardware spec for API requests. Local execution is
+     intentionally not exposed over the wire — running PlanNodes in-process
+     means materializing the dataset in the API container, which OOMs the
+     Render server on any non-trivial input. Library / notebook callers can
+     still construct ``LocalHardware()`` directly via ``outerproduct-trainer``;
+     the wire-level lockdown only applies to API requests.
+     """
+
+     kind: Literal["modal"] = "modal"
+     gpu: str = Field("A10G", description="Modal GPU type, e.g. 'A10G', 'T4', 'L40S'.")
+     timeout_s: int = Field(1200, description="Per-trial timeout in seconds.")
+     n_concurrent: int = Field(4, description="Max parallel trial containers.")
+
+
+ HardwareSpec = ModalHardwareSpec
+
+
+ class TrainerRunRequest(BaseModel):
+     """POST /v1/trainer/run -- Configure a Trainer and run HPO across a model matrix.
+
+     Dataset delivery mirrors the other training endpoints:
+     * Inline: set `data` and `labels`. `label_column` is ignored.
+     * Pre-uploaded: set `data_uploaded=True`, supply `model_id` and
+       `label_column` (the column in the uploaded table that holds the target).
+     """
+
+     # --- dataset
+     data: list[list[float | str | bool | None]] | None = Field(
+         None,
+         description="2D feature matrix (n_samples, n_features). Omit when data_uploaded=True.",
+     )
+     labels: list[float] | None = Field(
+         None,
+         description="Target values, length n_samples. Omit when data_uploaded=True.",
+     )
+     feature_names: list[str] | None = Field(
+         None,
+         description="Column names for inline features. Required when inline data "
+         "contains non-numeric values.",
+     )
+     feature_schema: dict[str, dict[str, Any]] | None = Field(
+         None,
+         description="Optional per-column schema for inline data: "
+         "{name: {dtype: 'float' | 'int' | 'bool' | 'categorical'}}.",
+     )
+     data_uploaded: bool = Field(
+         False,
+         description="If true, read the dataset already at "
+         "traces/{org_id}/{model_id}/training_data.{pkl|csv|parquet}.",
+     )
+     label_column: str | None = Field(
+         None,
+         description="Target column name in the uploaded table. Required when data_uploaded=True.",
+     )
+     model_id: str | None = Field(
+         None, description="Custom model ID; required when data_uploaded=True."
+     )
+
+     # --- connector-based data source
+     data_connector: bool = Field(
+         False,
+         description="If true, read the dataset from a registered connector. "
+         "Mutually exclusive with inline data and data_uploaded.",
+     )
+     connector_id: str | None = Field(
+         None,
+         description="Connector identifier. Required when data_connector=True.",
+     )
+     table_name: str | None = Field(
+         None,
+         description="Table or path within the connector. Required when data_connector=True.",
+     )
+
+     # --- trainer configuration
+     model_types: list[str] | None = Field(
+         None,
+         description="Candidate model-family identifiers, e.g. ['tabm', 'xgboost', "
+         "'xrfm', 'tabicl']. Resolved server-side. If omitted, the server picks a "
+         "default set based on dataset shape.",
+     )
+     metrics: list[str] | None = Field(
+         None,
+         description="Metric names to optimise, e.g. ['auc'] or ['auc', "
+         "'neg_class_error']. Multi-metric requests trigger a Pareto sweep when "
+         "grid_size is set. Custom Python callables are not supported in v1.",
+     )
+     strategy: str = Field(
+         "random",
+         description="HPO strategy: 'random' or 'optuna'. Resolved server-side.",
+     )
+     n_trials: int = Field(4, description="Number of HPO trials per matrix row.")
+     n_splits: int | None = Field(
+         None,
+         description="K-fold cross-validation folds. None means a single holdout split.",
+     )
+     ensemble: bool = Field(
+         False,
+         description="If true, build a Caruana-style ensemble across stage-1 fold/trial "
+         "models instead of refitting the winner on full data.",
+     )
+     grid_size: float | None = Field(
+         None,
+         description="Simplex grid step for Pareto-style multi-metric sweeps.",
+     )
+     task_type: Literal["regression", "binclass", "multiclass"] | None = Field(
+         None,
+         description="Learning task. Required for supervised runs; optional when "
+         "teacher_predict_url is set.",
+     )
+
+     # --- distillation
+     teacher_predict_url: str | None = Field(
+         None,
+         description="If set, distil this teacher's predictions instead of training "
+         "against `label_column`. The worker calls this URL with "
+         "{'samples': [[...]], 'feature_names': [...]}.",
+     )
+     teacher_predict_headers: dict[str, str] | None = Field(
+         None, description="Headers to send when calling teacher_predict_url."
+     )
+
+     # --- execution
+     hardware: HardwareSpec | None = Field(
+         None,
+         description="Modal trials hardware. Omit to use the API server's "
+         "default — currently ModalHardware with the API's pinned trainer "
+         "image. In-process execution is not supported at the API edge to "
+         "prevent the orchestrator from materializing X.",
+     )
+     random_state: int = 42
+
+     @model_validator(mode="after")
+     def _check_dataset_source(self):
+         # Exactly one of three modes: inline, uploaded, or connector.
+         if self.data_connector and self.data_uploaded:
+             raise ValueError("data_connector and data_uploaded are mutually exclusive")
+         if self.data_connector and self.data is not None:
+             raise ValueError("data_connector and inline data are mutually exclusive")
+
+         if self.data_connector:
+             if not self.connector_id:
+                 raise ValueError("connector_id is required when data_connector=True")
+             if not self.table_name:
+                 raise ValueError("table_name is required when data_connector=True")
+             if not self.label_column and not self.teacher_predict_url:
+                 raise ValueError(
+                     "label_column is required when data_connector=True (unless "
+                     "teacher_predict_url is set)"
+                 )
+         elif self.data_uploaded:
+             if not self.model_id:
+                 raise ValueError("model_id is required when data_uploaded=True")
+             if not self.label_column and not self.teacher_predict_url:
+                 raise ValueError(
+                     "label_column is required when data_uploaded=True (unless "
+                     "teacher_predict_url is set, in which case the teacher provides "
+                     "the training target)"
+                 )
+         else:
+             if self.data is None:
+                 raise ValueError(
+                     "inline data is required when neither data_uploaded nor "
+                     "data_connector is set"
+                 )
+             if self.labels is None and self.teacher_predict_url is None:
+                 raise ValueError(
+                     "labels is required for inline data (unless "
+                     "teacher_predict_url is set)"
+                 )
+         return self
+
+     @model_validator(mode="after")
+     def _check_feature_names_for_mixed_types(self):
+         if self.data is not None and self.feature_names is None:
+             has_non_numeric = any(
+                 isinstance(val, (bool, str)) for row in self.data for val in row
+             )
+             if has_non_numeric:
+                 raise ValueError(
+                     "feature_names is required when data contains non-numeric values"
+                 )
+         return self
+
+
+ class TrainerRunResponse(JobResponse):
+     """POST /v1/trainer/run -- async trainer job submission response."""
outerproduct_http_types/uploads.py
@@ -0,0 +1,34 @@
+ """Request/response schemas for the presigned-upload endpoint."""
+
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+
+ class CreateUploadRequest(BaseModel):
+     """POST /v1/uploads -- request a presigned URL for direct-to-S3 upload."""
+
+     model_id: str | None = Field(
+         None,
+         description="Custom model ID; auto-generated if omitted.",
+     )
+     file_format: Literal["pkl", "csv", "parquet"] = Field(
+         ...,
+         description=(
+             "Format of the dataset you will PUT to the returned URL. "
+             "'pkl' = a pickled pandas DataFrame, 'csv' = RFC4180 CSV with a "
+             "header row, 'parquet' = Apache Parquet. The label column must be "
+             "present in the uploaded table and its name is supplied on the "
+             "subsequent /v1/trainer/run or /v1/reasoning/fit call as "
+             "`label_column`."
+         ),
+     )
+
+
+ class CreateUploadResponse(BaseModel):
+     model_id: str
+     upload_url: str
+     upload_key: str
+     file_format: Literal["pkl", "csv", "parquet"]
+     content_type: str
+     expires_in: int
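The presigned handshake end to end, as a sketch; the base URL, auth header, and local file path are placeholders:

```python
# Sketch: request a presigned URL, PUT the dataset, then train against it.
import requests

from outerproduct_http_types import CreateUploadRequest, CreateUploadResponse

API = "https://api.example.com"  # placeholder base URL
HEADERS = {"Authorization": "Bearer <token>"}  # placeholder auth

resp = requests.post(
    f"{API}/v1/uploads",
    json=CreateUploadRequest(file_format="parquet").model_dump(exclude_none=True),
    headers=HEADERS,
)
upload = CreateUploadResponse.model_validate_json(resp.text)

# The uploaded table must already contain the label column.
with open("training_data.parquet", "rb") as f:
    requests.put(
        upload.upload_url, data=f, headers={"Content-Type": upload.content_type}
    )

# upload.model_id is then passed to /v1/trainer/run or /v1/reasoning/fit
# together with data_uploaded=True and the table's label_column.
```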
outerproduct_http_types/workers.py
@@ -0,0 +1,25 @@
+ """Request/response schemas for the worker server (finalize + synth_gen)."""
+
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+
+ class WorkerJobRequest(BaseModel):
+     """Body for POST /v1/workers/{finalize,synthgen}.
+
+     Mirrors the AWS Lambda event shape that preceded the worker server: the
+     caller uploads a cloudpickled kwargs dict to ``s3://{s3_bucket}/{input_key}``
+     and the worker writes its cloudpickled output back to ``output_key``.
+     """
+
+     s3_bucket: str
+     input_key: str = Field(description="S3 key of the cloudpickled input dict.")
+     output_key: str = Field(description="S3 key the worker writes its cloudpickled result to.")
+
+
+ class WorkerJobResponse(BaseModel):
+     """Response from POST /v1/workers/{finalize,synthgen}."""
+
+     status: Literal["ok"] = "ok"
+     output_key: str
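A sketch of staging a worker job the way the docstring describes, assuming `boto3` and `cloudpickle` are available; the bucket, keys, and kwargs are placeholders:

```python
# Sketch: upload a cloudpickled kwargs dict, then describe the job for the worker.
import boto3
import cloudpickle

from outerproduct_http_types import WorkerJobRequest

s3 = boto3.client("s3")
s3.put_object(
    Bucket="op-worker-jobs",  # placeholder bucket
    Key="jobs/m-123/input.pkl",
    Body=cloudpickle.dumps({"model_id": "m-123"}),  # placeholder kwargs dict
)

req = WorkerJobRequest(
    s3_bucket="op-worker-jobs",
    input_key="jobs/m-123/input.pkl",
    output_key="jobs/m-123/output.pkl",
)
# POST req to /v1/workers/finalize or /v1/workers/synthgen; the worker writes
# its cloudpickled result to output_key and echoes it in WorkerJobResponse.
```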
outerproduct_http_types-0.1.0.dist-info/METADATA
@@ -0,0 +1,78 @@
+ Metadata-Version: 2.4
+ Name: outerproduct-http-types
+ Version: 0.1.0
+ Summary: Shared HTTP-facing type definitions for OuterProduct services and SDKs.
+ Project-URL: Homepage, https://outerproduct.com
+ Author: OuterProduct, Inc.
+ License-Expression: MIT
+ License-File: LICENSE
+ Keywords: api,http,outerproduct,pydantic,sdk,wire-types
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Internet :: WWW/HTTP
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Typing :: Typed
+ Requires-Python: >=3.12
+ Requires-Dist: pydantic>=2.13.3
+ Description-Content-Type: text/markdown
+
+ # outerproduct-http-types
+
+ Shared HTTP-facing type definitions for the [OuterProduct](https://outerproduct.com) API.
+
+ This package contains the [Pydantic](https://docs.pydantic.dev) models that
+ describe the request and response shapes used across OuterProduct services and
+ SDKs. It is the source of truth for the wire contract between the OuterProduct
+ backend and any client (including the official Python SDK).
+
+ Most users should not depend on this package directly. Instead, install the
+ OuterProduct SDK, which re-exports the relevant types and provides a typed
+ client. Use this package only when you are building tooling that needs to
+ construct or parse OuterProduct API payloads independently of the SDK.
+
+ ## Installation
+
+ ```bash
+ pip install outerproduct-http-types
+ ```
+
+ Requires Python 3.12 or newer.
+
+ ## Usage
+
+ Import the model that matches the endpoint you are working with and construct
+ or validate payloads as you would with any Pydantic model:
+
+ ```python
+ from outerproduct_http_types import PredictRequest, PredictResponse
+
+ request = PredictRequest(
+     samples=[[1.0, "x"], [2.5, "y"]],
+     feature_names=["feature_a", "feature_b"],
+ )
+ payload = request.model_dump_json()
+
+ # ... POST `payload` to /v1/models/{model_id}/predict, then parse:
+ response = PredictResponse.model_validate_json(raw_response_body)
+ print(response.model_id, response.predictions)
+ ```
+
+ The package ships with a `py.typed` marker, so type checkers (mypy, pyright,
+ ty) will pick up the type information automatically.
+
+ ## Versioning
+
+ This package follows [semantic versioning](https://semver.org/). Breaking
+ changes to wire types are released as a new major version. Minor versions add
+ new endpoints or optional fields; patch versions are reserved for documentation
+ and non-behavioral fixes.
+
+ ## License
+
+ Licensed under the [MIT License](https://opensource.org/license/mit).
outerproduct_http_types-0.1.0.dist-info/RECORD
@@ -0,0 +1,15 @@
+ outerproduct_http_types/__init__.py,sha256=nhMPlPi-KVnaRVkZcxtPiyrH-mYnDHh21D0UnRs50mo,2865
+ outerproduct_http_types/agentic_documents.py,sha256=izshgOeU4w2qv6Ohe6hGWbctmAm1BPiZde2zQbV4pTU,7975
+ outerproduct_http_types/common.py,sha256=pp-LG2rq2sKYyHDQyea-DZkTUpIooIJj63V-TRaUCpw,1248
+ outerproduct_http_types/connectors.py,sha256=OfWDjNL_-7dsQr_sDYIrt6BOYJIBz1RknpfTsvAfZA8,3698
+ outerproduct_http_types/inference.py,sha256=c-DwT9XVZ9KHT8uUw2pSGe-0LlulFvpmim9IV2OQTRw,6553
+ outerproduct_http_types/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+ outerproduct_http_types/reasoning.py,sha256=fSRe22uk14BM-4oaV5JXhmwOAmxnaOTw1eLGE3ja9v0,6792
+ outerproduct_http_types/segment.py,sha256=X5z1JpcYeP4RBstfvNjSiBWLWQlN5KhjDV9GcFwdCcQ,1892
+ outerproduct_http_types/trainer.py,sha256=Xidgwt1FHk4whF9LKwVmXcsH2bSJCMnNSsd97bzrIcE,8169
+ outerproduct_http_types/uploads.py,sha256=_EYUJAh2o1WovzCmeJ9rZX1LGfMnfoRN0TjN6DSSz0Y,1083
+ outerproduct_http_types/workers.py,sha256=LMREW5tbi2Rw0mcgv053a-Ccqb3ooMCsr2cWzcHcths,827
+ outerproduct_http_types-0.1.0.dist-info/METADATA,sha256=SFNUQPEfcqahKp8YSi-u_FciKNwfLoWW3qbZ6qPcVWg,2804
+ outerproduct_http_types-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
+ outerproduct_http_types-0.1.0.dist-info/licenses/LICENSE,sha256=5PAE0YxWHzDk5S9Jhmii65APx30_BVTzefsO8tCMgrU,1068
+ outerproduct_http_types-0.1.0.dist-info/RECORD,,
outerproduct_http_types-0.1.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.29.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
outerproduct_http_types-0.1.0.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 OuterProduct
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.