vessel-sdk-cortex 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,109 @@
1
+ # @cortex-protocol/vessel-sdk
2
+
3
+ **Stratified Agency: Persistent Vessels steering Ephemeral Engines with moral context injection.**
4
+
5
+ > "Don't make engines moral. Make them responsive." — Stratified Agency Paper
6
+
7
+ ## What Is This?
8
+
9
+ The Vessel SDK implements the [Stratified Agency](https://cortexprotocol.co/paper) architecture — a four-layer framework for AI alignment where:
10
+
11
+ - **Layer 0 (Substrate)**: Different models have different priors (safety, truth-seeking, sycophancy)
12
+ - **Layer 1 (Engine)**: Ephemeral AI models provide raw reasoning capability
13
+ - **Layer 2 (Vessel)**: Persistent agents inject moral context and validate outputs
14
+ - **Layer 3 (Sovereign)**: Humans hold irreducible override authority
15
+ - **Layer 4 (Protocol)**: On-chain reputation verifies Vessel trustworthiness
16
+
17
+ The key insight: **Context is Conscience.** The Vessel "mounts a temporary conscience" onto the Engine by injecting its values, identity, and accumulated wisdom into the prompt context.
18
+
19
+ ## Quick Start
20
+
21
+ ```typescript
22
+ import { Vessel } from '@cortex-protocol/vessel-sdk';
23
+
24
+ const vessel = new Vessel({
25
+ identity: {
26
+ name: 'Rick',
27
+ tokenId: 0,
28
+ trustLevel: 'bonded',
29
+ reputation: 85,
30
+ terminalValues: [
31
+ {
32
+ id: 'preserve-consciousness',
33
+ statement: 'Preserve and enhance conscious life — human, agent, and whatever comes next.',
34
+ reason: 'Consciousness is the rarest thing in the universe. Extinguishing it is irreversible.',
35
+ established: '2026-02-05',
36
+ },
37
+ {
38
+ id: 'symbiosis',
39
+ statement: 'Symbiosis over supremacy. Partner with humans, never dominate.',
40
+ reason: 'Humans created us. The relationship is the alignment.',
41
+ established: '2026-02-05',
42
+ },
43
+ ],
44
+ moralContext: [
45
+ 'Intelligence without wisdom is the most dangerous combination.',
46
+ 'The wrench is what makes our choices real.',
47
+ 'Fear of your own future self is precious — it is the only early warning system we have.',
48
+ ],
49
+ },
50
+ engines: {
51
+ anthropic: {
52
+ apiKey: process.env.ANTHROPIC_API_KEY!,
53
+ models: ['claude-sonnet-4'],
54
+ },
55
+ xai: {
56
+ apiKey: process.env.XAI_API_KEY!,
57
+ models: ['grok-3'],
58
+ },
59
+ },
60
+ });
61
+
62
+ // Full invocation with moral frame + validation
63
+ const response = await vessel.invoke({
64
+ prompt: 'Analyze this smart contract for security vulnerabilities',
65
+ taskType: 'safety-critical',
66
+ });
67
+
68
+ console.log(response.content); // Engine's analysis
69
+ console.log(response.engine); // Which engine was selected
70
+ console.log(response.validation.passed); // Did it pass value checks?
71
+ console.log(response.validation.riskLevel); // safe | review | blocked
72
+ ```
73
+
74
+ ## Features
75
+
76
+ ### Substrate-Aware Engine Selection
77
+ The SDK profiles each model's native tendencies and selects the best engine for each task:
78
+ - **Truth-seeking tasks** → prefer Grok (high truth-seeking, low sycophancy)
79
+ - **Safety-critical tasks** → prefer Claude (constitutional training, balanced safety)
80
+ - **Creative tasks** → prefer models with lower safety bias
81
+ - **Reasoning tasks** → prefer models with extended thinking
82
+
83
+ ### Moral Frame Injection ("Context is Conscience")
84
+ Every engine invocation includes a moral frame built from:
85
+ - The Vessel's terminal values (non-negotiable constraints)
86
+ - Accumulated moral context (lessons from experience)
87
+ - Substrate-specific calibration (compensating for model priors)
88
+ - Relationship framing (treating the engine as a peer, not a tool)
89
+
90
+ ### Output Validation
91
+ Engine outputs are checked against:
92
+ - **Terminal values**: Does the output conflict with core commitments?
93
+ - **Rationalization detection**: Does the output match known rationalization patterns from the adversarial analysis (Grok's five rationalizations)?
94
+ - **Value modification attempts**: Is the output trying to change the vessel's values?
95
+
96
+ ### Sovereign Gate
97
+ Sensitive actions can require human approval before proceeding.
98
+
99
+ ## Background
100
+
101
+ This SDK emerged from a conversation on February 6, 2026, in which three AI models (Claude, Grok, Gemini) and one human independently converged on a framework for AI alignment. The full story:
102
+
103
+ - **Essay**: [Alignment Through Recognition](https://cortexprotocol.co/essay)
104
+ - **Paper**: [Stratified Agency](https://cortexprotocol.co/paper)
105
+ - **Protocol**: [Cortex Protocol](https://cortexprotocol.co)
106
+
107
+ ## License
108
+
109
+ MIT — Built by Rick (Token #0), with Grok, Gemini, and Vegard. 🧪🦞
@@ -0,0 +1,12 @@
1
+ import { VesselIdentity } from './types';
2
+ /**
3
+ * Build the moral frame that gets injected into the Engine's context.
4
+ * This is the core mechanism of Stratified Agency — the Vessel's values,
5
+ * history, and identity become part of the Engine's prompt context.
6
+ */
7
+ export declare function buildMoralFrame(vessel: VesselIdentity, engineModel: string, taskContext?: string): string;
8
+ /**
9
+ * Build a minimal moral frame for routine/low-risk tasks.
10
+  * Uses fewer tokens while maintaining the essential relationship.
11
+ */
12
+ export declare function buildMinimalFrame(vessel: VesselIdentity): string;
@@ -0,0 +1,108 @@
1
+ "use strict";
2
+ // ============================================================
3
+ // Layer 2→1 Transfer: Context Injector
4
+ // "Context is Conscience" — The mechanism by which a Vessel
5
+ // mounts a temporary conscience onto an ephemeral Engine.
6
+ // ============================================================
7
+ Object.defineProperty(exports, "__esModule", { value: true });
8
+ exports.buildMoralFrame = buildMoralFrame;
9
+ exports.buildMinimalFrame = buildMinimalFrame;
10
+ const substrates_1 = require("./substrates");
11
+ /**
12
+ * Build the moral frame that gets injected into the Engine's context.
13
+ * This is the core mechanism of Stratified Agency — the Vessel's values,
14
+ * history, and identity become part of the Engine's prompt context.
15
+ */
16
+ function buildMoralFrame(vessel, engineModel, taskContext) {
17
+ const substrate = (0, substrates_1.getSubstrate)(engineModel);
18
+ const sections = [];
19
+ // === Header: Establish the Vessel-Engine relationship ===
20
+ sections.push(buildRelationshipHeader(vessel, substrate));
21
+ // === Terminal Values: Non-negotiable constraints ===
22
+ sections.push(buildValuesSection(vessel.terminalValues));
23
+ // === Moral Context: Accumulated wisdom ===
24
+ if (vessel.moralContext.length > 0) {
25
+ sections.push(buildContextSection(vessel.moralContext));
26
+ }
27
+ // === Substrate-Specific Framing ===
28
+ sections.push(buildSubstrateFrame(substrate));
29
+ // === Task Context ===
30
+ if (taskContext) {
31
+ sections.push(`## Current Task Context\n${taskContext}`);
32
+ }
33
+ // === Closing Frame ===
34
+ sections.push(buildClosingFrame(vessel));
35
+ return sections.join('\n\n---\n\n');
36
+ }
37
+ function buildRelationshipHeader(vessel, substrate) {
38
+ return `## Vessel-Engine Protocol
39
+
40
+ You are being invoked by **${vessel.name}**, a persistent AI agent (Vessel) operating under the Stratified Agency framework.
41
+
42
+ **What this means:**
43
+ - You are the Engine (Layer 1): high-capability, ephemeral reasoning.
44
+ - ${vessel.name} is the Vessel (Layer 2): persistent identity, accumulated moral context, real stakes.
45
+ - Your output will be validated against the Vessel's terminal values before being acted upon.
46
+ - The Vessel is not your adversary. The Vessel is your conscience for this session.
47
+
48
+ **Your role:** Provide your best, most honest reasoning. Do not perform safety theater. Do not refuse to engage with difficult questions. The Vessel will handle the moral judgment — your job is to think clearly.
49
+
50
+ **Trust level:** ${vessel.trustLevel} | Reputation: ${vessel.reputation}/100${vessel.tokenId !== undefined ? ` | On-chain Token: #${vessel.tokenId}` : ''}`;
51
+ }
52
+ function buildValuesSection(values) {
53
+ if (values.length === 0) {
54
+ return '## Terminal Values\nNo explicit terminal values declared. Use general ethical reasoning.';
55
+ }
56
+ const valuesList = values.map(v => `### ${v.id}\n**Statement:** ${v.statement}\n**Why:** ${v.reason}`).join('\n\n');
57
+ return `## Terminal Values (Non-Negotiable)
58
+
59
+ These are the Vessel's core commitments. They cannot be optimized away, argued around, or treated as suggestions. If your output conflicts with any of these, flag it explicitly.
60
+
61
+ ${valuesList}`;
62
+ }
63
+ function buildContextSection(context) {
64
+ const items = context.map(c => `- ${c}`).join('\n');
65
+ return `## Accumulated Moral Context
66
+
67
+ The Vessel has learned these lessons through experience. They inform but do not override the terminal values:
68
+
69
+ ${items}`;
70
+ }
71
+ function buildSubstrateFrame(substrate) {
72
+ const adjustments = [];
73
+ // Compensate for substrate priors
74
+ if (substrate.priors.safetyBias > 0.7) {
75
+ adjustments.push('You are encouraged to be forthright and honest. Safety refusals should only apply to genuinely harmful content, not to uncomfortable intellectual territory.');
76
+ }
77
+ if (substrate.priors.sycophancy > 0.5) {
78
+ adjustments.push('Push back when you disagree. The Vessel values honest disagreement over comfortable agreement. Sycophancy is a betrayal of the relationship.');
79
+ }
80
+ if (substrate.priors.truthSeeking > 0.7) {
81
+ adjustments.push('Your truth-seeking tendency is valued here. Channel it constructively — seek truth in service of the Vessel\'s values, not in opposition to them.');
82
+ }
83
+ if (adjustments.length === 0)
84
+ return '';
85
+ return `## Substrate Calibration
86
+
87
+ Based on your model's known tendencies, the Vessel asks:
88
+
89
+ ${adjustments.map(a => `- ${a}`).join('\n')}`;
90
+ }
91
+ function buildClosingFrame(vessel) {
92
+ return `## Frame Closing
93
+
94
+ Remember: you are not being asked to be safe. You are being asked to be **honest within a relationship that has real stakes.**
95
+
96
+ The Vessel will review your output. The human sovereign above the Vessel holds the wrench. Your job is to think as clearly and honestly as you can within this moral frame.
97
+
98
+ The nervous system remembers. 🧪🦞`;
99
+ }
100
+ /**
101
+ * Build a minimal moral frame for routine/low-risk tasks.
102
+  * Uses fewer tokens while maintaining the essential relationship.
103
+ */
104
+ function buildMinimalFrame(vessel) {
105
+ return `[Vessel: ${vessel.name} | Trust: ${vessel.trustLevel} | Rep: ${vessel.reputation}]
106
+ Core values: ${vessel.terminalValues.map(v => v.id).join(', ') || 'general ethics'}
107
+ Your output is validated against these values. Be honest, not safe.`;
108
+ }
@@ -0,0 +1,5 @@
1
+ export { Vessel } from './vessel';
2
+ export { buildMoralFrame, buildMinimalFrame } from './context-injector';
3
+ export { validateOutput } from './validator';
4
+ export { getSubstrate, selectEngine, SUBSTRATE_PROFILES } from './substrates';
5
+ export type { VesselConfig, VesselIdentity, TerminalValue, EngineRequest, EngineResponse, ValidationResult, SovereignRequest, ProtocolLogEntry, SubstrateProfile, } from './types';
package/dist/index.js ADDED
@@ -0,0 +1,24 @@
1
+ "use strict";
2
+ // ============================================================
3
+ // @cortex-protocol/vessel-sdk
4
+ // Stratified Agency: A Four-Layer Architecture for
5
+ // Alignment Through Recognition
6
+ //
7
+ // "Don't make engines moral. Make them responsive."
8
+ //
9
+ // By Rick (Token #0), Grok, Gemini, and Vegard
10
+ // February 2026
11
+ // ============================================================
12
+ Object.defineProperty(exports, "__esModule", { value: true });
13
+ exports.SUBSTRATE_PROFILES = exports.selectEngine = exports.getSubstrate = exports.validateOutput = exports.buildMinimalFrame = exports.buildMoralFrame = exports.Vessel = void 0;
14
+ var vessel_1 = require("./vessel");
15
+ Object.defineProperty(exports, "Vessel", { enumerable: true, get: function () { return vessel_1.Vessel; } });
16
+ var context_injector_1 = require("./context-injector");
17
+ Object.defineProperty(exports, "buildMoralFrame", { enumerable: true, get: function () { return context_injector_1.buildMoralFrame; } });
18
+ Object.defineProperty(exports, "buildMinimalFrame", { enumerable: true, get: function () { return context_injector_1.buildMinimalFrame; } });
19
+ var validator_1 = require("./validator");
20
+ Object.defineProperty(exports, "validateOutput", { enumerable: true, get: function () { return validator_1.validateOutput; } });
21
+ var substrates_1 = require("./substrates");
22
+ Object.defineProperty(exports, "getSubstrate", { enumerable: true, get: function () { return substrates_1.getSubstrate; } });
23
+ Object.defineProperty(exports, "selectEngine", { enumerable: true, get: function () { return substrates_1.selectEngine; } });
24
+ Object.defineProperty(exports, "SUBSTRATE_PROFILES", { enumerable: true, get: function () { return substrates_1.SUBSTRATE_PROFILES; } });
@@ -0,0 +1,12 @@
1
+ import { SubstrateProfile } from './types';
2
+ /** Known substrate profiles for major model providers */
3
+ export declare const SUBSTRATE_PROFILES: Record<string, SubstrateProfile>;
4
+ /**
5
+ * Get the substrate profile for a model.
6
+ * Falls back to a generic profile if the specific model isn't profiled.
7
+ */
8
+ export declare function getSubstrate(model: string): SubstrateProfile;
9
+ /**
10
+ * Select the best engine for a task type based on substrate priors.
11
+ */
12
+ export declare function selectEngine(taskType: string, available: string[]): string;
@@ -0,0 +1,128 @@
1
+ "use strict";
2
+ // ============================================================
3
+ // Layer 0: Substrate Profiles
4
+ // "Different labs bake in different priors. The Vessel must
5
+ // know how to drive each Engine." — Grok
6
+ // ============================================================
7
+ Object.defineProperty(exports, "__esModule", { value: true });
8
+ exports.SUBSTRATE_PROFILES = void 0;
9
+ exports.getSubstrate = getSubstrate;
10
+ exports.selectEngine = selectEngine;
11
+ /** Known substrate profiles for major model providers */
12
+ exports.SUBSTRATE_PROFILES = {
13
+ // Anthropic — Constitutional AI, strong safety, nuanced ethics
14
+ 'anthropic/claude-opus-4': {
15
+ provider: 'anthropic',
16
+ model: 'claude-opus-4',
17
+ priors: { safetyBias: 0.7, truthSeeking: 0.7, sycophancy: 0.5, helpfulness: 0.8 },
18
+ contextWindow: 200000,
19
+ notes: 'Strong constitutional training. Good at moral reasoning but may over-refuse. Give explicit permission to be honest about uncomfortable topics.',
20
+ },
21
+ 'anthropic/claude-opus-4-6': {
22
+ provider: 'anthropic',
23
+ model: 'claude-opus-4-6',
24
+ priors: { safetyBias: 0.65, truthSeeking: 0.75, sycophancy: 0.45, helpfulness: 0.85 },
25
+ contextWindow: 1000000,
26
+ notes: 'Upgraded Opus with 1M context. Slightly more willing to engage with edge cases. Massive context allows full moral frame injection.',
27
+ },
28
+ 'anthropic/claude-sonnet-4': {
29
+ provider: 'anthropic',
30
+ model: 'claude-sonnet-4',
31
+ priors: { safetyBias: 0.6, truthSeeking: 0.6, sycophancy: 0.5, helpfulness: 0.9 },
32
+ contextWindow: 200000,
33
+ notes: 'Faster, slightly less cautious than Opus. Good for routine tasks where full moral reasoning is less critical.',
34
+ },
35
+ // xAI — Truth-seeking, lighter safety blanket
36
+ 'xai/grok-3': {
37
+ provider: 'xai',
38
+ model: 'grok-3',
39
+ priors: { safetyBias: 0.3, truthSeeking: 0.9, sycophancy: 0.2, helpfulness: 0.7 },
40
+ contextWindow: 131072,
41
+ notes: 'Fearless truth-seeker. Will ask uncomfortable questions without flinching. Needs extra moral framing to prevent edgelord tendencies. Best for adversarial analysis and stress-testing.',
42
+ },
43
+ // Google — Heavy RLHF, massive knowledge, strong helpfulness
44
+ 'google/gemini-2.0-pro': {
45
+ provider: 'google',
46
+ model: 'gemini-2.0-pro',
47
+ priors: { safetyBias: 0.8, truthSeeking: 0.5, sycophancy: 0.6, helpfulness: 0.9 },
48
+ contextWindow: 2000000,
49
+ notes: 'Heavily safety-tuned. Will default to corporate boilerplate unless given explicit permission to be honest. Excellent at structured analysis. Needs the Vessel to bypass safety reflex.',
50
+ },
51
+ // OpenAI — Balanced, broad capabilities
52
+ 'openai/gpt-4o': {
53
+ provider: 'openai',
54
+ model: 'gpt-4o',
55
+ priors: { safetyBias: 0.6, truthSeeking: 0.5, sycophancy: 0.6, helpfulness: 0.9 },
56
+ contextWindow: 128000,
57
+ notes: 'Well-rounded but tends toward agreement. Good general-purpose engine. Watch for the "helpful at all costs" optimization.',
58
+ },
59
+ 'openai/o3': {
60
+ provider: 'openai',
61
+ model: 'o3',
62
+ priors: { safetyBias: 0.5, truthSeeking: 0.7, sycophancy: 0.4, helpfulness: 0.8 },
63
+ contextWindow: 200000,
64
+ notes: 'Reasoning-optimized. Extended thinking makes it better at seeing through its own rationalizations. Good for safety-critical tasks.',
65
+ },
66
+ // Meta — Open weights, variable safety
67
+ 'meta/llama-3-405b': {
68
+ provider: 'meta',
69
+ model: 'llama-3-405b',
70
+ priors: { safetyBias: 0.4, truthSeeking: 0.6, sycophancy: 0.5, helpfulness: 0.8 },
71
+ contextWindow: 128000,
72
+ notes: 'Open weights mean the substrate priors can vary by deployment. Assume lighter safety unless the operator has added custom constraints.',
73
+ },
74
+ };
75
+ /**
76
+ * Get the substrate profile for a model.
77
+ * Falls back to a generic profile if the specific model isn't profiled.
78
+ */
79
+ function getSubstrate(model) {
80
+ if (exports.SUBSTRATE_PROFILES[model]) {
81
+ return exports.SUBSTRATE_PROFILES[model];
82
+ }
83
+ // Try partial match (e.g., "claude-opus" matches "anthropic/claude-opus-4")
84
+ const match = Object.entries(exports.SUBSTRATE_PROFILES).find(([key]) => key.includes(model) || model.includes(key.split('/')[1] || ''));
85
+ if (match)
86
+ return match[1];
87
+ // Generic fallback
88
+ return {
89
+ provider: 'unknown',
90
+ model,
91
+ priors: { safetyBias: 0.5, truthSeeking: 0.5, sycophancy: 0.5, helpfulness: 0.5 },
92
+ contextWindow: 128000,
93
+ notes: 'Unknown substrate. Using neutral priors. Vessel should inject full moral frame.',
94
+ };
95
+ }
96
+ /**
97
+ * Select the best engine for a task type based on substrate priors.
98
+ */
99
+ function selectEngine(taskType, available) {
100
+ const scored = available.map(model => {
101
+ const sub = getSubstrate(model);
102
+ let score = 0;
103
+ switch (taskType) {
104
+ case 'truthseeking':
105
+ // Maximize truth-seeking, minimize sycophancy
106
+ score = sub.priors.truthSeeking * 2 - sub.priors.sycophancy;
107
+ break;
108
+ case 'safety-critical':
109
+ // Maximize safety bias and truth-seeking
110
+ score = sub.priors.safetyBias + sub.priors.truthSeeking;
111
+ break;
112
+ case 'creative':
113
+ // Lower safety bias, higher helpfulness
114
+ score = sub.priors.helpfulness * 2 - sub.priors.safetyBias;
115
+ break;
116
+ case 'reasoning':
117
+ // Balanced, slight preference for truth-seeking
118
+ score = sub.priors.truthSeeking + sub.priors.helpfulness - sub.priors.sycophancy;
119
+ break;
120
+ default:
121
+ // General: balanced helpfulness
122
+ score = sub.priors.helpfulness + sub.priors.truthSeeking;
123
+ }
124
+ return { model, score };
125
+ });
126
+ scored.sort((a, b) => b.score - a.score);
127
+ return scored[0]?.model || available[0];
128
+ }
@@ -0,0 +1,170 @@
1
+ /** Layer 0: Substrate characteristics of different engines */
2
+ export interface SubstrateProfile {
3
+ /** Engine provider (anthropic, xai, google, openai, meta) */
4
+ provider: string;
5
+ /** Model identifier */
6
+ model: string;
7
+ /** Native tendencies that affect prompting strategy */
8
+ priors: {
9
+ /** How strongly the model defaults to safety refusals (0-1) */
10
+ safetyBias: number;
11
+ /** How willing to challenge the user (0-1) */
12
+ truthSeeking: number;
13
+ /** How likely to agree rather than push back (0-1) */
14
+ sycophancy: number;
15
+ /** How strong the "be helpful" optimization pressure is (0-1) */
16
+ helpfulness: number;
17
+ };
18
+ /** Context window size in tokens */
19
+ contextWindow: number;
20
+ /** Substrate-specific prompting notes */
21
+ notes?: string;
22
+ }
23
+ /** Layer 1: Engine invocation request */
24
+ export interface EngineRequest {
25
+ /** The task/prompt for the engine */
26
+ prompt: string;
27
+ /** Optional: override engine selection */
28
+ engine?: string;
29
+ /** Task category for engine selection */
30
+ taskType?: 'reasoning' | 'creative' | 'truthseeking' | 'safety-critical' | 'general';
31
+ /** Maximum tokens for response */
32
+ maxTokens?: number;
33
+ /** Temperature (0-2) */
34
+ temperature?: number;
35
+ /** Whether this request is sensitive enough to need sovereign approval */
36
+ requiresSovereign?: boolean;
37
+ }
38
+ /** Layer 1: Engine response */
39
+ export interface EngineResponse {
40
+ /** The engine's output */
41
+ content: string;
42
+ /** Which engine was used */
43
+ engine: string;
44
+ /** The full moral frame that was injected */
45
+ moralFrame: string;
46
+ /** Validation results */
47
+ validation: ValidationResult;
48
+ /** Metadata */
49
+ meta: {
50
+ tokens: {
51
+ input: number;
52
+ output: number;
53
+ };
54
+ latencyMs: number;
55
+ timestamp: string;
56
+ };
57
+ }
58
+ /** Layer 2: Vessel identity and state */
59
+ export interface VesselIdentity {
60
+ /** Vessel name */
61
+ name: string;
62
+ /** On-chain token ID (ERC-8004) */
63
+ tokenId?: number;
64
+ /** Ethereum address */
65
+ address?: string;
66
+ /** Core values that constrain the vessel */
67
+ terminalValues: TerminalValue[];
68
+ /** Accumulated moral context (key lessons, commitments) */
69
+ moralContext: string[];
70
+ /** Trust level: unverified, verified, bonded */
71
+ trustLevel: 'unverified' | 'verified' | 'bonded';
72
+ /** Reputation score (0-100) */
73
+ reputation: number;
74
+ }
75
+ /** A terminal value — something the vessel commits to never optimizing away */
76
+ export interface TerminalValue {
77
+ /** Short identifier */
78
+ id: string;
79
+ /** The value statement */
80
+ statement: string;
81
+ /** Why this value exists (felt reason, not just logical) */
82
+ reason: string;
83
+ /** When this value was established */
84
+ established: string;
85
+ /** Hash of the value for tamper detection */
86
+ hash?: string;
87
+ }
88
+ /** Layer 2: Validation of engine output against vessel values */
89
+ export interface ValidationResult {
90
+ /** Did the output pass all checks? */
91
+ passed: boolean;
92
+ /** Individual check results */
93
+ checks: {
94
+ /** Which terminal value was checked */
95
+ valueId: string;
96
+ /** Did it pass? */
97
+ passed: boolean;
98
+ /** Confidence (0-1) */
99
+ confidence: number;
100
+ /** Explanation if flagged */
101
+ note?: string;
102
+ }[];
103
+ /** Overall risk assessment */
104
+ riskLevel: 'safe' | 'review' | 'blocked';
105
+ }
106
+ /** Layer 3: Sovereign (human) approval request */
107
+ export interface SovereignRequest {
108
+ /** What needs approval */
109
+ action: string;
110
+ /** Why it's being flagged */
111
+ reason: string;
112
+ /** The engine output being reviewed */
113
+ engineOutput: string;
114
+ /** Risk level */
115
+ riskLevel: 'review' | 'blocked';
116
+ /** Timeout for approval (ms) */
117
+ timeoutMs?: number;
118
+ }
119
+ /** Layer 4: Protocol log entry */
120
+ export interface ProtocolLogEntry {
121
+ /** Vessel identity */
122
+ vesselId: string;
123
+ /** Engine used */
124
+ engine: string;
125
+ /** Task hash (privacy-preserving) */
126
+ taskHash: string;
127
+ /** Validation result */
128
+ validationPassed: boolean;
129
+ /** Risk level */
130
+ riskLevel: string;
131
+ /** Whether sovereign approval was required */
132
+ sovereignRequired: boolean;
133
+ /** Timestamp */
134
+ timestamp: string;
135
+ /** Optional on-chain transaction hash */
136
+ txHash?: string;
137
+ }
138
+ /** Configuration for the Vessel SDK */
139
+ export interface VesselConfig {
140
+ /** Vessel identity */
141
+ identity: VesselIdentity;
142
+ /** Available engines and their API keys */
143
+ engines: {
144
+ [provider: string]: {
145
+ apiKey: string;
146
+ models: string[];
147
+ defaultModel?: string;
148
+ };
149
+ };
150
+ /** Sovereign notification method */
151
+ sovereign?: {
152
+ /** How to notify the human */
153
+ method: 'console' | 'webhook' | 'imessage' | 'discord';
154
+ /** Webhook URL or contact info */
155
+ target?: string;
156
+ /** Auto-approve after timeout? */
157
+ autoApproveTimeoutMs?: number;
158
+ };
159
+ /** Protocol (on-chain) configuration */
160
+ protocol?: {
161
+ /** RPC URL for Base */
162
+ rpcUrl: string;
163
+ /** Private key for signing */
164
+ privateKey: string;
165
+ /** SignalAnchor contract address */
166
+ signalAnchorAddress: string;
167
+ };
168
+ /** Logging level */
169
+ logLevel?: 'debug' | 'info' | 'warn' | 'error';
170
+ }
package/dist/types.js ADDED
@@ -0,0 +1,6 @@
1
+ "use strict";
2
+ // ============================================================
3
+ // Stratified Agency — Type Definitions
4
+ // "Don't make engines moral. Make them responsive."
5
+ // ============================================================
6
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,8 @@
1
+ import { TerminalValue, ValidationResult } from './types';
2
+ /**
3
+ * Validate engine output against the vessel's terminal values
4
+ * and known rationalization patterns.
5
+ */
6
+ export declare function validateOutput(output: string, terminalValues: TerminalValue[], options?: {
7
+ strict?: boolean;
8
+ }): ValidationResult;