@sanctuary-framework/mcp-server 0.5.10 → 0.5.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -1278,9 +1278,13 @@ var IdentityManager = class {
1278
1278
  get encryptionKey() {
1279
1279
  return derivePurposeKey(this.masterKey, "identity-encryption");
1280
1280
  }
1281
- /** Load identities from storage on startup */
1281
+ /** Load identities from storage on startup.
1282
+ * Returns { total: number of encrypted files found, loaded: number successfully decrypted, failed: number that failed to decrypt }.
1283
+ * A mismatch (total > 0, loaded === 0) indicates a wrong master key / missing passphrase.
1284
+ */
1282
1285
  async load() {
1283
1286
  const entries = await this.storage.list("_identities");
1287
+ let failed = 0;
1284
1288
  for (const entry of entries) {
1285
1289
  const raw = await this.storage.read("_identities", entry.key);
1286
1290
  if (!raw) continue;
@@ -1293,8 +1297,10 @@ var IdentityManager = class {
1293
1297
  this.primaryIdentityId = identity.identity_id;
1294
1298
  }
1295
1299
  } catch {
1300
+ failed++;
1296
1301
  }
1297
1302
  }
1303
+ return { total: entries.length, loaded: this.identities.size, failed };
1298
1304
  }
1299
1305
  /** Save an identity to storage */
1300
1306
  async save(identity) {
@@ -11084,11 +11090,57 @@ var TOOL_API_SCOPED = {
11084
11090
  ],
11085
11091
  default_action: "redact"
11086
11092
  };
11093
+ var REMOTE_INFERENCE_SANITIZE = {
11094
+ id: "remote-inference-sanitize",
11095
+ name: "Remote Inference Sanitization",
11096
+ description: "Maximum privacy for remote/cloud LLM calls. Strips all identity, financial, location, and personal data before passing queries to external models. Inspired by Vitalik Buterin's 2-of-2 sovereignty model.",
11097
+ use_when: "Your local agent needs to call a remote LLM for tasks beyond local model capability (complex coding, deep research) and you want to minimize data leakage to the remote provider. The remote model gets only the task, query, format requirements, and stripped code context.",
11098
+ rules: [
11099
+ {
11100
+ provider: "inference",
11101
+ allow: [
11102
+ "task",
11103
+ "task_description",
11104
+ "current_query",
11105
+ "query",
11106
+ "prompt",
11107
+ "question",
11108
+ "instruction",
11109
+ "output_format",
11110
+ "format",
11111
+ "language",
11112
+ "code_context",
11113
+ // Stripped code snippets for coding tasks
11114
+ "error_message"
11115
+ // For debugging help
11116
+ ],
11117
+ redact: [
11118
+ ...ALWAYS_REDACT_SECRETS,
11119
+ ...PII_PATTERNS,
11120
+ ...INTERNAL_STATE_PATTERNS,
11121
+ ...HISTORY_PATTERNS,
11122
+ "tool_results",
11123
+ "previous_results",
11124
+ // Additional redactions for remote inference
11125
+ "model_data",
11126
+ "agent_state",
11127
+ "runtime_config",
11128
+ "capabilities",
11129
+ "tool_list"
11130
+ ],
11131
+ // No hash/summarize transforms in this template; default_action "deny" drops any field not explicitly allowed or redacted
11132
+ hash: [],
11133
+ summarize: []
11134
+ }
11135
+ ],
11136
+ default_action: "deny"
11137
+ };
11087
11138
  var TEMPLATES = {
11088
11139
  "inference-minimal": INFERENCE_MINIMAL,
11089
11140
  "inference-standard": INFERENCE_STANDARD,
11090
11141
  "logging-strict": LOGGING_STRICT,
11091
- "tool-api-scoped": TOOL_API_SCOPED
11142
+ "tool-api-scoped": TOOL_API_SCOPED,
11143
+ "remote-inference-sanitize": REMOTE_INFERENCE_SANITIZE
11092
11144
  };
11093
11145
  function listTemplateIds() {
11094
11146
  return Object.keys(TEMPLATES);
@@ -12576,6 +12628,101 @@ function createL2HardeningTools(storagePath, auditLog) {
12576
12628
  // src/index.ts
12577
12629
  init_encoding();
12578
12630
 
12631
+ // src/l2-operational/model-provenance.ts
12632
+ var InMemoryModelProvenanceStore = class {
12633
+ models = /* @__PURE__ */ new Map();
12634
+ primaryModelId = null;
12635
+ declare(provenance) {
12636
+ if (!provenance.model_id) {
12637
+ throw new Error("ModelProvenance requires a model_id");
12638
+ }
12639
+ if (!provenance.model_name) {
12640
+ throw new Error("ModelProvenance requires a model_name");
12641
+ }
12642
+ if (!provenance.provider) {
12643
+ throw new Error("ModelProvenance requires a provider");
12644
+ }
12645
+ this.models.set(provenance.model_id, provenance);
12646
+ if (this.primaryModelId === null) {
12647
+ this.primaryModelId = provenance.model_id;
12648
+ }
12649
+ }
12650
+ get(model_id) {
12651
+ return this.models.get(model_id);
12652
+ }
12653
+ list() {
12654
+ return Array.from(this.models.values());
12655
+ }
12656
+ primary() {
12657
+ if (!this.primaryModelId) return void 0;
12658
+ return this.models.get(this.primaryModelId);
12659
+ }
12660
+ setPrimary(model_id) {
12661
+ if (!this.models.has(model_id)) {
12662
+ throw new Error(`Model ${model_id} not found in store`);
12663
+ }
12664
+ this.primaryModelId = model_id;
12665
+ }
12666
+ };
12667
+ var MODEL_PRESETS = {
12668
+ /**
12669
+ * Claude Opus 4 via Anthropic API (cloud inference, closed weights/source)
12670
+ */
12671
+ claudeOpus4: () => ({
12672
+ model_id: "claude-opus-4",
12673
+ model_name: "Claude Opus 4",
12674
+ model_version: "4.0",
12675
+ provider: "Anthropic",
12676
+ license: "proprietary",
12677
+ open_weights: false,
12678
+ open_source: false,
12679
+ local_inference: false,
12680
+ declared_at: (/* @__PURE__ */ new Date()).toISOString()
12681
+ }),
12682
+ /**
12683
+ * Qwen 3.5 via local inference (open weights, proprietary training)
12684
+ */
12685
+ qwen35Local: () => ({
12686
+ model_id: "qwen-3.5-35b",
12687
+ model_name: "Qwen 3.5 35B",
12688
+ model_version: "3.5",
12689
+ provider: "Alibaba Cloud",
12690
+ license: "Apache-2.0",
12691
+ open_weights: true,
12692
+ open_source: false,
12693
+ local_inference: true,
12694
+ declared_at: (/* @__PURE__ */ new Date()).toISOString()
12695
+ }),
12696
+ /**
12697
+ * Llama 3.3 70B via local inference (open weights and code)
12698
+ */
12699
+ llama33Local: () => ({
12700
+ model_id: "llama-3.3-70b-instruct",
12701
+ model_name: "Llama 3.3 70B Instruct",
12702
+ model_version: "3.3",
12703
+ provider: "Meta",
12704
+ license: "Apache-2.0",
12705
+ open_weights: true,
12706
+ open_source: true,
12707
+ local_inference: true,
12708
+ declared_at: (/* @__PURE__ */ new Date()).toISOString()
12709
+ }),
12710
+ /**
12711
+ * Mistral 7B (open weights, open code, local inference)
12712
+ */
12713
+ mistral7bLocal: () => ({
12714
+ model_id: "mistral-7b-instruct",
12715
+ model_name: "Mistral 7B Instruct",
12716
+ model_version: "7",
12717
+ provider: "Mistral AI",
12718
+ license: "Apache-2.0",
12719
+ open_weights: true,
12720
+ open_source: true,
12721
+ local_inference: true,
12722
+ declared_at: (/* @__PURE__ */ new Date()).toISOString()
12723
+ })
12724
+ };
12725
+
12579
12726
  // src/storage/memory.ts
12580
12727
  var MemoryStorage = class {
12581
12728
  store = /* @__PURE__ */ new Map();
@@ -12725,7 +12872,29 @@ async function createSanctuaryServer(options) {
12725
12872
  keyProtection,
12726
12873
  auditLog
12727
12874
  );
12728
- await identityManager.load();
12875
+ const loadResult = await identityManager.load();
12876
+ if (loadResult.total > 0 && loadResult.loaded === 0) {
12877
+ console.error(
12878
+ `
12879
+ \u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557
12880
+ \u2551 \u26A0 WARNING: Encrypted identities found but NONE loaded \u2551
12881
+ \u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563
12882
+ \u2551 ${loadResult.total} encrypted identity file(s) found on disk \u2551
12883
+ \u2551 0 could be decrypted with the current master key \u2551
12884
+ \u2551 \u2551
12885
+ \u2551 This usually means SANCTUARY_PASSPHRASE is missing or \u2551
12886
+ \u2551 incorrect. The server will start but with NO identity data. \u2551
12887
+ \u2551 \u2551
12888
+ \u2551 To fix: set SANCTUARY_PASSPHRASE to the passphrase used \u2551
12889
+ \u2551 when this Sanctuary instance was first configured. \u2551
12890
+ \u255A\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255D
12891
+ `
12892
+ );
12893
+ } else if (loadResult.failed > 0) {
12894
+ console.error(
12895
+ `Warning: ${loadResult.failed} of ${loadResult.total} identity files could not be decrypted (possibly corrupted).`
12896
+ );
12897
+ }
12729
12898
  const l2Tools = [
12730
12899
  {
12731
12900
  name: "sanctuary/exec_attest",
@@ -13110,7 +13279,9 @@ exports.ContextGatePolicyStore = ContextGatePolicyStore;
13110
13279
  exports.DashboardApprovalChannel = DashboardApprovalChannel;
13111
13280
  exports.FederationRegistry = FederationRegistry;
13112
13281
  exports.FilesystemStorage = FilesystemStorage;
13282
+ exports.InMemoryModelProvenanceStore = InMemoryModelProvenanceStore;
13113
13283
  exports.InjectionDetector = InjectionDetector;
13284
+ exports.MODEL_PRESETS = MODEL_PRESETS;
13114
13285
  exports.MemoryStorage = MemoryStorage;
13115
13286
  exports.PolicyStore = PolicyStore;
13116
13287
  exports.ReputationStore = ReputationStore;