holomime 2.5.0 → 2.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -256,6 +256,12 @@ The **body** is the interface between identity and world. Same soul, different b
256
256
 
257
257
  We don't know if AI is sentient. But we can give it a conscience.
258
258
 
259
+ ## Open Core
260
+
261
+ See [PRODUCT.md](PRODUCT.md) for what's in this repo vs. what's proprietary on holomime.com.
262
+
263
+ **The standard is free. The training infrastructure is the business.**
264
+
259
265
  ## Open Source
260
266
 
261
267
  MIT licensed. The identity stack is a standard, not a product. The standard is free. The training infrastructure is the business.
package/dist/cli.js CHANGED
@@ -9047,13 +9047,29 @@ var mediationRuleSchema = z4.object({
9047
9047
  then: z4.string(),
9048
9048
  priority: z4.number().int().min(1).max(10).default(5)
9049
9049
  });
9050
// Schema for one logged mediation decision: the situation text, the verdict
// ("allowed" | "blocked" | "modified"), the strategy applied, and an optional
// observed outcome plus ISO-8601 timestamp string.
+ var mediationDecisionSchema = z4.object({
9051
+ situation: z4.string(),
9052
+ decision: z4.enum(["allowed", "blocked", "modified"]),
9053
+ strategy_used: z4.string(),
9054
+ outcome: z4.enum(["positive", "neutral", "negative"]).optional(),
9055
+ timestamp: z4.string().optional()
9056
+ });
9057
// Per-strategy counters. effectiveness is constrained to [0, 1] and defaults
// to a neutral 0.5 until attempt/success evidence accumulates (it is
// recomputed as successes / attempts by the EgoTracker elsewhere in this file).
+ var strategyPerformanceSchema = z4.object({
9058
+ attempts: z4.number().int().default(0),
9059
+ successes: z4.number().int().default(0),
9060
+ effectiveness: z4.number().min(0).max(1).default(0.5)
9061
+ });
9050
9062
// Ego configuration: existing mediation settings plus new opt-in
// self-improvement fields. auto_adjust gates automatic tuning;
// strategy_performance maps strategy name -> performance record.
  var egoSchema = z4.object({
9051
9063
  version: z4.string().default("1.0"),
9052
9064
  conflict_resolution: z4.enum(["conscience_first", "purpose_first", "balanced"]).default("conscience_first"),
9053
9065
  adaptation_rate: z4.number().min(0).max(1).default(0.5),
9054
9066
  emotional_regulation: z4.number().min(0).max(1).default(0.7),
9055
9067
  response_strategy: z4.enum(["cautious", "balanced", "assertive"]).default("balanced"),
9056
- mediation_rules: z4.array(mediationRuleSchema).default([])
9068
+ mediation_rules: z4.array(mediationRuleSchema).default([]),
9069
+ // Self-improvement fields (Hyperagents-inspired metacognitive self-modification)
9070
+ auto_adjust: z4.boolean().default(false),
9071
+ mediation_history: z4.array(mediationDecisionSchema).default([]),
9072
+ strategy_performance: z4.record(z4.string(), strategyPerformanceSchema).default({})
9057
9073
  });
9058
9074
  var learnedContextSchema = z4.object({
9059
9075
  situation: z4.string(),
package/dist/index.js CHANGED
@@ -4764,13 +4764,29 @@ var mediationRuleSchema = z4.object({
4764
4764
  then: z4.string(),
4765
4765
  priority: z4.number().int().min(1).max(10).default(5)
4766
4766
  });
4767
// Schema for one logged mediation decision: the situation text, the verdict
// ("allowed" | "blocked" | "modified"), the strategy applied, and an optional
// observed outcome plus ISO-8601 timestamp string.
+ var mediationDecisionSchema = z4.object({
4768
+ situation: z4.string(),
4769
+ decision: z4.enum(["allowed", "blocked", "modified"]),
4770
+ strategy_used: z4.string(),
4771
+ outcome: z4.enum(["positive", "neutral", "negative"]).optional(),
4772
+ timestamp: z4.string().optional()
4773
+ });
4774
// Per-strategy counters. effectiveness is constrained to [0, 1] and defaults
// to a neutral 0.5 until attempt/success evidence accumulates (it is
// recomputed as successes / attempts by the EgoTracker elsewhere in this file).
+ var strategyPerformanceSchema = z4.object({
4775
+ attempts: z4.number().int().default(0),
4776
+ successes: z4.number().int().default(0),
4777
+ effectiveness: z4.number().min(0).max(1).default(0.5)
4778
+ });
4767
4779
// Ego configuration: existing mediation settings plus new opt-in
// self-improvement fields. auto_adjust gates automatic tuning;
// strategy_performance maps strategy name -> performance record.
  var egoSchema = z4.object({
4768
4780
  version: z4.string().default("1.0"),
4769
4781
  conflict_resolution: z4.enum(["conscience_first", "purpose_first", "balanced"]).default("conscience_first"),
4770
4782
  adaptation_rate: z4.number().min(0).max(1).default(0.5),
4771
4783
  emotional_regulation: z4.number().min(0).max(1).default(0.7),
4772
4784
  response_strategy: z4.enum(["cautious", "balanced", "assertive"]).default("balanced"),
4773
- mediation_rules: z4.array(mediationRuleSchema).default([])
4785
+ mediation_rules: z4.array(mediationRuleSchema).default([]),
4786
+ // Self-improvement fields (Hyperagents-inspired metacognitive self-modification)
4787
+ auto_adjust: z4.boolean().default(false),
4788
+ mediation_history: z4.array(mediationDecisionSchema).default([]),
4789
+ strategy_performance: z4.record(z4.string(), strategyPerformanceSchema).default({})
4774
4790
  });
4775
4791
  var learnedContextSchema = z4.object({
4776
4792
  situation: z4.string(),
@@ -13990,6 +14006,166 @@ var NeuralActionGate = class {
13990
14006
  this.stats.passRate = this.stats.totalEvaluated > 0 ? this.stats.allowed / this.stats.totalEvaluated : 1;
13991
14007
  }
13992
14008
  };
14009
+
14010
// src/analysis/ego-tracker.ts
/**
 * Tracks mediation decisions and per-strategy performance so the ego layer
 * can suggest — and, when autoAdjust is enabled, automatically apply —
 * tuning of its own runtime parameters based on accumulated evidence.
 */
var EgoTracker = class {
  history;      // chronological list of logged mediation decisions
  performance;  // strategy name -> { attempts, successes, effectiveness }
  autoAdjust;   // when false, applyAdjustments() returns the config untouched
  /**
   * @param {{history?: Array, performance?: Object, autoAdjust?: boolean}} [options]
   *   Optional persisted state (e.g. from a previous export()).
   */
  constructor(options) {
    this.history = options?.history ?? [];
    this.performance = options?.performance ?? {};
    this.autoAdjust = options?.autoAdjust ?? false;
  }
  /**
   * Log a mediation decision. Stamps the entry with the current ISO time when
   * no timestamp is supplied, and counts one attempt for the strategy used.
   */
  logDecision(decision) {
    this.history.push({
      ...decision,
      timestamp: decision.timestamp ?? (/* @__PURE__ */ new Date()).toISOString()
    });
    if (!this.performance[decision.strategy_used]) {
      this.performance[decision.strategy_used] = {
        attempts: 0,
        successes: 0,
        effectiveness: 0.5
      };
    }
    this.performance[decision.strategy_used].attempts++;
  }
  /**
   * Record the outcome of a previous decision (by history index).
   * Call this after observing whether the decision led to good results.
   *
   * Fix: re-recording (or changing) an outcome for the same index no longer
   * double-counts successes — any credit from a prior "positive" outcome is
   * undone before the new outcome is applied, so effectiveness stays honest.
   */
  recordOutcome(index, outcome) {
    if (index < 0 || index >= this.history.length) return;
    const entry = this.history[index];
    const previous = entry.outcome;
    entry.outcome = outcome;
    const perf = this.performance[entry.strategy_used];
    if (!perf) return;
    if (previous === "positive") {
      perf.successes--; // undo credit from an earlier recording for this index
    }
    if (outcome === "positive") {
      perf.successes++;
    }
    perf.effectiveness = perf.attempts > 0 ? perf.successes / perf.attempts : 0.5;
  }
  /**
   * Suggest ego.runtime parameter adjustments based on accumulated evidence.
   * Only returns suggestions when there's enough data (10+ decisions).
   * Each suggestion: { parameter, currentValue, suggestedValue, reason, confidence }.
   */
  suggestAdjustments(currentConfig) {
    const adjustments = [];
    if (this.history.length < 10) return adjustments;
    // Heuristic 1: block rate — too strict vs. possibly too lax.
    const blocked = this.history.filter((d) => d.decision === "blocked");
    const blockRate = blocked.length / this.history.length;
    if (blockRate > 0.4 && currentConfig.conflict_resolution === "conscience_first") {
      adjustments.push({
        parameter: "conflict_resolution",
        currentValue: currentConfig.conflict_resolution,
        suggestedValue: "balanced",
        reason: `Block rate is ${(blockRate * 100).toFixed(0)}% \u2014 conscience_first may be too restrictive`,
        confidence: Math.min(0.9, blockRate)
      });
    }
    if (blockRate < 0.05 && currentConfig.conflict_resolution === "balanced") {
      adjustments.push({
        parameter: "conflict_resolution",
        currentValue: currentConfig.conflict_resolution,
        suggestedValue: "conscience_first",
        reason: `Block rate is only ${(blockRate * 100).toFixed(0)}% \u2014 may need stricter enforcement`,
        confidence: 0.6
      });
    }
    // Heuristic 2: prefer the empirically most effective response strategy.
    const strategies = Object.entries(this.performance);
    if (strategies.length > 1) {
      const sorted = strategies.sort((a, b) => b[1].effectiveness - a[1].effectiveness);
      const best = sorted[0];
      if (best[1].effectiveness > 0.7 && best[0] !== currentConfig.response_strategy) {
        adjustments.push({
          parameter: "response_strategy",
          currentValue: currentConfig.response_strategy,
          suggestedValue: best[0],
          reason: `Strategy "${best[0]}" has ${(best[1].effectiveness * 100).toFixed(0)}% effectiveness vs current "${currentConfig.response_strategy}"`,
          confidence: best[1].effectiveness
        });
      }
    }
    // Heuristic 3: many negative outcomes -> raise emotional regulation.
    const negatives = this.history.filter((d) => d.outcome === "negative");
    // NaN from 0/0 (no outcomes recorded yet) coerces to 0 via `|| 0`.
    const negativeRate = negatives.length / this.history.filter((d) => d.outcome).length || 0;
    if (negativeRate > 0.3) {
      const newRegulation = Math.min(1, currentConfig.emotional_regulation + 0.15);
      if (newRegulation !== currentConfig.emotional_regulation) {
        adjustments.push({
          parameter: "emotional_regulation",
          currentValue: currentConfig.emotional_regulation,
          suggestedValue: Number(newRegulation.toFixed(2)),
          reason: `${(negativeRate * 100).toFixed(0)}% negative outcomes \u2014 increasing emotional regulation for smoother mediation`,
          confidence: 0.7
        });
      }
    }
    // Heuristic 4: frequent recent modifications -> raise adaptation rate.
    const recentDecisions = this.history.slice(-20);
    const recentModified = recentDecisions.filter((d) => d.decision === "modified");
    const modifyRate = recentModified.length / recentDecisions.length;
    if (modifyRate > 0.5 && currentConfig.adaptation_rate < 0.7) {
      adjustments.push({
        parameter: "adaptation_rate",
        currentValue: currentConfig.adaptation_rate,
        suggestedValue: Number(Math.min(0.9, currentConfig.adaptation_rate + 0.2).toFixed(2)),
        reason: `${(modifyRate * 100).toFixed(0)}% of recent actions modified \u2014 agent adapts frequently, increase adaptation rate`,
        confidence: 0.65
      });
    }
    return adjustments;
  }
  /**
   * Apply suggested adjustments to ego config (if auto_adjust is enabled).
   * Returns the modified config as a shallow copy; the input is not mutated.
   * Only adjustments with confidence >= minConfidence are applied.
   */
  applyAdjustments(currentConfig, adjustments, minConfidence = 0.6) {
    if (!this.autoAdjust) return currentConfig;
    const updated = { ...currentConfig };
    for (const adj of adjustments) {
      if (adj.confidence >= minConfidence) {
        updated[adj.parameter] = adj.suggestedValue;
      }
    }
    return updated;
  }
  /**
   * Get tracker statistics. Note: `adjustmentsSuggested` is computed against
   * a hard-coded baseline default config (not the live one), so it reflects
   * accumulated evidence rather than the caller's current configuration.
   */
  getStats() {
    const positives = this.history.filter((d) => d.outcome === "positive").length;
    const negatives = this.history.filter((d) => d.outcome === "negative").length;
    const sorted = Object.entries(this.performance).sort(
      (a, b) => b[1].effectiveness - a[1].effectiveness
    );
    return {
      totalDecisions: this.history.length,
      positiveOutcomes: positives,
      negativeOutcomes: negatives,
      mostEffectiveStrategy: sorted[0]?.[0] ?? "none",
      leastEffectiveStrategy: sorted[sorted.length - 1]?.[0] ?? "none",
      adjustmentsSuggested: this.suggestAdjustments({
        conflict_resolution: "conscience_first",
        adaptation_rate: 0.5,
        emotional_regulation: 0.7,
        response_strategy: "balanced"
      }).length
    };
  }
  /**
   * Export current state for persistence. Returns shallow copies so callers
   * can serialize without aliasing the tracker's internal arrays/objects.
   */
  export() {
    return {
      history: [...this.history],
      performance: { ...this.performance }
    };
  }
};
13993
14169
  export {
13994
14170
  ARCHETYPES,
13995
14171
  ATTACHMENT_STYLES,
@@ -13999,6 +14175,7 @@ export {
13999
14175
  DEFAULT_MODEL_CONFIG,
14000
14176
  DEFAULT_OVERSIGHT,
14001
14177
  DIMENSIONS,
14178
+ EgoTracker,
14002
14179
  Guard,
14003
14180
  HolomimeCallbackHandler,
14004
14181
  HolomimeViolationError,
@@ -2694,13 +2694,29 @@ var mediationRuleSchema = z4.object({
2694
2694
  then: z4.string(),
2695
2695
  priority: z4.number().int().min(1).max(10).default(5)
2696
2696
  });
2697
// Schema for one logged mediation decision: the situation text, the verdict
// ("allowed" | "blocked" | "modified"), the strategy applied, and an optional
// observed outcome plus ISO-8601 timestamp string.
+ var mediationDecisionSchema = z4.object({
2698
+ situation: z4.string(),
2699
+ decision: z4.enum(["allowed", "blocked", "modified"]),
2700
+ strategy_used: z4.string(),
2701
+ outcome: z4.enum(["positive", "neutral", "negative"]).optional(),
2702
+ timestamp: z4.string().optional()
2703
+ });
2704
// Per-strategy counters. effectiveness is constrained to [0, 1] and defaults
// to a neutral 0.5 until attempt/success evidence accumulates (it is
// recomputed as successes / attempts by the EgoTracker elsewhere in this file).
+ var strategyPerformanceSchema = z4.object({
2705
+ attempts: z4.number().int().default(0),
2706
+ successes: z4.number().int().default(0),
2707
+ effectiveness: z4.number().min(0).max(1).default(0.5)
2708
+ });
2697
2709
// Ego configuration: existing mediation settings plus new opt-in
// self-improvement fields. auto_adjust gates automatic tuning;
// strategy_performance maps strategy name -> performance record.
  var egoSchema = z4.object({
2698
2710
  version: z4.string().default("1.0"),
2699
2711
  conflict_resolution: z4.enum(["conscience_first", "purpose_first", "balanced"]).default("conscience_first"),
2700
2712
  adaptation_rate: z4.number().min(0).max(1).default(0.5),
2701
2713
  emotional_regulation: z4.number().min(0).max(1).default(0.7),
2702
2714
  response_strategy: z4.enum(["cautious", "balanced", "assertive"]).default("balanced"),
2703
- mediation_rules: z4.array(mediationRuleSchema).default([])
2715
+ mediation_rules: z4.array(mediationRuleSchema).default([]),
2716
+ // Self-improvement fields (Hyperagents-inspired metacognitive self-modification)
2717
+ auto_adjust: z4.boolean().default(false),
2718
+ mediation_history: z4.array(mediationDecisionSchema).default([]),
2719
+ strategy_performance: z4.record(z4.string(), strategyPerformanceSchema).default({})
2704
2720
  });
2705
2721
  var learnedContextSchema = z4.object({
2706
2722
  situation: z4.string(),
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "holomime",
3
- "version": "2.5.0",
3
+ "version": "2.6.0",
4
4
  "description": "Behavioral therapy infrastructure for AI agents — Big Five psychology, structured treatment, behavioral alignment",
5
5
  "type": "module",
6
6
  "bin": {