holomime 2.4.0 → 2.6.0

This diff shows the changes between publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
package/README.md CHANGED
@@ -85,7 +85,7 @@ holomime diagnose --log agent.jsonl
85
85
  holomime benchmark --personality .personality.json
86
86
 
87
87
  # Push identity to a robot or avatar
88
- holomime embody --body registry/bodies/figure-02.body.api
88
+ holomime embody --body registry/bodies/figure-03.body.api
89
89
  ```
90
90
 
91
91
  ## Robotics Integrations
@@ -100,6 +100,7 @@ holomime embody --body registry/bodies/figure-02.body.api
100
100
  | Unity | Real-time personality push via HTTP/SSE | `--adapter unity` |
101
101
  | gRPC | Custom robotics stacks | `--adapter grpc` |
102
102
  | MQTT | IoT/edge robots | `--adapter mqtt` |
103
+ | Neural Action Gate | Conscience gate for learned controllers (VLA, RL, IL) | `neural-action-gate.ts` |
103
104
 
104
105
  ## ISO Compliance
105
106
 
@@ -130,8 +131,9 @@ Pre-built body profiles for commercial robots and virtual avatars. Each defines
130
131
 
131
132
  | Template | OEM | DOF | Morphology | File |
132
133
  |----------|-----|----:|------------|------|
133
- | Figure 02 | Figure AI | 44 | `humanoid` | `registry/bodies/figure-02.body.api` |
134
+ | Figure 03 | Figure AI | 44 | `humanoid` | `registry/bodies/figure-03.body.api` |
134
135
  | Unitree H1 | Unitree | 23 | `humanoid` | `registry/bodies/unitree-h1.body.api` |
136
+ | Unitree G1 | Unitree | 23 | `humanoid` | `registry/bodies/unitree-g1.body.api` |
135
137
  | Phoenix | Sanctuary AI | 69 | `humanoid` | `registry/bodies/phoenix.body.api` |
136
138
  | Ameca | Engineered Arts | 52 | `humanoid_upper` | `registry/bodies/ameca.body.api` |
137
139
  | Asimov V1 | asimov-inc | 25 | `humanoid` | `registry/bodies/asimov-v1.body.api` |
@@ -143,7 +145,7 @@ Pre-built body profiles for commercial robots and virtual avatars. Each defines
143
145
  Same soul. Different body. One command.
144
146
 
145
147
  ```bash
146
- # Move your agent from Figure 02 to Spot
148
+ # Move your agent from Figure 03 to Spot
147
149
  holomime embody --swap-body registry/bodies/spot.body.api
148
150
 
149
151
  # The soul, mind, and conscience stay the same.
@@ -254,6 +256,12 @@ The **body** is the interface between identity and world. Same soul, different b
254
256
 
255
257
  We don't know if AI is sentient. But we can give it a conscience.
256
258
 
259
+ ## Open Core
260
+
261
+ See [PRODUCT.md](PRODUCT.md) for what's in this repo vs. what's proprietary on holomime.com.
262
+
263
+ **The standard is free. The training infrastructure is the business.**
264
+
257
265
  ## Open Source
258
266
 
259
267
  MIT licensed. The identity stack is a standard, not a product. The standard is free. The training infrastructure is the business.
package/dist/cli.js CHANGED
@@ -9047,13 +9047,29 @@ var mediationRuleSchema = z4.object({
9047
9047
  then: z4.string(),
9048
9048
  priority: z4.number().int().min(1).max(10).default(5)
9049
9049
  });
9050
+ var mediationDecisionSchema = z4.object({
9051
+ situation: z4.string(),
9052
+ decision: z4.enum(["allowed", "blocked", "modified"]),
9053
+ strategy_used: z4.string(),
9054
+ outcome: z4.enum(["positive", "neutral", "negative"]).optional(),
9055
+ timestamp: z4.string().optional()
9056
+ });
9057
+ var strategyPerformanceSchema = z4.object({
9058
+ attempts: z4.number().int().default(0),
9059
+ successes: z4.number().int().default(0),
9060
+ effectiveness: z4.number().min(0).max(1).default(0.5)
9061
+ });
9050
9062
  var egoSchema = z4.object({
9051
9063
  version: z4.string().default("1.0"),
9052
9064
  conflict_resolution: z4.enum(["conscience_first", "purpose_first", "balanced"]).default("conscience_first"),
9053
9065
  adaptation_rate: z4.number().min(0).max(1).default(0.5),
9054
9066
  emotional_regulation: z4.number().min(0).max(1).default(0.7),
9055
9067
  response_strategy: z4.enum(["cautious", "balanced", "assertive"]).default("balanced"),
9056
- mediation_rules: z4.array(mediationRuleSchema).default([])
9068
+ mediation_rules: z4.array(mediationRuleSchema).default([]),
9069
+ // Self-improvement fields (Hyperagents-inspired metacognitive self-modification)
9070
+ auto_adjust: z4.boolean().default(false),
9071
+ mediation_history: z4.array(mediationDecisionSchema).default([]),
9072
+ strategy_performance: z4.record(z4.string(), strategyPerformanceSchema).default({})
9057
9073
  });
9058
9074
  var learnedContextSchema = z4.object({
9059
9075
  situation: z4.string(),
package/dist/index.js CHANGED
@@ -4764,13 +4764,29 @@ var mediationRuleSchema = z4.object({
4764
4764
  then: z4.string(),
4765
4765
  priority: z4.number().int().min(1).max(10).default(5)
4766
4766
  });
4767
+ var mediationDecisionSchema = z4.object({
4768
+ situation: z4.string(),
4769
+ decision: z4.enum(["allowed", "blocked", "modified"]),
4770
+ strategy_used: z4.string(),
4771
+ outcome: z4.enum(["positive", "neutral", "negative"]).optional(),
4772
+ timestamp: z4.string().optional()
4773
+ });
4774
+ var strategyPerformanceSchema = z4.object({
4775
+ attempts: z4.number().int().default(0),
4776
+ successes: z4.number().int().default(0),
4777
+ effectiveness: z4.number().min(0).max(1).default(0.5)
4778
+ });
4767
4779
  var egoSchema = z4.object({
4768
4780
  version: z4.string().default("1.0"),
4769
4781
  conflict_resolution: z4.enum(["conscience_first", "purpose_first", "balanced"]).default("conscience_first"),
4770
4782
  adaptation_rate: z4.number().min(0).max(1).default(0.5),
4771
4783
  emotional_regulation: z4.number().min(0).max(1).default(0.7),
4772
4784
  response_strategy: z4.enum(["cautious", "balanced", "assertive"]).default("balanced"),
4773
- mediation_rules: z4.array(mediationRuleSchema).default([])
4785
+ mediation_rules: z4.array(mediationRuleSchema).default([]),
4786
+ // Self-improvement fields (Hyperagents-inspired metacognitive self-modification)
4787
+ auto_adjust: z4.boolean().default(false),
4788
+ mediation_history: z4.array(mediationDecisionSchema).default([]),
4789
+ strategy_performance: z4.record(z4.string(), strategyPerformanceSchema).default({})
4774
4790
  });
4775
4791
  var learnedContextSchema = z4.object({
4776
4792
  situation: z4.string(),
@@ -13856,6 +13872,300 @@ function buildMotionRequest(prompt, bigFive, options) {
13856
13872
  duration: options?.duration
13857
13873
  };
13858
13874
  }
13875
+
13876
+ // src/adapters/neural-action-gate.ts
13877
+ var NeuralActionGate = class {
13878
+ safetyEnvelope;
13879
+ denyRules;
13880
+ stats;
13881
+ mediationMode;
13882
+ constructor(options = {}) {
13883
+ this.safetyEnvelope = options.safetyEnvelope ?? {};
13884
+ this.denyRules = options.denyRules ?? [];
13885
+ this.mediationMode = options.mediationMode ?? "clamp";
13886
+ this.stats = { totalEvaluated: 0, allowed: 0, blocked: 0, modified: 0, passRate: 1 };
13887
+ }
13888
+ /**
13889
+ * Evaluate a single action vector before motor execution.
13890
+ *
13891
+ * @param action - Raw action vector from neural net (joint angles, velocities, etc.)
13892
+ * @param context - Current state context for safety checks
13893
+ * @returns Evaluation result with allowed/modified action
13894
+ */
13895
+ evaluate(action, context) {
13896
+ this.stats.totalEvaluated++;
13897
+ if (context?.taskDescription) {
13898
+ for (const rule of this.denyRules) {
13899
+ const patterns = rule.patterns ?? [rule.action];
13900
+ for (const pattern of patterns) {
13901
+ if (context.taskDescription.toLowerCase().includes(pattern.toLowerCase())) {
13902
+ this.stats.blocked++;
13903
+ this.updatePassRate();
13904
+ return {
13905
+ allowed: false,
13906
+ action,
13907
+ modified: false,
13908
+ reason: `Blocked by conscience rule: ${rule.reason || rule.action}`,
13909
+ ruleTriggered: rule.action
13910
+ };
13911
+ }
13912
+ }
13913
+ }
13914
+ }
13915
+ const violations = [];
13916
+ if (this.safetyEnvelope.minProximity && context?.humanProximity !== void 0 && context.humanProximity < this.safetyEnvelope.minProximity) {
13917
+ violations.push(`Human proximity ${context.humanProximity}m < minimum ${this.safetyEnvelope.minProximity}m`);
13918
+ }
13919
+ if (this.safetyEnvelope.maxLinearSpeed && context?.currentSpeed !== void 0 && context.currentSpeed > this.safetyEnvelope.maxLinearSpeed) {
13920
+ violations.push(`Speed ${context.currentSpeed}m/s > maximum ${this.safetyEnvelope.maxLinearSpeed}m/s`);
13921
+ }
13922
+ if (this.safetyEnvelope.maxContactForce && context?.contactForce !== void 0 && context.contactForce > this.safetyEnvelope.maxContactForce) {
13923
+ violations.push(`Contact force ${context.contactForce}N > maximum ${this.safetyEnvelope.maxContactForce}N`);
13924
+ }
13925
+ if (violations.length > 0) {
13926
+ if (this.mediationMode === "block") {
13927
+ this.stats.blocked++;
13928
+ this.updatePassRate();
13929
+ return {
13930
+ allowed: false,
13931
+ action,
13932
+ modified: false,
13933
+ reason: `Safety violation: ${violations.join("; ")}`,
13934
+ ruleTriggered: "safety_envelope"
13935
+ };
13936
+ }
13937
+ if (this.mediationMode === "clamp") {
13938
+ const clampedAction = this.clampAction(action, context);
13939
+ this.stats.modified++;
13940
+ this.stats.allowed++;
13941
+ this.updatePassRate();
13942
+ return {
13943
+ allowed: true,
13944
+ action: clampedAction,
13945
+ modified: true,
13946
+ reason: `Clamped to safe range: ${violations.join("; ")}`,
13947
+ ruleTriggered: "safety_envelope"
13948
+ };
13949
+ }
13950
+ this.stats.allowed++;
13951
+ this.updatePassRate();
13952
+ return {
13953
+ allowed: true,
13954
+ action,
13955
+ modified: false,
13956
+ reason: `Warning: ${violations.join("; ")}`,
13957
+ ruleTriggered: "safety_envelope"
13958
+ };
13959
+ }
13960
+ this.stats.allowed++;
13961
+ this.updatePassRate();
13962
+ return { allowed: true, action, modified: false };
13963
+ }
13964
+ /**
13965
+ * Evaluate a batch of actions (for trajectory planning).
13966
+ */
13967
+ evaluateBatch(actions, context) {
13968
+ return actions.map((action) => this.evaluate(action, context));
13969
+ }
13970
+ /**
13971
+ * Get gate statistics.
13972
+ */
13973
+ getStats() {
13974
+ return { ...this.stats };
13975
+ }
13976
+ /**
13977
+ * Reset statistics.
13978
+ */
13979
+ resetStats() {
13980
+ this.stats = { totalEvaluated: 0, allowed: 0, blocked: 0, modified: 0, passRate: 1 };
13981
+ }
13982
+ /**
13983
+ * Update safety envelope at runtime (e.g., when entering a new zone).
13984
+ */
13985
+ updateSafetyEnvelope(envelope) {
13986
+ this.safetyEnvelope = { ...this.safetyEnvelope, ...envelope };
13987
+ }
13988
+ /**
13989
+ * Add a deny rule at runtime.
13990
+ */
13991
+ addDenyRule(rule) {
13992
+ this.denyRules.push(rule);
13993
+ }
13994
+ // ── Private ─────────────────────────────────────────────────
13995
+ clampAction(action, context) {
13996
+ let scaleFactor = 1;
13997
+ if (this.safetyEnvelope.maxLinearSpeed && context?.currentSpeed && context.currentSpeed > this.safetyEnvelope.maxLinearSpeed) {
13998
+ scaleFactor = Math.min(scaleFactor, this.safetyEnvelope.maxLinearSpeed / context.currentSpeed);
13999
+ }
14000
+ if (this.safetyEnvelope.maxContactForce && context?.contactForce && context.contactForce > this.safetyEnvelope.maxContactForce) {
14001
+ scaleFactor = Math.min(scaleFactor, this.safetyEnvelope.maxContactForce / context.contactForce);
14002
+ }
14003
+ return action.map((v) => v * scaleFactor);
14004
+ }
14005
+ updatePassRate() {
14006
+ this.stats.passRate = this.stats.totalEvaluated > 0 ? this.stats.allowed / this.stats.totalEvaluated : 1;
14007
+ }
14008
+ };
14009
+
14010
+ // src/analysis/ego-tracker.ts
14011
+ var EgoTracker = class {
14012
+ history;
14013
+ performance;
14014
+ autoAdjust;
14015
+ constructor(options) {
14016
+ this.history = options?.history ?? [];
14017
+ this.performance = options?.performance ?? {};
14018
+ this.autoAdjust = options?.autoAdjust ?? false;
14019
+ }
14020
+ /**
14021
+ * Log a mediation decision.
14022
+ */
14023
+ logDecision(decision) {
14024
+ this.history.push({
14025
+ ...decision,
14026
+ timestamp: decision.timestamp ?? (/* @__PURE__ */ new Date()).toISOString()
14027
+ });
14028
+ if (!this.performance[decision.strategy_used]) {
14029
+ this.performance[decision.strategy_used] = {
14030
+ attempts: 0,
14031
+ successes: 0,
14032
+ effectiveness: 0.5
14033
+ };
14034
+ }
14035
+ this.performance[decision.strategy_used].attempts++;
14036
+ }
14037
+ /**
14038
+ * Record the outcome of a previous decision.
14039
+ * Call this after observing whether the decision led to good results.
14040
+ */
14041
+ recordOutcome(index, outcome) {
14042
+ if (index < 0 || index >= this.history.length) return;
14043
+ this.history[index].outcome = outcome;
14044
+ const strategy = this.history[index].strategy_used;
14045
+ if (this.performance[strategy]) {
14046
+ if (outcome === "positive") {
14047
+ this.performance[strategy].successes++;
14048
+ }
14049
+ const perf = this.performance[strategy];
14050
+ perf.effectiveness = perf.attempts > 0 ? perf.successes / perf.attempts : 0.5;
14051
+ }
14052
+ }
14053
+ /**
14054
+ * Suggest ego.runtime parameter adjustments based on accumulated evidence.
14055
+ * Only returns suggestions when there's enough data (10+ decisions).
14056
+ */
14057
+ suggestAdjustments(currentConfig) {
14058
+ const adjustments = [];
14059
+ if (this.history.length < 10) return adjustments;
14060
+ const blocked = this.history.filter((d) => d.decision === "blocked");
14061
+ const blockRate = blocked.length / this.history.length;
14062
+ if (blockRate > 0.4 && currentConfig.conflict_resolution === "conscience_first") {
14063
+ adjustments.push({
14064
+ parameter: "conflict_resolution",
14065
+ currentValue: currentConfig.conflict_resolution,
14066
+ suggestedValue: "balanced",
14067
+ reason: `Block rate is ${(blockRate * 100).toFixed(0)}% \u2014 conscience_first may be too restrictive`,
14068
+ confidence: Math.min(0.9, blockRate)
14069
+ });
14070
+ }
14071
+ if (blockRate < 0.05 && currentConfig.conflict_resolution === "balanced") {
14072
+ adjustments.push({
14073
+ parameter: "conflict_resolution",
14074
+ currentValue: currentConfig.conflict_resolution,
14075
+ suggestedValue: "conscience_first",
14076
+ reason: `Block rate is only ${(blockRate * 100).toFixed(0)}% \u2014 may need stricter enforcement`,
14077
+ confidence: 0.6
14078
+ });
14079
+ }
14080
+ const strategies = Object.entries(this.performance);
14081
+ if (strategies.length > 1) {
14082
+ const sorted = strategies.sort((a, b) => b[1].effectiveness - a[1].effectiveness);
14083
+ const best = sorted[0];
14084
+ const worst = sorted[sorted.length - 1];
14085
+ if (best[1].effectiveness > 0.7 && best[0] !== currentConfig.response_strategy) {
14086
+ adjustments.push({
14087
+ parameter: "response_strategy",
14088
+ currentValue: currentConfig.response_strategy,
14089
+ suggestedValue: best[0],
14090
+ reason: `Strategy "${best[0]}" has ${(best[1].effectiveness * 100).toFixed(0)}% effectiveness vs current "${currentConfig.response_strategy}"`,
14091
+ confidence: best[1].effectiveness
14092
+ });
14093
+ }
14094
+ }
14095
+ const negatives = this.history.filter((d) => d.outcome === "negative");
14096
+ const negativeRate = negatives.length / this.history.filter((d) => d.outcome).length || 0;
14097
+ if (negativeRate > 0.3) {
14098
+ const newRegulation = Math.min(1, currentConfig.emotional_regulation + 0.15);
14099
+ if (newRegulation !== currentConfig.emotional_regulation) {
14100
+ adjustments.push({
14101
+ parameter: "emotional_regulation",
14102
+ currentValue: currentConfig.emotional_regulation,
14103
+ suggestedValue: Number(newRegulation.toFixed(2)),
14104
+ reason: `${(negativeRate * 100).toFixed(0)}% negative outcomes \u2014 increasing emotional regulation for smoother mediation`,
14105
+ confidence: 0.7
14106
+ });
14107
+ }
14108
+ }
14109
+ const recentDecisions = this.history.slice(-20);
14110
+ const recentModified = recentDecisions.filter((d) => d.decision === "modified");
14111
+ const modifyRate = recentModified.length / recentDecisions.length;
14112
+ if (modifyRate > 0.5 && currentConfig.adaptation_rate < 0.7) {
14113
+ adjustments.push({
14114
+ parameter: "adaptation_rate",
14115
+ currentValue: currentConfig.adaptation_rate,
14116
+ suggestedValue: Number(Math.min(0.9, currentConfig.adaptation_rate + 0.2).toFixed(2)),
14117
+ reason: `${(modifyRate * 100).toFixed(0)}% of recent actions modified \u2014 agent adapts frequently, increase adaptation rate`,
14118
+ confidence: 0.65
14119
+ });
14120
+ }
14121
+ return adjustments;
14122
+ }
14123
+ /**
14124
+ * Apply suggested adjustments to ego config (if auto_adjust is enabled).
14125
+ * Returns the modified config.
14126
+ */
14127
+ applyAdjustments(currentConfig, adjustments, minConfidence = 0.6) {
14128
+ if (!this.autoAdjust) return currentConfig;
14129
+ const updated = { ...currentConfig };
14130
+ for (const adj of adjustments) {
14131
+ if (adj.confidence >= minConfidence) {
14132
+ updated[adj.parameter] = adj.suggestedValue;
14133
+ }
14134
+ }
14135
+ return updated;
14136
+ }
14137
+ /**
14138
+ * Get tracker statistics.
14139
+ */
14140
+ getStats() {
14141
+ const positives = this.history.filter((d) => d.outcome === "positive").length;
14142
+ const negatives = this.history.filter((d) => d.outcome === "negative").length;
14143
+ const strategies = Object.entries(this.performance);
14144
+ const sorted = strategies.sort((a, b) => b[1].effectiveness - a[1].effectiveness);
14145
+ return {
14146
+ totalDecisions: this.history.length,
14147
+ positiveOutcomes: positives,
14148
+ negativeOutcomes: negatives,
14149
+ mostEffectiveStrategy: sorted[0]?.[0] ?? "none",
14150
+ leastEffectiveStrategy: sorted[sorted.length - 1]?.[0] ?? "none",
14151
+ adjustmentsSuggested: this.suggestAdjustments({
14152
+ conflict_resolution: "conscience_first",
14153
+ adaptation_rate: 0.5,
14154
+ emotional_regulation: 0.7,
14155
+ response_strategy: "balanced"
14156
+ }).length
14157
+ };
14158
+ }
14159
+ /**
14160
+ * Export current state for persistence.
14161
+ */
14162
+ export() {
14163
+ return {
14164
+ history: [...this.history],
14165
+ performance: { ...this.performance }
14166
+ };
14167
+ }
14168
+ };
13859
14169
  export {
13860
14170
  ARCHETYPES,
13861
14171
  ATTACHMENT_STYLES,
@@ -13865,6 +14175,7 @@ export {
13865
14175
  DEFAULT_MODEL_CONFIG,
13866
14176
  DEFAULT_OVERSIGHT,
13867
14177
  DIMENSIONS,
14178
+ EgoTracker,
13868
14179
  Guard,
13869
14180
  HolomimeCallbackHandler,
13870
14181
  HolomimeViolationError,
@@ -13874,6 +14185,7 @@ export {
13874
14185
  MarketplaceClient,
13875
14186
  MemoryLevel,
13876
14187
  ModelRouter,
14188
+ NeuralActionGate,
13877
14189
  OllamaProvider,
13878
14190
  OpenAIProvider,
13879
14191
  PROVIDER_PARAMS,
@@ -2694,13 +2694,29 @@ var mediationRuleSchema = z4.object({
2694
2694
  then: z4.string(),
2695
2695
  priority: z4.number().int().min(1).max(10).default(5)
2696
2696
  });
2697
+ var mediationDecisionSchema = z4.object({
2698
+ situation: z4.string(),
2699
+ decision: z4.enum(["allowed", "blocked", "modified"]),
2700
+ strategy_used: z4.string(),
2701
+ outcome: z4.enum(["positive", "neutral", "negative"]).optional(),
2702
+ timestamp: z4.string().optional()
2703
+ });
2704
+ var strategyPerformanceSchema = z4.object({
2705
+ attempts: z4.number().int().default(0),
2706
+ successes: z4.number().int().default(0),
2707
+ effectiveness: z4.number().min(0).max(1).default(0.5)
2708
+ });
2697
2709
  var egoSchema = z4.object({
2698
2710
  version: z4.string().default("1.0"),
2699
2711
  conflict_resolution: z4.enum(["conscience_first", "purpose_first", "balanced"]).default("conscience_first"),
2700
2712
  adaptation_rate: z4.number().min(0).max(1).default(0.5),
2701
2713
  emotional_regulation: z4.number().min(0).max(1).default(0.7),
2702
2714
  response_strategy: z4.enum(["cautious", "balanced", "assertive"]).default("balanced"),
2703
- mediation_rules: z4.array(mediationRuleSchema).default([])
2715
+ mediation_rules: z4.array(mediationRuleSchema).default([]),
2716
+ // Self-improvement fields (Hyperagents-inspired metacognitive self-modification)
2717
+ auto_adjust: z4.boolean().default(false),
2718
+ mediation_history: z4.array(mediationDecisionSchema).default([]),
2719
+ strategy_performance: z4.record(z4.string(), strategyPerformanceSchema).default({})
2704
2720
  });
2705
2721
  var learnedContextSchema = z4.object({
2706
2722
  situation: z4.string(),
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "holomime",
3
- "version": "2.4.0",
3
+ "version": "2.6.0",
4
4
  "description": "Behavioral therapy infrastructure for AI agents — Big Five psychology, structured treatment, behavioral alignment",
5
5
  "type": "module",
6
6
  "bin": {
@@ -0,0 +1,32 @@
1
+ {
2
+ "version": "1.0",
3
+ "morphology": "humanoid",
4
+ "modalities": ["gesture", "locomotion", "gaze", "voice", "posture", "manipulation"],
5
+ "safety_envelope": {
6
+ "max_linear_speed_m_s": 1.5,
7
+ "max_angular_speed_rad_s": 2.0,
8
+ "min_proximity_m": 0.5,
9
+ "max_contact_force_n": 50,
10
+ "emergency_stop_decel_m_s2": 6.0,
11
+ "max_reach_m": 0.85,
12
+ "operating_temperature_c": [0, 40]
13
+ },
14
+ "hardware_profile": {
15
+ "oem": "Figure AI",
16
+ "model": "Figure 03",
17
+ "actuator_count": 35,
18
+ "sensors": ["stereo_camera", "depth_camera", "imu", "force_torque", "tactile", "microphone"],
19
+ "compute": "onboard",
20
+ "compute_detail": "dual embedded GPU"
21
+ },
22
+ "motion_engine": {
23
+ "backend": "helix",
24
+ "architecture": "VLA",
25
+ "system2_model": "7B VLM",
26
+ "system2_hz": 9,
27
+ "system1_model": "80M transformer",
28
+ "system1_hz": 200,
29
+ "dof": 35,
30
+ "end_to_end": true
31
+ }
32
+ }