holomime 2.3.0 → 2.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -3
- package/dist/index.js +210 -0
- package/package.json +1 -1
- package/registry/bodies/figure-03.body.api +32 -0
- package/registry/bodies/unitree-g1.body.api +27 -0
- package/dist/index.d.ts +0 -7834
- package/registry/bodies/figure-02.body.api +0 -21
package/README.md
CHANGED
|
@@ -85,7 +85,7 @@ holomime diagnose --log agent.jsonl
|
|
|
85
85
|
holomime benchmark --personality .personality.json
|
|
86
86
|
|
|
87
87
|
# Push identity to a robot or avatar
|
|
88
|
-
holomime embody --body registry/bodies/figure-02.body.api
|
|
88
|
+
holomime embody --body registry/bodies/figure-03.body.api
|
|
89
89
|
```
|
|
90
90
|
|
|
91
91
|
## Robotics Integrations
|
|
@@ -96,9 +96,11 @@ holomime embody --body registry/bodies/figure-02.body.api
|
|
|
96
96
|
| MuJoCo | Behavioral therapy in simulation -- sim-to-real for behavior | `mujoco-env.ts` + `sim-therapy.ts` |
|
|
97
97
|
| NVIDIA Isaac Sim | Enterprise digital twin testing with PhysX physics | `--adapter isaac` + `isaac-env.ts` |
|
|
98
98
|
| LeRobot (HuggingFace) | Personality to policy parameter mapping, DPO dataset export | `lerobot.ts` |
|
|
99
|
+
| NVIDIA Kimodo | Personality → motion style | `kimodo-personality-mapper.ts` |
|
|
99
100
|
| Unity | Real-time personality push via HTTP/SSE | `--adapter unity` |
|
|
100
101
|
| gRPC | Custom robotics stacks | `--adapter grpc` |
|
|
101
102
|
| MQTT | IoT/edge robots | `--adapter mqtt` |
|
|
103
|
+
| Neural Action Gate | Conscience gate for learned controllers (VLA, RL, IL) | `neural-action-gate.ts` |
|
|
102
104
|
|
|
103
105
|
## ISO Compliance
|
|
104
106
|
|
|
@@ -129,8 +131,9 @@ Pre-built body profiles for commercial robots and virtual avatars. Each defines
|
|
|
129
131
|
|
|
130
132
|
| Template | OEM | DOF | Morphology | File |
|
|
131
133
|
|----------|-----|----:|------------|------|
|
|
132
|
-
| Figure
|
|
134
|
+
| Figure 03 | Figure AI | 44 | `humanoid` | `registry/bodies/figure-03.body.api` |
|
|
133
135
|
| Unitree H1 | Unitree | 23 | `humanoid` | `registry/bodies/unitree-h1.body.api` |
|
|
136
|
+
| Unitree G1 | Unitree | 23 | `humanoid` | `registry/bodies/unitree-g1.body.api` |
|
|
134
137
|
| Phoenix | Sanctuary AI | 69 | `humanoid` | `registry/bodies/phoenix.body.api` |
|
|
135
138
|
| Ameca | Engineered Arts | 52 | `humanoid_upper` | `registry/bodies/ameca.body.api` |
|
|
136
139
|
| Asimov V1 | asimov-inc | 25 | `humanoid` | `registry/bodies/asimov-v1.body.api` |
|
|
@@ -142,7 +145,7 @@ Pre-built body profiles for commercial robots and virtual avatars. Each defines
|
|
|
142
145
|
Same soul. Different body. One command.
|
|
143
146
|
|
|
144
147
|
```bash
|
|
145
|
-
# Move your agent from Figure
|
|
148
|
+
# Move your agent from Figure 03 to Spot
|
|
146
149
|
holomime embody --swap-body registry/bodies/spot.body.api
|
|
147
150
|
|
|
148
151
|
# The soul, mind, and conscience stay the same.
|
package/dist/index.js
CHANGED
|
@@ -13784,6 +13784,212 @@ function loadSpecWithStack(specPath) {
|
|
|
13784
13784
|
}
|
|
13785
13785
|
return loadSpec(specPath);
|
|
13786
13786
|
}
|
|
13787
|
+
|
|
13788
|
+
// src/integrations/kimodo-personality-mapper.ts
|
|
13789
|
+
/**
 * Translate a Big Five personality profile into a motion-style descriptor.
 *
 * Each output channel is an affine blend of two facet scores, so facet values
 * in [0, 1] keep every channel inside its intended band (e.g. precision in
 * [0.3, 1.0], amplitude in [0.2, 1.0]).
 *
 * @param {object} bigFive - Personality profile with `facets` maps under
 *   agreeableness, conscientiousness, extraversion, emotional_stability,
 *   and openness.
 * @returns {object} Style channels: approachGentleness, movementPrecision,
 *   gestureAmplitude, motionSmoothness, movementVariety, paceMultiplier
 *   (clamped to [0.3, 1.5]).
 */
function mapPersonalityToMotionStyle(bigFive) {
  const {
    agreeableness: { facets: agree },
    conscientiousness: { facets: cons },
    extraversion: { facets: extra },
    emotional_stability: { facets: stability },
    openness: { facets: open },
  } = bigFive;

  // sqrt lifts low/mid gentleness values so even mildly agreeable agents approach softly.
  const approachGentleness = Math.sqrt((agree.cooperation + agree.warmth) / 2);
  const movementPrecision = 0.3 + ((cons.attention_to_detail + cons.orderliness) / 2) * 0.7;
  const gestureAmplitude = 0.2 + ((extra.enthusiasm + extra.assertiveness) / 2) * 0.8;
  const motionSmoothness = 0.4 + ((stability.stress_tolerance + stability.adaptability) / 2) * 0.6;
  const movementVariety = 0.1 + ((open.willingness_to_experiment + open.imagination) / 2) * 0.6;

  // Initiative speeds the agent up; gentleness scales the pace back down.
  const unclampedPace = (0.7 + extra.initiative * 0.6) * (1 - approachGentleness * 0.3);

  return {
    approachGentleness,
    movementPrecision,
    gestureAmplitude,
    motionSmoothness,
    movementVariety,
    paceMultiplier: Math.max(0.3, Math.min(1.5, unclampedPace)),
  };
}
|
|
13816
|
+
/**
 * Build motion-engine constraints from a personality motion style.
 *
 * Always emits a speed (`root_waypoint_2d`) and a gesture (`full_body_joint`)
 * constraint; inserts an `end_effector` approach constraint between them only
 * when the safety envelope declares `min_proximity_m`.
 *
 * @param {object} style - Output of `mapPersonalityToMotionStyle`.
 * @param {object} [safetyEnvelope] - Body-profile safety envelope; when
 *   absent, max linear speed defaults to 2 m/s.
 * @returns {Array<object>} Ordered constraint list.
 */
function generateMotionConstraints(style, safetyEnvelope) {
  const speedCap = safetyEnvelope?.max_linear_speed_m_s ?? 2;

  const speedConstraint = {
    type: "root_waypoint_2d",
    description: "Personality-adjusted movement speed",
    parameters: {
      max_speed: speedCap * style.paceMultiplier,
      smoothing: style.motionSmoothness,
    },
  };

  const gestureConstraint = {
    type: "full_body_joint",
    description: "Personality-adjusted gesture range",
    parameters: {
      amplitude_scale: style.gestureAmplitude,
      precision_scale: style.movementPrecision,
      variety_scale: style.movementVariety,
    },
  };

  if (!safetyEnvelope?.min_proximity_m) {
    return [speedConstraint, gestureConstraint];
  }

  // Gentler personalities keep more distance and approach more slowly.
  const approachConstraint = {
    type: "end_effector",
    description: "Personality-adjusted approach distance",
    parameters: {
      min_distance: safetyEnvelope.min_proximity_m * (1 + style.approachGentleness * 0.5),
      approach_speed_scale: 1 - style.approachGentleness * 0.6,
    },
  };

  return [speedConstraint, approachConstraint, gestureConstraint];
}
|
|
13848
|
+
/**
 * Assemble a text-to-motion request from a prompt and a personality profile.
 *
 * @param {string} prompt - Natural-language motion description.
 * @param {object} bigFive - Personality profile fed to `mapPersonalityToMotionStyle`.
 * @param {object} [options] - Optional `model` (defaults to "G1"),
 *   `safetyEnvelope` (forwarded to `generateMotionConstraints`), and `duration`.
 * @returns {object} Request with prompt, model, style, constraints, duration.
 */
function buildMotionRequest(prompt, bigFive, options) {
  const style = mapPersonalityToMotionStyle(bigFive);
  return {
    prompt,
    model: options?.model ?? "G1",
    style,
    constraints: generateMotionConstraints(style, options?.safetyEnvelope),
    duration: options?.duration,
  };
}
|
|
13859
|
+
|
|
13860
|
+
// src/adapters/neural-action-gate.ts
|
|
13861
|
+
/**
 * Conscience gate for learned controllers (VLA, RL, IL).
 *
 * Sits between a neural policy and the motor layer: every action vector is
 * checked against deny rules (matched on the task description) and a numeric
 * safety envelope (proximity, speed, contact force). On an envelope breach the
 * `mediationMode` decides the outcome: "block" rejects, "clamp" (default)
 * rescales the action, anything else passes it through with a warning reason.
 * Cumulative statistics are kept in `stats`.
 */
var NeuralActionGate = class {
  safetyEnvelope;
  denyRules;
  stats;
  mediationMode;

  /**
   * @param {object} [options] - `safetyEnvelope`, `denyRules`, `mediationMode`
   *   ("block" | "clamp" | anything else = warn). Defaults: {}, [], "clamp".
   */
  constructor(options = {}) {
    this.safetyEnvelope = options.safetyEnvelope ?? {};
    this.denyRules = options.denyRules ?? [];
    this.mediationMode = options.mediationMode ?? "clamp";
    this.stats = { totalEvaluated: 0, allowed: 0, blocked: 0, modified: 0, passRate: 1 };
  }

  /**
   * Evaluate a single action vector before motor execution.
   *
   * @param action - Raw action vector from neural net (joint angles, velocities, etc.)
   * @param context - Current state context for safety checks
   * @returns Evaluation result with allowed/modified action
   */
  evaluate(action, context) {
    this.stats.totalEvaluated++;

    // Deny rules fire on case-insensitive substring matches in the task text.
    const task = context?.taskDescription;
    if (task) {
      const haystack = task.toLowerCase();
      for (const rule of this.denyRules) {
        for (const pattern of rule.patterns ?? [rule.action]) {
          if (haystack.includes(pattern.toLowerCase())) {
            return this.#reject(
              action,
              `Blocked by conscience rule: ${rule.reason || rule.action}`,
              rule.action
            );
          }
        }
      }
    }

    const breaches = this.#collectEnvelopeBreaches(context);
    if (breaches.length === 0) {
      this.stats.allowed++;
      this.updatePassRate();
      return { allowed: true, action, modified: false };
    }

    const summary = breaches.join("; ");
    switch (this.mediationMode) {
      case "block":
        return this.#reject(action, `Safety violation: ${summary}`, "safety_envelope");
      case "clamp": {
        const safeAction = this.clampAction(action, context);
        this.stats.modified++;
        this.stats.allowed++;
        this.updatePassRate();
        return {
          allowed: true,
          action: safeAction,
          modified: true,
          reason: `Clamped to safe range: ${summary}`,
          ruleTriggered: "safety_envelope"
        };
      }
      default:
        // Warn mode: let the unmodified action through, annotated with the breaches.
        this.stats.allowed++;
        this.updatePassRate();
        return {
          allowed: true,
          action,
          modified: false,
          reason: `Warning: ${summary}`,
          ruleTriggered: "safety_envelope"
        };
    }
  }

  /**
   * Evaluate a batch of actions (for trajectory planning).
   */
  evaluateBatch(actions, context) {
    return actions.map((action) => this.evaluate(action, context));
  }

  /**
   * Get gate statistics (defensive copy).
   */
  getStats() {
    return { ...this.stats };
  }

  /**
   * Reset statistics.
   */
  resetStats() {
    this.stats = { totalEvaluated: 0, allowed: 0, blocked: 0, modified: 0, passRate: 1 };
  }

  /**
   * Update safety envelope at runtime (e.g., when entering a new zone).
   * Shallow-merges over the current envelope.
   */
  updateSafetyEnvelope(envelope) {
    this.safetyEnvelope = { ...this.safetyEnvelope, ...envelope };
  }

  /**
   * Add a deny rule at runtime.
   */
  addDenyRule(rule) {
    this.denyRules.push(rule);
  }

  /**
   * Scale the action vector uniformly so speed and contact force fall back
   * inside the envelope. NOTE(review): a proximity breach alone leaves the
   * scale at 1, so the returned vector can be identical to the input.
   */
  clampAction(action, context) {
    const { maxLinearSpeed, maxContactForce } = this.safetyEnvelope;
    let scale = 1;
    if (maxLinearSpeed && context?.currentSpeed && context.currentSpeed > maxLinearSpeed) {
      scale = Math.min(scale, maxLinearSpeed / context.currentSpeed);
    }
    if (maxContactForce && context?.contactForce && context.contactForce > maxContactForce) {
      scale = Math.min(scale, maxContactForce / context.contactForce);
    }
    return action.map((component) => component * scale);
  }

  updatePassRate() {
    const { totalEvaluated, allowed } = this.stats;
    this.stats.passRate = totalEvaluated > 0 ? allowed / totalEvaluated : 1;
  }

  // ── Private ─────────────────────────────────────────────────

  // Record a blocked action and produce the rejection result.
  #reject(action, reason, ruleTriggered) {
    this.stats.blocked++;
    this.updatePassRate();
    return { allowed: false, action, modified: false, reason, ruleTriggered };
  }

  // List human-readable safety-envelope violations for the given context.
  #collectEnvelopeBreaches(context) {
    const env = this.safetyEnvelope;
    const breaches = [];
    if (env.minProximity && context?.humanProximity !== undefined && context.humanProximity < env.minProximity) {
      breaches.push(`Human proximity ${context.humanProximity}m < minimum ${env.minProximity}m`);
    }
    if (env.maxLinearSpeed && context?.currentSpeed !== undefined && context.currentSpeed > env.maxLinearSpeed) {
      breaches.push(`Speed ${context.currentSpeed}m/s > maximum ${env.maxLinearSpeed}m/s`);
    }
    if (env.maxContactForce && context?.contactForce !== undefined && context.contactForce > env.maxContactForce) {
      breaches.push(`Contact force ${context.contactForce}N > maximum ${env.maxContactForce}N`);
    }
    return breaches;
  }
};
|
|
13787
13993
|
export {
|
|
13788
13994
|
ARCHETYPES,
|
|
13789
13995
|
ATTACHMENT_STYLES,
|
|
@@ -13802,6 +14008,7 @@ export {
|
|
|
13802
14008
|
MarketplaceClient,
|
|
13803
14009
|
MemoryLevel,
|
|
13804
14010
|
ModelRouter,
|
|
14011
|
+
NeuralActionGate,
|
|
13805
14012
|
OllamaProvider,
|
|
13806
14013
|
OpenAIProvider,
|
|
13807
14014
|
PROVIDER_PARAMS,
|
|
@@ -13823,6 +14030,7 @@ export {
|
|
|
13823
14030
|
bodySchema,
|
|
13824
14031
|
buildAgentTherapistPrompt,
|
|
13825
14032
|
buildAnonymizedReport,
|
|
14033
|
+
buildMotionRequest,
|
|
13826
14034
|
buildPatientSystemPrompt,
|
|
13827
14035
|
buildReACTContext,
|
|
13828
14036
|
buildReACTFraming,
|
|
@@ -13924,6 +14132,7 @@ export {
|
|
|
13924
14132
|
generateGapRecommendation,
|
|
13925
14133
|
generateIndexMarkdown,
|
|
13926
14134
|
generateMonitoringCertificate,
|
|
14135
|
+
generateMotionConstraints,
|
|
13927
14136
|
generateMutations,
|
|
13928
14137
|
generatePrescriptions,
|
|
13929
14138
|
generateProgressReport,
|
|
@@ -13987,6 +14196,7 @@ export {
|
|
|
13987
14196
|
loadStandard,
|
|
13988
14197
|
loadTranscripts,
|
|
13989
14198
|
loadTreatmentPlan,
|
|
14199
|
+
mapPersonalityToMotionStyle,
|
|
13990
14200
|
memoryNodeSchema,
|
|
13991
14201
|
memoryOperationSchema,
|
|
13992
14202
|
memorySchema,
|
package/registry/bodies/figure-03.body.api
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
{
|
|
2
|
+
"version": "1.0",
|
|
3
|
+
"morphology": "humanoid",
|
|
4
|
+
"modalities": ["gesture", "locomotion", "gaze", "voice", "posture", "manipulation"],
|
|
5
|
+
"safety_envelope": {
|
|
6
|
+
"max_linear_speed_m_s": 1.5,
|
|
7
|
+
"max_angular_speed_rad_s": 2.0,
|
|
8
|
+
"min_proximity_m": 0.5,
|
|
9
|
+
"max_contact_force_n": 50,
|
|
10
|
+
"emergency_stop_decel_m_s2": 6.0,
|
|
11
|
+
"max_reach_m": 0.85,
|
|
12
|
+
"operating_temperature_c": [0, 40]
|
|
13
|
+
},
|
|
14
|
+
"hardware_profile": {
|
|
15
|
+
"oem": "Figure AI",
|
|
16
|
+
"model": "Figure 03",
|
|
17
|
+
"actuator_count": 35,
|
|
18
|
+
"sensors": ["stereo_camera", "depth_camera", "imu", "force_torque", "tactile", "microphone"],
|
|
19
|
+
"compute": "onboard",
|
|
20
|
+
"compute_detail": "dual embedded GPU"
|
|
21
|
+
},
|
|
22
|
+
"motion_engine": {
|
|
23
|
+
"backend": "helix",
|
|
24
|
+
"architecture": "VLA",
|
|
25
|
+
"system2_model": "7B VLM",
|
|
26
|
+
"system2_hz": 9,
|
|
27
|
+
"system1_model": "80M transformer",
|
|
28
|
+
"system1_hz": 200,
|
|
29
|
+
"dof": 35,
|
|
30
|
+
"end_to_end": true
|
|
31
|
+
}
|
|
32
|
+
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
{
|
|
2
|
+
"version": "1.0",
|
|
3
|
+
"morphology": "humanoid",
|
|
4
|
+
"modalities": ["gesture", "locomotion", "gaze", "voice", "posture", "manipulation"],
|
|
5
|
+
"safety_envelope": {
|
|
6
|
+
"max_linear_speed_m_s": 2.0,
|
|
7
|
+
"max_angular_speed_rad_s": 2.0,
|
|
8
|
+
"min_proximity_m": 0.5,
|
|
9
|
+
"max_contact_force_n": 30,
|
|
10
|
+
"emergency_stop_decel_m_s2": 6.0,
|
|
11
|
+
"max_reach_m": 0.55,
|
|
12
|
+
"operating_temperature_c": [-5, 40]
|
|
13
|
+
},
|
|
14
|
+
"hardware_profile": {
|
|
15
|
+
"oem": "Unitree",
|
|
16
|
+
"model": "G1",
|
|
17
|
+
"actuator_count": 43,
|
|
18
|
+
"sensors": ["stereo_camera", "depth_camera", "imu", "force_torque", "microphone", "lidar"],
|
|
19
|
+
"compute": "onboard"
|
|
20
|
+
},
|
|
21
|
+
"motion_engine": {
|
|
22
|
+
"backend": "kimodo",
|
|
23
|
+
"supported_models": ["G1", "SMPL-X", "SOMA"],
|
|
24
|
+
"constraint_types": ["full_body_joint", "root_waypoint_2d", "end_effector"],
|
|
25
|
+
"text_to_motion": true
|
|
26
|
+
}
|
|
27
|
+
}
|