yootd 0.0.88 → 0.0.89
This diff shows the changes between publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.
- package/dist/video-player/index.d.ts +6 -0
- package/dist/video-player/index.js +106 -79
- package/package.json +1 -1
--- package/dist/video-player/index.d.ts
+++ package/dist/video-player/index.d.ts
@@ -7,6 +7,7 @@ export interface VideoPlayerRef {
     useFaceExpression: (type: 'emotion' | 'character' | 'anchor' | 'pose', use: boolean) => void;
     currentTime: () => number;
 }
+type AIFeature = 'emotion' | 'character' | 'anchor' | 'pose';
 export interface VideoPlayerProps {
     /**
      * Video URL
@@ -44,6 +45,10 @@ export interface VideoPlayerProps {
      * Whether to rotate into fullscreen
      */
     rotateWhenFullScreen?: boolean;
+    /**
+     * List of enabled AI features
+     */
+    aiFeatures?: AIFeature[];
     /**
      * Video poster
      */
@@ -88,3 +93,4 @@ export interface VideoPlayerProps {
     onError?: (error: string) => void;
 }
 export declare const VideoPlayer: React.ForwardRefExoticComponent<VideoPlayerProps & React.RefAttributes<VideoPlayerRef>>;
+export {};
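Taken together, the declaration changes introduce an `AIFeature` union and an optional `aiFeatures` prop that whitelists which AI overlays the player offers. A minimal consumer sketch (assumptions: the package root re-exports `VideoPlayer` and `VideoPlayerRef`, which this diff does not confirm, and any other required props are omitted):

import React, { useRef } from 'react';
// Assumption: import path; only dist/video-player is visible in this diff.
import { VideoPlayer, VideoPlayerRef } from 'yootd';

export function Demo(): JSX.Element {
  const playerRef = useRef<VideoPlayerRef>(null);
  return (
    // Only the emotion and pose rows will appear in the settings panel;
    // omitting aiFeatures enables all four features (see the index.js diff below).
    <VideoPlayer ref={playerRef} aiFeatures={['emotion', 'pose']} />
  );
}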
--- package/dist/video-player/index.js
+++ package/dist/video-player/index.js
@@ -43,6 +43,8 @@ export var VideoPlayer = /*#__PURE__*/forwardRef(function (_ref, ref) {
     autoPlay = _ref.autoPlay,
     muted = _ref.muted,
     style = _ref.style,
+    _ref$aiFeatures = _ref.aiFeatures,
+    aiFeatures = _ref$aiFeatures === void 0 ? ['emotion', 'character', 'anchor', 'pose'] : _ref$aiFeatures,
     _ref$retry = _ref.retry,
     retry = _ref$retry === void 0 ? false : _ref$retry,
     _ref$retryInterval = _ref.retryInterval,
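The `_ref$aiFeatures` pair is Babel's output for a destructuring default: when the prop is omitted (or explicitly `undefined`), every feature stays enabled, preserving the 0.0.88 behavior. A source-level sketch of the same logic:

type AIFeature = 'emotion' | 'character' | 'anchor' | 'pose';

// Mirrors `aiFeatures = _ref$aiFeatures === void 0 ? [...] : _ref$aiFeatures` above:
// only undefined (not null) falls back to the full feature list.
function resolveAiFeatures(props: { aiFeatures?: AIFeature[] }): AIFeature[] {
  const { aiFeatures = ['emotion', 'character', 'anchor', 'pose'] } = props;
  return aiFeatures;
}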
@@ -125,49 +127,53 @@ export var VideoPlayer = /*#__PURE__*/forwardRef(function (_ref, ref) {
     _useState28 = _slicedToArray(_useState27, 2),
     showSettingPanel = _useState28[0],
     setShowSettingPanel = _useState28[1];
-  var _useState29 = useState({
+  var _useState29 = useState(false),
+    _useState30 = _slicedToArray(_useState29, 2),
+    tfInitialized = _useState30[0],
+    setTfInitialized = _useState30[1];
+  var _useState31 = useState({
       emotion: false,
       character: false,
       anchor: false,
       pose: false
     }),
-    _useState30 = _slicedToArray(_useState29, 2),
-    aiFeature = _useState30[0],
-    setAIFeature = _useState30[1];
-  var _useState31 = useState(null),
     _useState32 = _slicedToArray(_useState31, 2),
-    options = _useState32[0],
-    setOptions = _useState32[1];
-  var _useState33 = useState(false),
+    aiFeature = _useState32[0],
+    setAIFeature = _useState32[1];
+  var _useState33 = useState(null),
     _useState34 = _slicedToArray(_useState33, 2),
-    faceApiInited = _useState34[0],
-    setFaceApiInited = _useState34[1];
-  var _useState35 = useState({
-      width: 0,
-      height: 0
-    }),
+    options = _useState34[0],
+    setOptions = _useState34[1];
+  var _useState35 = useState(false),
     _useState36 = _slicedToArray(_useState35, 2),
-    canvasSize = _useState36[0],
-    setCanvasSize = _useState36[1];
+    faceApiInited = _useState36[0],
+    setFaceApiInited = _useState36[1];
   var _useState37 = useState({
       width: 0,
       height: 0
     }),
     _useState38 = _slicedToArray(_useState37, 2),
-    videoEleSize = _useState38[0],
-    setVideoEleSize = _useState38[1];
-  var _useState39 = useState(false),
+    canvasSize = _useState38[0],
+    setCanvasSize = _useState38[1];
+  var _useState39 = useState({
+      width: 0,
+      height: 0
+    }),
     _useState40 = _slicedToArray(_useState39, 2),
-    isSeeking = _useState40[0],
-    setIsSeeking = _useState40[1];
+    videoEleSize = _useState40[0],
+    setVideoEleSize = _useState40[1];
   var _useState41 = useState(false),
     _useState42 = _slicedToArray(_useState41, 2),
-    isError = _useState42[0],
-    setIsError = _useState42[1];
-  var _useState43 = useState(),
+    isSeeking = _useState42[0],
+    setIsSeeking = _useState42[1];
+  var _useState43 = useState(false),
     _useState44 = _slicedToArray(_useState43, 2),
-    features = _useState44[0],
-    setFeatures = _useState44[1];
+    isError = _useState44[0],
+    setIsError = _useState44[1];
+  var _useState45 = useState(),
+    _useState46 = _slicedToArray(_useState45, 2),
+    features = _useState46[0],
+    setFeatures = _useState46[1];
   useImperativeHandle(ref, function () {
     return {
       play: function play() {
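Despite its size, this hunk adds exactly one piece of state. Declaring `tfInitialized` ahead of the existing hooks shifts every later `_useStateN` temporary by two, so Babel re-emits otherwise unchanged declarations (`aiFeature`, `options`, `faceApiInited`, `canvasSize`, `videoEleSize`, `isSeeking`, `isError`, `features`). At the source level, the change is roughly just:

import { useState } from 'react';

// Reconstruction of the single new hook: tracks whether @tensorflow/tfjs has
// been imported and its backend selected (see the rewritten effect below).
function useTfInitializedFlag() {
  const [tfInitialized, setTfInitialized] = useState(false);
  return { tfInitialized, setTfInitialized };
}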
@@ -636,67 +642,87 @@ export var VideoPlayer = /*#__PURE__*/forwardRef(function (_ref, ref) {
   useEffect(function () {
     var loaded = true;
     _asyncToGenerator( /*#__PURE__*/_regeneratorRuntime().mark(function _callee2() {
-      var faceapi, posedetection, …
+      var tf, faceapi, posedetection, detectorConfig, _poseDetector;
       return _regeneratorRuntime().wrap(function _callee2$(_context2) {
         while (1) switch (_context2.prev = _context2.next) {
           case 0:
-            …
-            …
+            if (loaded) {
+              _context2.next = 2;
+              break;
+            }
+            return _context2.abrupt("return");
           case 2:
-            …
-            …
-            …
-          case 5:
-            posedetection = _context2.sent;
-            _context2.next = 8;
-            return import('@tensorflow/tfjs');
-          case 8:
-            tf = _context2.sent;
-            console.log('tf', tf.ready);
-            if (!(loaded && !faceApiInited && (aiFeature.emotion || aiFeature.character || aiFeature.anchor))) {
-              _context2.next = 34;
+            _context2.prev = 2;
+            if (!((aiFeature.anchor || aiFeature.emotion || aiFeature.character || aiFeature.pose) && !tfInitialized)) {
+              _context2.next = 12;
               break;
             }
-            _context2.…
-            …
+            _context2.next = 6;
+            return import('@tensorflow/tfjs');
+          case 6:
+            tf = _context2.sent;
+            _context2.next = 9;
             return tf.ready();
-          case …
-            _context2.next = …
+          case 9:
+            _context2.next = 11;
             return tf.setBackend('webgl');
-          case …
-            …
+          case 11:
+            setTfInitialized(true);
+          case 12:
+            _context2.next = 17;
+            break;
+          case 14:
+            _context2.prev = 14;
+            _context2.t0 = _context2["catch"](2);
+            console.error('tensorflow failed to load: ', _context2.t0);
+          case 17:
+            _context2.prev = 17;
+            if (!((aiFeature.anchor || aiFeature.emotion || aiFeature.character) && !faceApiInited)) {
+              _context2.next = 35;
+              break;
+            }
+            _context2.next = 21;
+            return import('@vladmandic/face-api');
+          case 21:
+            faceapi = _context2.sent;
+            _context2.next = 24;
             return faceapi.nets.ssdMobilenetv1.load("".concat(OSS, "/model-zoo/face-api-models/model"));
-          case …
-            _context2.next = …
+          case 24:
+            _context2.next = 26;
             return faceapi.nets.ageGenderNet.load("".concat(OSS, "/face-api-models/model"));
-          case …
-            _context2.next = …
+          case 26:
+            _context2.next = 28;
             return faceapi.nets.faceLandmark68Net.load("".concat(OSS, "/model-zoo/face-api-models/model"));
-          case …
-            _context2.next = …
+          case 28:
+            _context2.next = 30;
             return faceapi.nets.faceRecognitionNet.load("".concat(OSS, "/model-zoo/face-api-models/model"));
-          case …
-            _context2.next = …
+          case 30:
+            _context2.next = 32;
             return faceapi.nets.faceExpressionNet.load("".concat(OSS, "/model-zoo/face-api-models/model"));
-          case …
+          case 32:
            setOptions(new faceapi.SsdMobilenetv1Options({
              minConfidence: MIN_SCORE,
              maxResults: MAX_RESULTS
            }));
            detectAllFacesRef.current = faceapi.detectAllFaces;
            setFaceApiInited(true);
-            …
+          case 35:
+            _context2.next = 40;
            break;
-          case …
-            _context2.prev = …
-            _context2.…
-            console.error(_context2.…
-          case …
+          case 37:
+            _context2.prev = 37;
+            _context2.t1 = _context2["catch"](17);
+            console.error('faceapi failed to load: ', _context2.t1);
+          case 40:
+            _context2.prev = 40;
            if (!(aiFeature.pose && poseDetector == null)) {
-              _context2.next = …
+              _context2.next = 51;
              break;
            }
-            _context2.…
+            _context2.next = 44;
+            return import('@tensorflow-models/pose-detection');
+          case 44:
+            posedetection = _context2.sent;
            detectorConfig = {
              modelType: posedetection.movenet.modelType.MULTIPOSE_LIGHTNING,
              enableTracking: true,
@@ -704,28 +730,29 @@ export var VideoPlayer = /*#__PURE__*/forwardRef(function (_ref, ref) {
              scoreThreshold: 0.3,
              modelUrl: "".concat(OSS, "/model-zoo/pose-detection-models/model.json")
            };
-            _context2.next = …
+            _context2.next = 48;
            return posedetection.createDetector(posedetection.SupportedModels.MoveNet, detectorConfig);
-          case …
+          case 48:
            _poseDetector = _context2.sent;
            getAdjacentPairsRef.current = posedetection.util.getAdjacentPairs;
            setPoseDetector(_poseDetector);
-            …
+          case 51:
+            _context2.next = 56;
            break;
-          case …
-            _context2.prev = …
-            _context2.…
-            console.error(_context2.…
-          case …
+          case 53:
+            _context2.prev = 53;
+            _context2.t2 = _context2["catch"](40);
+            console.error('pose detection failed to load: ', _context2.t2);
+          case 56:
          case "end":
            return _context2.stop();
        }
-      }, _callee2, null, [[…
+      }, _callee2, null, [[2, 14], [17, 37], [40, 53]]);
    }))();
    return function () {
      loaded = false;
    };
-  }, [aiFeature, faceApiInited, poseDetector]);
+  }, [aiFeature, faceApiInited, poseDetector, tfInitialized]);
  useEffect(function () {
    var player = null;
    if (isLive && videoRef.current != null) {
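The trailing `[[2, 14], [17, 37], [40, 53]]` is regenerator's try-locations table: each pair names the case label where a protected region begins and the label of its catch handler. It is the compiled form of three sibling try/catch blocks, one per loading stage, so a failure in one stage is logged and the next stage is still attempted. The pose stage decompiles to roughly the following (a reconstruction; one detector-config property falls between the two hunks and is not visible in this diff):

// Stage 3: MoveNet pose detector (cases 40..53 above).
try {
  if (aiFeature.pose && poseDetector == null) {
    const posedetection = await import('@tensorflow-models/pose-detection');
    const detector = await posedetection.createDetector(posedetection.SupportedModels.MoveNet, {
      modelType: posedetection.movenet.modelType.MULTIPOSE_LIGHTNING,
      enableTracking: true,
      // one config line between the hunks is not shown in this diff
      scoreThreshold: 0.3,
      modelUrl: `${OSS}/model-zoo/pose-detection-models/model.json`,
    });
    getAdjacentPairsRef.current = posedetection.util.getAdjacentPairs;
    setPoseDetector(detector);
  }
} catch (e) {
  console.error('pose detection failed to load: ', e);
}

Adding `tfInitialized` to the dependency array keeps the effect consistent with the new guard; the `!tfInitialized`, `!faceApiInited`, and `poseDetector == null` checks make any re-run idempotent.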
@@ -1053,7 +1080,7 @@ export var VideoPlayer = /*#__PURE__*/forwardRef(function (_ref, ref) {
        onClick: function onClick(e) {
          return e.stopPropagation();
        }
-      }, /*#__PURE__*/React.createElement("div", {
+      }, aiFeatures !== null && aiFeatures !== void 0 && aiFeatures.includes('emotion') ? /*#__PURE__*/React.createElement("div", {
        className: "".concat(bem.b('container').e('controls-setting').e('panel').e('item'))
      }, /*#__PURE__*/React.createElement("div", null, "\u9762\u90E8\u8868\u60C5"), /*#__PURE__*/React.createElement("input", {
        onClick: function onClick(e) {
@@ -1069,7 +1096,7 @@ export var VideoPlayer = /*#__PURE__*/forwardRef(function (_ref, ref) {
          });
        });
      }
-      })), /*#__PURE__*/React.createElement("div", {
+      })) : null, aiFeatures !== null && aiFeatures !== void 0 && aiFeatures.includes('character') ? /*#__PURE__*/React.createElement("div", {
        className: "".concat(bem.b('container').e('controls-setting').e('panel').e('item'))
      }, /*#__PURE__*/React.createElement("div", null, "\u9762\u90E8\u7279\u5F81"), /*#__PURE__*/React.createElement("input", {
        onClick: function onClick(e) {
@@ -1085,7 +1112,7 @@ export var VideoPlayer = /*#__PURE__*/forwardRef(function (_ref, ref) {
          });
        });
      }
-      })), /*#__PURE__*/React.createElement("div", {
+      })) : null, aiFeatures !== null && aiFeatures !== void 0 && aiFeatures.includes('anchor') ? /*#__PURE__*/React.createElement("div", {
        className: "".concat(bem.b('container').e('controls-setting').e('panel').e('item'))
      }, /*#__PURE__*/React.createElement("div", null, "\u9762\u90E8\u951A\u70B9"), /*#__PURE__*/React.createElement("input", {
        onClick: function onClick(e) {
@@ -1101,7 +1128,7 @@ export var VideoPlayer = /*#__PURE__*/forwardRef(function (_ref, ref) {
          });
        });
      }
-      })), /*#__PURE__*/React.createElement("div", {
+      })) : null, aiFeatures !== null && aiFeatures !== void 0 && aiFeatures.includes('pose') ? /*#__PURE__*/React.createElement("div", {
        className: "".concat(bem.b('container').e('controls-setting').e('panel').e('item'))
      }, /*#__PURE__*/React.createElement("div", null, "\u80A2\u4F53\u7EBF\u6761"), /*#__PURE__*/React.createElement("input", {
        onClick: function onClick(e) {
@@ -1117,7 +1144,7 @@ export var VideoPlayer = /*#__PURE__*/forwardRef(function (_ref, ref) {
          });
        });
      }
-      }))) : null), !isFullScreen ? /*#__PURE__*/React.createElement("svg", {
+      })) : null) : null), !isFullScreen ? /*#__PURE__*/React.createElement("svg", {
      className: "".concat(bem.b('container').e('controls-full-screen')),
      onClick: handleFullScreen,
      viewBox: "0 0 1024 1024",
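In these last four hunks, each row of the settings panel is wrapped in a membership test on `aiFeatures`; the repeated `aiFeatures !== null && aiFeatures !== void 0 && aiFeatures.includes(...)` guards are the compiled form of optional chaining. A source-level sketch of the pattern (a reconstruction; the real rows render a label and a checkbox, with `bem`-derived class names and click handlers simplified away):

import React from 'react';

type AIFeature = 'emotion' | 'character' | 'anchor' | 'pose';

// The four panel rows are identical except for the feature key and label:
// facial expression, facial features, face anchor points, body lines.
const ROWS: Array<[AIFeature, string]> = [
  ['emotion', 'Facial expression'],
  ['character', 'Facial features'],
  ['anchor', 'Face anchor points'],
  ['pose', 'Body lines'],
];

function SettingRows({ aiFeatures }: { aiFeatures?: AIFeature[] }): JSX.Element {
  return (
    <>
      {ROWS.map(([key, label]) =>
        // `aiFeatures?.includes(key)` compiles to the null / void 0 guards above.
        aiFeatures?.includes(key) ? <div key={key}>{label}</div> : null
      )}
    </>
  );
}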