/**
 * Options forwarded to `Pose.setOptions()`.
 *
 * NOTE(review): only `smoothLandmarks`, `minDetectionConfidence` and
 * `minTrackingConfidence` are actual `@mediapipe/pose` (legacy Solutions API)
 * options here. `runningMode` is a MediaPipe *Tasks* API concept,
 * `maxNumFaces` belongs to FaceMesh, and `numPoses` is not a Pose option —
 * Pose silently ignores unknown keys, so they are harmless but misleading.
 * All fields stay optional for backward compatibility with existing call
 * sites; the genuine Pose options `modelComplexity` and `selfieMode` were
 * added so callers can tune the real detector.
 */
interface CustomOptions {
  /** Not a `@mediapipe/pose` option (Tasks-API name); ignored by Pose. */
  runningMode?: string;
  /** Not a `@mediapipe/pose` option; ignored by Pose. */
  numPoses?: number;
  /** Temporally smooth landmark positions across video frames. */
  smoothLandmarks?: boolean;
  /** FaceMesh option, not a Pose option; ignored by Pose. */
  maxNumFaces?: number;
  /** Minimum confidence [0, 1] for person detection to count as successful. */
  minDetectionConfidence?: number;
  /** Minimum confidence [0, 1] for tracking; below this, detection re-runs. */
  minTrackingConfidence?: number;
  /** Landmark model complexity: 0, 1 or 2 (higher = more accurate, slower). */
  modelComplexity?: number;
  /** Flip the input horizontally (front-camera / mirror view). */
  selfieMode?: boolean;
}
/**
 * Full-screen pose-detection view: streams the webcam into MediaPipe Pose
 * and paints each frame plus the detected landmarks/skeleton onto a canvas
 * overlaid on the video.
 *
 * Fixes vs. the original:
 *  - the `cam.Camera` instance is now stopped in the effect cleanup (it was
 *    leaked; only the Pose instance was closed),
 *  - an unused pixel-scaled copy of the landmarks was removed
 *    (drawLandmarks/drawConnectors expect NORMALIZED 0..1 coordinates and
 *    scale to the canvas themselves),
 *  - the canvas zIndex is raised above the webcam's so the overlay is
 *    reliably on top regardless of WebView stacking behavior — the likely
 *    reason landmarks were invisible on the Android device.
 */
const UserPose: React.FC = () => {
  const webcamRef = useRef<Webcam>(null);
  const canvasRef = useRef<HTMLCanvasElement>(null);
  const poseRef = useRef<Pose>();
  const landMarkRef = useRef<HTMLDivElement>(null);

  useEffect(() => {
    // Captured by the cleanup closure so the frame pump can be stopped.
    let camera: cam.Camera | undefined;

    const initializePoseDetection = async () => {
      const pose = new Pose({
        // Load WASM/model assets from the CDN at a pinned version.
        locateFile: (file: string) => {
          return `https://cdn.jsdelivr.net/npm/@mediapipe/[email protected]/${file}`;
        },
      });

      const options: CustomOptions = {
        smoothLandmarks: true,
        // NOTE(review): "runningMode" and "maxNumFaces" are not
        // @mediapipe/pose options (see CustomOptions); Pose ignores
        // unknown keys, so they are kept only for compatibility.
        runningMode: "VIDEO",
        maxNumFaces: 1,
        minDetectionConfidence: 0.5,
        minTrackingConfidence: 0.5,
      };
      pose.setOptions(options);
      pose.onResults(onResults);
      poseRef.current = pose;

      // Pump webcam frames into the detector.
      if (webcamRef.current && webcamRef.current.video) {
        camera = new cam.Camera(webcamRef.current.video, {
          onFrame: async () => {
            if (webcamRef.current && webcamRef.current.video) {
              await pose.send({ image: webcamRef.current.video });
            }
          },
          width: 1280,
          height: 720,
        });
        camera.start();
      }
    };

    initializePoseDetection();

    return () => {
      // Stop frame delivery first, then release the detector.
      camera?.stop();
      if (poseRef.current) {
        poseRef.current.close();
      }
    };
  }, []);

  /**
   * Per-frame results callback: mirrors the video frame onto the canvas and
   * overlays the detected pose landmarks and skeleton connectors.
   */
  function onResults(results: any) {
    const canvasElement = canvasRef.current;
    if (!canvasElement) return;
    const canvasCtx = canvasElement.getContext("2d");
    if (!canvasCtx) return;

    // Match the canvas backing store to the frame size so normalized
    // landmark coordinates map 1:1 onto the drawn frame.
    canvasElement.width = results.image.width;
    canvasElement.height = results.image.height;

    // Draw the current video frame as the background.
    canvasCtx.drawImage(
      results.image,
      0,
      0,
      canvasElement.width,
      canvasElement.height
    );

    if (results.poseLandmarks && results.poseLandmarks.length > 0) {
      // Pass the NORMALIZED landmarks straight through — the drawing
      // utilities scale them to the canvas internally. (The original also
      // built a pixel-scaled copy that was never used; removed.)
      drawLandmarks(canvasCtx, results.poseLandmarks, {
        color: "red",
        lineWidth: 2,
        radius: 3,
      });
      drawConnectors(canvasCtx, results.poseLandmarks, mediapipePose.POSE_CONNECTIONS, {
        color: "#3240CF",
        lineWidth: 2,
      });
    }
  }

  return (
    <div className="App">
      <Webcam
        ref={webcamRef}
        style={{
          position: "absolute",
          marginLeft: "auto",
          marginRight: "auto",
          left: 0,
          right: 0,
          textAlign: "center",
          zIndex: 9,
          width: "100%",
          height: "100%",
          // transform: "rotate(90deg)"
        }}
      />
      <canvas
        ref={canvasRef}
        style={{
          position: "absolute",
          marginLeft: "auto",
          marginRight: "auto",
          left: 0,
          right: 0,
          textAlign: "center",
          // Strictly above the webcam (9) so the overlay always renders on
          // top — equal z-indices left stacking to DOM order, which some
          // WebViews resolve differently than desktop browsers.
          zIndex: 10,
          width: "100%",
          height: "100%",
        }}
      ></canvas>
      <div ref={landMarkRef}></div>
    </div>
  );
};
This is the code that works on the web. According to the official MediaPipe documentation you are supposed to add a dependency to your build.gradle, which I did, yet I can't get this code to work on the emulator. It runs on the emulator, but for some reason I cannot see the landmark points and the connectors on my phone screen. Does anyone know how to fix it?