Detecting Faces and Estimating 468 3D Face Landmarks from WebCam Images on a Website — Using Google MediaPipe/face_mesh


This demo uses Google MediaPipe/face_mesh.js and JavaScript to detect faces in a USB webcam feed, estimate 468 3D face landmarks, and display them.

After you grant camera access, the video feed will appear shortly.

Please position your face within the camera view. The system will estimate and display all 468 3D face landmarks.
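
Each landmark in the result has normalized coordinates: x and y range from 0 to 1 relative to the image width and height, and z is a relative depth on roughly the same scale as x, with smaller values closer to the camera. As a minimal sketch, here is a simplified onResults callback that logs one point instead of drawing (index 1, commonly cited as the nose tip, is used purely for illustration):

function onResults(results) {
  if (!results.multiFaceLandmarks || results.multiFaceLandmarks.length === 0) return;
  const landmarks = results.multiFaceLandmarks[0];  // 468 points for the first face
  const noseTip = landmarks[1];                     // index 1: nose tip (illustrative)
  console.log(noseTip.x * 640, noseTip.y * 480, noseTip.z);  // x/y scaled to pixels
}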

Source code



<!DOCTYPE html>
<html lang="ja">
<head>
  <meta charset="UTF-8">
  <title>MediaPipe FaceMesh demo</title>
  <meta name="viewport" content="width=device-width, initial-scale=1.0, minimum-scale=0.5, maximum-scale=2.0, user-scalable=yes">
  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js" crossorigin="anonymous"></script>
  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/control_utils/control_utils.js" crossorigin="anonymous"></script>
  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/face_mesh.js" crossorigin="anonymous"></script>

  <script type="module">
    let video, can, ctx;  // video element, canvas, and its 2D drawing context
    window.addEventListener('load', async function(event){
      video = document.querySelector('.video');
      can = document.querySelector('.can');
      ctx = can.getContext('2d');

      const faceMesh = new FaceMesh({locateFile: (file) => {
        return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
      }});
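      // Limit processing to a single face; the thresholds are the minimum
      // confidence (0.0-1.0) for initial detection and frame-to-frame tracking.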
      faceMesh.setOptions({
        maxNumFaces: 1,
        minDetectionConfidence: 0.5,
        minTrackingConfidence: 0.5
      });
      faceMesh.onResults(onResults);

      // camera_utils opens the webcam with getUserMedia and calls onFrame for
      // every video frame; each frame is forwarded to FaceMesh for processing.
      const camera = new Camera(video, {
        onFrame: async function(){
          await faceMesh.send({image: video});
        },
        width: 640,
        height: 480
      });
      camera.start();
    });

    // Called by FaceMesh once per processed frame with the detection results.
    function onResults(results) {
      ctx.save();
      ctx.clearRect(0, 0, can.width, can.height);
      ctx.drawImage(results.image, 0, 0, can.width, can.height);
      if (results.multiFaceLandmarks) {
        for (const landmarks of results.multiFaceLandmarks) {
          // Display the detailed face mesh
          drawConnectors(ctx, landmarks, FACEMESH_TESSELATION, {
            color: '#CCC',
            lineWidth: 1
          });
          // Display the face oval
          drawConnectors(ctx, landmarks, FACEMESH_FACE_OVAL, {
            color: '#DDD',
            lineWidth: 2
          });
          // Display face contours (including eye contours)
          // drawConnectors(ctx, landmarks, FACEMESH_CONTOURS, {color:'#FFF', lineWidth:2});
          // Right eye
          drawConnectors(ctx, landmarks, FACEMESH_RIGHT_EYE, {
            color: '#F00',
            lineWidth: 1
          });
          // Right eyebrow
          drawConnectors(ctx, landmarks, FACEMESH_RIGHT_EYEBROW, {
            color: '#F44',
            lineWidth: 2
          });
          // Left eye
          drawConnectors(ctx, landmarks, FACEMESH_LEFT_EYE, {
            color: '#00F',
            lineWidth: 1
          });
          // Left eyebrow
          drawConnectors(ctx, landmarks, FACEMESH_LEFT_EYEBROW, {
            color: '#44F',
            lineWidth: 2
          });
          // Lips
          drawConnectors(ctx, landmarks, FACEMESH_LIPS, {
            color: '#0F0',
            lineWidth: 1
          });
        }
      }
      ctx.restore();
    }
  </script>
</head>
<body>
  <div class="container">
    <video class="video" style="display:none;"></video>
    <canvas class="can" width="640" height="480" style="transform:scale(-1,1);width:90%;max-width:640px;height:auto;"></canvas>
  </div>
</body>
</html>
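
The demo draws only connector lines, but drawing_utils also exports drawLandmarks, which plots the raw points themselves. As a minimal sketch, the following could be added inside the for loop in onResults to overlay all 468 landmarks as dots (the color and radius values are illustrative choices):

          // Plot every landmark of the current face as a small dot.
          drawLandmarks(ctx, landmarks, {
            color: '#FF0',
            lineWidth: 1,
            radius: 1
          });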