Recognize Faces from WebCam Images on a Website and Retrieve Coordinates of Eyes, Mouth, Nose, and More

Using JavaScript in the browser, this demo performs real-time face recognition on a WebCam feed, retrieves the coordinates of the eyes, mouth, nose, and more, and draws lines over them.

Please allow camera access. The video feed will appear shortly.

Position your face in front of the camera. The system will recognize your face and draw lines over the eyes, mouth, nose, eyebrows, and jawline. Please remove any masks.
(If the camera feed does not display correctly, try reloading the page.)


Libraries Used

We used face-api.js (https://github.com/justadudewhohacks/face-api.js).
License information for face-api.js
MIT License
Copyright (c) 2018 Vincent Mühler
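
To run this demo, the weight files for the two models must be available from the ./models directory referenced in the source below. Assuming the default file names from the weights folder of the face-api.js repository, the directory would look like this:

models/
  tiny_face_detector_model-weights_manifest.json
  tiny_face_detector_model-shard1
  face_landmark_68_tiny_model-weights_manifest.json
  face_landmark_68_tiny_model-shard1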

Source Code



<script src="face-api.min.js"></script>


<div><canvas id="preview"></canvas></div>
<style>
 #preview{
   max-width:100%;
   width:640px;
   height:auto;
 }
</style>


<script>
// Shared state: the off-DOM <video> element, the preview canvas, its 2D
// context, and the camera resolution actually delivered
var video,prev,prev_ctx,prevW,prevH;

// Utility: load an image and resolve with the HTMLImageElement once ready
// (not called by this demo; useful when running detection on a still image)
async function loadImage(src){
  return new Promise(function(resolve,reject){
    let img=new Image();
    img.onload=function(){resolve(img);}
    img.onerror=function(e){reject(e);}
    img.src=src;
  });
}

window.addEventListener('DOMContentLoaded',async function(event){
  await Promise.all([
    // Use TinyFaceDetectorModel
    faceapi.nets.tinyFaceDetector.loadFromUri('./models'),
    // Use faceLandmark68TinyNet
    faceapi.nets.faceLandmark68TinyNet.loadFromUri("./models"),
  ]);

  // Create the <video> element (kept off-DOM; frames are copied to the canvas)
  video=document.createElement('video');
  video.setAttribute("autoplay","");
  video.muted=true; // set the property directly; setAttribute("muted","") alone is not reliable
  video.setAttribute("playsinline","");
  video.onloadedmetadata = function(e){video.play();};
  // Get the preview canvas and its 2D context
  prev=document.getElementById("preview");
  prev_ctx=prev.getContext("2d",{willReadFrequently:true,alpha:false});

  // Display mirrored horizontally
  prev.style.transform="scaleX(-1)";


  // Camera permission dialog will appear
  navigator.mediaDevices.getUserMedia(
    // Microphone off, camera settings: prefer front camera, prefer 640×480
    // Note: "environment" = rear camera, "user" = front camera
    {"audio":false,"video":{"facingMode":"user","width":{"ideal":640},"height":{"ideal":480}}}
  ).then( // When permission is granted
    function(stream){
      video.srcObject = stream;
      // Start scanning after 0.5 seconds
      setTimeout(Scan,500,true);
    }
  ).catch(
    // When permission is denied or no camera is available
    function(err){
      console.error(err); // e.g., NotAllowedError when the user blocks access
    }
  );
});
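
// Note: navigator.mediaDevices.getUserMedia only exists in a secure context,
// so this page must be served over HTTPS (or from localhost); on a plain
// HTTP page navigator.mediaDevices is undefined and the camera cannot start.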

async function Scan(first){
  if(first){
    // One-time setup: the camera's actual resolution is only known once the stream is running
    prevW=video.videoWidth;
    prevH=video.videoHeight;
    // Match the canvas's internal pixel size to the video resolution
    prev.setAttribute("width",prevW);
    prev.setAttribute("height",prevH);
  }

  // Run face detection on the current webcam frame
  const useTinyModel = true;
  const detection = await faceapi.detectAllFaces(
      video ,
      new faceapi.TinyFaceDetectorOptions({
        inputSize:416,      // Frame is resized to this size for detection (must be a multiple of 32); smaller = faster, larger = detects smaller faces
        scoreThreshold:0.5, // Minimum confidence; raise for fewer false positives, lower to catch more faces
      })
  ).withFaceLandmarks(useTinyModel); // true = use faceLandmark68TinyNet

  //prev_ctx.clearRect(0, 0, prevW, prevH); // not needed: drawImage repaints the full canvas
  prev_ctx.drawImage(video,0,0,prevW,prevH);
  for(let i=0;i<detection.length;i++){
    // Jaw outline
    const jawOutline = detection[i].landmarks.getJawOutline();
    drawPath(jawOutline,prev_ctx,2,"#00F");
    // Nose
    const nose = detection[i].landmarks.getNose();
    drawPath(nose,prev_ctx,1,"#0FF");
    // Mouth
    const mouth = detection[i].landmarks.getMouth();
    drawPath(mouth,prev_ctx,1,"#0F0");
    // Left eye
    const leftEye = detection[i].landmarks.getLeftEye();
    drawPath(leftEye,prev_ctx,1,"#F00");
    // Right eye
    const rightEye = detection[i].landmarks.getRightEye();
    drawPath(rightEye,prev_ctx,1,"#F00");
    // Left eyebrow
    const leftEyeBrow = detection[i].landmarks.getLeftEyeBrow();
    drawPath(leftEyeBrow,prev_ctx,1,"#FF0");
    // Right eyebrow
    const rightEyeBrow = detection[i].landmarks.getRightEyeBrow();
    drawPath(rightEyeBrow,prev_ctx,1,"#FF0");
    // Array of center coordinates for left eye, right eye, and mouth (3 points)
    const ref=detection[i].landmarks.getRefPointsForAlignment();
    for(let j=0;j<ref.length;j++){
      prev_ctx.fillStyle="#000";
      prev_ctx.beginPath();
      prev_ctx.arc(ref[j].x,ref[j].y,8,0,Math.PI*2,true);
      prev_ctx.fill();
    }
  }
  setTimeout(Scan,50,false);
}
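
// Design note: Scan re-schedules itself with setTimeout(...,50), so the loop
// runs at most ~20 times per second, and because each pass awaits detection
// first, a slow device simply scans less often instead of piling up work.
// requestAnimationFrame(function(){Scan(false);}) would be an alternative
// way to pace redraws to the display refresh.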

// Draw a polyline connecting an array of {x, y} landmark points
function drawPath(arr,ctx,lineWidth,color){
  ctx.lineWidth=lineWidth;
  ctx.strokeStyle=color;
  ctx.beginPath();
  ctx.moveTo(arr[0].x,arr[0].y);
  for(let i=1;i<arr.length;i++){
    ctx.lineTo(arr[i].x,arr[i].y);
  }
  ctx.stroke();
}
</script>
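
The demo above consumes the landmark coordinates immediately for drawing, but they can just as well be read as plain numbers. Here is a minimal sketch (an addition to this article, reusing the same video element and models loaded above) that logs the raw positions instead of drawing them:

<script>
// Sketch: read the landmark coordinates as data instead of drawing them.
// Assumes the video element and the models set up in the demo above.
async function logLandmarks(){
  const results=await faceapi.detectAllFaces(
      video,
      new faceapi.TinyFaceDetectorOptions({inputSize:416,scoreThreshold:0.5})
  ).withFaceLandmarks(true);
  for(const r of results){
    // landmarks.positions holds all 68 points; each point has x and y
    // in video pixel coordinates
    r.landmarks.positions.forEach(function(p,idx){
      console.log(idx,Math.round(p.x),Math.round(p.y));
    });
  }
}
</script>

The getJawOutline(), getNose(), getMouth(), getLeftEye(), getRightEye(), getLeftEyeBrow() and getRightEyeBrow() helpers used in the demo each return a named subset of these 68 points.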