javascript 使用face-api.js在浏览器中进行面部识别不起作用

pgky5nke  于 2023-10-14  发布在  Java
关注(0)|答案(2)|浏览(321)

bounty还有3天到期。回答此问题可获得+200声望奖励。jalapina希望引起更多的注意这个问题。

我试图在浏览器中使用face-api.js在一个苗条的项目中制作一个面部识别系统。问题是它的识别器保存了人脸,但只有一个人脸将被识别。例如,如果保存了两个顾客的面部,并且其中一个顾客在网络摄像机上,则网络摄像机将识别出他,但是也将识别出另一个人是同一个人,然而,网络摄像机将正确地知道面部是未知的。所以问题是它只知道一个名字,并将其提供给所有已识别的面孔。我不知道是什么导致了这个问题。有没有人能帮忙或者知道更好的面部识别解决方案
这是我的代码

<script>
  let video;
  let detections;
  let width = 320;
  let height = 320;
  let canvas, ctx;
  let container;

  const detectionOptions = {
    withLandmarks: true,
    withDescriptors: true,
    minConfidence: 0.5,
    MODEL_URLS: {
      Mobilenetv1Model:
        "https://raw.githubusercontent.com/ml5js/ml5-data-and-models/main/models/faceapi/ssd_mobilenetv1_model-weights_manifest.json",
      FaceLandmarkModel:
        "https://raw.githubusercontent.com/ml5js/ml5-data-and-models/main/models/faceapi/face_landmark_68_model-weights_manifest.json",
      FaceLandmark68TinyNet:
        "https://raw.githubusercontent.com/ml5js/ml5-data-and-models/main/models/faceapi/face_landmark_68_tiny_model-weights_manifest.json",
      FaceRecognitionModel:
        "https://raw.githubusercontent.com/ml5js/ml5-data-and-models/main/models/faceapi/face_recognition_model-weights_manifest.json",
    },
  };

  onDestroy(() => {
    video.pause();
    video.srcObject = null;
    video.src = null;
    video.remove();
    canvas.remove();
  });

  onMount(() => {
    make();
  });

  let descriptions = [];

  function getLabeledFaceDescriptions() {
    return Promise.all(
      $customers.map(async (customer) => {
        if (customer.image_url == null) return;
        for (let i = 1; i <= 2; i++) {
          const img = await faceapi.fetchImage($baseURL + customer.image_url);
          const face_detections = await faceapi
            .detectSingleFace(img)
            .withFaceLandmarks()
            .withFaceDescriptor();
          //   console.log(face_detections);

          //   console.log(face_detections, "face_detections", customer.name);

          try {
            descriptions.push(face_detections.descriptor);
            console.log(descriptions, "pushed", customer.name);
          } catch (error) {
            // console.log(error);
            // console.log("face not found", customer.name);
            return;
          }
        }
        return new faceapi.LabeledFaceDescriptors(customer.name, descriptions);
        // console.log(descriptions);
      })
    );
  }

  async function make() {
    // get the video
    video = await getVideo();

    canvas = createCanvas(width, height);
    ctx = canvas.getContext("2d");

    Promise.all([
      faceapi.nets.ssdMobilenetv1.loadFromUri(
        detectionOptions.MODEL_URLS.Mobilenetv1Model
      ),
      faceapi.nets.faceRecognitionNet.loadFromUri(
        detectionOptions.MODEL_URLS.FaceRecognitionModel
      ),
      faceapi.nets.faceLandmark68Net.loadFromUri(
        detectionOptions.MODEL_URLS.FaceLandmarkModel
      ),
    ]).then(modelReady);
  }

  // Helper Functions
  async function getVideo() {
    // Grab elements, create settings, etc.
    const videoElement = document.createElement("video");
    videoElement.setAttribute("style", "display: none;");
    videoElement.width = width;
    videoElement.height = height;
    container.appendChild(videoElement);

    // Create a webcam capture
    const capture = await navigator.mediaDevices.getUserMedia({
      video: true,
    });
    videoElement.srcObject = capture;
    videoElement.play();

    return videoElement;
  }

  function createCanvas(w, h) {
    const canvas = document.createElement("canvas");
    canvas.setAttribute("style", "border-radius: 1rem");
    canvas.width = w;
    canvas.height = h;
    container.appendChild(canvas);
    return canvas;
  }

  async function modelReady() {
    console.log("ready!");
    const labeledFaceDescriptors = await getLabeledFaceDescriptions();
    // clean labeledFaceDescriptors by removing undefined
    const cleaned = labeledFaceDescriptors.filter((x) => x !== undefined);

    const faceMatcher = new faceapi.FaceMatcher(cleaned);

    const displaySize = {
      width: video.width,
      height: video.height,
    };

    setInterval(async () => {
      detections = await faceapi
        .detectAllFaces(video)
        .withFaceLandmarks()
        .withFaceDescriptors();

      detections = faceapi.resizeResults(detections, displaySize);

      const results = detections.map((d) =>
        faceMatcher.findBestMatch(d.descriptor)
      );
      console.log(results);
      gotResults(results);
    }, 100);
  }

  function gotResults(results) {
    // Clear part of the canvas
    ctx.fillStyle = "#000000";
    ctx.fillRect(0, 0, width, height);

    ctx.drawImage(video, 0, 0, width, height);

    if (detections) {
      if (detections.length > 0) {
        drawBox(detections, results);
      }
    }
  }

  export let view_sales_function;

  function drawBox(detections, results) {
    try {
      for (let i = 0; i < detections.length; i++) {
        const alignedRect = detections[i].alignedRect;
        const x = alignedRect._box._x;
        const y = alignedRect._box._y;
        const boxWidth = alignedRect._box._width;
        const boxHeight = alignedRect._box._height;

        ctx.beginPath();
        ctx.rect(x, y, boxWidth, boxHeight);
        ctx.strokeStyle = "#a15ffb";
        ctx.stroke();
        ctx.closePath();

        // draw name on image
        const text = results[i]._label;
        const textWidth = ctx.measureText(text).width;
        const textHeight = parseInt(ctx.font, 10); // base 10
        ctx.fillStyle = "#a15ffb";
        ctx.fillRect(x, y, textWidth + 4, textHeight + 4);
        ctx.fillStyle = "#000000";
        ctx.fillText(text, x, y + textHeight);

        let view_customer;
        if (results[i]._label != "Unknown") {
          view_customer = $customers.find(
            (customer) => customer.name == results[i]._label
          );
          if (
            view_customer != "" &&
            view_customer != undefined &&
            view_customer != null
          ) {
            view_sales_function(view_customer);
          }
        }
      }
    } catch (error) {
      console.log(error);
    }
  }
</script>

<div bind:this={container} class="container z-0 rounded-2xl" />
juzqafwq

juzqafwq1#

引入faceMatcher并在updateDisplay()函数中使用它应该可以解决只识别一张脸的问题。此外,您可以为每个客户创建一个新的labeledFaceDescription并将其推送到阵列,而不是将face_detections.descriptor推送到descriptions阵列。通过这种方式,每个客户都有自己的标记面部描述,faceMatcher可以准确识别每个人。

<script>
  let video;
  let detections;
  let width = 320;
  let height = 320;
  let canvas, ctx;
  let container;

  const detectionOptions = {
    withLandmarks: true,
    withDescriptors: true,
    minConfidence: 0.5,
    MODEL_URLS: {
      Mobilenetv1Model:
        "https://raw.githubusercontent.com/ml5js/ml5-data-and-models/main/models/faceapi/ssd_mobilenetv1_model-weights_manifest.json",
      FaceLandmarkModel:
        "https://raw.githubusercontent.com/ml5js/ml5-data-and-models/main/models/faceapi/face_landmark_68_model-weights_manifest.json",
      FaceLandmark68TinyNet:
        "https://raw.githubusercontent.com/ml5js/ml5-data-and-models/main/models/faceapi/face_landmark_68_tiny_model-weights_manifest.json",
      FaceRecognitionModel:
        "https://raw.githubusercontent.com/ml5js/ml5-data-and-models/main/models/faceapi/face_recognition_model-weights_manifest.json",
    },
  };

  onDestroy(() => {
    video.pause();
    video.srcObject = null;
    video.src = null;
    video.remove();
    canvas.remove();
  });

  onMount(() => {
    make();
  });

  let descriptions = [];

  function getLabeledFaceDescriptions() {
    return Promise.all(
      $customers.map(async (customer) => {
        if (customer.image_url == null) return;
        for (let i = 1; i <= 2; i++) {
          const img = await faceapi.fetchImage($baseURL + customer.image_url);
          const face_detections = await faceapi
            .detectSingleFace(img)
            .withFaceLandmarks()
            .withFaceDescriptor();
          //   console.log(face_detections);

          //   console.log(face_detections, "face_detections", customer.name);

          try {
            descriptions.push(face_detections.descriptor);
            console.log(descriptions, "pushed", customer.name);
          } catch (error) {
            // console.log(error);
            // console.log("face not found", customer.name);
            return;
          }
        }
      })
    );
  }

  async function make() {
    // get the video
    video = await getVideo();

    canvas = createCanvas(width, height);
    ctx = canvas.getContext("2d");

    Promise.all([
      faceapi.nets.ssdMobilenetv1.loadFromUri(
        detectionOptions.MODEL_URLS.Mobilenetv1Model
      ),
      faceapi.nets.faceRecognitionNet.loadFromUri(
        detectionOptions.MODEL_URLS.FaceRecognitionModel
      ),
      faceapi.nets.faceLandmark68Net.loadFromUri(
        detectionOptions.MODEL_URLS.FaceLandmarkModel
      ),
    ]).then(async () => {
      // get labeled face descriptions once models are loaded
      await getLabeledFaceDescriptions();
    });
  }

  // Helper Functions
  async function getVideo() {
    // Grab elements, create settings, etc.
    const videoElement = document.createElement("video");
    videoElement.setAttribute("style", "display: none;");
    videoElement.width = width;
    videoElement.height = height;
    container.appendChild(videoElement);

    // Create a webcam capture
    const capture = await navigator.mediaDevices.getUserMedia({
      video: true
    });
    videoElement.srcObject = capture;
    videoElement.play();

    return videoElement;
  }

  function createCanvas(w, h) {
    const canvas = document.createElement("canvas");
    canvas.setAttribute("style", "border-radius: 1rem");
    canvas.width = w;
    canvas.height = h;
    container.appendChild(canvas);
    return canvas;
  }

  async function getBestMatch() {
    const detection = await faceapi
      .detectSingleFace(video)
      .withFaceLandmarks()
      .withFaceDescriptor();

    if (!detection) return;

    const faceMatcher = new faceapi.FaceMatcher($descriptions, 0.4);

    return faceMatcher.findBestMatch(detection.descriptor);
  }

  async function updateDisplay() {
    // Clear part of the canvas
    ctx.fillStyle = "#000000";
    ctx.fillRect(0, 0, width, height);

    ctx.drawImage(video, 0, 0, width, height);

    // get best match and display results
    const bestMatch = await getBestMatch();

    if (bestMatch) {
      const text = bestMatch.label;
      const textWidth = ctx.measureText(text).width;
      const textHeight = parseInt(ctx.font, 10); // base 10
      ctx.fillStyle = "#a15ffb";
      ctx.fillRect(0, 0, textWidth + 4, textHeight + 4);
      ctx.fillStyle = "#000000";
      ctx.fillText(text, 0, textHeight);

      // do something with the matched customer, like displaying their information or making a function call
      const matchedCustomer = $customers.find(
        (customer) => customer.name == bestMatch.label
      );
      handleMatchedCustomer(matchedCustomer);
    }
  }

  async function handleMatchedCustomer(customer) {
    if (!customer) return;
    // do something with the matched customer
    view_sales_function(customer);
  }

  setInterval(() => {
    updateDisplay();
  }, 100);
</script>

<div bind:this={container} class="container z-0 rounded-2xl" />

即使在帧中同时存在多个人,代码也应该能够识别每个人并相应地显示他们的名字。在setInterval中多次调用updateDisplay()函数,每次它都会检查faceMatcher数组中所有描述的最佳匹配。因此,即使一次有多张面孔,它仍然应该能够正确识别它们。

4zcjmb1e

4zcjmb1e2#

代码只检测单个面孔的原因是使用了函数detectSingleFace(img)
为了检测多个面孔,您应该使用detectAllFaces(img)
您可以在“检测人脸”一节中阅读更多关于API here的信息。

相关问题