I am trying to use face-api.js inside a Firebase Cloud Function, but I keep getting an error that I can't make sense of. I have also tried passing it a base64 string, but that fails with errors as well. Is there a way to make this work, or does face-api.js only work with HTML tags (elements in the browser)?
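Roughly, my base64 attempt looked like the sketch below. It is simplified, and base64String is just a placeholder for the string I read from the Firestore document, so it may well not be the right way to feed face-api.js:

// Simplified sketch of the base64 attempt (runs inside the onCreate handler)
const tf = require('@tensorflow/tfjs-node');
const faceapi = require('face-api.js');
// base64String is a placeholder for the string stored in the Firestore document
const imgBuffer = Buffer.from(base64String, 'base64');
// Decode the image bytes into a 3-channel tf.Tensor3D
const imgTensor = tf.node.decodeImage(imgBuffer, 3);
// Pass the tensor to face-api.js (the error message lists tf.Tensor3D as an accepted input)
const detections = await faceapi
  .detectAllFaces(imgTensor)
  .withFaceLandmarks()
  .withFaceDescriptors();
imgTensor.dispose();

That attempt fails too, so here is the full Cloud Function I am currently running: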
/* eslint-disable */
const functions = require('firebase-functions');
const admin = require('firebase-admin');
const fs = require('fs');
const fetch = require('node-fetch');
const { createCanvas, Image } = require('canvas');
// Load the TensorFlow.js Node backend and face-api.js
require('@tensorflow/tfjs-node');
const faceapi = require('face-api.js');
// Initialize Firebase Admin SDK
admin.initializeApp();
exports.performFacialRecognitionOnCreate = functions.firestore
  .document('Access/{docId}')
  .onCreate(async (snapshot, context) => {
    try {
      // Get the URL of the image file from the 'picUrl' field of the newly created document
      const picUrl = snapshot.data().picUrl;
      // Load the models and weights for face detection and recognition using face-api.js
      await Promise.all([
        faceapi.nets.ssdMobilenetv1.loadFromDisk('./assets/models'),
        faceapi.nets.faceLandmark68Net.loadFromDisk('./assets/models'),
        faceapi.nets.faceRecognitionNet.loadFromDisk('./assets/models')
      ]);
      // Fetch the image data from the provided URL using the node-fetch package
      const response = await fetch(picUrl);
      // Read the downloaded image data into a Buffer
      const imgBuffer = await response.buffer();
      // Save the downloaded image as a JPEG file with fs.writeFileSync()
      fs.writeFileSync('/tmp/image.jpg', imgBuffer);
      // Create an Image instance from the canvas package (stands in for an HTMLImageElement)
      const imgElm = new Image();
      // Set the source of the image to the saved JPEG file
      imgElm.src = '/tmp/image.jpg';
      const canvas = createCanvas(imgElm.width, imgElm.height);
      const ctx = canvas.getContext('2d');
      ctx.drawImage(imgElm, 0, 0);
      // Run face detection on the input image using face-api.js
      const detectedFaces =
        await faceapi.detectAllFaces(canvas).withFaceLandmarks().withFaceDescriptors();
      if (detectedFaces.length === 0) {
        console.log('No faces detected in the input image.');
        // Update the 'status' field of the newly created document to 'notFound'
        await snapshot.ref.update({ status: "notFound" });
        return null;
      }
      console.log(`Found ${detectedFaces.length} face(s) in the input image.`);
      // Get a reference to the 'DB' collection
      const dbCollectionRef = admin.firestore().collection('DB');
      // Process each detected face and compare it with every document in the 'DB' collection
      for (const detectedFace of detectedFaces) {
        // dbCollectionRef.get() resolves to a QuerySnapshot, so iterate over its .docs array
        for (const docSnapshot of (await dbCollectionRef.get()).docs) {
          const docData = docSnapshot.data();
          // Skip documents that don't have an 'imageLink' field or are already matched
          if (!docData.imageLink || docData.status === "found") continue;
          // Fetch the stored image data from the Firebase Storage URL in 'imageLink'
          const response2 = await fetch(docData.imageLink);
          // Read the downloaded existing image data into a Buffer
          const existingImgBuffer = await response2.buffer();
          // Save the downloaded existing image as a JPEG file with fs.writeFileSync()
          fs.writeFileSync('/tmp/existingImage.jpg', existingImgBuffer);
          // Create an Image instance from the canvas package for the existing image
          const existingImgElm = new Image();
          // Set the source of the image to the saved JPEG file
          existingImgElm.src = '/tmp/existingImage.jpg';
          const existingCanvas = createCanvas(existingImgElm.width, existingImgElm.height);
          const ctxExisting = existingCanvas.getContext('2d');
          ctxExisting.drawImage(existingImgElm, 0, 0);
          // The descriptor of the current detected face was already computed by withFaceDescriptors()
          const queryFaceDescriptor = detectedFace.descriptor;
          // Detect the face in the existing image and compute its descriptor
          const results =
            await faceapi.detectSingleFace(existingCanvas).withFaceLandmarks().withFaceDescriptor();
          if (!results) continue;
          // Compare the query descriptor with the descriptor from the 'DB' image
          const distance = faceapi.euclideanDistance(queryFaceDescriptor, results.descriptor);
          // If a match is found, update the 'status' field of the newly created document to 'found'
          if (distance < 0.6) {
            console.log('Match found!');
            // Get the corresponding Uid from the matched document in the 'DB' collection
            const uid = docSnapshot.id;
            // Update the 'status' field and store the matched Uid in the newly created document
            await snapshot.ref.update({ status: "found", Uid: uid });
            return null; // Stop further processing as a match is found
          }
        }
      }
      console.log('No matching faces found.');
      // Update the 'status' field of the newly created document to 'notFound'
      await snapshot.ref.update({ status: "notFound" });
      return null;
    } catch (error) {
      console.error('Error during facial recognition:', error);
      throw error;
    }
  });
The error output is:
Error: toNetInput - expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id
at /workspace/node_modules/face-api.js/build/commonjs/dom/toNetInput.js:38:35
at Array.forEach (<anonymous>)
at Object.<anonymous> (/workspace/node_modules/face-api.js/build/commonjs/dom/toNetInput.js:33:32)
at step (/workspace/node_modules/face-api.js/node_modules/tslib/tslib.js:141:27)
at Object.next (/workspace/node_modules/face-api.js/node_modules/tslib/tslib.js:122:57)
at /workspace/node_modules/face-api.js/node_modules/tslib/tslib.js:115:75
at new Promise (<anonymous>)
at Object.__awaiter (/workspace/node_modules/face-api.js/node_modules/tslib/tslib.js:111:16)
at Object.toNetInput (/workspace/node_modules/face-api.js/build/commonjs/dom/toNetInput.js:17:20)
at SsdMobilenetv1.<anonymous> (/workspace/node_modules/face-api.js/build/commonjs/ssdMobilenetv1/SsdMobilenetv1.js:55:52)
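Is the missing piece that face-api.js needs its environment patched with the node-canvas classes before any detection runs? The face-api.js README shows something like the snippet below for Node, but I am only guessing that this is related to my error:

// Monkey-patch the face-api.js environment with node-canvas implementations
const canvas = require('canvas');
const faceapi = require('face-api.js');
const { Canvas, Image, ImageData } = canvas;
faceapi.env.monkeyPatch({ Canvas, Image, ImageData });

Or is there something else I am missing to get face-api.js working outside the browser?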