I've been trying to use the Google ML Kit Face Detection library for iOS, but it fails to detect faces with the front-facing camera; it only detects faces when I use the back camera on my phone. I printed out the image orientation and it matches between the front and back cameras. Detection works with both cameras on my iPhone X, but when I test on an iPhone 11 or an iPhone XS Max it only works with the back camera, and I am not sure what is causing this inconsistency. My code is below; note that every image passed into the photoVerification function is first run through the fixedOrientation function to ensure consistency:
import UIKit
import MLKitFaceDetection
import MLKitVision

func photoVerification(image: UIImage?) {
    guard let imageFace = image else { return }
    // Configure face detection for accuracy over speed
    let options = FaceDetectorOptions()
    options.performanceMode = .accurate
    // Initialize the face detector with the given options
    let faceDetector = FaceDetector.faceDetector(options: options)
    // Initialize a VisionImage object with the given UIImage
    let visionImage = VisionImage(image: imageFace)
    visionImage.orientation = imageFace.imageOrientation
    print("$$ The image's orientation is: ", imageFace.imageOrientation.rawValue)
    faceDetector.process(visionImage) { faces, error in
        guard error == nil, let faces = faces, !faces.isEmpty else {
            // No face detected: report the error and mark the user unverified
            let errorString = error?.localizedDescription ?? "No results possible"
            print("Error: ", errorString)
            print("No face detected!")
            self.userVerified = false
            self.addVerifiedTag(isVerified: false)
            return
        }
        // A face has been detected: offer the verified tag to the user
        print("Face detected!")
        self.userVerified = true
        self.addVerifiedTag(isVerified: true)
    }
}
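One difference worth checking against the code above: ML Kit's documentation derives the VisionImage orientation from the device orientation and the camera position together, and returns mirrored orientations for the front camera rather than reusing the UIImage's own imageOrientation. A sketch of that documented helper follows; the AVFoundation capture pipeline around it is assumed:

import UIKit
import AVFoundation

// Maps the device orientation and camera position to the orientation
// ML Kit expects; the front camera uses the mirrored variants.
func imageOrientation(
    deviceOrientation: UIDeviceOrientation,
    cameraPosition: AVCaptureDevice.Position
) -> UIImage.Orientation {
    switch deviceOrientation {
    case .portrait:
        return cameraPosition == .front ? .leftMirrored : .right
    case .landscapeLeft:
        return cameraPosition == .front ? .downMirrored : .up
    case .portraitUpsideDown:
        return cameraPosition == .front ? .rightMirrored : .left
    case .landscapeRight:
        return cameraPosition == .front ? .upMirrored : .down
    case .faceDown, .faceUp, .unknown:
        return .up
    @unknown default:
        return .up
    }
}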
func fixedOrientation(image: UIImage) -> UIImage? {
    guard image.imageOrientation != .up else {
        // Orientation is already correct
        return image
    }
    guard let cgImage = image.cgImage else {
        // CGImage not available
        return nil
    }
    guard let colorSpace = cgImage.colorSpace,
          let ctx = CGContext(data: nil,
                              width: Int(image.size.width),
                              height: Int(image.size.height),
                              bitsPerComponent: cgImage.bitsPerComponent,
                              bytesPerRow: 0,
                              space: colorSpace,
                              bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue) else {
        return nil
    }
    // Rotate the pixel data so the result is in the .up orientation
    var transform: CGAffineTransform = .identity
    switch image.imageOrientation {
    case .down, .downMirrored:
        transform = transform.translatedBy(x: image.size.width, y: image.size.height)
        transform = transform.rotated(by: CGFloat.pi)
    case .left, .leftMirrored:
        transform = transform.translatedBy(x: image.size.width, y: 0)
        transform = transform.rotated(by: CGFloat.pi / 2.0)
    case .right, .rightMirrored:
        transform = transform.translatedBy(x: 0, y: image.size.height)
        transform = transform.rotated(by: CGFloat.pi / -2.0)
    case .up, .upMirrored:
        break
    @unknown default:
        break
    }
    // Flip once more if the source was mirrored, to prevent a flipped result
    switch image.imageOrientation {
    case .upMirrored, .downMirrored:
        transform = transform.translatedBy(x: image.size.width, y: 0)
        transform = transform.scaledBy(x: -1, y: 1)
    case .leftMirrored, .rightMirrored:
        transform = transform.translatedBy(x: image.size.height, y: 0)
        transform = transform.scaledBy(x: -1, y: 1)
    case .up, .down, .left, .right:
        break
    @unknown default:
        break
    }
    ctx.concatenate(transform)
    // Draw with width and height swapped for the 90-degree rotations
    switch image.imageOrientation {
    case .left, .leftMirrored, .right, .rightMirrored:
        ctx.draw(cgImage, in: CGRect(x: 0, y: 0, width: image.size.height, height: image.size.width))
    default:
        ctx.draw(cgImage, in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
    }
    guard let newCGImage = ctx.makeImage() else { return nil }
    return UIImage(cgImage: newCGImage, scale: 1, orientation: .up)
}
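For completeness, a minimal sketch of the call flow described at the top, where photoData is a hypothetical name for whatever Data the camera capture hands back:

// Hypothetical call site: normalize the captured image first, then verify.
if let captured = UIImage(data: photoData),
   let normalized = fixedOrientation(image: captured) {
    photoVerification(image: normalized)
}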