Real-time face detection is not working


This code does not detect a face from the camera, even though there is no error. I want the face to be detected in real time in the camera view, surrounded by a red square, but I think I have not placed the code properly. Should something go in viewDidLoad, or somewhere else?

import UIKit
import CoreImage

class ViewController: UIViewController ,UIAlertViewDelegate, UIImagePickerControllerDelegate, UINavigationControllerDelegate  {

@IBOutlet var imageView: UIImageView!
@IBAction func Moodify(_ sender: UIButton) {


    func detect() {

        guard let personciImage = CIImage(image: imageView.image!) else {
            return
        }

        let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
        let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
        let faces = faceDetector?.features(in: personciImage)


        // For converting the Core Image Coordinates to UIView Coordinates
        let ciImageSize = personciImage.extent.size
        var transform = CGAffineTransform(scaleX: 1, y: -1)
        transform = transform.translatedBy(x: 0, y: -ciImageSize.height)

        for face in faces as! [CIFaceFeature] {

            print("Found bounds are \(face.bounds)")

            // Apply the transform to convert the coordinates
            var faceViewBounds = face.bounds.applying(transform)

            // Calculate the actual position and size of the rectangle in the image view
            let viewSize = imageView.bounds.size
            let scale = min(viewSize.width / ciImageSize.width,
                            viewSize.height / ciImageSize.height)
            let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
            let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

            faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY

            let faceBox = UIView(frame: faceViewBounds)
            //let faceBox = UIView(frame: face.bounds)
            faceBox.layer.borderWidth = 3
            faceBox.layer.borderColor = UIColor.red.cgColor
            faceBox.backgroundColor = UIColor.clear
            imageView.addSubview(faceBox)

            if face.hasLeftEyePosition {
                print("Left eye bounds are \(face.leftEyePosition)")
            }

            if face.hasRightEyePosition {
                print("Right eye bounds are \(face.rightEyePosition)")
            }
        }
    }

    let picker = UIImagePickerController()
    picker.delegate = self
    picker.allowsEditing = true
    picker.sourceType = .camera
    picker.cameraDevice = .front
    self.present(picker, animated: true, completion: { _ in })

    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [AnyHashable: Any]) {
        let chosenImage = info[UIImagePickerControllerEditedImage]
        self.imageView!.image = chosenImage as? UIImage
        picker.dismiss(animated: true, completion: { _ in })
    }

     // picker.dismiss(animated: true, completion: { _ in })
    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        picker.dismiss(animated: true, completion: { _ in })
    }
}

override func viewDidLoad() {

    let alert = UIAlertController(title: "Ooops!!!", message: "Camera is not connected", preferredStyle: UIAlertControllerStyle.alert)
    alert.addAction(UIAlertAction(title: "Connect", style: UIAlertActionStyle.default, handler: nil))
    self.present(alert, animated: true, completion: nil)

    super.viewDidLoad()
    // Do any additional setup after loading the view, typically from a nib.

}

override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
    // Dispose of any resources that can be recreated.
}
}

There are 2 answers

Answer from Vanya

You most probably just need to trigger the function the way it is described in the document:

We will invoke the detect method in viewDidLoad. So insert the following line of code in the method:

override func viewDidLoad() {
   super.viewDidLoad()

   detect()

}

Compile and run the app.

EDIT: That solution applies when the function detect is a plain method of the class, but in your case you use an IBAction, which has a different structure. You should try deleting the name of the function, detect(), and this bracket

}

let picker =

and this part has to be inside a function:

let picker = UIImagePickerController()
picker.delegate = self
picker.allowsEditing = true
picker.sourceType = .camera
picker.cameraDevice = .front
self.present(picker, animated: true, completion: { _ in })

For your case, you can probably omit this part as well.
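
Putting that together, the restructured controller would look roughly like this (a sketch of what Vanya describes, reusing the names from the question; the body of detect() is the same as in the question and elided here):

class ViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet var imageView: UIImageView!

    // detect() is now an ordinary instance method, not a function nested in the IBAction
    func detect() {
        // ... the face-detection code from the question, unchanged ...
    }

    // The IBAction only presents the picker
    @IBAction func Moodify(_ sender: UIButton) {
        let picker = UIImagePickerController()
        picker.delegate = self
        picker.allowsEditing = true
        picker.sourceType = .camera
        picker.cameraDevice = .front
        present(picker, animated: true, completion: nil)
    }

    // The delegate callbacks are instance methods too, so UIKit can actually call them
    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
        imageView.image = info[UIImagePickerControllerEditedImage] as? UIImage
        picker.dismiss(animated: true, completion: nil)
        detect() // run detection once the image is in place
    }

    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        picker.dismiss(animated: true, completion: nil)
    }
}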

Answer from Willjay

After looking through your code, I found that you never even call detect() after you take a snap. I tried to fix it as described below; however, detect() will return zero faces found, as I describe in Face Detection with Camera.

lazy var picker: UIImagePickerController = {
    let picker = UIImagePickerController()
    picker.delegate = self
    picker.allowsEditing = true
    picker.sourceType = .camera
    picker.cameraDevice = .front
    return picker
}()

@IBOutlet var imageView: UIImageView!
override func viewDidLoad() {
    super.viewDidLoad()
    imageView.contentMode = .scaleAspectFit
}

@IBAction func TakePhoto(_ sender: Any) {
    self.present(picker, animated: true, completion: nil)
}

// MARK: - UIImagePickerControllerDelegate
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
    if let chosenImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
        self.imageView!.image = chosenImage
        // Got the image from camera, the imageView.image is not nil, so it's time for facial detection
        detect()
        picker.dismiss(animated: true, completion: nil)
    }
}


func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
    picker.dismiss(animated: true, completion: nil)
}

// MARK: - Face Detection

func detect() {

    guard let personciImage = CIImage(image: imageView.image!) else {
        return
    }

    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personciImage)


    // For converting the Core Image Coordinates to UIView Coordinates
    let ciImageSize = personciImage.extent.size
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -ciImageSize.height)
    print("faces.count = \(faces?.count)")

    for face in faces as! [CIFaceFeature] {

        print("Found bounds are \(face.bounds)")

        // Apply the transform to convert the coordinates
        var faceViewBounds = face.bounds.applying(transform)

        // Calculate the actual position and size of the rectangle in the image view
        let viewSize = imageView.bounds.size
        let scale = min(viewSize.width / ciImageSize.width,
                        viewSize.height / ciImageSize.height)
        let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
        let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

        faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
        faceViewBounds.origin.x += offsetX
        faceViewBounds.origin.y += offsetY

        let faceBox = UIView(frame: faceViewBounds)
        //let faceBox = UIView(frame: face.bounds)
        faceBox.layer.borderWidth = 3
        faceBox.layer.borderColor = UIColor.red.cgColor
        faceBox.backgroundColor = UIColor.clear
        imageView.addSubview(faceBox)

        if face.hasLeftEyePosition {
            print("Left eye bounds are \(face.leftEyePosition)")
        }

        if face.hasRightEyePosition {
            print("Right eye bounds are \(face.rightEyePosition)")
        }
    }
}
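
A likely reason detect() reports zero faces here (an assumption on my side; neither answer pins it down) is that a CIImage created from a camera UIImage loses the image's orientation, so CIDetector scans rotated pixels. CIDetector accepts a CIDetectorImageOrientation option, so passing the photo's EXIF orientation when extracting features may help, roughly like this:

func detect() {
    guard let image = imageView.image, let personciImage = CIImage(image: image) else {
        return
    }

    // Map UIImageOrientation to the EXIF orientation values CIDetector expects
    // (1 = up, 6 = rotated 90° clockwise, and so on)
    let exifOrientation: Int
    switch image.imageOrientation {
    case .up:            exifOrientation = 1
    case .upMirrored:    exifOrientation = 2
    case .down:          exifOrientation = 3
    case .downMirrored:  exifOrientation = 4
    case .leftMirrored:  exifOrientation = 5
    case .right:         exifOrientation = 6
    case .rightMirrored: exifOrientation = 7
    case .left:          exifOrientation = 8
    }

    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)

    // Tell the detector how the pixels are rotated relative to "up"
    let faces = faceDetector?.features(in: personciImage,
                                       options: [CIDetectorImageOrientation: exifOrientation])
    print("faces.count = \(faces?.count ?? 0)")

    // ... the coordinate conversion and faceBox drawing stay the same ...
}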