This code does not detect a face from the camera, even though there is no error. I want the face to be detected in real time in the camera view, surrounded by a red square, but I think I have not placed the code properly. Should I put something in viewDidLoad, or somewhere else?
import UIKit
import CoreImage

class ViewController: UIViewController, UIAlertViewDelegate, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet var imageView: UIImageView!

    @IBAction func Moodify(_ sender: UIButton) {

        func detect() {
            guard let personciImage = CIImage(image: imageView.image!) else {
                return
            }

            let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
            let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
            let faces = faceDetector?.features(in: personciImage)

            // For converting the Core Image coordinates to UIView coordinates
            let ciImageSize = personciImage.extent.size
            var transform = CGAffineTransform(scaleX: 1, y: -1)
            transform = transform.translatedBy(x: 0, y: -ciImageSize.height)

            for face in faces as! [CIFaceFeature] {
                print("Found bounds are \(face.bounds)")

                // Apply the transform to convert the coordinates
                var faceViewBounds = face.bounds.applying(transform)

                // Calculate the actual position and size of the rectangle in the image view
                let viewSize = imageView.bounds.size
                let scale = min(viewSize.width / ciImageSize.width,
                                viewSize.height / ciImageSize.height)
                let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
                let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

                faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
                faceViewBounds.origin.x += offsetX
                faceViewBounds.origin.y += offsetY

                let faceBox = UIView(frame: faceViewBounds)
                //let faceBox = UIView(frame: face.bounds)
                faceBox.layer.borderWidth = 3
                faceBox.layer.borderColor = UIColor.red.cgColor
                faceBox.backgroundColor = UIColor.clear
                imageView.addSubview(faceBox)

                if face.hasLeftEyePosition {
                    print("Left eye bounds are \(face.leftEyePosition)")
                }

                if face.hasRightEyePosition {
                    print("Right eye bounds are \(face.rightEyePosition)")
                }
            }
        }

        let picker = UIImagePickerController()
        picker.delegate = self
        picker.allowsEditing = true
        picker.sourceType = .camera
        picker.cameraDevice = .front
        self.present(picker, animated: true, completion: { _ in })

        func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [AnyHashable: Any]) {
            let chosenImage = info[UIImagePickerControllerEditedImage]
            self.imageView!.image = chosenImage as? UIImage
            picker.dismiss(animated: true, completion: { _ in })
        }

        // picker.dismiss(animated: true, completion: { _ in })

        func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
            picker.dismiss(animated: true, completion: { _ in })
        }
    }

    override func viewDidLoad() {
        let alert = UIAlertController(title: "Ooops!!!", message: "Camera is not connected", preferredStyle: UIAlertControllerStyle.alert)
        alert.addAction(UIAlertAction(title: "Connect", style: UIAlertActionStyle.default, handler: nil))
        self.present(alert, animated: true, completion: nil)
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}
You most probably just need to trigger the function the way it is described in the document:

We will invoke the detect method in viewDidLoad. So insert the following line of code in the method:
override func viewDidLoad() {
    super.viewDidLoad()
    detect()
}
Compile and run the app.
EDIT: This is the solution while the function detect is a method of the class, but in your case you use an IBAction, which has a different syntax: all of your code is wrapped inside @IBAction func Moodify(_ sender: UIButton) { ... }. You should try to delete the name of the function, the line func detect() {, together with its matching closing bracket, so that the detection code runs directly when the button is tapped.
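Hypothetically, the action would then look like this (abbreviated; the coordinate conversion and red faceBox drawing stay exactly as in your question):

@IBAction func Moodify(_ sender: UIButton) {
    // Former body of detect(), now executed directly when the button is tapped.
    // Note: imageView.image! assumes an image is already set at this point.
    guard let personciImage = CIImage(image: imageView.image!) else {
        return
    }
    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personciImage) as? [CIFaceFeature] ?? []
    for face in faces {
        print("Found bounds are \(face.bounds)")
        // ... coordinate conversion and faceBox code, unchanged ...
    }

    // The picker setup stays inside the action.
    let picker = UIImagePickerController()
    picker.delegate = self
    picker.allowsEditing = true
    picker.sourceType = .camera
    picker.cameraDevice = .front
    present(picker, animated: true, completion: nil)
}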
The imagePickerController delegate methods, on the other hand, have to be functions of the class itself, not functions nested inside the action; nested as they are now, the picker never calls them, so imageView.image is never set and there is nothing to detect. And for your case you can probably omit the detect() call in viewDidLoad as well, since detection is triggered from the action.
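Putting it all together, here is a minimal sketch of the restructured controller, keeping the Swift 3-era APIs from your code (detect() is abbreviated; its body is the same detection, coordinate conversion, and red-box code as in the question; the delegate signature follows the Swift 3 UIKit declaration with [String : Any], since a mismatched signature would also prevent the callback from firing):

import UIKit
import CoreImage

class ViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet var imageView: UIImageView!

    // The button only presents the camera; detection runs after a picture is taken.
    @IBAction func Moodify(_ sender: UIButton) {
        let picker = UIImagePickerController()
        picker.delegate = self
        picker.allowsEditing = true
        picker.sourceType = .camera
        picker.cameraDevice = .front
        present(picker, animated: true, completion: nil)
    }

    // Class-level delegate method, so UIImagePickerController can actually call it.
    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
        if let chosenImage = info[UIImagePickerControllerEditedImage] as? UIImage {
            imageView.image = chosenImage
            detect() // the image view now has an image, so there is something to find
        }
        picker.dismiss(animated: true, completion: nil)
    }

    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        picker.dismiss(animated: true, completion: nil)
    }

    // Class-level method, callable from the delegate above.
    func detect() {
        guard let image = imageView.image, let personciImage = CIImage(image: image) else {
            return
        }
        let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
        let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
        let faces = faceDetector?.features(in: personciImage) as? [CIFaceFeature] ?? []
        for face in faces {
            print("Found bounds are \(face.bounds)")
            // ... same coordinate conversion and red faceBox drawing as in the question ...
        }
    }
}

The ordering is the point: the delegate callback sets imageView.image first and only then calls detect(), so the detector never runs against an empty image view. Note this detects faces in the captured still photo; detecting faces in the live camera preview in real time would need AVFoundation capture output instead of UIImagePickerController.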