实时人脸检测不起作用

此代码不显示相机中的脸部检测,即使没有错误。 我希望脸部应该能够在相机中实时检测到红包的身影,但是我认为我没有正确放置代码,或者我应该在Viewdidload或其他位置放置什么东西?

import UIKit import CoreImage class ViewController: UIViewController ,UIAlertViewDelegate, UIImagePickerControllerDelegate, UINavigationControllerDelegate { @IBOutlet var imageView: UIImageView! @IBAction func Moodify(_ sender: UIButton) { func detect() { guard let personciImage = CIImage(image: imageView.image!) else { return } let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh] let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy) let faces = faceDetector?.features(in: personciImage) // For converting the Core Image Coordinates to UIView Coordinates let ciImageSize = personciImage.extent.size var transform = CGAffineTransform(scaleX: 1, y: -1) transform = transform.translatedBy(x: 0, y: -ciImageSize.height) for face in faces as! [CIFaceFeature] { print("Found bounds are \(face.bounds)") // Apply the transform to convert the coordinates var faceViewBounds = face.bounds.applying(transform) // Calculate the actual position and size of the rectangle in the image view let viewSize = imageView.bounds.size let scale = min(viewSize.width / ciImageSize.width, viewSize.height / ciImageSize.height) let offsetX = (viewSize.width - ciImageSize.width * scale) / 2 let offsetY = (viewSize.height - ciImageSize.height * scale) / 2 faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale)) faceViewBounds.origin.x += offsetX faceViewBounds.origin.y += offsetY let faceBox = UIView(frame: faceViewBounds) //let faceBox = UIView(frame: face.bounds) faceBox.layer.borderWidth = 3 faceBox.layer.borderColor = UIColor.red.cgColor faceBox.backgroundColor = UIColor.clear imageView.addSubview(faceBox) if face.hasLeftEyePosition { print("Left eye bounds are \(face.leftEyePosition)") } if face.hasRightEyePosition { print("Right eye bounds are \(face.rightEyePosition)") } } } let picker = UIImagePickerController() picker.delegate = self picker.allowsEditing = true picker.sourceType = .camera picker.cameraDevice = .front self.present(picker, animated: true, completion: { _ in }) func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [AnyHashable: Any]) { let chosenImage = info[UIImagePickerControllerEditedImage] self.imageView!.image = chosenImage as? UIImage picker.dismiss(animated: true, completion: { _ in }) } // picker.dismiss(animated: true, completion: { _ in }) func imagePickerControllerDidCancel(_ picker: UIImagePickerController) { picker.dismiss(animated: true, completion: { _ in }) } } override func viewDidLoad() { let alert = UIAlertController(title: "Ooops!!!", message: "Camera is not connected", preferredStyle: UIAlertControllerStyle.alert) alert.addAction(UIAlertAction(title: "Connect", style: UIAlertActionStyle.default, handler: nil)) self.present(alert, animated: true, completion: nil) super.viewDidLoad() // Do any additional setup after loading the view, typically from a nib. } override func didReceiveMemoryWarning() { super.didReceiveMemoryWarning() // Dispose of any resources that can be recreated. } } 

您可能只需要按照文档中描述的方式来触发该function即可

我们将调用viewDidLoad中的检测方法。 因此,在该方法中插入以下代码行:

 override func viewDidLoad() { super.viewDidLoad() detect() 

}

编译并运行应用程序。

编辑:这是解决scheme,而function“检测”是作为一个子类的方法,但在你的情况下,你使用IBAction,它有不同的语法是这样的。 你应该尝试删除函数detect()和这个括号的名字

 } let picker = 

而这部分必须在一个函数里面

 let picker = UIImagePickerController() picker.delegate = self picker.allowsEditing = true picker.sourceType = .camera picker.cameraDevice = .front self.present(picker, animated: true, completion: { _ in }) 

对于你的情况,你也可以省略这部分。

在查看完代码之后,在进行快照之后,甚至没有调用detect() 。 我试着修正它,如下所述,然而, detect()将返回零面find,我在相机的脸部检测中描述。

 lazy var picker: UIImagePickerController = { let picker = UIImagePickerController() picker.delegate = self picker.allowsEditing = true picker.sourceType = .camera picker.cameraDevice = .front return picker }() @IBOutlet var imageView: UIImageView! override func viewDidLoad() { super.viewDidLoad() imageView.contentMode = .scaleAspectFit } @IBAction func TakePhoto(_ sender: Any) { self.present(picker, animated: true, completion: nil) } // MARK: - UIImagePickerControllerDelegate func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) { if let chosenImage = info[UIImagePickerControllerOriginalImage] as? UIImage { self.imageView!.image = chosenImage // Got the image from camera, the imageView.image is not nil, so it's time for facial detection detect() picker.dismiss(animated: true, completion: nil) } } func imagePickerControllerDidCancel(_ picker: UIImagePickerController) { picker.dismiss(animated: true, completion: nil) } // MARK: - Face Detection func detect() { guard let personciImage = CIImage(image: imageView.image!) else { return } let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh] let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy) let faces = faceDetector?.features(in: personciImage) // For converting the Core Image Coordinates to UIView Coordinates let ciImageSize = personciImage.extent.size var transform = CGAffineTransform(scaleX: 1, y: -1) transform = transform.translatedBy(x: 0, y: -ciImageSize.height) print("faces.count = \(faces?.count)") for face in faces as! [CIFaceFeature] { print("Found bounds are \(face.bounds)") // Apply the transform to convert the coordinates var faceViewBounds = face.bounds.applying(transform) // Calculate the actual position and size of the rectangle in the image view let viewSize = imageView.bounds.size let scale = min(viewSize.width / ciImageSize.width, viewSize.height / ciImageSize.height) let offsetX = (viewSize.width - ciImageSize.width * scale) / 2 let offsetY = (viewSize.height - ciImageSize.height * scale) / 2 faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale)) faceViewBounds.origin.x += offsetX faceViewBounds.origin.y += offsetY let faceBox = UIView(frame: faceViewBounds) //let faceBox = UIView(frame: face.bounds) faceBox.layer.borderWidth = 3 faceBox.layer.borderColor = UIColor.red.cgColor faceBox.backgroundColor = UIColor.clear imageView.addSubview(faceBox) if face.hasLeftEyePosition { print("Left eye bounds are \(face.leftEyePosition)") } if face.hasRightEyePosition { print("Right eye bounds are \(face.rightEyePosition)") } } }