如何更改某些function以兼容iOS 10或更低版本,以便在我的搜索function中使用相机视图控制器

在此处输入图像描述

我正在制作一个视图控制器来制作像Snapchat相机一样的摄像机视图控制器。 我的代码适用于iOS 11或更高版本。 说实话,我并没有真正掌握我的代码,因为我只是按照这个像摄像机视图控制器这样的教程

import UIKit import AVFoundation import SVProgressHUD class CameraVC: UIViewController { @IBOutlet weak var timeLabel: UILabel! @IBOutlet weak var dateLabel: UILabel! @IBOutlet weak var cameraButton: DesignableButton! @IBOutlet weak var retryButton: DesignableButton! // to receive data from MainMenuVC var employeeData : Employee? var checkinData = CheckIn() var captureSession = AVCaptureSession() // which camera input do we want to use var backCamera: AVCaptureDevice? var frontCamera: AVCaptureDevice? // to keep track which camera do we use currently var currentDevice: AVCaptureDevice? var photoOutput: AVCapturePhotoOutput? var cameraPreviewLayer: AVCaptureVideoPreviewLayer? var toggleCameraGestureRecognizer = UISwipeGestureRecognizer() var zoomInGestureRecognizer = UISwipeGestureRecognizer() var zoomOutGestureRecognizer = UISwipeGestureRecognizer() var thereIsAnError : Bool = false { didSet { if thereIsAnError { cameraButton.isHidden = true cameraButton.isEnabled = false retryButton.isHidden = false retryButton.isEnabled = true } else { cameraButton.isHidden = false cameraButton.isEnabled = true retryButton.isHidden = true retryButton.isEnabled = false } } } override func viewDidLoad() { super.viewDidLoad() getDateTimeFromServer() // initial value thereIsAnError = false timeLabel.text = "" dateLabel.text = "" cameraButton.isEnabled = false cameraButton.alpha = 0.4 setupCaptureSession() setupDevice() setupInputOutput() setupPreviewLayer() startRunningCaptureSession() setGestureRecognizer() } override func viewWillAppear(_ animated: Bool) { super.viewWillAppear(animated) if checkinData.dateTime != nil { SVProgressHUD.dismiss() } } @IBAction func shutterButtonDidPressed(_ sender: Any) { // when the button is pressed, we capture the image and set the photoOutput let settings = AVCapturePhotoSettings() photoOutput?.capturePhoto(with: settings, delegate: self) // perform segue is below in the AVCapturePhotoCaptureDelegate } @IBAction func retryButtonDidPressed(_ sender: Any) { if checkinData.dateTime == nil { getDateTimeFromServer() } } } extension CameraVC { // MARK: - Helper Methods // MARK: - Helper Methods override func prepare(for segue: UIStoryboardSegue, sender: Any?) { if segue.identifier == "goToCheckinDetail" { let checkinDetailTVC = segue.destination as! CheckinDetailVC checkinDetailTVC.dataOfCheckin = checkinData checkinDetailTVC.dataOfEmployee = employeeData // to set the navbar back button title in the checkinDetailVC navigationItem.backBarButtonItem = UIBarButtonItem(title: "", style: .plain, target: nil, action: nil) } } func getDateTimeFromServer() { SVProgressHUD.show(withStatus: "Loading Data") NetworkingService.getCurrentTimeFromServer { (result) in switch result { case .failure: self.thereIsAnError = true SVProgressHUD.dismiss() self.showAlert(alertTitle: "Sorry", alertMessage: "Internet connection issue, please tap the retry button.", actionTitle: "Back") case .success(let timeFromServer) : guard let stringDateTimeServer = timeFromServer as? String else {return} self.checkinData.dateTime = stringDateTimeServer let dateTimeService = DateTimeService(fromDateTimeString: stringDateTimeServer) let time = dateTimeService.parsingDateAndTime()?.timeOnly self.timeLabel.text = "\(time ?? "-")" self.dateLabel.text = DateTimeService.changeFormat(of: stringDateTimeServer, toFormat: "dd MMM yyyy") self.cameraButton.isEnabled = true self.cameraButton.alpha = 1 self.thereIsAnError = false SVProgressHUD.dismiss() } } } func setGestureRecognizer() { // change camera from front to back toggleCameraGestureRecognizer.direction = .up toggleCameraGestureRecognizer.addTarget(self, action: #selector(self.switchCamera)) view.addGestureRecognizer(toggleCameraGestureRecognizer) // Zoom In recognizer zoomInGestureRecognizer.direction = .right zoomInGestureRecognizer.addTarget(self, action: #selector(zoomIn)) view.addGestureRecognizer(zoomInGestureRecognizer) // Zoom Out recognizer zoomOutGestureRecognizer.direction = .left zoomOutGestureRecognizer.addTarget(self, action: #selector(zoomOut)) view.addGestureRecognizer(zoomOutGestureRecognizer) } func setupCaptureSession() { // to specify image resolution and quality we want, we set to the highest resolution possible captureSession.sessionPreset = AVCaptureSession.Preset.photo } func setupDevice() { // to decide whether we use front or back camer let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified) let devices = deviceDiscoverySession.devices for device in devices { if device.position == AVCaptureDevice.Position.back { backCamera = device } else if device.position == AVCaptureDevice.Position.front { frontCamera = device } } // default device currentDevice = frontCamera } func setupInputOutput() { // after the camera capture that image (input), we generate the image DATA (output) // put the input and output to capture Session do { let captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!) captureSession.addInput(captureDeviceInput) photoOutput = AVCapturePhotoOutput() photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil) captureSession.addOutput(photoOutput!) } catch { print(error) } } func setupPreviewLayer() { // to display image data on the screen cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession) cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait cameraPreviewLayer?.frame = self.view.frame self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0) } @objc func switchCamera() { captureSession.beginConfiguration() // Change the device based on the current camera let newDevice = (currentDevice?.position == AVCaptureDevice.Position.back) ? frontCamera : backCamera // Remove all inputs from the session for input in captureSession.inputs { captureSession.removeInput(input as! AVCaptureDeviceInput) } // Change to the new input let cameraInput:AVCaptureDeviceInput do { cameraInput = try AVCaptureDeviceInput(device: newDevice!) } catch { print(error) return } if captureSession.canAddInput(cameraInput) { captureSession.addInput(cameraInput) } currentDevice = newDevice captureSession.commitConfiguration() } @objc func zoomIn() { if let zoomFactor = currentDevice?.videoZoomFactor { if zoomFactor  1.0 { let newZoomFactor = max(zoomFactor - 1.0, 1.0) do { try currentDevice?.lockForConfiguration() currentDevice?.ramp(toVideoZoomFactor: newZoomFactor, withRate: 1.0) currentDevice?.unlockForConfiguration() } catch { print(error) } } } } func startRunningCaptureSession() { // to start capturing the data captureSession.startRunning() } } extension CameraVC: AVCapturePhotoCaptureDelegate { func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) { if let imageData = photo.fileDataRepresentation() { checkinData.photo = UIImage(data: imageData) performSegue(withIdentifier: "goToCheckinDetail", sender: nil) } } } 

但是当我将部署目标设置为iOS 10.3时,我收到一条错误消息,表示某些方法仅适用于iOS 11或更高版本。

在此处输入图像描述

 func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) { if let imageData = photo.fileDataRepresentation() { checkinData.photo = UIImage(data: imageData) performSegue(withIdentifier: "goToCheckinDetail", sender: nil) } } 
  1. AVCapturePhoto’仅适用于iOS 11.0或更高版本
  2. fileDataRepresentation()’仅适用于iOS 11.0或更高版本

在此处输入图像描述

 func setupInputOutput() { // after the camera capture that image (input), we generate the image DATA (output) // put the input and output to capture Session do { let captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!) captureSession.addInput(captureDeviceInput) photoOutput = AVCapturePhotoOutput() photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil) captureSession.addOutput(photoOutput!) } catch { print(error) } } 

‘jpeg’仅适用于iOS 11.0或更高版本

请帮助我,我需要一些function,等于iOS 10(至少)或以下的function。

  1. 创建AVCapturePhotoOutput对象。 使用其属性确定支持的捕获设置并启用某些function(例如,是否捕获实时照片)。

     fileprivate var photoOutput: AVCapturePhotoOutput! 
  2. 创建和配置AVCapturePhotoSettings对象以选择特定捕获的function和设置(例如,是否启用图像稳定或闪存)。

     photoOutput = AVCapturePhotoOutput() if self.session.canAddOutput(photoOutput) { self.session.addOutput(photoOutput) } 
  3. 通过将照片设置对象传递给capturePhoto(with:delegate:)方法以及实现AVCapturePhotoCaptureDelegate协议的委托对象来捕获图像。 然后,照片捕获输出会调用您的代理人在捕获过程中通知您重要事件。

     queue.async { self.photoOutput.capturePhoto(with: AVCapturePhotoSettings(), delegate: self) }