iOS: Capturing an image from the front camera

I'm making an application where I'd like to capture an image from the front camera, without presenting any kind of capture screen. I want to take a picture completely in code, without any user interaction. How do I do this for the front camera?

How to capture an image from the front camera using AVFoundation:

Development caveats:

  • Check your app and image orientation settings carefully (a sketch follows this list)
  • AVFoundation and its associated frameworks are nasty behemoths and very difficult to understand/implement. I've made my code as lean as possible, but please check out this excellent tutorial for a better explanation (the website is no longer available; link via archive.org): http://www.benjaminloulier.com/posts/ios4-and-direct-access-to-the-camera
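For example, rather than relying on the hard-coded UIImageOrientationDownMirrored used below, you can pin down orientation and mirroring on the capture connections themselves. A minimal Swift sketch (Swift 4-style API; applyPortraitOrientation is a hypothetical helper, and previewLayer/output are assumed to be the ones configured in the code below):

import AVFoundation

// Hypothetical helper: forces portrait orientation on both the preview and
// the frames handed to the sample-buffer delegate, and mirrors the front
// camera so it behaves like a mirror.
func applyPortraitOrientation(previewLayer: AVCaptureVideoPreviewLayer,
                              output: AVCaptureVideoDataOutput) {
    // Orient the on-screen preview.
    if let connection = previewLayer.connection,
        connection.isVideoOrientationSupported {
        connection.videoOrientation = .portrait
    }
    // Orient and mirror the frames delivered to the delegate.
    if let connection = output.connection(with: .video) {
        if connection.isVideoOrientationSupported {
            connection.videoOrientation = .portrait
        }
        if connection.isVideoMirroringSupported {
            connection.automaticallyAdjustsVideoMirroring = false
            connection.isVideoMirrored = true
        }
    }
}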

ViewController.h

// Frameworks
#import <CoreVideo/CoreVideo.h>
#import <CoreMedia/CoreMedia.h>
#import <AVFoundation/AVFoundation.h>
#import <UIKit/UIKit.h>

@interface CameraViewController : UIViewController <AVCaptureVideoDataOutputSampleBufferDelegate>

// Camera
@property (weak, nonatomic) IBOutlet UIImageView* cameraImageView;
@property (strong, nonatomic) AVCaptureDevice* device;
@property (strong, nonatomic) AVCaptureSession* captureSession;
@property (strong, nonatomic) AVCaptureVideoPreviewLayer* previewLayer;
@property (strong, nonatomic) UIImage* cameraImage;

@end

ViewController.m

 #import "CameraViewController.h" @implementation CameraViewController - (void)viewDidLoad { [super viewDidLoad]; [self setupCamera]; [self setupTimer]; } - (void)setupCamera { NSArray* devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]; for(AVCaptureDevice *device in devices) { if([device position] == AVCaptureDevicePositionFront) self.device = device; } AVCaptureDeviceInput* input = [AVCaptureDeviceInput deviceInputWithDevice:self.device error:nil]; AVCaptureVideoDataOutput* output = [[AVCaptureVideoDataOutput alloc] init]; output.alwaysDiscardsLateVideoFrames = YES; dispatch_queue_t queue; queue = dispatch_queue_create("cameraQueue", NULL); [output setSampleBufferDelegate:self queue:queue]; NSString* key = (NSString *) kCVPixelBufferPixelFormatTypeKey; NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA]; NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key]; [output setVideoSettings:videoSettings]; self.captureSession = [[AVCaptureSession alloc] init]; [self.captureSession addInput:input]; [self.captureSession addOutput:output]; [self.captureSession setSessionPreset:AVCaptureSessionPresetPhoto]; self.previewLayer = [AVCaptureVideoPreviewLayer layerWithSession:self.captureSession]; self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill; // CHECK FOR YOUR APP self.previewLayer.frame = CGRectMake(0, 0, self.view.frame.size.height, self.view.frame.size.width); self.previewLayer.orientation = AVCaptureVideoOrientationLandscapeRight; // CHECK FOR YOUR APP [self.view.layer insertSublayer:self.previewLayer atIndex:0]; // Comment-out to hide preview layer [self.captureSession startRunning]; } - (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection { CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); CVPixelBufferLockBaseAddress(imageBuffer,0); uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer); size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer); size_t width = CVPixelBufferGetWidth(imageBuffer); size_t height = CVPixelBufferGetHeight(imageBuffer); CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst); CGImageRef newImage = CGBitmapContextCreateImage(newContext); CGContextRelease(newContext); CGColorSpaceRelease(colorSpace); self.cameraImage = [UIImage imageWithCGImage:newImage scale:1.0f orientation:UIImageOrientationDownMirrored]; CGImageRelease(newImage); CVPixelBufferUnlockBaseAddress(imageBuffer,0); } - (void)setupTimer { NSTimer* cameraTimer = [NSTimer scheduledTimerWithTimeInterval:2.0f target:self selector:@selector(snapshot) userInfo:nil repeats:YES]; } - (void)snapshot { NSLog(@"SNAPSHOT"); self.cameraImageView.image = self.cameraImage; // Comment-out to hide snapshot } @end 

Connect this to a UIViewController with a UIImageView for the snapshot, and it will work! Snapshots are taken programmatically at 2.0-second intervals without any user input. Comment out the marked lines to remove the preview layer and the snapshot feedback.
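One thing the code above does not show: on iOS 10 and later your Info.plist must contain an NSCameraUsageDescription entry, or the app is terminated on its first camera access, and it is worth requesting permission explicitly before starting the session. A minimal Swift sketch (Swift 4-style API; withCameraPermission is a hypothetical helper):

import AVFoundation

// Hypothetical helper: runs `onGranted` on the main queue once the user has
// allowed camera access. iOS 10+ also requires an NSCameraUsageDescription
// entry in Info.plist, or the app is killed on first camera access.
func withCameraPermission(_ onGranted: @escaping () -> Void) {
    AVCaptureDevice.requestAccess(for: .video) { granted in
        guard granted else { return }   // handle denial in a real app
        DispatchQueue.main.async(execute: onGranted)
    }
}

You would then call something like withCameraPermission { self.setupCamera() } instead of invoking setupCamera directly from viewDidLoad.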

Any more questions/comments, please let me know!

You'll want to use AVFoundation to capture the video stream/images without displaying them. Unlike UIImagePickerController, it doesn't work "out of the box". Look at Apple's AVCam sample as an example to get you started.
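If you only need occasional stills rather than a continuous frame stream, a sketch in the spirit of AVCam using AVCapturePhotoOutput (iOS 10+; FrontCameraSnapper is a hypothetical name, and the delegate callback shown is the iOS 11+ variant) might look like this:

import AVFoundation
import UIKit

// A minimal one-shot front-camera capture with AVCapturePhotoOutput.
// Error handling is reduced to try?/force-unwraps for brevity; a production
// version would also wait for the session to settle before capturing.
final class FrontCameraSnapper: NSObject, AVCapturePhotoCaptureDelegate {
    private let session = AVCaptureSession()
    private let photoOutput = AVCapturePhotoOutput()
    private var completion: ((UIImage?) -> Void)?

    override init() {
        super.init()
        session.sessionPreset = .photo
        guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                   for: .video,
                                                   position: .front),
              let input = try? AVCaptureDeviceInput(device: camera),
              session.canAddInput(input),
              session.canAddOutput(photoOutput)
        else { return }
        session.addInput(input)
        session.addOutput(photoOutput)
    }

    // Starts the session and captures a single still; no preview UI involved.
    func takePicture(completion: @escaping (UIImage?) -> Void) {
        self.completion = completion
        session.startRunning()
        photoOutput.capturePhoto(with: AVCapturePhotoSettings(), delegate: self)
    }

    // iOS 11+ callback; iOS 10 uses the older
    // photoOutput(_:didFinishProcessingPhoto:previewPhoto:...) variant.
    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photo: AVCapturePhoto,
                     error: Error?) {
        session.stopRunning()
        let image = photo.fileDataRepresentation().flatMap(UIImage.init(data:))
        completion?(image)
    }
}

Usage would be along the lines of FrontCameraSnapper().takePicture { image in ... }, keeping a strong reference to the snapper while the capture is in flight.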

I converted the Objective-C code from the answer above to Swift 3, in case anyone is still looking for a solution in 2017.

import UIKit
import AVFoundation

class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    @IBOutlet weak var cameraImageView: UIImageView!

    var device: AVCaptureDevice?
    var captureSession: AVCaptureSession?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var cameraImage: UIImage?

    override func viewDidLoad() {
        super.viewDidLoad()

        setupCamera()
        setupTimer()
    }

    func setupCamera() {
        let discoverySession = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                               mediaType: AVMediaTypeVideo,
                                                               position: .front)
        device = discoverySession?.devices[0]

        let input: AVCaptureDeviceInput
        do {
            input = try AVCaptureDeviceInput(device: device)
        } catch {
            return
        }

        let output = AVCaptureVideoDataOutput()
        output.alwaysDiscardsLateVideoFrames = true

        let queue = DispatchQueue(label: "cameraQueue")
        output.setSampleBufferDelegate(self, queue: queue)
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable: kCVPixelFormatType_32BGRA]

        captureSession = AVCaptureSession()
        captureSession?.addInput(input)
        captureSession?.addOutput(output)
        captureSession?.sessionPreset = AVCaptureSessionPresetPhoto

        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
        previewLayer?.frame = CGRect(x: 0.0, y: 0.0, width: view.frame.width, height: view.frame.height)
        view.layer.insertSublayer(previewLayer!, at: 0)

        captureSession?.startRunning()
    }

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        CVPixelBufferLockBaseAddress(imageBuffer!, CVPixelBufferLockFlags(rawValue: .allZeros))

        let baseAddress = UnsafeMutableRawPointer(CVPixelBufferGetBaseAddress(imageBuffer!))
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer!)
        let width = CVPixelBufferGetWidth(imageBuffer!)
        let height = CVPixelBufferGetHeight(imageBuffer!)

        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let newContext = CGContext(data: baseAddress,
                                   width: width,
                                   height: height,
                                   bitsPerComponent: 8,
                                   bytesPerRow: bytesPerRow,
                                   space: colorSpace,
                                   bitmapInfo: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
        let newImage = newContext!.makeImage()
        cameraImage = UIImage(cgImage: newImage!)

        CVPixelBufferUnlockBaseAddress(imageBuffer!, CVPixelBufferLockFlags(rawValue: .allZeros))
    }

    func setupTimer() {
        _ = Timer.scheduledTimer(timeInterval: 2.0, target: self, selector: #selector(snapshot), userInfo: nil, repeats: true)
    }

    func snapshot() {
        print("SNAPSHOT")
        cameraImageView.image = cameraImage
    }
}

Additionally, I found a shorter solution for getting a UIImage from the CMSampleBuffer:

func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    let myPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
    let myCIimage = CIImage(cvPixelBuffer: myPixelBuffer!)
    let videoImage = UIImage(ciImage: myCIimage)
    cameraImage = videoImage
}
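One caveat with this shortcut: the resulting UIImage is backed by a CIImage rather than a CGImage, so APIs such as UIImagePNGRepresentation return nil for it. If you need a real bitmap, render it through a CIContext first; a minimal sketch, where renderedImage is a hypothetical helper:

import CoreImage
import UIKit

// Reuse a single CIContext; creating one per frame is expensive.
let ciContext = CIContext()

// Renders a CIImage into a CGImage-backed UIImage that can be encoded/saved.
func renderedImage(from ciImage: CIImage) -> UIImage? {
    guard let cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent) else {
        return nil
    }
    return UIImage(cgImage: cgImage)
}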

In the documentation for the UIImagePickerController class there is a method called takePicture. It says:

Use this method in conjunction with a custom overlay view to initiate the programmatic capture of a still image. This supports taking more than one picture without leaving the interface, but requires that you hide the default image picker controls.
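So a picker-based route works as well, provided the default controls are hidden. A minimal Swift sketch (takeFrontPicture is a hypothetical helper, and delegate wiring to actually receive the image via imagePickerController(_:didFinishPickingMediaWithInfo:) is assumed to exist):

import UIKit

extension UIViewController {
    // Hypothetical helper: presents a front-camera picker with hidden controls
    // and triggers a programmatic shot once the camera has had time to start.
    func takeFrontPicture(with picker: UIImagePickerController) {
        picker.sourceType = .camera
        picker.cameraDevice = .front
        picker.showsCameraControls = false   // required before takePicture()
        present(picker, animated: true) {
            // Give the camera a moment to spin up before capturing.
            DispatchQueue.main.asyncAfter(deadline: .now() + 1.0) {
                picker.takePicture()
            }
        }
    }
}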

Converting the Swift 3 code above to Swift 4:

import UIKit
import AVFoundation

class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    @IBOutlet weak var cameraImageView: UIImageView!

    var device: AVCaptureDevice?
    var captureSession: AVCaptureSession?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var cameraImage: UIImage?

    override func viewDidLoad() {
        super.viewDidLoad()

        setupCamera()
        setupTimer()
    }

    func setupCamera() {
        let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                                mediaType: AVMediaType.video,
                                                                position: .front)
        device = discoverySession.devices[0]

        let input: AVCaptureDeviceInput
        do {
            input = try AVCaptureDeviceInput(device: device!)
        } catch {
            return
        }

        let output = AVCaptureVideoDataOutput()
        output.alwaysDiscardsLateVideoFrames = true

        let queue = DispatchQueue(label: "cameraQueue")
        output.setSampleBufferDelegate(self, queue: queue)
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]

        captureSession = AVCaptureSession()
        captureSession?.addInput(input)
        captureSession?.addOutput(output)
        captureSession?.sessionPreset = AVCaptureSession.Preset.photo

        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
        previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        previewLayer?.frame = CGRect(x: 0.0, y: 0.0, width: view.frame.width, height: view.frame.height)
        view.layer.insertSublayer(previewLayer!, at: 0)

        captureSession?.startRunning()
    }

    // Note: with the iOS 11 SDK the delegate callback is captureOutput(_:didOutput:from:);
    // the old didOutputSampleBuffer spelling is never called from Swift 4.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        CVPixelBufferLockBaseAddress(imageBuffer!, CVPixelBufferLockFlags(rawValue: 0))

        let baseAddress = UnsafeMutableRawPointer(CVPixelBufferGetBaseAddress(imageBuffer!))
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer!)
        let width = CVPixelBufferGetWidth(imageBuffer!)
        let height = CVPixelBufferGetHeight(imageBuffer!)

        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let newContext = CGContext(data: baseAddress,
                                   width: width,
                                   height: height,
                                   bitsPerComponent: 8,
                                   bytesPerRow: bytesPerRow,
                                   space: colorSpace,
                                   bitmapInfo: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
        let newImage = newContext!.makeImage()
        cameraImage = UIImage(cgImage: newImage!)

        CVPixelBufferUnlockBaseAddress(imageBuffer!, CVPixelBufferLockFlags(rawValue: 0))
    }

    func setupTimer() {
        _ = Timer.scheduledTimer(timeInterval: 2.0, target: self, selector: #selector(snapshot), userInfo: nil, repeats: true)
    }

    @objc func snapshot() {
        print("SNAPSHOT")
        cameraImageView.image = cameraImage
    }
}