AVFoundation export orientation is wrong

I am trying to combine an image and a video. I have them merging and exporting, but the result comes out rotated sideways.

Sorry for the bulk code paste. I have seen answers about applying a transform to compositionVideoTrack.preferredTransform, but that does nothing. Adding it to an AVMutableVideoCompositionInstruction does nothing either.

I feel like this is the area where things start to go wrong. Right here:

    // I feel like this loading here is the problem
    let videoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0]

    // because it makes our parentLayer and videoLayer sizes wrong
    let videoSize = videoTrack.naturalSize

    // this is returning 1920x1080, so it is rotating the video
    print("\(videoSize.width) , \(videoSize.height)")
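
As far as I understand it, naturalSize ignores the rotation metadata; applying the track's preferredTransform to it gives the oriented size. A minimal sketch using the same Swift 2-era API as above (displaySize is just an illustrative name):

    // sketch: push naturalSize through the track's preferredTransform and
    // drop the sign to get the size as it will actually be displayed
    let transformedSize = CGSizeApplyAffineTransform(videoTrack.naturalSize, videoTrack.preferredTransform)
    let displaySize = CGSizeMake(abs(transformedSize.width), abs(transformedSize.height))
    print("\(displaySize.width) x \(displaySize.height)") // e.g. 1080.0 x 1920.0 for a portrait recording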

So from here, our frame sizes are wrong for the rest of the method. Now, when we try to create the overlay image layer, its frame is incorrect:

    let aLayer = CALayer()
    aLayer.contents = UIImage(named: "OverlayTestImageOverlay")?.CGImage
    aLayer.frame = CGRectMake(0, 0, videoSize.width, videoSize.height)
    aLayer.opacity = 1

Here is my complete method.

    func combineImageVid() {
        let path = NSBundle.mainBundle().pathForResource("SampleMovie", ofType: "MOV")
        let fileURL = NSURL(fileURLWithPath: path!)

        let videoAsset = AVURLAsset(URL: fileURL)
        let mixComposition = AVMutableComposition()

        let compositionVideoTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
        var clipVideoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)

        do {
            try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), ofTrack: clipVideoTrack[0], atTime: kCMTimeZero)
        }
        catch _ {
            print("failed to insertTimeRange")
        }

        compositionVideoTrack.preferredTransform = videoAsset.preferredTransform

        // I feel like this loading here is the problem
        let videoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0]

        // because it makes our parentLayer and videoLayer sizes wrong
        let videoSize = videoTrack.naturalSize

        // this is returning 1920x1080, so it is rotating the video
        print("\(videoSize.width) , \(videoSize.height)")

        let aLayer = CALayer()
        aLayer.contents = UIImage(named: "OverlayTestImageOverlay")?.CGImage
        aLayer.frame = CGRectMake(0, 0, videoSize.width, videoSize.height)
        aLayer.opacity = 1

        let parentLayer = CALayer()
        let videoLayer = CALayer()

        parentLayer.frame = CGRectMake(0, 0, videoSize.width, videoSize.height)
        videoLayer.frame = CGRectMake(0, 0, videoSize.width, videoSize.height)

        parentLayer.addSublayer(videoLayer)
        parentLayer.addSublayer(aLayer)

        let videoComp = AVMutableVideoComposition()
        videoComp.renderSize = videoSize
        videoComp.frameDuration = CMTimeMake(1, 30)
        videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, inLayer: parentLayer)

        let instruction = AVMutableVideoCompositionInstruction()
        instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)

        let mixVideoTrack = mixComposition.tracksWithMediaType(AVMediaTypeVideo)[0]
        mixVideoTrack.preferredTransform = CGAffineTransformMakeRotation(CGFloat(M_PI * 90.0 / 180))

        let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: mixVideoTrack)

        instruction.layerInstructions = [layerInstruction]
        videoComp.instructions = [instruction]

        // create new file to receive data
        let dirPaths = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)
        let docsDir: AnyObject = dirPaths[0]
        let movieFilePath = docsDir.stringByAppendingPathComponent("result.mov")
        let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)

        do {
            try NSFileManager.defaultManager().removeItemAtPath(movieFilePath)
        }
        catch _ {}

        // use AVAssetExportSession to export video
        let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
        assetExport?.videoComposition = videoComp
        assetExport!.outputFileType = AVFileTypeQuickTimeMovie
        assetExport!.outputURL = movieDestinationUrl
        assetExport!.exportAsynchronouslyWithCompletionHandler({
            switch assetExport!.status {
            case AVAssetExportSessionStatus.Failed:
                print("failed \(assetExport!.error)")
            case AVAssetExportSessionStatus.Cancelled:
                print("cancelled \(assetExport!.error)")
            default:
                print("Movie complete")

                // play video
                NSOperationQueue.mainQueue().addOperationWithBlock({ () -> Void in
                    print(movieDestinationUrl)
                })
            }
        })
    }

This is what it exports: [screenshot of the exported, sideways video]


I tried adding these two methods in order to rotate the video:

    class func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
        let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
        let assetTrack = asset.tracksWithMediaType(AVMediaTypeVideo)[0]

        let transform = assetTrack.preferredTransform
        let assetInfo = orientationFromTransform(transform)

        var scaleToFitRatio = UIScreen.mainScreen().bounds.width / assetTrack.naturalSize.width
        if assetInfo.isPortrait {
            scaleToFitRatio = UIScreen.mainScreen().bounds.width / assetTrack.naturalSize.height
            let scaleFactor = CGAffineTransformMakeScale(scaleToFitRatio, scaleToFitRatio)
            instruction.setTransform(CGAffineTransformConcat(assetTrack.preferredTransform, scaleFactor), atTime: kCMTimeZero)
        } else {
            let scaleFactor = CGAffineTransformMakeScale(scaleToFitRatio, scaleToFitRatio)
            var concat = CGAffineTransformConcat(CGAffineTransformConcat(assetTrack.preferredTransform, scaleFactor), CGAffineTransformMakeTranslation(0, UIScreen.mainScreen().bounds.width / 2))
            if assetInfo.orientation == .Down {
                let fixUpsideDown = CGAffineTransformMakeRotation(CGFloat(M_PI))
                let windowBounds = UIScreen.mainScreen().bounds
                let yFix = assetTrack.naturalSize.height + windowBounds.height
                let centerFix = CGAffineTransformMakeTranslation(assetTrack.naturalSize.width, yFix)
                concat = CGAffineTransformConcat(CGAffineTransformConcat(fixUpsideDown, centerFix), scaleFactor)
            }
            instruction.setTransform(concat, atTime: kCMTimeZero)
        }

        return instruction
    }

    class func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
        var assetOrientation = UIImageOrientation.Up
        var isPortrait = false
        if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
            assetOrientation = .Right
            isPortrait = true
        } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
            assetOrientation = .Left
            isPortrait = true
        } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
            assetOrientation = .Up
        } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
            assetOrientation = .Down
        }
        return (assetOrientation, isPortrait)
    }
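
As a sanity check: as far as I know, a video shot in portrait on an iPhone typically reports a 90° rotation (a = 0, b = 1, c = -1, d = 0, plus a translation) as its preferredTransform, which the first branch of the helper above classifies as .Right / portrait. A small usage sketch with an illustrative transform:

    // a 90° rotation matrix, which is what a portrait recording typically
    // reports as its preferredTransform (translation components omitted)
    let portraitTransform = CGAffineTransformMake(0, 1, -1, 0, 0, 0)
    // called from within the same class; otherwise prefix with the class name
    let info = orientationFromTransform(portraitTransform)
    print(info.orientation == .Right) // true
    print(info.isPortrait)            // true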

and updated my combineImageVid() method to use them:

    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
    let mixVideoTrack = mixComposition.tracksWithMediaType(AVMediaTypeVideo)[0]

    //let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: mixVideoTrack)
    //layerInstruction.setTransform(videoAsset.preferredTransform, atTime: kCMTimeZero)

    let layerInstruction = videoCompositionInstructionForTrack(compositionVideoTrack, asset: videoAsset)

That gives me this output:

[screenshot of the output]

So I'm getting closer, but I feel like because the track was loaded incorrectly in the first place, I need to fix the problem there. Also, I don't know why that huge black box is there now. I thought maybe it was because my image layer takes on the bounds of the loaded video asset here:

 aLayer.frame = CGRectMake(0, 0, videoSize.width, videoSize.height) 

However, changing that to some small width/height makes no difference. I then thought about adding a crop rect to get rid of the black square, but that didn't work either :(


Following Allen's suggestion of not using these two methods:

    class func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction
    class func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool)

and instead updating my original method to look like this:

    videoLayer.frame = CGRectMake(0, 0, videoSize.height, videoSize.width) //notice the switched width and height
    ...
    videoComp.renderSize = CGSizeMake(videoSize.height, videoSize.width) //this makes the final video portrait
    ...
    layerInstruction.setTransform(videoTrack.preferredTransform, atTime: kCMTimeZero) //important: this lets the composition know you want to rotate the original video in the output

We are getting close. The real problem now seems to be editing the renderSize. If I change it to anything other than the landscape size, I get this:

[screenshot of the output]

Here is Apple's documentation on video orientation:

https://developer.apple.com/library/ios/qa/qa1744/_index.html

If your original video was recorded in portrait mode on iOS, its natural size will still be landscape, but it comes with rotation metadata in the mov file. To rotate your video, you need to change your first piece of code like this:

    videoLayer.frame = CGRectMake(0, 0, videoSize.height, videoSize.width) //notice the switched width and height
    ...
    videoComp.renderSize = CGSizeMake(videoSize.height, videoSize.width) //this makes the final video portrait
    ...
    layerInstruction.setTransform(videoTrack.preferredTransform, atTime: kCMTimeZero) //important: this lets the composition know you want to rotate the original video in the output
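
In other words, keeping everything else in combineImageVid() the same, the relevant section would end up roughly like this (a sketch assembled from the code above, no new API; whether the layer instruction is built from the composition track or the mix track follows your existing code):

    let videoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0]
    let videoSize = videoTrack.naturalSize // still 1920x1080 (landscape) even for a portrait recording

    let videoLayer = CALayer()
    // swap width and height so the layer matches the portrait output
    videoLayer.frame = CGRectMake(0, 0, videoSize.height, videoSize.width)

    let videoComp = AVMutableVideoComposition()
    // render in portrait
    videoComp.renderSize = CGSizeMake(videoSize.height, videoSize.width)
    videoComp.frameDuration = CMTimeMake(1, 30)

    // compositionVideoTrack is the mutable track created earlier in the method
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack)
    // the preferredTransform carries the rotation metadata from the .mov file
    layerInstruction.setTransform(videoTrack.preferredTransform, atTime: kCMTimeZero)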

Yes, you are really close!

Maybe you should check the videoTrack's preferredTransform so that you can give it an exact renderSize and transform:

    CGAffineTransform transform = assetVideoTrack.preferredTransform;
    CGFloat rotation = [self rotationWithTransform:transform];

    //if it has been rotated
    if (rotation != 0) {
        //if rotation is not a full 360°
        if (fabs((rotation - M_PI * 2)) >= valueOfError) {
            CGFloat m = rotation / M_PI;
            CGAffineTransform t1;
            //rotation is 90° or 270°
            if (fabs(m - 1/2.0) < valueOfError || fabs(m - 3/2.0) < valueOfError) {
                self.mutableVideoComposition.renderSize = CGSizeMake(assetVideoTrack.naturalSize.height, assetVideoTrack.naturalSize.width);
                t1 = CGAffineTransformMakeTranslation(assetVideoTrack.naturalSize.height, 0);
            }
            //rotation is 180°
            if (fabs(m - 1.0) < valueOfError) {
                t1 = CGAffineTransformMakeTranslation(assetVideoTrack.naturalSize.width, assetVideoTrack.naturalSize.height);
            }
            CGAffineTransform t2 = CGAffineTransformRotate(t1, rotation);
            // CGAffineTransform transform = makeTransform(1.0, 1.0, 90, videoTrack.naturalSize.height, 0);
            [passThroughLayer setTransform:t2 atTime:kCMTimeZero];
        }
    }

    //get the rotation angle (in radians) from the transform
    - (CGFloat)rotationWithTransform:(CGAffineTransform)t {
        return atan2f(t.b, t.a);
    }
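
Since the rest of the question is in Swift, a rough Swift 2 translation of the same idea might look like this (it assumes the same valueOfError tolerance, a mutableVideoComposition, and that passThroughLayer is the AVMutableVideoCompositionLayerInstruction for the track, as in the Objective-C above):

    // rough Swift 2 equivalent of the Objective-C snippet above;
    // valueOfError, mutableVideoComposition and passThroughLayer come from
    // the surrounding code, as in the Objective-C version
    let transform = assetVideoTrack.preferredTransform
    let rotation = atan2(Double(transform.b), Double(transform.a))

    // only adjust if the track has been rotated (and not by a full 360°)
    if rotation != 0 && fabs(rotation - M_PI * 2) >= valueOfError {
        let m = rotation / M_PI
        var t1 = CGAffineTransformIdentity

        // rotation is 90° or 270°: swap the render size to portrait
        if fabs(m - 0.5) < valueOfError || fabs(m - 1.5) < valueOfError {
            mutableVideoComposition.renderSize = CGSizeMake(assetVideoTrack.naturalSize.height, assetVideoTrack.naturalSize.width)
            t1 = CGAffineTransformMakeTranslation(assetVideoTrack.naturalSize.height, 0)
        }

        // rotation is 180°
        if fabs(m - 1.0) < valueOfError {
            t1 = CGAffineTransformMakeTranslation(assetVideoTrack.naturalSize.width, assetVideoTrack.naturalSize.height)
        }

        let t2 = CGAffineTransformRotate(t1, CGFloat(rotation))
        passThroughLayer.setTransform(t2, atTime: kCMTimeZero)
    }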