Rendering a SceneKit scene to video output

As a primarily high-level / iOS developer, I'm interested in using SceneKit for animation projects.

I've been playing with SceneKit for some months now, and even though it's obviously designed for "live" interaction, I'd find it incredibly useful to be able to render an SKScene to video. Currently I've been capturing the output with Quicktime's screen recorder, but (of course) the frame rate drops. Is there a way to let a scene render at its own pace and output to a smooth video file?

I understand this is unlikely to be possible... just thought I'd ask in case I'm missing something lower-level!

It's actually pretty easy! Here is pseudocode of how I would do it (on the SCNView):

    int numberOfFrames = 300;
    int currentFrame = 0;
    int framesPerSecond = 30;

    - (void)renderAFrame {
        // advance the scene to this frame's timestamp (integer division of 1/framesPerSecond would always be 0)
        [self renderAtTime:(double)currentFrame / framesPerSecond];

        NSImage *frame = [self snapshot];
        // save the image with the frame number in the name, e.g. f_001.png

        currentFrame++;
        if (currentFrame < numberOfFrames) {
            [self renderAFrame];
        }
    }

It will output a sequence of images, rendered at 30 frames per second, that you can import into any editing software and convert to video.
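The "save the image" step is only a comment in the pseudocode above; a minimal sketch of it (current Swift syntax, macOS/AppKit, with a hypothetical `saveFrame` helper and a frames directory of your choosing) could look like this:

    import AppKit

    // Hypothetical helper: writes one snapshot as f_001.png, f_002.png, ... into `directory`.
    func saveFrame(_ image: NSImage, index: Int, directory: URL) throws {
        guard let tiff = image.tiffRepresentation,
              let rep = NSBitmapImageRep(data: tiff),
              let png = rep.representation(using: .png, properties: [:]) else {
            throw CocoaError(.fileWriteUnknown)
        }
        let name = String(format: "f_%03d.png", index)
        try png.write(to: directory.appendingPathComponent(name))
    }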

**This is the answer for SceneKit using Metal.**

**Warning: this may not be an approach suitable for the App Store, but it works.**

Step 1: Swap out CAMetalLayer's nextDrawable method with a new one using swizzling, and save the CAMetalDrawable for every render loop.

    extension CAMetalLayer {
        public static func setupSwizzling() {
            struct Static {
                static var token: dispatch_once_t = 0
            }

            dispatch_once(&Static.token) {
                let copiedOriginalSelector = #selector(CAMetalLayer.orginalNextDrawable)
                let originalSelector = #selector(CAMetalLayer.nextDrawable)
                let swizzledSelector = #selector(CAMetalLayer.newNextDrawable)

                let copiedOriginalMethod = class_getInstanceMethod(self, copiedOriginalSelector)
                let originalMethod = class_getInstanceMethod(self, originalSelector)
                let swizzledMethod = class_getInstanceMethod(self, swizzledSelector)

                let oldImp = method_getImplementation(originalMethod)
                method_setImplementation(copiedOriginalMethod, oldImp)
                method_exchangeImplementations(originalMethod, swizzledMethod)
            }
        }

        func newNextDrawable() -> CAMetalDrawable? {
            let drawable = orginalNextDrawable()
            // Save the drawable anywhere you want
            AppManager.sharedInstance.currentSceneDrawable = drawable
            return drawable
        }

        func orginalNextDrawable() -> CAMetalDrawable? {
            // This is just a placeholder. The implementation will be replaced with nextDrawable.
            return nil
        }
    }

Step 2: Set up the swizzling in AppDelegate's application(_:didFinishLaunchingWithOptions:):

    func application(application: UIApplication, didFinishLaunchingWithOptions launchOptions: [NSObject: AnyObject]?) -> Bool {
        CAMetalLayer.setupSwizzling()
        return true
    }

Step 3: Disable framebufferOnly on your SCNView's CAMetalLayer (so that you can call getBytes on the MTLTexture):

    if let metalLayer = scnView.layer as? CAMetalLayer {
        metalLayer.framebufferOnly = false
    }

Step 4: In your SCNView's delegate (SCNSceneRendererDelegate), play with the texture:

    func renderer(renderer: SCNSceneRenderer, didRenderScene scene: SCNScene, atTime time: NSTimeInterval) {
        if let texture = AppManager.sharedInstance.currentSceneDrawable?.texture where !texture.framebufferOnly {
            AppManager.sharedInstance.currentSceneDrawable = nil
            // Get image from texture
            let image = texture.toImage()
            // Use the image for video recording
        }
    }

    extension MTLTexture {
        func bytes() -> UnsafeMutablePointer<Void> {
            let width = self.width
            let height = self.height
            let rowBytes = self.width * 4
            let p = malloc(width * height * 4) // Beware of the memory leak
            self.getBytes(p, bytesPerRow: rowBytes, fromRegion: MTLRegionMake2D(0, 0, width, height), mipmapLevel: 0)
            return p
        }

        func toImage() -> UIImage? {
            var uiImage: UIImage?
            let p = bytes()

            let pColorSpace = CGColorSpaceCreateDeviceRGB()
            let rawBitmapInfo = CGImageAlphaInfo.NoneSkipFirst.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue
            let bitmapInfo: CGBitmapInfo = CGBitmapInfo(rawValue: rawBitmapInfo)

            let selftureSize = self.width * self.height * 4
            let rowBytes = self.width * 4
            let provider = CGDataProviderCreateWithData(nil, p, selftureSize, { _, _, _ in })!
            if let cgImage = CGImageCreate(self.width, self.height, 8, 32, rowBytes, pColorSpace, bitmapInfo, provider, nil, true, CGColorRenderingIntent.RenderingIntentDefault) {
                uiImage = UIImage(CGImage: cgImage)
            }
            return uiImage
        }

        func toImageAsJpeg(compressionQuality: CGFloat) -> UIImage? {
            // Left unimplemented in the original answer.
            return nil
        }
    }
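The `// Use the image for video recording` comment above is where AVFoundation comes in. None of the following is part of the original answer; it is a rough, simplified sketch in current Swift syntax (the `FrameWriter` name, the fixed H.264 settings, and the error handling are my own assumptions) of feeding frames to an AVAssetWriter:

    import AVFoundation
    import CoreGraphics
    import CoreVideo

    // Rough sketch: collects CGImages into an .mp4 one frame at a time.
    final class FrameWriter {
        private let writer: AVAssetWriter
        private let input: AVAssetWriterInput
        private let adaptor: AVAssetWriterInputPixelBufferAdaptor
        private let fps: Int32

        init(outputURL: URL, width: Int, height: Int, fps: Int32) throws {
            writer = try AVAssetWriter(outputURL: outputURL, fileType: .mp4)
            input = AVAssetWriterInput(mediaType: .video, outputSettings: [
                AVVideoCodecKey: AVVideoCodecType.h264,
                AVVideoWidthKey: width,
                AVVideoHeightKey: height
            ])
            adaptor = AVAssetWriterInputPixelBufferAdaptor(
                assetWriterInput: input,
                sourcePixelBufferAttributes: [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA])
            self.fps = fps
            writer.add(input)
            guard writer.startWriting() else { throw writer.error ?? CocoaError(.fileWriteUnknown) }
            writer.startSession(atSourceTime: .zero)
        }

        // Call once per rendered frame, passing the CGImage behind the UIImage obtained above.
        // A real recorder would wait via requestMediaDataWhenReady(on:) instead of dropping frames.
        func append(_ image: CGImage, frameIndex: Int64) {
            guard input.isReadyForMoreMediaData, let buffer = pixelBuffer(from: image) else { return }
            _ = adaptor.append(buffer, withPresentationTime: CMTime(value: frameIndex, timescale: fps))
        }

        func finish(completion: @escaping () -> Void) {
            input.markAsFinished()
            writer.finishWriting(completionHandler: completion)
        }

        // Draws the CGImage into a freshly created BGRA CVPixelBuffer.
        private func pixelBuffer(from image: CGImage) -> CVPixelBuffer? {
            var buffer: CVPixelBuffer?
            let attrs = [kCVPixelBufferCGImageCompatibilityKey as String: true,
                         kCVPixelBufferCGBitmapContextCompatibilityKey as String: true] as CFDictionary
            guard CVPixelBufferCreate(kCFAllocatorDefault, image.width, image.height,
                                      kCVPixelFormatType_32BGRA, attrs, &buffer) == kCVReturnSuccess,
                  let pixelBuffer = buffer else { return nil }
            CVPixelBufferLockBaseAddress(pixelBuffer, [])
            defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, []) }
            let context = CGContext(data: CVPixelBufferGetBaseAddress(pixelBuffer),
                                    width: image.width, height: image.height,
                                    bitsPerComponent: 8,
                                    bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
                                    space: CGColorSpaceCreateDeviceRGB(),
                                    bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue |
                                                CGBitmapInfo.byteOrder32Little.rawValue)
            context?.draw(image, in: CGRect(x: 0, y: 0, width: image.width, height: image.height))
            return pixelBuffer
        }
    }

In this sketch you would create one FrameWriter when recording starts, call append(_:frameIndex:) from the delegate callback above, and call finish(completion:) when the animation is done.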

Step 5 (optional): You may need to confirm that the drawable you grabbed from the CAMetalLayer is really your target (in case more than one CAMetalLayer is in use at the same time).
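A CAMetalDrawable exposes the layer it came from, so one way to do that check (a small sketch of my own in current Swift syntax, reusing the `scnView` and `AppManager` names from the steps above) is to discard drawables that did not come from your SCNView's layer:

    // Ignore drawables produced by other CAMetalLayers in the app.
    if let drawable = AppManager.sharedInstance.currentSceneDrawable,
       drawable.layer !== (scnView.layer as? CAMetalLayer) {
        AppManager.sharedInstance.currentSceneDrawable = nil
    }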

You can use an SCNRenderer to render to a CGImage offscreen, and then add that CGImage to a video stream using AVFoundation.

I wrote this Swift extension for rendering into a CGImage.

    public extension SCNRenderer {
        public func renderToImageSize(size: CGSize, floatComponents: Bool, atTime time: NSTimeInterval) -> CGImage? {

            var thumbnailCGImage: CGImage?

            let width = GLsizei(size.width), height = GLsizei(size.height)
            let samplesPerPixel = 4

            #if os(iOS)
                let oldGLContext = EAGLContext.currentContext()
                let glContext = unsafeBitCast(context, EAGLContext.self)

                EAGLContext.setCurrentContext(glContext)
                objc_sync_enter(glContext)
            #elseif os(OSX)
                let oldGLContext = CGLGetCurrentContext()
                let glContext = unsafeBitCast(context, CGLContextObj.self)

                CGLSetCurrentContext(glContext)
                CGLLockContext(glContext)
            #endif

            // set up the OpenGL buffers
            var thumbnailFramebuffer: GLuint = 0
            glGenFramebuffers(1, &thumbnailFramebuffer)
            glBindFramebuffer(GLenum(GL_FRAMEBUFFER), thumbnailFramebuffer); checkGLErrors()

            var colorRenderbuffer: GLuint = 0
            glGenRenderbuffers(1, &colorRenderbuffer)
            glBindRenderbuffer(GLenum(GL_RENDERBUFFER), colorRenderbuffer)
            if floatComponents {
                glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_RGBA16F), width, height)
            } else {
                glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_RGBA8), width, height)
            }
            glFramebufferRenderbuffer(GLenum(GL_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT0), GLenum(GL_RENDERBUFFER), colorRenderbuffer); checkGLErrors()

            var depthRenderbuffer: GLuint = 0
            glGenRenderbuffers(1, &depthRenderbuffer)
            glBindRenderbuffer(GLenum(GL_RENDERBUFFER), depthRenderbuffer)
            glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_DEPTH_COMPONENT24), width, height)
            glFramebufferRenderbuffer(GLenum(GL_FRAMEBUFFER), GLenum(GL_DEPTH_ATTACHMENT), GLenum(GL_RENDERBUFFER), depthRenderbuffer); checkGLErrors()

            let framebufferStatus = Int32(glCheckFramebufferStatus(GLenum(GL_FRAMEBUFFER)))
            assert(framebufferStatus == GL_FRAMEBUFFER_COMPLETE)
            if framebufferStatus != GL_FRAMEBUFFER_COMPLETE {
                return nil
            }

            // clear buffer
            glViewport(0, 0, width, height)
            glClear(GLbitfield(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)); checkGLErrors()

            // render
            renderAtTime(time); checkGLErrors()

            // create the image
            if floatComponents { // float components (16-bits of actual precision)

                // slurp bytes out of OpenGL
                typealias ComponentType = Float

                var imageRawBuffer = [ComponentType](count: Int(width * height) * samplesPerPixel * sizeof(ComponentType), repeatedValue: 0)
                glReadPixels(GLint(0), GLint(0), width, height, GLenum(GL_RGBA), GLenum(GL_FLOAT), &imageRawBuffer)

                // flip image vertically — OpenGL has a different 'up' than CoreGraphics
                let rowLength = Int(width) * samplesPerPixel
                for rowIndex in 0..<(Int(height) / 2) {
                    let baseIndex = rowIndex * rowLength
                    let destinationIndex = (Int(height) - 1 - rowIndex) * rowLength

                    swap(&imageRawBuffer[baseIndex..<(baseIndex + rowLength)], &imageRawBuffer[destinationIndex..<(destinationIndex + rowLength)])
                }

                // make the CGImage
                var imageBuffer = vImage_Buffer(
                    data: UnsafeMutablePointer<Float>(imageRawBuffer),
                    height: vImagePixelCount(height),
                    width: vImagePixelCount(width),
                    rowBytes: Int(width) * sizeof(ComponentType) * samplesPerPixel)

                var format = vImage_CGImageFormat(
                    bitsPerComponent: UInt32(sizeof(ComponentType) * 8),
                    bitsPerPixel: UInt32(sizeof(ComponentType) * samplesPerPixel * 8),
                    colorSpace: nil, // defaults to sRGB
                    bitmapInfo: CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue | CGBitmapInfo.FloatComponents.rawValue),
                    version: UInt32(0),
                    decode: nil,
                    renderingIntent: kCGRenderingIntentDefault)

                var error: vImage_Error = 0
                thumbnailCGImage = vImageCreateCGImageFromBuffer(&imageBuffer, &format, nil, nil, vImage_Flags(kvImagePrintDiagnosticsToConsole), &error)!.takeRetainedValue()

            } else { // byte components

                // slurp bytes out of OpenGL
                typealias ComponentType = UInt8

                var imageRawBuffer = [ComponentType](count: Int(width * height) * samplesPerPixel * sizeof(ComponentType), repeatedValue: 0)
                glReadPixels(GLint(0), GLint(0), width, height, GLenum(GL_RGBA), GLenum(GL_UNSIGNED_BYTE), &imageRawBuffer)

                // flip image vertically — OpenGL has a different 'up' than CoreGraphics
                let rowLength = Int(width) * samplesPerPixel
                for rowIndex in 0..<(Int(height) / 2) {
                    let baseIndex = rowIndex * rowLength
                    let destinationIndex = (Int(height) - 1 - rowIndex) * rowLength

                    swap(&imageRawBuffer[baseIndex..<(baseIndex + rowLength)], &imageRawBuffer[destinationIndex..<(destinationIndex + rowLength)])
                }

                // make the CGImage
                var imageBuffer = vImage_Buffer(
                    data: UnsafeMutablePointer<Float>(imageRawBuffer),
                    height: vImagePixelCount(height),
                    width: vImagePixelCount(width),
                    rowBytes: Int(width) * sizeof(ComponentType) * samplesPerPixel)

                var format = vImage_CGImageFormat(
                    bitsPerComponent: UInt32(sizeof(ComponentType) * 8),
                    bitsPerPixel: UInt32(sizeof(ComponentType) * samplesPerPixel * 8),
                    colorSpace: nil, // defaults to sRGB
                    bitmapInfo: CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Big.rawValue),
                    version: UInt32(0),
                    decode: nil,
                    renderingIntent: kCGRenderingIntentDefault)

                var error: vImage_Error = 0
                thumbnailCGImage = vImageCreateCGImageFromBuffer(&imageBuffer, &format, nil, nil, vImage_Flags(kvImagePrintDiagnosticsToConsole), &error)!.takeRetainedValue()
            }

            #if os(iOS)
                objc_sync_exit(glContext)
                if oldGLContext != nil {
                    EAGLContext.setCurrentContext(oldGLContext)
                }
            #elseif os(OSX)
                CGLUnlockContext(glContext)
                if oldGLContext != nil {
                    CGLSetCurrentContext(oldGLContext)
                }
            #endif

            return thumbnailCGImage
        }
    }

    func checkGLErrors() {
        var glError: GLenum
        var hadError = false
        do {
            glError = glGetError()
            if glError != 0 {
                println(String(format: "OpenGL error %#x", glError))
                hadError = true
            }
        } while glError != 0
        assert(!hadError)
    }
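The extension above assumes you already have an SCNRenderer bound to a GL context. For completeness, here is a rough sketch of my own for the offscreen setup and a fixed-step frame loop (Swift 2-era syntax to match the extension; the size, frame count, and the idea of handing each CGImage to an AVAssetWriter-based recorder are assumptions, not part of the original answer):

    // Hypothetical offscreen setup: an SCNRenderer sharing your scene, driven at a fixed 30 fps.
    let glContext = EAGLContext(API: .OpenGLES2)
    let renderer = SCNRenderer(context: glContext, options: nil)
    renderer.scene = scnView.scene

    let framesPerSecond: NSTimeInterval = 30
    let videoSize = CGSize(width: 1280, height: 720)

    for frameIndex in 0..<300 {
        let time = NSTimeInterval(frameIndex) / framesPerSecond
        if let cgImage = renderer.renderToImageSize(videoSize, floatComponents: false, atTime: time) {
            // Hand the CGImage to AVFoundation here, e.g. an AVAssetWriter fed one frame at a time.
        }
    }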

You can do this with an SKVideoNode that you place inside an SKScene, and then use that SKScene as the diffuse contents of an SCNNode's SCNMaterial (hope that's clear).

    player = AVPlayer(URL: fileURL!)
    let videoSpriteKitNodeLeft = SKVideoNode(AVPlayer: player)
    let videoNodeLeft = SCNNode()
    let spriteKitScene1 = SKScene(size: CGSize(width: 1280 * screenScale, height: 1280 * screenScale))
    spriteKitScene1.shouldRasterize = true

    videoNodeLeft.geometry = SCNSphere(radius: 30)
    spriteKitScene1.scaleMode = .AspectFit
    videoSpriteKitNodeLeft.position = CGPoint(x: spriteKitScene1.size.width / 2.0, y: spriteKitScene1.size.height / 2.0)
    videoSpriteKitNodeLeft.size = spriteKitScene1.size

    spriteKitScene1.addChild(videoSpriteKitNodeLeft)

    videoNodeLeft.geometry?.firstMaterial?.diffuse.contents = spriteKitScene1
    videoNodeLeft.geometry?.firstMaterial?.doubleSided = true

    // Flip video upside down, so that it's shown in the right position
    var transform = SCNMatrix4MakeRotation(Float(M_PI), 0.0, 0.0, 1.0)
    transform = SCNMatrix4Translate(transform, 1.0, 1.0, 0.0)

    videoNodeLeft.pivot = SCNMatrix4MakeRotation(Float(M_PI_2), 0.0, -1.0, 0.0)
    videoNodeLeft.geometry?.firstMaterial?.diffuse.contentsTransform = transform

    videoNodeLeft.position = SCNVector3(x: 0, y: 0, z: 0)

    scene.rootNode.addChildNode(videoNodeLeft)

I've extracted this code from my GitHub project for a 360° video player that uses SceneKit to play a video inside a 3D sphere: https://github.com/Aralekk/simple360player_iOS/blob/master/simple360player/ViewController.swift

I hope this helps!

Arthur