iOS 上纹理上下颠倒,而且只渲染出了纹理的一部分?

(图片:渲染效果截图)重新渲染所有数据后,我可以把视频流缓冲区上传给 OpenGL。但是视频是上下颠倒的,而且只渲染出了整个纹理的一部分。

我从流式视频中捕获帧并将其渲染为纹理。下面是我的顶点着色器:

// Vertex shader (GLSL ES 1.00) for the video quad.
// FIX: CVPixelBuffer video frames have a top-left origin while OpenGL texture
// coordinates have a bottom-left origin, so the V coordinate is flipped here
// (1.0 - v_TextCoord.y). This corrects the upside-down video without touching
// the vertex or texcoord data.
static const char *kVertexShaderString =
    "#version 100\n"
    "\n"
    "uniform mat4 uMVP; \n"
    "uniform vec3 uPosition; \n"
    "attribute vec3 aVertex; \n"
    "attribute vec4 aColor;\n"
    "varying vec4 vColor;\n"
    "varying vec3 vGrid; \n"
    "attribute vec2 v_TextCoord;\n"
    "varying vec2 texCoord;\n"
    "void main(void) { \n"
    "  vGrid = aVertex + uPosition; \n"
    "  vec4 pos = vec4(vGrid, 1.0); \n"
    "  vColor = aColor; \n"
    "  gl_Position = uMVP * pos; \n"
    "  // Flip V: CoreVideo frames are top-left origin, GL samples bottom-left.\n"
    "  texCoord = vec2(v_TextCoord.x, 1.0 - v_TextCoord.y);\n"
    "}\n";

以及片段着色器:

 static const char *kPassThroughFragmentShaderString = "#version 100\n" "\n" "#ifdef GL_ES\n" "precision mediump float;\n" "#endif\n" "varying vec4 vColor;\n" "uniform sampler2D v_Texture;\n" "varying lowp vec2 texCoord;\n" "\n" "void main(void) { \n" "gl_FragColor = texture2D(v_Texture, texCoord);\n" "}\n"; 

更新:我使用的顶点数据如下

 -1.0f, -1.0f, -2.0f, 1.0f, -1.0f, -2.0f, 1.0f, 1.0f, -2.0f, -1.0f, -1.0f, -2.0f, 1.0f, 1.0f, -2.0f, -1.0f, 1.0f, -2.0f, 

创建视频纹理时我使用了如下代码:

  - (GLuint)createVideoTexture { GLuint handle; glGenTextures(1, &handle); glBindTexture(GL_TEXTURE_2D, handle); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); return handle; } 

绘制到屏幕时我使用的代码如下:

 CVPixelBufferRef pixelBuffer; // If we have a valid buffer, lock the base address of its pixel buffer // if (NULL != latestSampleBuffer) { // pixelBuffer = CMSampleBufferGetImageBuffer(latestSampleBuffer); pixelBuffer = [self.mOutput copyPixelBufferForItemTime:_player.currentItem.currentTime itemTimeForDisplay:nil]; unsigned char* pixelBufferBaseAddress = NULL; CVPixelBufferLockBaseAddress(pixelBuffer, 0); /// Address where image buffer starts uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(pixelBuffer); /// Read image parameters size_t width = CVPixelBufferGetWidth(pixelBuffer); size_t height = CVPixelBufferGetHeight(pixelBuffer); // CVPixelBufferRef rotatedBuffer = [self correctBufferOrientation:pixelBuffer]; pixelBufferBaseAddress = (unsigned char*)CVPixelBufferGetBaseAddress(pixelBuffer) ; const size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer); if(pixelBufferBaseAddress != NULL){ if (0 == _floorTexture) { _floorTexture = [self createVideoTexture]; } glBindTexture(GL_TEXTURE0, _floorTexture); GLsizei bufferHeight = (GLsizei) CVPixelBufferGetHeight(pixelBuffer); GLsizei bufferWidth = (GLsizei) CVPixelBufferGetWidth(pixelBuffer); if (bytesPerRow / BYTES_PER_TEXEL == videoSize.width) { // No padding between lines of decoded video glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, (GLsizei) width, (GLsizei) height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, pixelBufferBaseAddress); } else { // Decoded video contains padding between lines. 
We must not // upload it to graphics memory as we do not want to display it // Allocate storage for the texture (correctly sized) glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, (GLsizei) width, (GLsizei) height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, NULL); // Now upload each line of texture data as a sub-image for (int i = 0; i < videoSize.height; ++i) { GLubyte* line = pixelBufferBaseAddress + i * bytesPerRow; glTexSubImage2D(GL_TEXTURE_2D, 0, 0, i, (GLsizei) videoSize.width, 1, GL_BGRA, GL_UNSIGNED_BYTE, line); } //glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bufferWidth/BYTES_PER_TEXEL,bufferHeight, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, pixelBufferBaseAddress); } } //glVertexAttribPointer(_texCoordSlot, 2, GL_FLOAT, GL_FALSE,sizeof(float) * 4, 0); glVertexAttribPointer(_texCoordSlot, 2, GL_FLOAT, 4*2, false, (GLvoid*) (sizeof(float) * 4)); glDrawArrays(GL_TRIANGLES, 0, NUM_CUBE_VERTICES / 3); glBindTexture(GL_TEXTURE0, 0); CVPixelBufferUnlockBaseAddress(pixelBuffer, 0); if (pixelBuffer) { CFRelease(pixelBuffer); } 

我也尝试过不同的顶点数据,并为 _texCoordSlot 试过不同的 glVertexAttribPointer 参数。

有谁能帮我找出我的做法中的问题吗?