2016-06-26 17 views
1

Ich möchte einfach einen kleinen Bereich eines YUV 420 Bildes extrahieren. Erstellen Sie einen CVImageBufferRef aus einem CVImageBufferRef, der nur einen rechteckigen Teil des ursprünglichen Bilds enthält.Extrahiere Teilbild von CVImageBufferRef

Hier ist, was ich bisher versucht:

- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBufferRef fromConnection:(AVCaptureConnection *)connection 
{ 
    // callback from AVCaptureOutput 
    // 
    CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBufferRef); 
    if (imageBufferRef) 
    { 
     // Take a subset of buffer to create a smaller image 
     CVPixelBufferLockBaseAddress(imageBufferRef, 0); 
     size_t widthY = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0); 
     size_t widthUV = CVPixelBufferGetWidthOfPlane(imageBufferRef, 1); 
     size_t heightY = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0); 
     size_t heightUV = CVPixelBufferGetHeightOfPlane(imageBufferRef, 1); 
     size_t cropHeightY = 320; 
     size_t cropWidthY = 320; 
     size_t cropHeightUV = cropHeightY/2; 
     size_t cropWidthUV = cropWidthY; 
     size_t cropY_X0 = widthY/2 - (cropWidthY/2); 
     size_t cropY_Y0 = heightY/2 - (cropHeightY/2); 
     size_t cropUV_X0 = widthUV/2 - (cropWidthUV/2); 
     size_t cropUV_Y0 = heightUV/2 - (cropHeightUV/2); 

     void *baseAddressY = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0); 
     void *baseAddressUV = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 1); 

     size_t bytesPerRowY = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0); 
     size_t bytesPerRowUV = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 1); 

     size_t pixelBytesY = bytesPerRowY/widthY; 
     size_t pixelBytesUV = bytesPerRowUV/widthUV; 

     void *startPosY = baseAddressY + (cropY_Y0 * bytesPerRowY + cropY_X0 * pixelBytesY); 
     void *startPosUV = baseAddressUV + (cropUV_Y0 * bytesPerRowUV + cropUV_X0 * pixelBytesUV); 

     size_t bytesPerRowOut = cropWidthY * pixelBytesY; 

     size_t sizeY = bytesPerRowOut * cropHeightY; 
     size_t sizeUV = bytesPerRowOut * cropHeightUV; 

     unsigned char * pixelY = (unsigned char *)malloc(sizeY); 
     unsigned char * pixelUV = (unsigned char *)malloc(sizeUV); 

     for (int i = 0; i < cropHeightY; ++i) { 
      memcpy(pixelY + i * bytesPerRowOut, startPosY + i * bytesPerRowY, bytesPerRowOut); 
     } 

     for (int i = 0; i < cropHeightUV; ++i) { 
      memcpy(pixelUV + i * bytesPerRowOut, startPosUV + i * bytesPerRowUV, bytesPerRowOut); 
     } 

     void *baseAddresses[2] = {pixelY, pixelUV}; 
     size_t planeWidths[2] = {cropWidthY, cropWidthUV}; 
     size_t planeHeights[2] = {cropHeightY, cropHeightUV}; 
     size_t planeBytesPerRow[2] = {bytesPerRowOut, bytesPerRowOut}; 

     // create a new CVImageBufferRef from pixelY and pixelUV 
     CVPixelBufferRef outBuff; 
     CVPixelBufferCreateWithPlanarBytes(NULL, cropWidthY, cropHeightY, '420v', NULL, 0, 2, baseAddresses, planeWidths, planeHeights, planeBytesPerRow, NULL, NULL, NULL, &outBuff); 

     if(logCameraSettings) { 
      NSLog(@"Original Image Size:\n width:%zu\n height:%zu\n", widthY, heightY); 
      size_t outWidthY = CVPixelBufferGetWidthOfPlane(outBuff, 0); 
      size_t outHeightY = CVPixelBufferGetHeightOfPlane(outBuff, 0); 
      NSLog(@"Modified Image Size:\n width:%zu\n height:%zu\n", outWidthY, outHeightY); 
     } 

     // Here would be the place where I actually want to do something with the image 

     // TEST: show image (in debugger in following method) 
     [self convertToUIImage:imageBufferRef]; // --> works 
     [self convertToUIImage:outBuff]; // --> only gray, does not work 

     // Release the allocated memory 
     CVPixelBufferUnlockBaseAddress(imageBufferRef,0); 
     free(pixelY); 
     free(pixelUV); 
    } 
} 

-(void) convertToUIImage:(CVImageBufferRef)imageBuffer 
{ 
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer]; 
    CIContext *temporaryContext = [CIContext contextWithOptions:nil]; 
    CGImageRef videoImage = [temporaryContext 
          createCGImage:ciImage 
          fromRect:CGRectMake(0, 0, 
               CVPixelBufferGetWidth(imageBuffer), 
               CVPixelBufferGetHeight(imageBuffer))]; 

    // Inspect the following UIImage in debugger. 
    UIImage *image = [[UIImage alloc] initWithCGImage:videoImage]; 

    CGImageRelease(videoImage); 
} 

In dem obigen Code habe ich eine kleine Funktion convertToUIImage, die außer keinen Zweck hat, lassen Sie mich das CVImageBufferRef inspizieren ich als UIImage im Debugger erstellt.

Die Überprüfung von imageBufferRef zeigt mir den korrekten Kamera-Feed.

Das Auschecken von outBuff zeigt mir jedoch nicht einen kleinen Bereich dieser Kameraeinspeisung, sondern ein graues Patch in der richtigen Größe.

Also meine Frage ist:

  1. Was mache ich falsch hier?

  2. Ist das überhaupt der richtige Weg, um mein Ziel zu erreichen?

Jede Hilfe wird wirklich geschätzt. Vielen Dank im Voraus.

Antwort

0

Hier ist, wie ich es

gelöst
- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBufferRef fromConnection:(AVCaptureConnection *)connection 
{ 
    // callback from AVCaptureOutput 
    // 
    CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBufferRef); 
    if (imageBufferRef) 
    { 
     // Take a subset of buffer to create a smaller image 
     CVPixelBufferLockBaseAddress(imageBufferRef, 0); 
     size_t widthY = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0); 
     size_t widthUV = CVPixelBufferGetWidthOfPlane(imageBufferRef, 1); 
     size_t heightY = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0); 
     size_t heightUV = CVPixelBufferGetHeightOfPlane(imageBufferRef, 1); 
     size_t cropHeightY = 500; 
     size_t cropWidthY = 500; 
     size_t cropHeightUV = cropHeightY/2; 
     size_t cropWidthUV = cropWidthY; 
     size_t cropY_X0 = widthY/2 - (cropWidthY/2); 
     size_t cropY_Y0 = heightY/2 - (cropHeightY/2); 
     size_t cropUV_X0 = widthUV/2 - (cropWidthUV/2); 
     size_t cropUV_Y0 = heightUV/2 - (cropHeightUV/2); 

     void *baseAddressY = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0); 
     void *baseAddressUV = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 1); 

     size_t bytesPerRowY = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0); 
     size_t bytesPerRowUV = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 1); 

     size_t pixelBytesY = bytesPerRowY/widthY; 
     size_t pixelBytesUV = bytesPerRowUV/widthUV; 

     void *startPosY = baseAddressY + (cropY_Y0 * bytesPerRowY + cropY_X0 * pixelBytesY); 
     void *startPosUV = baseAddressUV + (cropUV_Y0 * bytesPerRowUV + cropUV_X0 * pixelBytesUV); 

     size_t bytesPerRowOut = cropWidthY * pixelBytesY; 

     size_t sizeY = bytesPerRowOut * cropHeightY; 
     size_t sizeUV = bytesPerRowOut * cropHeightUV; 

     unsigned char * pixelY = (unsigned char *)malloc(sizeY); 
     unsigned char * pixelUV = (unsigned char *)malloc(sizeUV); 

     for (int i = 0; i < cropHeightY; ++i) { 
      memcpy(pixelY + i * bytesPerRowOut, startPosY + i * bytesPerRowY, bytesPerRowOut); 
     } 

     for (int i = 0; i < cropHeightUV; ++i) { 
      memcpy(pixelUV + i * bytesPerRowOut, startPosUV + i * bytesPerRowUV, bytesPerRowOut); 
     } 

     void *baseAddresses[2] = {pixelY, pixelUV}; 
     size_t planeWidths[2] = {cropWidthY, cropWidthUV}; 
     size_t planeHeights[2] = {cropHeightY, cropHeightUV}; 
     size_t planeBytesPerRow[2] = {bytesPerRowOut, bytesPerRowOut}; 

     // Transform input to UIImage 
     UIImage *inputAsUIImage = [self convertToUIImage:imageBufferRef]; 

     // Extract subimage of UIImage 
     CGRect fromRect = CGRectMake(cropY_X0, cropY_Y0, cropWidthY, cropHeightY); // or whatever rectangle 
     CGImageRef drawImage = CGImageCreateWithImageInRect(inputAsUIImage.CGImage, fromRect); 
     UIImage *newImage = [UIImage imageWithCGImage:drawImage]; 
     CGImageRelease(drawImage); 

     // Convert UIImage back to CVImageBufferRef 
     // 1. Create a CIImage with the underlying CGImage encapsulated by the UIImage (referred to as 'image'): 
     CIImage *inputImage = [CIImage imageWithCGImage:newImage.CGImage]; 
     // 2. Create a CIContext: 
     CIContext *ciContext = [CIContext contextWithCGContext:UIGraphicsGetCurrentContext() options:nil]; 
     // 3. Render the CIImage to a CVPixelBuffer (referred to as 'outputBuffer'): 
     CVPixelBufferRef outputBuffer; 
     CVPixelBufferCreateWithPlanarBytes(NULL, cropWidthY, cropHeightY, '420v', NULL, 0, 2, baseAddresses, planeWidths, planeHeights, planeBytesPerRow, NULL, NULL, NULL, &outputBuffer); 
     [ciContext render:inputImage toCVPixelBuffer:outputBuffer]; 


     if(logCameraSettings) { 
      NSLog(@"Original Image Size:\n width:%zu\n height:%zu\n", widthY, heightY); 
      size_t outWidthY = CVPixelBufferGetWidthOfPlane(outputBuffer, 0); 
      size_t outHeightY = CVPixelBufferGetHeightOfPlane(outputBuffer, 0); 
      NSLog(@"Modified Image Size:\n width:%zu\n height:%zu\n", outWidthY, outHeightY); 
     } 

     // Do something with it here 

     // Release the allocated memory 
     CVPixelBufferUnlockBaseAddress(imageBufferRef,0); 
     free(pixelY); 
     free(pixelUV); 
    } 
} 

-(UIImage*) convertToUIImage:(CVImageBufferRef)imageBuffer 
{ 
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer]; 
    CIContext *temporaryContext = [CIContext contextWithOptions:nil]; 
    CGImageRef videoImage = [temporaryContext 
          createCGImage:ciImage 
          fromRect:CGRectMake(0, 0, 
               CVPixelBufferGetWidth(imageBuffer), 
               CVPixelBufferGetHeight(imageBuffer))]; 

    UIImage *image = [[UIImage alloc] initWithCGImage:videoImage]; 

    CGImageRelease(videoImage); 
    return image; 
} 
Verwandte Themen