2016-10-11 1 views
0

Es ist mir gelungen, die Video-Streams von mehr als einer Videodatei mit MediaCodec zu verketten – mit so vielen MediaExtractor- und Decoder-MediaCodec-Instanzen, wie es Videodateien gibt. In meiner Frage geht es nun darum, die Audio-Streams der Videos zu verketten. (Titel: MediaCodec – Wie verkettet man die Audio-Streams zweier mp4-Dateien in einem einheitlichen Format und mischt sie zurück?)

Ausgehend vom modifizierten ExtractDecodeEditEncodeMux-Test habe ich versucht, zum Verketten der Audio-Streams dieselbe Methode zu verwenden wie für die Video-Streams, wobei ich darauf geachtet habe, dass der endgültige Audio-Encoder ein einzelnes, festes Zielformat hat:

/**
 * Feeds at most one encoded audio sample from {@code localAudioExtractor} into
 * {@code destinationAudioDecoder}. Serves both the original and the appended
 * stream; which one is being processed is detected by identity comparison
 * against the field {@code audioExtractor}.
 *
 * @param localAudioExtractor        extractor currently being drained
 * @param destinationAudioDecoder    decoder that consumes the extracted samples
 * @param dstAudioDecoderInputBuffers input-buffer array of that same decoder
 *                                    (must belong to destinationAudioDecoder!)
 */
private void audioExtractorLoop(MediaExtractor localAudioExtractor, MediaCodec destinationAudioDecoder, ByteBuffer[] dstAudioDecoderInputBuffers)
{
    //Audio Extractor code begin
    boolean localAudioExtractorIsOriginal = (localAudioExtractor == audioExtractor);
    boolean localDone = localAudioExtractorIsOriginal ? audioExtractorDone : audioExtractorAppendDone;
    Log.i("local_audio_extractor", localAudioExtractorIsOriginal + " " + localDone);

    while (mCopyAudio && !localDone && (encoderOutputAudioFormat == null || muxing)) {
        int decoderInputBufferIndex = destinationAudioDecoder.dequeueInputBuffer(TIMEOUT_USEC);
        if (decoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
            if (VERBOSE)
                Log.d(TAG, "no audio decoder input buffer");
            break;
        }
        if (VERBOSE) {
            Log.d(TAG, "audio decoder: returned input buffer: "
                    + decoderInputBufferIndex);
        }
        ByteBuffer decoderInputBuffer = dstAudioDecoderInputBuffers[decoderInputBufferIndex];
        int size = localAudioExtractor.readSampleData(decoderInputBuffer, 0);
        long presentationTime = localAudioExtractor.getSampleTime();
        if (VERBOSE) {
            Log.d(TAG, "audio extractor: returned buffer of size "
                    + size);
            Log.d(TAG, "audio extractor: returned buffer for time "
                    + presentationTime);
        }
        if (size >= 0) {
            // Track the last valid timestamp of the original stream; it becomes
            // the PTS offset for the appended stream later on.
            if (localAudioExtractorIsOriginal) currentFrameTimestamp = presentationTime;
            destinationAudioDecoder.queueInputBuffer(decoderInputBufferIndex, 0,
                    size, presentationTime,
                    localAudioExtractor.getSampleFlags());
            localAudioExtractor.advance();
        } else {
            // Extractor exhausted. BUG FIX: the old code queued the same input
            // buffer index twice on the last sample (data + EOS) and never set
            // audioExtractorAppendDone, so the append loop could run past EOS.
            // Queue EOS only on this still-unqueued buffer and persist the
            // done flag for whichever stream we are handling.
            if (VERBOSE)
                Log.d(TAG, "audio extractor: EOS");
            if (localAudioExtractorIsOriginal) {
                initAudioExtractorFinalTimestamp = currentFrameTimestamp;
                audioExtractorDone = true;
            } else {
                audioExtractorAppendDone = true;
            }
            destinationAudioDecoder.queueInputBuffer(decoderInputBufferIndex, 0,
                    0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
        }
        audioExtractedFrameCount++;
        break;
    }
    //Audio Extractor code end
}

/**
 * Drains one decoded PCM buffer from {@code localAudioDecoder} and, once a
 * buffer is pending, forwards it to the shared {@code audioEncoder}.
 * Serves both the original decoder ({@code audioDecoder}) and the appended
 * one; which of the two is being serviced is detected by identity comparison.
 *
 * @param localAudioDecoder decoder whose output is to be drained
 */
private void localizedAudioDecoderLoop(MediaCodec localAudioDecoder)
{
    boolean localAudioDecoderIsOriginal = (localAudioDecoder == audioDecoder);
    boolean localDone = localAudioDecoderIsOriginal ? audioDecoderDone : audioDecoderAppendDone;

    Log.i("local_audio_decoder", localAudioDecoderIsOriginal + "");
    ByteBuffer[] localDecoderOutByteBufArray = localAudioDecoderIsOriginal ? audioDecoderOutputBuffers : audioDecoderAppendOutputBuffers;
    MediaCodec.BufferInfo localDecoderBufInfo = localAudioDecoderIsOriginal ? audioDecoderOutputBufferInfo : audioDecoderAppendOutputBufferInfo;
    while (mCopyAudio && !localDone && pendingAudioDecoderOutputBufferIndex == -1 && (encoderOutputAudioFormat == null || muxing)) {
        int decoderOutputBufferIndex = localAudioDecoder.dequeueOutputBuffer(localDecoderBufInfo, TIMEOUT_USEC);
        if (decoderOutputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
            if (VERBOSE)
                Log.d(TAG, "no audio decoder output buffer");
            break;
        }
        if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
            if (VERBOSE)
                Log.d(TAG, "audio decoder: output buffers changed");
            // BUG FIX: refresh the buffer array of the decoder actually being
            // drained (the old code always queried audioDecoder, handing the
            // appended decoder a foreign buffer array) and write it back to
            // the field so the next invocation sees the fresh array too.
            localDecoderOutByteBufArray = localAudioDecoder.getOutputBuffers();
            if (localAudioDecoderIsOriginal) {
                audioDecoderOutputBuffers = localDecoderOutByteBufArray;
            } else {
                audioDecoderAppendOutputBuffers = localDecoderOutByteBufArray;
            }
            break;
        }
        if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
            decoderOutputAudioFormat = localAudioDecoder.getOutputFormat();
            decoderOutputChannelNum = decoderOutputAudioFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
            decoderOutputAudioSampleRate = decoderOutputAudioFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE);
            if (VERBOSE) {
                Log.d(TAG, "audio decoder: output format changed: "
                        + decoderOutputAudioFormat);
            }
            break;
        }
        // BUG FIX: shift the appended stream's timestamps only once we hold a
        // real output buffer. Previously the offset was added right after
        // dequeueOutputBuffer, so INFO_* return codes mutated stale BufferInfo
        // data and the offset accumulated across retries.
        // (33333 us is presumably one ~30 fps frame gap after the first
        // stream's final PTS — TODO confirm against the video track spacing.)
        if (!localAudioDecoderIsOriginal)
            localDecoderBufInfo.presentationTimeUs += initAudioExtractorFinalTimestamp + 33333;
        if (VERBOSE) {
            Log.d(TAG, "audio decoder: returned output buffer: "
                    + decoderOutputBufferIndex);
            Log.d(TAG, "audio decoder: returned buffer of size "
                    + localDecoderBufInfo.size);
        }
        if ((localDecoderBufInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
            // Codec-config data must not be forwarded to the encoder.
            if (VERBOSE)
                Log.d(TAG, "audio decoder: codec config buffer");
            localAudioDecoder.releaseOutputBuffer(decoderOutputBufferIndex, false);
            break;
        }
        if (VERBOSE) {
            Log.d(TAG, "audio decoder: returned buffer for time "
                    + localDecoderBufInfo.presentationTimeUs);
            Log.d(TAG, "audio decoder: output buffer is now pending: "
                    + pendingAudioDecoderOutputBufferIndex);
        }
        pendingAudioDecoderOutputBufferIndex = decoderOutputBufferIndex;
        audioDecodedFrameCount++;
        break;
    }

    while (mCopyAudio && pendingAudioDecoderOutputBufferIndex != -1) {
        if (VERBOSE) {
            Log.d(TAG,
                    "audio decoder: attempting to process pending buffer: "
                            + pendingAudioDecoderOutputBufferIndex);
        }
        int encoderInputBufferIndex = audioEncoder.dequeueInputBuffer(TIMEOUT_USEC);
        if (encoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
            if (VERBOSE)
                Log.d(TAG, "no audio encoder input buffer");
            break;
        }
        if (VERBOSE) {
            Log.d(TAG, "audio encoder: returned input buffer: "
                    + encoderInputBufferIndex);
        }
        ByteBuffer encoderInputBuffer = audioEncoderInputBuffers[encoderInputBufferIndex];
        int size = localDecoderBufInfo.size;
        long presentationTime = localDecoderBufInfo.presentationTimeUs;
        if (VERBOSE) {
            Log.d(TAG, "audio decoder: processing pending buffer: "
                    + pendingAudioDecoderOutputBufferIndex);
            Log.d(TAG, "audio decoder: pending buffer of size " + size);
            Log.d(TAG, "audio decoder: pending buffer for time "
                    + presentationTime);
        }
        if (size >= 0) {
            // duplicate() gives an independent position/limit so the codec's
            // own buffer state is left untouched.
            ByteBuffer decoderOutputBuffer = localDecoderOutByteBufArray[pendingAudioDecoderOutputBufferIndex]
                    .duplicate();

            byte[] testBufferContents = new byte[size];
            // NOTE(review): integer division — this assumes the decoder's
            // channel count and sample rate are integer multiples of the
            // extractor's (e.g. 2/1 channels, 44100/22050 Hz); verify inputs.
            float samplingFactor = (decoderOutputChannelNum / extractorInputChannelNum) * (decoderOutputAudioSampleRate / extractorAudioSampleRate);
            int bufferSize = size / (int) samplingFactor;
            Log.i("sampling_factor", samplingFactor + " " + bufferSize);

            if (decoderOutputBuffer.remaining() < size) {
                // Pad with trailing zeros to make a full frame.
                for (int i = decoderOutputBuffer.remaining(); i < size; i++) {
                    testBufferContents[i] = 0;
                }
                decoderOutputBuffer.get(testBufferContents, 0, decoderOutputBuffer.remaining());
            } else {
                decoderOutputBuffer.get(testBufferContents, 0, size);
            }

            //WARNING: This works for 11025-22050-44100 or 8000-16000-24000-48000
            //What about in-between?
            //BTW, the size of the bytebuffer may be less than 4096 depending on the sampling factor
            if (((int) samplingFactor) > 1) {
                // Naive stereo-to-mono and/or downsampling by dropping samples.
                Log.i("s2m_conversion", "Stereo to Mono and/or downsampling");
                byte[] finalByteBufferContent = new byte[size / 2];

                for (int i = 0; i < bufferSize; i += 2) {
                    if ((i + 1) * ((int) samplingFactor) > testBufferContents.length) {
                        finalByteBufferContent[i] = 0;
                        finalByteBufferContent[i + 1] = 0;
                    } else {
                        finalByteBufferContent[i] = testBufferContents[i * ((int) samplingFactor)];
                        finalByteBufferContent[i + 1] = testBufferContents[i * ((int) samplingFactor) + 1];
                    }
                }

                decoderOutputBuffer = ByteBuffer.wrap(finalByteBufferContent);
            }

            decoderOutputBuffer.position(localDecoderBufInfo.offset);
            decoderOutputBuffer.limit(localDecoderBufInfo.offset + bufferSize);
            encoderInputBuffer.position(0);

            Log.d(TAG, "hans, audioDecoderOutputBufferInfo:" + localDecoderBufInfo.offset);
            Log.d(TAG, "hans, decoderOutputBuffer:" + decoderOutputBuffer.remaining());
            Log.d(TAG, "hans, encoderinputbuffer:" + encoderInputBuffer.remaining());
            encoderInputBuffer.put(decoderOutputBuffer);

            audioEncoder.queueInputBuffer(encoderInputBufferIndex, 0, bufferSize, presentationTime, localDecoderBufInfo.flags);
        }
        // BUG FIX: release on the decoder that owns the pending buffer; the
        // old code always released on audioDecoder, which misbehaves when the
        // appended decoder's buffer is the one pending.
        localAudioDecoder.releaseOutputBuffer(
                pendingAudioDecoderOutputBufferIndex, false);
        pendingAudioDecoderOutputBufferIndex = -1;
        if ((localDecoderBufInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
            if (VERBOSE)
                Log.d(TAG, "audio decoder: EOS");
            if (localDecoderBufInfo == audioDecoderOutputBufferInfo) {
                audioDecoderDone = true;
            } else {
                audioDecoderAppendDone = true;
            }
        }
        break;
    }
}

In diesen Funktionen übergebe ich die MediaExtractor- und Decoder-MediaCodec-Objekte für den ersten Audio-Stream und durchlaufe sie in einer Schleife, bis sie EOS erreichen; danach tausche ich sie gegen die MediaExtractor- und Decoder-MediaCodec-Objekte für den zweiten Audio-Stream aus.

Dieser Code funktioniert für den ersten Audio-Stream einwandfrei, aber nach dem Austausch erhalte ich den folgenden Stacktrace. (Ziel ist es, am Ende alle Audio-Streams in den Typ audio/raw mit 44100 Hz Abtastrate zu dekodieren.)

10-11 15:14:59.941 3067-22024/? E/SEC_AAC_DEC: saacd_decode() failed ret_val: -3, Indata 0x 11 90 00 00, length : 683 
10-11 15:14:59.941 3067-22024/? E/SEC_AAC_DEC: ASI 0x 11, 90 00 00 
10-11 15:14:59.951 29907-22020/com.picmix.mobile E/ACodec: OMXCodec::onEvent, OMX_ErrorStreamCorrupt 
10-11 15:14:59.951 29907-22020/com.picmix.mobile W/AHierarchicalStateMachine: Warning message AMessage(what = 'omxI') = { 
                      int32_t type = 0 
                      int32_t event = 1 
                      int32_t data1 = -2147479541 
                      int32_t data2 = 0 
                      } unhandled in root state. 

Ich dachte, die Decoder würden alle mit derselben Abtastrate und 2 Kanälen ausgeben, sodass der Encoder die Daten einfach übernehmen und in ein endgültiges Format kodieren kann.

Welche zusätzlichen Überlegungen muss ich für Audio einleiten, und wie kann ich verhindern, dass der Audiostream beschädigt wird, wenn ich Extractor-Decoder-Paare vertausche?

EDIT:

Ich habe diese Zeilen hinzugefügt, um den Inhalt der vom MediaExtractor extrahierten Samples zu überprüfen:

// Debug excerpt from audioExtractorLoop: dump the raw extracted sample bytes.
ByteBuffer decoderInputBuffer = dstAudioDecoderInputBuffers[decoderInputBufferIndex]; 
     int size = localAudioExtractor.readSampleData(decoderInputBuffer, 0); 
     long presentationTime = localAudioExtractor.getSampleTime(); 
     //new lines begin 
     // NOTE(review): the "buffer is inaccessible" IllegalStateException below
     // suggests dstAudioDecoderInputBuffers was fetched from a DIFFERENT codec
     // instance than the one that returned decoderInputBufferIndex, so this
     // ByteBuffer is not owned by the caller — confirm which decoder the
     // buffer array came from (see the accepted answer at the bottom).
     byte[] debugBytes = new byte[decoderInputBuffer.remaining()]; 
     decoderInputBuffer.duplicate().get(debugBytes); 
     Log.i(TAG, "DEBUG - extracted frame: "+ audioExtractedFrameCount +" | bytebuffer contents: "+new String(debugBytes)); 
     //new lines end 

In der Zeile decoderInputBuffer.duplicate().get(debugBytes); erhalte ich den Fehler IllegalStateException: buffer is inaccessible.

Bedeutet das, dass ich den Extraktor falsch eingerichtet habe?

EDIT 2:

Als ich weiter nachforschte, stellte sich heraus, dass das Problem nur beim angehängten (zweiten) Audio-Extractor auftritt, nicht beim ersten.

Antwort

0

Es stellte sich heraus, dass es etwas völlig Dummes war. Weiter oben im Code, beim Einrichten der Decoder-Puffer, hatte ich Folgendes geschrieben:

// Set up the input/output buffer arrays for BOTH decoders.
// BUG FIX: the append arrays must come from the append decoder instance.
// Previously all four calls used audioDecoder, so both pairs aliased the
// same codec's buffers — the root cause of the corrupted appended stream.
audioDecoderInputBuffers = audioDecoder.getInputBuffers(); 
audioDecoderOutputBuffers = audioDecoder.getOutputBuffers(); 
audioDecoderAppendInputBuffers = audioDecoderAppend.getInputBuffers(); 
audioDecoderAppendOutputBuffers = audioDecoderAppend.getOutputBuffers(); 

Alle vier Zuweisungen verwiesen dadurch auf dieselbe Decoder-Instanz.

Verwandte Themen