diff --git a/QSVPipeline/rgy_output_avcodec.cpp b/QSVPipeline/rgy_output_avcodec.cpp
index 641a0bc3..8a86563e 100644
--- a/QSVPipeline/rgy_output_avcodec.cpp
+++ b/QSVPipeline/rgy_output_avcodec.cpp
@@ -2477,8 +2477,8 @@ void RGYOutputAvcodec::WriteNextPacketProcessed(AVPktMuxData *pktData, int64_t *
         *writtenDts = pktData->dts;
 }
 
-vector<unique_ptr<AVFrame, decltype(&av_frame_unref)>> RGYOutputAvcodec::AudioDecodePacket(AVMuxAudio *muxAudio, AVPacket *pkt) {
-    vector<unique_ptr<AVFrame, decltype(&av_frame_unref)>> decodedFrames;
+vector<unique_ptr<AVFrame, RGYAVDeleter<AVFrame>>> RGYOutputAvcodec::AudioDecodePacket(AVMuxAudio *muxAudio, AVPacket *pkt) {
+    vector<unique_ptr<AVFrame, RGYAVDeleter<AVFrame>>> decodedFrames;
     if (muxAudio->decodeError > muxAudio->ignoreDecodeError) {
         return std::move(decodedFrames);
     }
@@ -2491,7 +2491,7 @@ vector<unique_ptr<AVFrame, decltype(&av_frame_unref)>> RGYOutputAvcodec::AudioDe
     int64_t recieved_samples = 0;
     int recv_ret = 0;
     for (;;) {
-        unique_ptr<AVFrame, decltype(&av_frame_unref)> receivedData(nullptr, av_frame_unref);
+        unique_ptr<AVFrame, RGYAVDeleter<AVFrame>> receivedData(nullptr, RGYAVDeleter<AVFrame>(av_frame_free));
         int send_ret = 0;
         //必ず一度はパケットを送る
         if (!sent_packet || pkt->size > 0) {
@@ -2516,7 +2516,7 @@ vector<unique_ptr<AVFrame, decltype(&av_frame_unref)>> RGYOutputAvcodec::AudioDe
             AddMessage(RGY_LOG_ERROR, _T("failed to send packet to audio decoder: %s.\n"), qsv_av_err2str(send_ret).c_str());
             muxAudio->decodeError++;
         } else {
-            receivedData = unique_ptr<AVFrame, decltype(&av_frame_unref)>(av_frame_alloc(), av_frame_unref);
+            receivedData = unique_ptr<AVFrame, RGYAVDeleter<AVFrame>>(av_frame_alloc(), RGYAVDeleter<AVFrame>(av_frame_free));
             recv_ret = avcodec_receive_frame(muxAudio->outCodecDecodeCtx, receivedData.get());
             if (recv_ret == AVERROR(EAGAIN) //もっとパケットを送る必要がある
                 || recv_ret == AVERROR_EOF) { //最後まで読み込んだ
@@ -2554,7 +2554,7 @@ vector<unique_ptr<AVFrame, decltype(&av_frame_unref)>> RGYOutputAvcodec::AudioDe
         if (muxAudio->decodeError <= muxAudio->ignoreDecodeError) {
 #if 0
             //デコードエラーを無視する場合、入力パケットのサイズ分、無音を挿入する
-            unique_ptr<AVFrame, decltype(&av_frame_unref)> silentFrame(av_frame_alloc(), av_frame_unref);
+            unique_ptr<AVFrame, RGYAVDeleter<AVFrame>> silentFrame(av_frame_alloc(), RGYAVDeleter<AVFrame>(av_frame_free));
             AVRational samplerate = { 1, muxAudio->outCodecDecodeCtx->sample_rate };
             silentFrame->nb_samples = (int)av_rescale_q(pktInInfo.duration, muxAudio->streamIn->time_base, samplerate);
             silentFrame->channels = muxAudio->outCodecDecodeCtx->channels;
@@ -2584,7 +2584,7 @@ vector<unique_ptr<AVFrame, decltype(&av_frame_unref)>> RGYOutputAvcodec::AudioDe
 //音声をフィルタ
 vector<AVPktMuxData> RGYOutputAvcodec::AudioFilterFrame(vector<AVPktMuxData> inputFrames) {
     vector<AVPktMuxData> outputFrames;
-    for (const auto& pktData : inputFrames) {
+    for (auto& pktData : inputFrames) {
         AVMuxAudio *muxAudio = pktData.muxAudio;
         if (pktData.muxAudio->filterGraph == nullptr) {
             //フィルタリングなし
@@ -2598,16 +2598,19 @@ vector<AVPktMuxData> RGYOutputAvcodec::AudioFilterFrame(vector<AVPktMuxData> inp
                 break;
             }
         }
-        //フィルターチェーンにフレームを追加
-        if (av_buffersrc_add_frame_flags(muxAudio->filterBufferSrcCtx, pktData.frame, AV_BUFFERSRC_FLAG_PUSH) < 0) {
-            AddMessage(RGY_LOG_ERROR, _T("failed to feed the audio filtergraph\n"));
-            m_Mux.format.streamError = true;
-            av_frame_unref(pktData.frame);
-            break;
+        { //フィルターチェーンにフレームを追加
+            auto ret = av_buffersrc_add_frame_flags(muxAudio->filterBufferSrcCtx, pktData.frame, AV_BUFFERSRC_FLAG_PUSH);
+            // AVFrame構造体の破棄
+            av_frame_free(&pktData.frame);
+            if (ret < 0) {
+                AddMessage(RGY_LOG_ERROR, _T("failed to feed the audio filtergraph\n"));
+                m_Mux.format.streamError = true;
+                break;
+            }
         }
         for (;;) {
-            unique_ptr<AVFrame, decltype(&av_frame_unref)> filteredFrame(av_frame_alloc(), av_frame_unref);
-            int ret = av_buffersink_get_frame_flags(muxAudio->filterBufferSinkCtx, filteredFrame.get(), AV_BUFFERSINK_FLAG_NO_REQUEST);
+            unique_ptr<AVFrame, RGYAVDeleter<AVFrame>> filteredFrame(av_frame_alloc(), RGYAVDeleter<AVFrame>(av_frame_free));
+            auto ret = av_buffersink_get_frame_flags(muxAudio->filterBufferSinkCtx, filteredFrame.get(), AV_BUFFERSINK_FLAG_NO_REQUEST);
             if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                 break;
             }
@@ -3073,7 +3076,7 @@ RGY_ERR RGYOutputAvcodec::WriteNextAudioFrame(AVPktMuxData *pktData) {
         return RGY_ERR_UNSUPPORTED;
     }
     auto encPktDatas = AudioEncodeFrame(pktData->muxAudio, pktData->frame);
-    av_frame_unref(pktData->frame);
+    av_frame_free(&pktData->frame);
 #if ENABLE_AVCODEC_AUDPROCESS_THREAD
     if (m_Mux.thread.thAudProcess.joinable()) {
         for (auto& pktMux : encPktDatas) {
diff --git a/QSVPipeline/rgy_output_avcodec.h b/QSVPipeline/rgy_output_avcodec.h
index 3a2790f3..59b5661b 100644
--- a/QSVPipeline/rgy_output_avcodec.h
+++ b/QSVPipeline/rgy_output_avcodec.h
@@ -474,7 +474,7 @@ class RGYOutputAvcodec : public RGYOutput
     void AudioFlushStream(AVMuxAudio *muxAudio, int64_t *writtenDts);
     //音声をデコード
-    vector<unique_ptr<AVFrame, decltype(&av_frame_unref)>> AudioDecodePacket(AVMuxAudio *muxAudio, AVPacket *pkt);
+    vector<unique_ptr<AVFrame, RGYAVDeleter<AVFrame>>> AudioDecodePacket(AVMuxAudio *muxAudio, AVPacket *pkt);
     //音声をエンコード
     vector<AVPktMuxData> AudioEncodeFrame(AVMuxAudio *muxAudio, AVFrame *frame);