diff --git a/Core/HW/BufferQueue.h b/Core/HW/BufferQueue.h
index 79e7072f198e..f4231062f3c3 100644
--- a/Core/HW/BufferQueue.h
+++ b/Core/HW/BufferQueue.h
@@ -124,6 +124,22 @@ struct BufferQueue {
 		return bytesgot;
 	}
 
+	bool containsAuAfter(int off) {
+		int avail = getQueueSize();
+		for (int i = off; i <= avail - 3; ++i) {
+			if (peek(i) != 0) {
+				continue;
+			}
+
+			if (peek(i + 1) == 1 && peek(i + 2) == 9) {
+				DEBUG_LOG(HLE, "Found AU delimiter at offset %d", i);
+				return true;
+			}
+		}
+
+		return false;
+	}
+
 	void DoState(PointerWrap &p) {
 		auto s = p.Section("BufferQueue", 0, 1);
 
@@ -172,6 +188,11 @@ struct BufferQueue {
 		return pts;
 	}
 
+	unsigned char peek(int i) {
+		int off = (start + i) % bufQueueSize;
+		return bufQueue[off];
+	}
+
 	unsigned char* bufQueue;
 	int start, end;
 	int bufQueueSize;
diff --git a/Core/HW/MediaEngine.cpp b/Core/HW/MediaEngine.cpp
index b89232971b6f..9e501ed1c692 100644
--- a/Core/HW/MediaEngine.cpp
+++ b/Core/HW/MediaEngine.cpp
@@ -553,11 +553,40 @@ void MediaEngine::updateSwsFormat(int videoPixelMode) {
 #endif
 }
 
+void MediaEngine::HandleDecodedFrame(AVCodecContext *ctx, int videoPixelMode, bool skipFrame) {
+	if (!m_pFrameRGB) {
+		setVideoDim();
+	}
+
+	if (m_pFrameRGB && !skipFrame) {
+		updateSwsFormat(videoPixelMode);
+		// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
+		// Update the linesize for the new format too. We started with the largest size, so it should fit.
+		m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;
+
+		sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
+			ctx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);
+	}
+
+	if (av_frame_get_best_effort_timestamp(m_pFrame) != AV_NOPTS_VALUE)
+		m_videopts = av_frame_get_best_effort_timestamp(m_pFrame) + av_frame_get_pkt_duration(m_pFrame) - m_firstTimeStamp;
+	else
+		m_videopts += av_frame_get_pkt_duration(m_pFrame);
+}
+
 bool MediaEngine::stepVideo(int videoPixelMode, bool skipFrame) {
 #ifdef USE_FFMPEG
 	auto codecIter = m_pCodecCtxs.find(m_videoStream);
 	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;
 
+	if (m_desWidth == 0 || m_desHeight == 0) {
+		// Sometimes there is no AU delimiter; in that case, assume 20 KB of buffered data is always enough.
+		// Less than 20 KB is not enough for Valkyrie Profile.
+		if (!m_pdata->containsAuAfter(m_mpegheaderSize) && m_pdata->getQueueSize() < 20480) {
+			return false;
+		}
+	}
+
 	if (!m_pFormatCtx)
 		return false;
 	if (!m_pCodecCtx)
@@ -580,23 +609,7 @@ bool MediaEngine::stepVideo(int videoPixelMode, bool skipFrame) {
 	int result = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, &packet);
 
 	if (frameFinished) {
-		if (!m_pFrameRGB) {
-			setVideoDim();
-		}
-		if (m_pFrameRGB && !skipFrame) {
-			updateSwsFormat(videoPixelMode);
-			// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
-			// Update the linesize for the new format too. We started with the largest size, so it should fit.
-			m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;
-
-			sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
-				m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);
-		}
-
-		if (av_frame_get_best_effort_timestamp(m_pFrame) != AV_NOPTS_VALUE)
-			m_videopts = av_frame_get_best_effort_timestamp(m_pFrame) + av_frame_get_pkt_duration(m_pFrame) - m_firstTimeStamp;
-		else
-			m_videopts += av_frame_get_pkt_duration(m_pFrame);
+		HandleDecodedFrame(m_pCodecCtx, videoPixelMode, skipFrame);
 		bGetFrame = true;
 	}
 	if (result <= 0 && dataEnd) {
diff --git a/Core/HW/MediaEngine.h b/Core/HW/MediaEngine.h
index 6561176b03af..a59dd82cf930 100644
--- a/Core/HW/MediaEngine.h
+++ b/Core/HW/MediaEngine.h
@@ -98,6 +98,10 @@ class MediaEngine
 	void updateSwsFormat(int videoPixelMode);
 	int getNextAudioFrame(u8 **buf, int *headerCode1, int *headerCode2);
 
+#ifdef USE_FFMPEG
+	void HandleDecodedFrame(AVCodecContext *ctx, int videoPixelMode, bool skipFrame);
+#endif
+
 public:
 	// TODO: Very little of this below should be public.
 	// Video ffmpeg context - not used for audio
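
For context, here is a minimal standalone sketch (not part of the patch) of the technique containsAuAfter relies on: peeking into a circular buffer without consuming data and scanning for the byte pattern 00 01 09, which also matches what is presumably the H.264 access-unit delimiter start code (00 00 01 followed by NAL type 9) at its second zero byte. The RingBuffer type, its fields, and the sample data below are hypothetical stand-ins for BufferQueue, chosen so the delimiter straddles the wrap point of the physical storage.

#include <cstdio>
#include <vector>

struct RingBuffer {
	std::vector<unsigned char> buf;
	int start = 0;
	int size = 0;

	// Read byte i of the queued data without consuming it, wrapping around
	// the end of the physical storage.
	unsigned char peek(int i) const {
		return buf[(start + i) % (int)buf.size()];
	}

	// True if the 3-byte pattern 00 01 09 occurs at or after logical offset off.
	bool containsAuAfter(int off) const {
		for (int i = off; i <= size - 3; ++i) {
			if (peek(i) == 0 && peek(i + 1) == 1 && peek(i + 2) == 9)
				return true;
		}
		return false;
	}
};

int main() {
	RingBuffer rb;
	// Physical storage; the queued data starts at index 4 and wraps, so the
	// logical byte sequence is 00 00 01 09 AA BB.
	rb.buf = { 1, 9, 0xAA, 0xBB, 0x00, 0x00 };
	rb.start = 4;
	rb.size = 6;
	printf("containsAuAfter(0) = %d\n", rb.containsAuAfter(0));  // prints 1
	return 0;
}

Running this prints containsAuAfter(0) = 1, which also illustrates why peek must take the modulo of the physical index (start + i) rather than of the logical index i alone.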