From 413467ddc79dda1c53c3e92a992620463b3a7912 Mon Sep 17 00:00:00 2001
From: yangrtc
Date: Thu, 24 Feb 2022 16:33:40 +0800
Subject: [PATCH] 3.0.009

---
 include/Yang_Config.h | 33 +
 include/yangaudiodev/YangAudioCapture.h | 44 +
 include/yangaudiodev/YangAudioCaptureData.h | 35 +
 include/yangaudiodev/YangAudioDeviceQuery.h | 31 +
 include/yangaudiodev/YangAudioPlay.h | 61 +
 include/yangaudiodev/YangAudioPlayerSdl.h | 63 +
 include/yangaudiodev/YangAudioRenderData.h | 65 +
 include/yangavutil/audio/YangAudioMix.h | 22 +
 include/yangavutil/audio/YangAudioUtil.h | 56 +
 include/yangavutil/audio/YangMakeWave.h | 41 +
 include/yangavutil/audio/YangPreProcess.h | 27 +
 include/yangavutil/audio/YangResample.h | 23 +
 include/yangavutil/audio/YangRtcAec.h | 28 +
 include/yangavutil/audio/YangSwResample.h | 29 +
 include/yangavutil/video/YangBittype.h | 262 +
 include/yangavutil/video/YangBmp.h | 47 +
 include/yangavutil/video/YangCMeta.h | 28 +
 include/yangavutil/video/YangCNalu.h | 40 +
 include/yangavutil/video/YangCYuvUtil.h | 20 +
 include/yangavutil/video/YangGetBits.h | 750 +++
 include/yangavutil/video/YangGolomb.h | 406 ++
 include/yangavutil/video/YangMeta.h | 13 +
 include/yangavutil/video/YangNalu.h | 14 +
 include/yangavutil/video/YangPicConvert.h | 36 +
 include/yangavutil/video/YangPicUtilFfmpeg.h | 55 +
 include/yangavutil/video/YangResize.h | 46 +
 .../yangavutil/video/YangVideoEncoderMeta.h | 16 +
 include/yangavutil/video/YangYuvConvert.h | 230 +
 include/yangavutil/video/YangYuvUtil.h | 14 +
 include/yangcapture/YangCaptureFactory.h | 24 +
 include/yangcapture/YangMultiVideoCapture.h | 27 +
 include/yangcapture/YangScreenCapture.h | 41 +
 .../yangcapture/YangScreenCaptureHandleI.h | 15 +
 include/yangcapture/YangVideoCapture.h | 38 +
 include/yangdecoder/YangAudioDecoder.h | 28 +
 include/yangdecoder/YangAudioDecoderHandle.h | 64 +
 include/yangdecoder/YangAudioDecoderHandles.h | 60 +
 include/yangdecoder/YangDecoder.h | 17 +
 include/yangdecoder/YangDecoderFactory.h | 22 +
 include/yangdecoder/YangVideoDecoder.h | 25 +
 include/yangdecoder/YangVideoDecoderHandle.h | 66 +
 include/yangdecoder/YangVideoDecoderHandles.h | 67 +
 include/yangencoder/YangAudioEncoder.h | 30 +
 include/yangencoder/YangAudioEncoderHandle.h | 55 +
 .../yangencoder/YangAudioEncoderHandleCb.h | 60 +
 include/yangencoder/YangAudioEncoderMeta.h | 35 +
 include/yangencoder/YangEncoder.h | 18 +
 include/yangencoder/YangEncoderFactory.h | 24 +
 include/yangencoder/YangGpuEncoderFactory.h | 19 +
 include/yangencoder/YangVideoEncoder.h | 27 +
 include/yangencoder/YangVideoEncoderHandle.h | 55 +
 include/yangplayer/YangPlayFactory.h | 20 +
 include/yangplayer/YangPlayReceive.h | 42 +
 include/yangplayer/YangPlayerBase.h | 34 +
 include/yangplayer/YangPlayerDecoder.h | 46 +
 include/yangplayer/YangPlayerHandle.h | 21 +
 include/yangplayer/YangPlayerPlay.h | 38 +
 include/yangplayer/YangWinPlayFactroy.h | 44 +
 include/yangpush/YangPushCapture.h | 89 +
 include/yangpush/YangPushCommon.h | 27 +
 include/yangpush/YangPushFactory.h | 19 +
 include/yangpush/YangPushHandle.h | 35 +
 include/yangpush/YangPushPublish.h | 82 +
 include/yangpush/YangRtcPublish.h | 55 +
 include/yangpush/YangSendVideoI.h | 18 +
 include/yangrecliving/YangLivingHandle.h | 29 +
 include/yangrecliving/YangLivingType.h | 45 +
 include/yangrecliving/YangRecMessageI.h | 38 +
 include/yangrecliving/YangRecordUtilFactory.h | 30 +
 include/yangrecliving/YangScreenHandle.h | 32 +
 include/yangrecliving/YangVrHandle.h | 42 +
 include/yangrecord/YangFlvWrite.h | 99 +
 include/yangrecord/YangMp4File.h | 118 +
 include/yangrecord/YangMp4FileApp.h | 35 +
 include/yangrecord/YangRecEncoder.h | 41 +
 include/yangrecord/YangRecord.h | 60 +
 include/yangrecord/YangRecordApp.h | 30 +
 include/yangrecord/YangRecordCapture.h | 79 +
 include/yangrecord/YangRecordMp4.h | 78 +
 include/yangrtmp/YangRtmpHandle.h | 60 +
 include/yangsrt/YangSrtBase.h | 75 +
 include/yangsrt/YangTsBuffer.h | 56 +
 include/yangsrt/YangTsMuxer.h | 114 +
 include/yangsrt/YangTsPacket.h | 191 +
 include/yangsrt/YangTsPid.h | 77 +
 include/yangsrt/YangTsdemux.h | 185 +
 include/yangsrt/common.h | 30 +
 include/yangsrt/crc.h | 54 +
 include/yangsrt/srt_data.hpp | 33 +
 include/yangstream/YangStream.h | 46 +
 include/yangstream/YangStreamCapture.h | 43 +
 include/yangstream/YangStreamHandle.h | 12 +
 include/yangstream/YangStreamManager.h | 49 +
 include/yangstream/YangStreamType.h | 33 +
 include/yangstream/YangSynBuffer.h | 105 +
 include/yangtrace/YangCameraControl.h | 42 +
 include/yangtrace/YangTraceHandle.h | 66 +
 include/yangtrace/YangTraceIni.h | 21 +
 include/yangtrace/YangTraceServer.h | 32 +
 include/yangtrace/YangTraceUdp.h | 30 +
 include/yangutil/YangErrorCode.h | 110 +
 include/yangutil/buffer/YangAudioBuffer.h | 24 +
 .../yangutil/buffer/YangAudioEncoderBuffer.h | 25 +
 include/yangutil/buffer/YangAudioPlayBuffer.h | 27 +
 include/yangutil/buffer/YangBuffer.h | 19 +
 include/yangutil/buffer/YangCBuffer.h | 70 +
 include/yangutil/buffer/YangMediaBuffer.h | 42 +
 include/yangutil/buffer/YangVideoBuffer.h | 47 +
 .../yangutil/buffer/YangVideoDecoderBuffer.h | 21 +
 .../yangutil/buffer/YangVideoEncoderBuffer.h | 22 +
 include/yangutil/sys/YangAmf.h | 143 +
 include/yangutil/sys/YangBits.h | 111 +
 include/yangutil/sys/YangCString.h | 32 +
 include/yangutil/sys/YangCTime.h | 32 +
 include/yangutil/sys/YangCTimer.h | 42 +
 include/yangutil/sys/YangCUrl.h | 27 +
 include/yangutil/sys/YangEndian.h | 24 +
 include/yangutil/sys/YangFile.h | 16 +
 include/yangutil/sys/YangHttp.h | 34 +
 include/yangutil/sys/YangHttpSocket.h | 15 +
 include/yangutil/sys/YangIni.h | 31 +
 include/yangutil/sys/YangJson.h | 15 +
 include/yangutil/sys/YangLibHandle.h | 25 +
 include/yangutil/sys/YangLoadLib.h | 30 +
 include/yangutil/sys/YangLog.h | 50 +
 include/yangutil/sys/YangMath.h | 27 +
 include/yangutil/sys/YangSRtp.h | 25 +
 include/yangutil/sys/YangSocket.h | 15 +
 include/yangutil/sys/YangSsl.h | 32 +
 include/yangutil/sys/YangSsrc.h | 19 +
 include/yangutil/sys/YangString.h | 22 +
 include/yangutil/sys/YangSysMessageHandle.h | 42 +
 include/yangutil/sys/YangSysMessageI.h | 32 +
 include/yangutil/sys/YangThread.h | 34 +
 include/yangutil/sys/YangTime.h | 37 +
 include/yangutil/sys/YangTimer.h | 78 +
 include/yangutil/sys/YangUrl.h | 14 +
 include/yangutil/sys/YangVector.h | 76 +
 include/yangutil/sys/YangWebsocket.h | 33 +
 include/yangutil/sys/YangWindowsMouse.h | 58 +
 include/yangutil/yang_unistd.h | 22 +
 include/yangutil/yangavctype.h | 78 +
 include/yangutil/yangavctype_H.h | 14 +
 include/yangutil/yangavinfotype.h | 152 +
 include/yangutil/yangavtype.h | 214 +
 include/yangutil/yangavtype_h265.h | 72 +
 include/yangutil/yangtype.h | 61 +
 include/yangwebrtc/YangAVContext.h | 23 +
 include/yangwebrtc/YangMetaConnection.h | 42 +
 include/yangwebrtc/YangPeerConnection.h | 33 +
 include/yangwebrtc/YangRtcHandle.h | 31 +
 libmetartc3/CMakeLists.txt | 39 +
 libmetartc3/metartc3.pro | 243 +
 .../src/yangaudiodev/YangAudioCapture.cpp | 42 +
 .../src/yangaudiodev/YangAudioCaptureData.cpp | 74 +
 .../yangaudiodev/YangAudioCaptureHandle.cpp | 124 +
 .../src/yangaudiodev/YangAudioCaptureHandle.h | 38 +
 .../src/yangaudiodev/YangAudioPlay.cpp | 55 +
 .../src/yangaudiodev/YangAudioRenderData.cpp | 180 +
 .../src/yangaudiodev/YangCaptureCallback.h | 17 +
 .../linux/YangAlsaDeviceHandle.cpp | 582 ++
 .../yangaudiodev/linux/YangAlsaDeviceHandle.h | 82 +
 .../src/yangaudiodev/linux/YangAlsaHandle.cpp | 358 +
 .../src/yangaudiodev/linux/YangAlsaHandle.h | 63 +
 .../linux/YangAudioCaptureImpl.cpp | 227 +
 .../yangaudiodev/linux/YangAudioCaptureImpl.h | 49 +
 .../yangaudiodev/linux/YangAudioPlayAlsa.cpp | 224 +
 .../yangaudiodev/linux/YangAudioPlayAlsa.h | 41 +
 .../yangaudiodev/win/YangAudioApiCapture.h | 25 +
 .../win/YangRecAudioCaptureHandle.cpp | 72 +
 .../win/YangRecAudioCaptureHandle.h | 32 +
 .../src/yangaudiodev/win/YangWinAudioApi.cpp | 183 +
 .../src/yangaudiodev/win/YangWinAudioApi.h | 28 +
 .../yangaudiodev/win/YangWinAudioApiAec.cpp | 363 +
 .../src/yangaudiodev/win/YangWinAudioApiAec.h | 55 +
 .../win/YangWinAudioApiCapture.cpp | 372 ++
 .../yangaudiodev/win/YangWinAudioApiCapture.h | 63 +
 .../win/YangWinAudioApiDevice.cpp | 137 +
 .../yangaudiodev/win/YangWinAudioApiDevice.h | 54 +
 .../win/YangWinAudioApiRender.cpp | 512 ++
 .../yangaudiodev/win/YangWinAudioApiRender.h | 101 +
 .../yangaudiodev/win/YangWinAudioCapture.cpp | 263 +
 .../yangaudiodev/win/YangWinAudioCapture.h | 76 +
 .../win/YangWinAudioCaptureHandle.cpp | 117 +
 .../win/YangWinAudioCaptureHandle.h | 62 +
 .../src/yangaudiodev/win/YangWinAudioDevice.h | 113 +
 .../win/YangWinRecordAudioCapture.cpp | 256 +
 .../win/YangWinRecordAudioCapture.h | 76 +
 .../src/yangavutil/YangImageConvert.cpp | 427 ++
 libmetartc3/src/yangavutil/YangImageConvert.h | 28 +
 libmetartc3/src/yangavutil/YangMakeWave.cpp | 87 +
 .../src/yangavutil/YangPicUtilFfmpeg.cpp | 111 +
 libmetartc3/src/yangavutil/YangSwResample.cpp | 67 +
 .../src/yangavutil/YangVideoEncoderMeta.cpp | 19 +
 libmetartc3/src/yangavutil/YangYuvConvert.cpp | 289 +
 .../src/yangcapture/YangAudioDeviceQuery.cpp | 111 +
 .../src/yangcapture/YangCaptureFactory.cpp | 65 +
 .../src/yangcapture/YangDXGIManager.cpp | 732 ++
 libmetartc3/src/yangcapture/YangDXGIManager.h | 95 +
 .../src/yangcapture/YangScreenCaptureImpl.cpp | 225 +
 .../src/yangcapture/YangScreenCaptureImpl.h | 42 +
 .../src/yangcapture/YangScreenShare.cpp | 90 +
 libmetartc3/src/yangcapture/YangScreenShare.h | 38 +
 .../src/yangcapture/YangVideoCapture.cpp | 31 +
 .../yangcapture/YangVideoCaptureHandle.cpp | 98 +
 .../src/yangcapture/YangVideoCaptureHandle.h | 45 +
 .../src/yangcapture/YangVideoCaptureImpl.cpp | 455 ++
 .../src/yangcapture/YangVideoCaptureImpl.h | 71 +
 .../src/yangcapture/YangVideoDeviceQuery.cpp | 104 +
 .../src/yangcapture/YangVideoDeviceQuery.h | 18 +
 .../src/yangcapture/win/YangVideoSrc.cpp | 413 ++
 .../src/yangcapture/win/YangVideoSrc.h | 222 +
 .../yangcapture/win/YangWinVideoCapture.cpp | 421 ++
 .../src/yangcapture/win/YangWinVideoCapture.h | 90 +
 .../win/YangWinVideoCaptureHandle.cpp | 56 +
 .../win/YangWinVideoCaptureHandle.h | 27 +
 .../src/yangdecoder/YangAudioDecoder.cpp | 22 +
 .../src/yangdecoder/YangAudioDecoderAac.cpp | 120 +
 .../src/yangdecoder/YangAudioDecoderAac.h | 62 +
 .../yangdecoder/YangAudioDecoderHandle.cpp | 171 +
 .../yangdecoder/YangAudioDecoderHandles.cpp | 251 +
 .../src/yangdecoder/YangAudioDecoderOpus.cpp | 97 +
 .../src/yangdecoder/YangAudioDecoderOpus.h | 42 +
 .../src/yangdecoder/YangAudioDecoderSpeex.cpp | 173 +
 .../src/yangdecoder/YangAudioDecoderSpeex.h | 48 +
 .../src/yangdecoder/YangDecoderFactory.cpp | 66 +
 .../YangH2645VideoDecoderFfmpeg.cpp | 491 ++
 .../yangdecoder/YangH2645VideoDecoderFfmpeg.h | 122 +
 libmetartc3/src/yangdecoder/YangH264Dec.h | 25 +
 .../src/yangdecoder/YangH264DecoderSoft.cpp | 96 +
 .../src/yangdecoder/YangH264DecoderSoft.h | 48 +
 .../yangdecoder/YangH264DecoderSoftFactory.h | 16 +
 .../src/yangdecoder/YangH264Header.cpp | 654 ++
 libmetartc3/src/yangdecoder/YangH264Header.h | 213 +
 libmetartc3/src/yangdecoder/YangH264Header1.h | 712 ++
 .../src/yangdecoder/YangHeaderParseFfmpeg.cpp | 111 +
 .../src/yangdecoder/YangHeaderParseFfmpeg.h | 52 +
 .../yangdecoder/YangVideoDecoderHandle.cpp | 201 +
 .../yangdecoder/YangVideoDecoderHandles.cpp | 260 +
 .../src/yangdecoder/YangVideoDecoderIntel.cpp | 495 ++
 .../src/yangdecoder/YangVideoDecoderIntel.h | 127 +
 .../src/yangencoder/YangAudioEncoder.cpp | 23 +
 .../src/yangencoder/YangAudioEncoderAac.cpp | 113 +
 .../src/yangencoder/YangAudioEncoderAac.h | 57 +
 .../yangencoder/YangAudioEncoderHandle.cpp | 103 +
 .../yangencoder/YangAudioEncoderHandleCb.cpp | 98 +
 .../src/yangencoder/YangAudioEncoderMeta.cpp | 91 +
 .../src/yangencoder/YangAudioEncoderMp3.cpp | 166 +
 .../src/yangencoder/YangAudioEncoderMp3.h | 68 +
 .../src/yangencoder/YangAudioEncoderOpus.cpp | 179 +
 .../src/yangencoder/YangAudioEncoderOpus.h | 46 +
 .../src/yangencoder/YangAudioEncoderSpeex.cpp | 211 +
 .../src/yangencoder/YangAudioEncoderSpeex.h | 55 +
 .../src/yangencoder/YangEncoderFactory.cpp | 73 +
 .../src/yangencoder/YangFfmpegEncoderMeta.cpp | 284 +
 .../src/yangencoder/YangFfmpegEncoderMeta.h | 84 +
 .../src/yangencoder/YangH264EncHeader.cpp | 216 +
 .../src/yangencoder/YangH264EncHeader.h | 62 +
 .../src/yangencoder/YangH264EncoderIntel.cpp | 999 +++
 .../src/yangencoder/YangH264EncoderIntel.h | 168 +
 .../src/yangencoder/YangH264EncoderIntel1.h | 795 +++
 .../src/yangencoder/YangH264EncoderMeta.cpp | 121 +
 .../src/yangencoder/YangH264EncoderMeta.h | 37 +
 .../src/yangencoder/YangH264EncoderSoft.cpp | 220 +
 .../src/yangencoder/YangH264EncoderSoft.h | 58 +
 .../src/yangencoder/YangH265EncoderMeta.cpp | 119 +
 .../src/yangencoder/YangH265EncoderMeta.h | 40 +
 .../src/yangencoder/YangH265EncoderSoft.cpp | 220 +
 .../src/yangencoder/YangH265EncoderSoft.h | 71 +
 .../src/yangencoder/YangVideoEncoder.cpp | 23 +
 .../yangencoder/YangVideoEncoderFfmpeg.cpp | 322 +
 .../src/yangencoder/YangVideoEncoderFfmpeg.h | 102 +
 .../yangencoder/YangVideoEncoderHandle.cpp | 194 +
 libmetartc3/src/yangencoder/lame.h | 1342 ++++
 .../src/yangplayer/YangPlayFactory.cpp | 30 +
 .../src/yangplayer/YangPlayReceive.cpp | 188 +
 libmetartc3/src/yangplayer/YangPlayerBase.cpp | 68 +
 .../src/yangplayer/YangPlayerDecoder.cpp | 114 +
 .../src/yangplayer/YangPlayerHandleImpl.cpp | 151 +
 .../src/yangplayer/YangPlayerHandleImpl.h | 39 +
 libmetartc3/src/yangplayer/YangPlayerPlay.cpp | 76 +
 libmetartc3/src/yangplayer/YangRtcReceive.cpp | 150 +
 libmetartc3/src/yangplayer/YangRtcReceive.h | 58 +
 libmetartc3/src/yangpush/YangPushCapture.cpp | 331 +
 libmetartc3/src/yangpush/YangPushEncoder.cpp | 113 +
 libmetartc3/src/yangpush/YangPushEncoder.h | 46 +
 libmetartc3/src/yangpush/YangPushFactory.cpp | 39 +
 .../src/yangpush/YangPushHandleImpl.cpp | 241 +
 libmetartc3/src/yangpush/YangPushHandleImpl.h | 61 +
 .../src/yangpush/YangPushMessageHandle.cpp | 118 +
 .../src/yangpush/YangPushMessageHandle.h | 32 +
 libmetartc3/src/yangpush/YangPushPublish.cpp | 219 +
 libmetartc3/src/yangpush/YangRtcPublish.cpp | 257 +
 .../src/yangpush/YangSendVideoImpl.cpp | 44 +
 libmetartc3/src/yangpush/YangSendVideoImpl.h | 32 +
 libmetartc3/src/yangrecord/YangFlvWrite.cpp | 486 ++
 libmetartc3/src/yangrecord/YangMp4File.cpp | 193 +
 libmetartc3/src/yangrecord/YangMp4FileApp.cpp | 76 +
 libmetartc3/src/yangrecord/YangRecEncoder.cpp | 105 +
 libmetartc3/src/yangrecord/YangRecord.cpp | 192 +
 libmetartc3/src/yangrecord/YangRecordApp.cpp | 67 +
 .../src/yangrecord/YangRecordCapture.cpp | 337 +
 .../src/yangrecord/YangRecordHandle.cpp | 63 +
 libmetartc3/src/yangrecord/YangRecordHandle.h | 29 +
 libmetartc3/src/yangrecord/YangRecordMp4.cpp | 263 +
 libmetartc3/src/yangsrt/YangSrtBase.cpp | 409 ++
 libmetartc3/src/yangsrt/YangTsBuffer.cpp | 219 +
 libmetartc3/src/yangsrt/YangTsMuxer.cpp | 424 ++
 libmetartc3/src/yangsrt/YangTsPacket.cpp | 509 ++
 libmetartc3/src/yangsrt/YangTsdemux.cpp | 599 ++
 libmetartc3/src/yangsrt/common.cpp | 65 +
 libmetartc3/src/yangsrt/crc.cpp | 16 +
 libmetartc3/src/yangsrt/srt_data.cpp | 48 +
 .../src/yangstream/YangStreamHandle.cpp | 30 +
 .../src/yangstream/YangStreamManager.cpp | 123 +
 libmetartc3/src/yangstream/YangStreamSrt.cpp | 249 +
 libmetartc3/src/yangstream/YangStreamSrt.h | 11 +
 libmetartc3/src/yangstream/YangSynBuffer.cpp | 271 +
 libmetartc3/src/yangutil/YangAvinfo.cpp | 104 +
 libmetartc3/src/yangutil/YangIniImpl.cpp | 263 +
 libmetartc3/src/yangutil/YangJson.cpp | 50 +
 libmetartc3/src/yangutil/YangLoadLib.cpp | 112 +
 libmetartc3/src/yangutil/YangString.cpp | 131 +
 .../src/yangutil/YangSysMessageHandle.cpp | 119 +
 libmetartc3/src/yangutil/YangThread.cpp | 66 +
 libmetartc3/src/yangutil/YangTimer.cpp | 221 +
 libmetartc3/src/yangutil/YangWindowsMouse.cpp | 158 +
 .../src/yangutil/buffer/YangAudioBuffer.cpp | 55 +
 .../buffer/YangAudioEncoderBuffer.cpp | 45 +
 .../yangutil/buffer/YangAudioPlayBuffer.cpp | 56 +
 .../src/yangutil/buffer/YangMediaBuffer.cpp | 104 +
 .../src/yangutil/buffer/YangVideoBuffer.cpp | 78 +
 .../buffer/YangVideoDecoderBuffer.cpp | 29 +
 .../buffer/YangVideoEncoderBuffer.cpp | 34 +
 libmetartc3/utils.cmake | 15 +
 libmetartccore3/CMakeLists.txt | 32 +
 libmetartccore3/metartccore3.pro | 216 +
 libmetartccore3/src/ffmpeg/Makefile | 676 ++
 .../src/ffmpeg/YangMetaConnection.h | 104 +
 libmetartccore3/src/ffmpeg/allformats.c | 654 ++
 libmetartccore3/src/ffmpeg/demuxer_list.c | 307 +
 libmetartccore3/src/ffmpeg/ffmpeg.c | 5093 ++++++++++++++
 libmetartccore3/src/ffmpeg/h264dec.c | 1159 ++++
 libmetartccore3/src/ffmpeg/muxer_list.c | 167 +
 libmetartccore3/src/ffmpeg/protocol_list.c | 31 +
 libmetartccore3/src/ffmpeg/protocols.c | 144 +
 libmetartccore3/src/ffmpeg/utils.c | 5889 +++++++++++++++++
 libmetartccore3/src/ffmpeg/webrtc_demuxer.c | 615 ++
 libmetartccore3/src/ffmpeg/webrtc_muxer.c | 311 +
 libmetartccore3/src/ffmpeg/webrtc_proto.c | 105 +
 .../src/ffmpeg/yang_h264_initExtra.h | 13 +
 libmetartccore3/src/yangavutil/YangAudioMix.c | 139 +
 .../src/yangavutil/YangAudioUtil.c | 282 +
 libmetartccore3/src/yangavutil/YangConvert.c | 610 ++
 libmetartccore3/src/yangavutil/YangMeta.c | 404 ++
 libmetartccore3/src/yangavutil/YangNalu.c | 209 +
 .../src/yangavutil/YangPreProcess.c | 95 +
 libmetartccore3/src/yangavutil/YangResample.c | 55 +
 libmetartccore3/src/yangavutil/YangRtcAec.c | 115 +
 libmetartccore3/src/yangavutil/YangYuvUtil.c | 215 +
 .../src/yangcsrs/YangSrsConnection.c | 61 +
 .../src/yangcsrs/YangSrsConnection.h | 10 +
 .../src/yangcsrs/YangSrsRtcHandle.c | 169 +
 libmetartccore3/src/yangcsrs/YangSrsSdp.c | 194 +
 libmetartccore3/src/yangcsrs/YangSrsSdp.h | 20 +
 libmetartccore3/src/yangrtmp/YangRtmp.c | 3385 ++++++++++
 libmetartccore3/src/yangrtmp/YangRtmp.h | 240 +
 libmetartccore3/src/yangrtmp/YangRtmp2.h | 1911 ++++++
 .../src/yangrtp/YangPublishNackBuffer.c | 52 +
 .../src/yangrtp/YangPublishNackBuffer.h | 31 +
 .../src/yangrtp/YangReceiveNackBuffer.c | 178 +
 .../src/yangrtp/YangReceiveNackBuffer.h | 36 +
 libmetartccore3/src/yangrtp/YangRtcp.c | 7 +
 libmetartccore3/src/yangrtp/YangRtcp.h | 88 +
 libmetartccore3/src/yangrtp/YangRtcpApp.c | 175 +
 libmetartccore3/src/yangrtp/YangRtcpApp.h | 37 +
 libmetartccore3/src/yangrtp/YangRtcpCommon.c | 92 +
 libmetartccore3/src/yangrtp/YangRtcpCommon.h | 31 +
 .../src/yangrtp/YangRtcpCompound.c | 162 +
 .../src/yangrtp/YangRtcpCompound.h | 29 +
 libmetartccore3/src/yangrtp/YangRtcpNack.c | 185 +
 libmetartccore3/src/yangrtp/YangRtcpNack.h | 30 +
 libmetartccore3/src/yangrtp/YangRtcpPli.c | 79 +
 libmetartccore3/src/yangrtp/YangRtcpPli.h | 15 +
 .../src/yangrtp/YangRtcpPsfbCommon.c | 50 +
 .../src/yangrtp/YangRtcpPsfbCommon.h | 19 +
 libmetartccore3/src/yangrtp/YangRtcpRR.c | 158 +
 libmetartccore3/src/yangrtp/YangRtcpRR.h | 16 +
 libmetartccore3/src/yangrtp/YangRtcpRpsi.c | 76 +
 libmetartccore3/src/yangrtp/YangRtcpRpsi.h | 16 +
 libmetartccore3/src/yangrtp/YangRtcpSR.c | 155 +
 libmetartccore3/src/yangrtp/YangRtcpSR.h | 14 +
 libmetartccore3/src/yangrtp/YangRtcpSli.c | 76 +
 libmetartccore3/src/yangrtp/YangRtcpSli.h | 29 +
 libmetartccore3/src/yangrtp/YangRtcpTWCC.c | 514 ++
 libmetartccore3/src/yangrtp/YangRtcpTWCC.h | 120 +
 libmetartccore3/src/yangrtp/YangRtcpXr.c | 50 +
 libmetartccore3/src/yangrtp/YangRtcpXr.h | 15 +
 libmetartccore3/src/yangrtp/YangRtp.c | 366 +
 libmetartccore3/src/yangrtp/YangRtp.h | 94 +
 libmetartccore3/src/yangrtp/YangRtpBuffer.c | 26 +
 libmetartccore3/src/yangrtp/YangRtpBuffer.h | 27 +
 libmetartccore3/src/yangrtp/YangRtpConstant.h | 81 +
 .../src/yangrtp/YangRtpFUAPayload.c | 7 +
 .../src/yangrtp/YangRtpFUAPayload.h | 10 +
 .../src/yangrtp/YangRtpFUAPayload2.c | 122 +
 .../src/yangrtp/YangRtpFUAPayload2.h | 51 +
 libmetartccore3/src/yangrtp/YangRtpHeader.c | 123 +
 libmetartccore3/src/yangrtp/YangRtpHeader.h | 32 +
 libmetartccore3/src/yangrtp/YangRtpPacket.c | 116 +
 libmetartccore3/src/yangrtp/YangRtpPacket.h | 49 +
 .../src/yangrtp/YangRtpRawPayload.c | 29 +
 .../src/yangrtp/YangRtpRawPayload.h | 24 +
 libmetartccore3/src/yangrtp/YangRtpRecvNack.c | 230 +
 libmetartccore3/src/yangrtp/YangRtpRecvNack.h | 77 +
 .../src/yangrtp/YangRtpSTAPPayload.c | 256 +
 .../src/yangrtp/YangRtpSTAPPayload.h | 46 +
 .../src/yangsdp/YangAudioPayload.c | 42 +
 .../src/yangsdp/YangAudioPayload.h | 21 +
 .../src/yangsdp/YangCodecPayload.c | 25 +
 .../src/yangsdp/YangCodecPayload.h | 28 +
 libmetartccore3/src/yangsdp/YangMediaDesc.c | 673 ++
 libmetartccore3/src/yangsdp/YangMediaDesc.h | 72 +
 .../src/yangsdp/YangMediaPayloadType.c | 61 +
 .../src/yangsdp/YangMediaPayloadType.h | 25 +
 libmetartccore3/src/yangsdp/YangRedPayload.c | 11 +
 libmetartccore3/src/yangsdp/YangRedPayload.h | 23 +
 libmetartccore3/src/yangsdp/YangRtcSdp.c | 469 ++
 libmetartccore3/src/yangsdp/YangRtcSdp.h | 80 +
 .../src/yangsdp/YangRtxPayloadDes.c | 11 +
 .../src/yangsdp/YangRtxPayloadDes.h | 23 +
 libmetartccore3/src/yangsdp/YangSSRCInfo.c | 93 +
 libmetartccore3/src/yangsdp/YangSSRCInfo.h | 36 +
 libmetartccore3/src/yangsdp/YangSdp.c | 206 +
 libmetartccore3/src/yangsdp/YangSdp.h | 10 +
 libmetartccore3/src/yangsdp/YangSdpHandle.c | 19 +
 libmetartccore3/src/yangsdp/YangSdpHandle.h | 10 +
 libmetartccore3/src/yangsdp/YangSdpType.h | 8 +
 libmetartccore3/src/yangstream/YangStream.c | 60 +
 .../src/yangstream/YangStreamCapture.c | 354 +
 .../src/yangstream/YangStreamRtc.c | 97 +
 .../src/yangstream/YangStreamRtc.h | 11 +
 .../src/yangstream/YangStreamRtmp.c | 186 +
 .../src/yangstream/YangStreamRtmp.h | 11 +
libmetartccore3/src/yangutil/sys/YangAmf.c | 1276 ++++ libmetartccore3/src/yangutil/sys/YangAvtype.c | 128 + libmetartccore3/src/yangutil/sys/YangBuffer.c | 290 + libmetartccore3/src/yangutil/sys/YangCLog.c | 153 + .../src/yangutil/sys/YangCString.c | 127 + libmetartccore3/src/yangutil/sys/YangCTimer.c | 148 + libmetartccore3/src/yangutil/sys/YangEndian.c | 74 + libmetartccore3/src/yangutil/sys/YangFile.c | 59 + .../src/yangutil/sys/YangHttpSocket.c | 211 + .../src/yangutil/sys/YangLibHandle.c | 141 + libmetartccore3/src/yangutil/sys/YangMath.c | 73 + libmetartccore3/src/yangutil/sys/YangSRtp.c | 141 + libmetartccore3/src/yangutil/sys/YangSocket.c | 198 + libmetartccore3/src/yangutil/sys/YangSsl.c | 237 + libmetartccore3/src/yangutil/sys/YangSsrc.c | 148 + libmetartccore3/src/yangutil/sys/YangTime.c | 111 + libmetartccore3/src/yangutil/sys/YangUrl.c | 212 + libmetartccore3/src/yangutil/sys/YangVector.c | 48 + .../src/yangutil/sys/YangWebsocket.c | 281 + libmetartccore3/src/yangwebrtc/YangAec.c | 104 + .../src/yangwebrtc/YangH264RecvTrack.c | 488 ++ .../src/yangwebrtc/YangH264RecvTrack.h | 20 + .../src/yangwebrtc/YangH264RtpEncode.c | 298 + .../src/yangwebrtc/YangH264RtpEncode.h | 21 + .../src/yangwebrtc/YangH265RecvTrack.c | 497 ++ .../src/yangwebrtc/YangH265RecvTrack.h | 17 + .../src/yangwebrtc/YangH265RtpEncode.c | 294 + .../src/yangwebrtc/YangH265RtpEncode.h | 22 + .../src/yangwebrtc/YangMetaConnection.c | 225 + .../src/yangwebrtc/YangPeerConnection.c | 91 + .../src/yangwebrtc/YangRecvTrack.c | 232 + .../src/yangwebrtc/YangRecvTrack.h | 39 + .../src/yangwebrtc/YangRtcAudioRecvTrack.c | 50 + .../src/yangwebrtc/YangRtcAudioRecvTrack.h | 19 + .../src/yangwebrtc/YangRtcConnection.c | 177 + .../src/yangwebrtc/YangRtcConnection.h | 28 + .../src/yangwebrtc/YangRtcContext.c | 80 + .../src/yangwebrtc/YangRtcContext.h | 19 + .../src/yangwebrtc/YangRtcContextH.h | 54 + libmetartccore3/src/yangwebrtc/YangRtcDtls.c | 472 ++ libmetartccore3/src/yangwebrtc/YangRtcDtls.h | 30 + libmetartccore3/src/yangwebrtc/YangRtcDtlsH.h | 54 + .../src/yangwebrtc/YangRtcEncodeCommon.h | 56 + .../src/yangwebrtc/YangRtcPlayStream.c | 473 ++ .../src/yangwebrtc/YangRtcPlayStream.h | 33 + .../src/yangwebrtc/YangRtcPublishStream.c | 207 + .../src/yangwebrtc/YangRtcPublishStream.h | 35 + .../src/yangwebrtc/YangRtcSession.c | 503 ++ .../src/yangwebrtc/YangRtcSession.h | 34 + .../src/yangwebrtc/YangRtcSessionH.h | 38 + .../src/yangwebrtc/YangRtcStream.h | 130 + libmetartccore3/src/yangwebrtc/YangRtcStun.c | 376 ++ libmetartccore3/src/yangwebrtc/YangRtcStun.h | 80 + .../src/yangwebrtc/YangStreamHandle.c | 32 + .../src/yangwebrtc/YangStreamHandle.h | 10 + .../src/yangwebrtc/YangUdpHandle.c | 127 + .../src/yangwebrtc/YangUdpHandle.h | 19 + .../src/yangwebrtc/YangUdpHandleH.h | 38 + .../src/yangwebrtc/YangVideoRecvTrack.c | 77 + .../src/yangwebrtc/YangVideoRecvTrack.h | 23 + libmetartccore3/utils.cmake | 15 + 505 files changed, 78782 insertions(+) create mode 100755 include/Yang_Config.h create mode 100755 include/yangaudiodev/YangAudioCapture.h create mode 100755 include/yangaudiodev/YangAudioCaptureData.h create mode 100755 include/yangaudiodev/YangAudioDeviceQuery.h create mode 100755 include/yangaudiodev/YangAudioPlay.h create mode 100755 include/yangaudiodev/YangAudioPlayerSdl.h create mode 100755 include/yangaudiodev/YangAudioRenderData.h create mode 100755 include/yangavutil/audio/YangAudioMix.h create mode 100755 include/yangavutil/audio/YangAudioUtil.h create mode 100755 include/yangavutil/audio/YangMakeWave.h create 
mode 100755 include/yangavutil/audio/YangPreProcess.h create mode 100755 include/yangavutil/audio/YangResample.h create mode 100755 include/yangavutil/audio/YangRtcAec.h create mode 100755 include/yangavutil/audio/YangSwResample.h create mode 100755 include/yangavutil/video/YangBittype.h create mode 100755 include/yangavutil/video/YangBmp.h create mode 100755 include/yangavutil/video/YangCMeta.h create mode 100755 include/yangavutil/video/YangCNalu.h create mode 100755 include/yangavutil/video/YangCYuvUtil.h create mode 100755 include/yangavutil/video/YangGetBits.h create mode 100755 include/yangavutil/video/YangGolomb.h create mode 100755 include/yangavutil/video/YangMeta.h create mode 100755 include/yangavutil/video/YangNalu.h create mode 100755 include/yangavutil/video/YangPicConvert.h create mode 100755 include/yangavutil/video/YangPicUtilFfmpeg.h create mode 100755 include/yangavutil/video/YangResize.h create mode 100755 include/yangavutil/video/YangVideoEncoderMeta.h create mode 100755 include/yangavutil/video/YangYuvConvert.h create mode 100755 include/yangavutil/video/YangYuvUtil.h create mode 100755 include/yangcapture/YangCaptureFactory.h create mode 100755 include/yangcapture/YangMultiVideoCapture.h create mode 100755 include/yangcapture/YangScreenCapture.h create mode 100755 include/yangcapture/YangScreenCaptureHandleI.h create mode 100755 include/yangcapture/YangVideoCapture.h create mode 100755 include/yangdecoder/YangAudioDecoder.h create mode 100755 include/yangdecoder/YangAudioDecoderHandle.h create mode 100755 include/yangdecoder/YangAudioDecoderHandles.h create mode 100755 include/yangdecoder/YangDecoder.h create mode 100755 include/yangdecoder/YangDecoderFactory.h create mode 100755 include/yangdecoder/YangVideoDecoder.h create mode 100755 include/yangdecoder/YangVideoDecoderHandle.h create mode 100755 include/yangdecoder/YangVideoDecoderHandles.h create mode 100755 include/yangencoder/YangAudioEncoder.h create mode 100755 include/yangencoder/YangAudioEncoderHandle.h create mode 100755 include/yangencoder/YangAudioEncoderHandleCb.h create mode 100755 include/yangencoder/YangAudioEncoderMeta.h create mode 100755 include/yangencoder/YangEncoder.h create mode 100755 include/yangencoder/YangEncoderFactory.h create mode 100755 include/yangencoder/YangGpuEncoderFactory.h create mode 100755 include/yangencoder/YangVideoEncoder.h create mode 100755 include/yangencoder/YangVideoEncoderHandle.h create mode 100755 include/yangplayer/YangPlayFactory.h create mode 100755 include/yangplayer/YangPlayReceive.h create mode 100755 include/yangplayer/YangPlayerBase.h create mode 100755 include/yangplayer/YangPlayerDecoder.h create mode 100755 include/yangplayer/YangPlayerHandle.h create mode 100755 include/yangplayer/YangPlayerPlay.h create mode 100755 include/yangplayer/YangWinPlayFactroy.h create mode 100755 include/yangpush/YangPushCapture.h create mode 100755 include/yangpush/YangPushCommon.h create mode 100755 include/yangpush/YangPushFactory.h create mode 100755 include/yangpush/YangPushHandle.h create mode 100755 include/yangpush/YangPushPublish.h create mode 100755 include/yangpush/YangRtcPublish.h create mode 100755 include/yangpush/YangSendVideoI.h create mode 100755 include/yangrecliving/YangLivingHandle.h create mode 100755 include/yangrecliving/YangLivingType.h create mode 100755 include/yangrecliving/YangRecMessageI.h create mode 100755 include/yangrecliving/YangRecordUtilFactory.h create mode 100755 include/yangrecliving/YangScreenHandle.h create mode 100755 
include/yangrecliving/YangVrHandle.h create mode 100755 include/yangrecord/YangFlvWrite.h create mode 100755 include/yangrecord/YangMp4File.h create mode 100755 include/yangrecord/YangMp4FileApp.h create mode 100755 include/yangrecord/YangRecEncoder.h create mode 100755 include/yangrecord/YangRecord.h create mode 100755 include/yangrecord/YangRecordApp.h create mode 100755 include/yangrecord/YangRecordCapture.h create mode 100755 include/yangrecord/YangRecordMp4.h create mode 100755 include/yangrtmp/YangRtmpHandle.h create mode 100755 include/yangsrt/YangSrtBase.h create mode 100755 include/yangsrt/YangTsBuffer.h create mode 100755 include/yangsrt/YangTsMuxer.h create mode 100755 include/yangsrt/YangTsPacket.h create mode 100755 include/yangsrt/YangTsPid.h create mode 100755 include/yangsrt/YangTsdemux.h create mode 100755 include/yangsrt/common.h create mode 100755 include/yangsrt/crc.h create mode 100755 include/yangsrt/srt_data.hpp create mode 100755 include/yangstream/YangStream.h create mode 100755 include/yangstream/YangStreamCapture.h create mode 100755 include/yangstream/YangStreamHandle.h create mode 100755 include/yangstream/YangStreamManager.h create mode 100755 include/yangstream/YangStreamType.h create mode 100755 include/yangstream/YangSynBuffer.h create mode 100755 include/yangtrace/YangCameraControl.h create mode 100755 include/yangtrace/YangTraceHandle.h create mode 100755 include/yangtrace/YangTraceIni.h create mode 100755 include/yangtrace/YangTraceServer.h create mode 100755 include/yangtrace/YangTraceUdp.h create mode 100755 include/yangutil/YangErrorCode.h create mode 100755 include/yangutil/buffer/YangAudioBuffer.h create mode 100755 include/yangutil/buffer/YangAudioEncoderBuffer.h create mode 100755 include/yangutil/buffer/YangAudioPlayBuffer.h create mode 100755 include/yangutil/buffer/YangBuffer.h create mode 100755 include/yangutil/buffer/YangCBuffer.h create mode 100755 include/yangutil/buffer/YangMediaBuffer.h create mode 100755 include/yangutil/buffer/YangVideoBuffer.h create mode 100755 include/yangutil/buffer/YangVideoDecoderBuffer.h create mode 100755 include/yangutil/buffer/YangVideoEncoderBuffer.h create mode 100755 include/yangutil/sys/YangAmf.h create mode 100755 include/yangutil/sys/YangBits.h create mode 100755 include/yangutil/sys/YangCString.h create mode 100755 include/yangutil/sys/YangCTime.h create mode 100755 include/yangutil/sys/YangCTimer.h create mode 100755 include/yangutil/sys/YangCUrl.h create mode 100755 include/yangutil/sys/YangEndian.h create mode 100755 include/yangutil/sys/YangFile.h create mode 100755 include/yangutil/sys/YangHttp.h create mode 100755 include/yangutil/sys/YangHttpSocket.h create mode 100755 include/yangutil/sys/YangIni.h create mode 100755 include/yangutil/sys/YangJson.h create mode 100755 include/yangutil/sys/YangLibHandle.h create mode 100755 include/yangutil/sys/YangLoadLib.h create mode 100755 include/yangutil/sys/YangLog.h create mode 100755 include/yangutil/sys/YangMath.h create mode 100755 include/yangutil/sys/YangSRtp.h create mode 100755 include/yangutil/sys/YangSocket.h create mode 100755 include/yangutil/sys/YangSsl.h create mode 100755 include/yangutil/sys/YangSsrc.h create mode 100755 include/yangutil/sys/YangString.h create mode 100755 include/yangutil/sys/YangSysMessageHandle.h create mode 100755 include/yangutil/sys/YangSysMessageI.h create mode 100755 include/yangutil/sys/YangThread.h create mode 100755 include/yangutil/sys/YangTime.h create mode 100755 include/yangutil/sys/YangTimer.h create mode 
100755 include/yangutil/sys/YangUrl.h create mode 100755 include/yangutil/sys/YangVector.h create mode 100755 include/yangutil/sys/YangWebsocket.h create mode 100755 include/yangutil/sys/YangWindowsMouse.h create mode 100755 include/yangutil/yang_unistd.h create mode 100755 include/yangutil/yangavctype.h create mode 100755 include/yangutil/yangavctype_H.h create mode 100755 include/yangutil/yangavinfotype.h create mode 100755 include/yangutil/yangavtype.h create mode 100755 include/yangutil/yangavtype_h265.h create mode 100755 include/yangutil/yangtype.h create mode 100755 include/yangwebrtc/YangAVContext.h create mode 100755 include/yangwebrtc/YangMetaConnection.h create mode 100755 include/yangwebrtc/YangPeerConnection.h create mode 100755 include/yangwebrtc/YangRtcHandle.h create mode 100755 libmetartc3/CMakeLists.txt create mode 100755 libmetartc3/metartc3.pro create mode 100755 libmetartc3/src/yangaudiodev/YangAudioCapture.cpp create mode 100755 libmetartc3/src/yangaudiodev/YangAudioCaptureData.cpp create mode 100755 libmetartc3/src/yangaudiodev/YangAudioCaptureHandle.cpp create mode 100755 libmetartc3/src/yangaudiodev/YangAudioCaptureHandle.h create mode 100755 libmetartc3/src/yangaudiodev/YangAudioPlay.cpp create mode 100755 libmetartc3/src/yangaudiodev/YangAudioRenderData.cpp create mode 100755 libmetartc3/src/yangaudiodev/YangCaptureCallback.h create mode 100755 libmetartc3/src/yangaudiodev/linux/YangAlsaDeviceHandle.cpp create mode 100755 libmetartc3/src/yangaudiodev/linux/YangAlsaDeviceHandle.h create mode 100755 libmetartc3/src/yangaudiodev/linux/YangAlsaHandle.cpp create mode 100755 libmetartc3/src/yangaudiodev/linux/YangAlsaHandle.h create mode 100755 libmetartc3/src/yangaudiodev/linux/YangAudioCaptureImpl.cpp create mode 100755 libmetartc3/src/yangaudiodev/linux/YangAudioCaptureImpl.h create mode 100755 libmetartc3/src/yangaudiodev/linux/YangAudioPlayAlsa.cpp create mode 100755 libmetartc3/src/yangaudiodev/linux/YangAudioPlayAlsa.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangAudioApiCapture.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangRecAudioCaptureHandle.cpp create mode 100755 libmetartc3/src/yangaudiodev/win/YangRecAudioCaptureHandle.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioApi.cpp create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioApi.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioApiAec.cpp create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioApiAec.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioApiCapture.cpp create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioApiCapture.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioApiDevice.cpp create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioApiDevice.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioApiRender.cpp create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioApiRender.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioCapture.cpp create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioCapture.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioCaptureHandle.cpp create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioCaptureHandle.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinAudioDevice.h create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinRecordAudioCapture.cpp create mode 100755 libmetartc3/src/yangaudiodev/win/YangWinRecordAudioCapture.h create mode 100755 
libmetartc3/src/yangavutil/YangImageConvert.cpp create mode 100755 libmetartc3/src/yangavutil/YangImageConvert.h create mode 100755 libmetartc3/src/yangavutil/YangMakeWave.cpp create mode 100755 libmetartc3/src/yangavutil/YangPicUtilFfmpeg.cpp create mode 100755 libmetartc3/src/yangavutil/YangSwResample.cpp create mode 100755 libmetartc3/src/yangavutil/YangVideoEncoderMeta.cpp create mode 100755 libmetartc3/src/yangavutil/YangYuvConvert.cpp create mode 100755 libmetartc3/src/yangcapture/YangAudioDeviceQuery.cpp create mode 100755 libmetartc3/src/yangcapture/YangCaptureFactory.cpp create mode 100755 libmetartc3/src/yangcapture/YangDXGIManager.cpp create mode 100755 libmetartc3/src/yangcapture/YangDXGIManager.h create mode 100755 libmetartc3/src/yangcapture/YangScreenCaptureImpl.cpp create mode 100755 libmetartc3/src/yangcapture/YangScreenCaptureImpl.h create mode 100755 libmetartc3/src/yangcapture/YangScreenShare.cpp create mode 100755 libmetartc3/src/yangcapture/YangScreenShare.h create mode 100755 libmetartc3/src/yangcapture/YangVideoCapture.cpp create mode 100755 libmetartc3/src/yangcapture/YangVideoCaptureHandle.cpp create mode 100755 libmetartc3/src/yangcapture/YangVideoCaptureHandle.h create mode 100755 libmetartc3/src/yangcapture/YangVideoCaptureImpl.cpp create mode 100755 libmetartc3/src/yangcapture/YangVideoCaptureImpl.h create mode 100755 libmetartc3/src/yangcapture/YangVideoDeviceQuery.cpp create mode 100755 libmetartc3/src/yangcapture/YangVideoDeviceQuery.h create mode 100755 libmetartc3/src/yangcapture/win/YangVideoSrc.cpp create mode 100755 libmetartc3/src/yangcapture/win/YangVideoSrc.h create mode 100755 libmetartc3/src/yangcapture/win/YangWinVideoCapture.cpp create mode 100755 libmetartc3/src/yangcapture/win/YangWinVideoCapture.h create mode 100755 libmetartc3/src/yangcapture/win/YangWinVideoCaptureHandle.cpp create mode 100755 libmetartc3/src/yangcapture/win/YangWinVideoCaptureHandle.h create mode 100755 libmetartc3/src/yangdecoder/YangAudioDecoder.cpp create mode 100755 libmetartc3/src/yangdecoder/YangAudioDecoderAac.cpp create mode 100755 libmetartc3/src/yangdecoder/YangAudioDecoderAac.h create mode 100755 libmetartc3/src/yangdecoder/YangAudioDecoderHandle.cpp create mode 100755 libmetartc3/src/yangdecoder/YangAudioDecoderHandles.cpp create mode 100755 libmetartc3/src/yangdecoder/YangAudioDecoderOpus.cpp create mode 100755 libmetartc3/src/yangdecoder/YangAudioDecoderOpus.h create mode 100755 libmetartc3/src/yangdecoder/YangAudioDecoderSpeex.cpp create mode 100755 libmetartc3/src/yangdecoder/YangAudioDecoderSpeex.h create mode 100755 libmetartc3/src/yangdecoder/YangDecoderFactory.cpp create mode 100755 libmetartc3/src/yangdecoder/YangH2645VideoDecoderFfmpeg.cpp create mode 100755 libmetartc3/src/yangdecoder/YangH2645VideoDecoderFfmpeg.h create mode 100755 libmetartc3/src/yangdecoder/YangH264Dec.h create mode 100755 libmetartc3/src/yangdecoder/YangH264DecoderSoft.cpp create mode 100755 libmetartc3/src/yangdecoder/YangH264DecoderSoft.h create mode 100755 libmetartc3/src/yangdecoder/YangH264DecoderSoftFactory.h create mode 100755 libmetartc3/src/yangdecoder/YangH264Header.cpp create mode 100755 libmetartc3/src/yangdecoder/YangH264Header.h create mode 100755 libmetartc3/src/yangdecoder/YangH264Header1.h create mode 100755 libmetartc3/src/yangdecoder/YangHeaderParseFfmpeg.cpp create mode 100755 libmetartc3/src/yangdecoder/YangHeaderParseFfmpeg.h create mode 100755 libmetartc3/src/yangdecoder/YangVideoDecoderHandle.cpp create mode 100755 
libmetartc3/src/yangdecoder/YangVideoDecoderHandles.cpp create mode 100755 libmetartc3/src/yangdecoder/YangVideoDecoderIntel.cpp create mode 100755 libmetartc3/src/yangdecoder/YangVideoDecoderIntel.h create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoder.cpp create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderAac.cpp create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderAac.h create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderHandle.cpp create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderHandleCb.cpp create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderMeta.cpp create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderMp3.cpp create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderMp3.h create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderOpus.cpp create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderOpus.h create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderSpeex.cpp create mode 100755 libmetartc3/src/yangencoder/YangAudioEncoderSpeex.h create mode 100755 libmetartc3/src/yangencoder/YangEncoderFactory.cpp create mode 100755 libmetartc3/src/yangencoder/YangFfmpegEncoderMeta.cpp create mode 100755 libmetartc3/src/yangencoder/YangFfmpegEncoderMeta.h create mode 100755 libmetartc3/src/yangencoder/YangH264EncHeader.cpp create mode 100755 libmetartc3/src/yangencoder/YangH264EncHeader.h create mode 100755 libmetartc3/src/yangencoder/YangH264EncoderIntel.cpp create mode 100755 libmetartc3/src/yangencoder/YangH264EncoderIntel.h create mode 100755 libmetartc3/src/yangencoder/YangH264EncoderIntel1.h create mode 100755 libmetartc3/src/yangencoder/YangH264EncoderMeta.cpp create mode 100755 libmetartc3/src/yangencoder/YangH264EncoderMeta.h create mode 100755 libmetartc3/src/yangencoder/YangH264EncoderSoft.cpp create mode 100755 libmetartc3/src/yangencoder/YangH264EncoderSoft.h create mode 100755 libmetartc3/src/yangencoder/YangH265EncoderMeta.cpp create mode 100755 libmetartc3/src/yangencoder/YangH265EncoderMeta.h create mode 100755 libmetartc3/src/yangencoder/YangH265EncoderSoft.cpp create mode 100755 libmetartc3/src/yangencoder/YangH265EncoderSoft.h create mode 100755 libmetartc3/src/yangencoder/YangVideoEncoder.cpp create mode 100755 libmetartc3/src/yangencoder/YangVideoEncoderFfmpeg.cpp create mode 100755 libmetartc3/src/yangencoder/YangVideoEncoderFfmpeg.h create mode 100755 libmetartc3/src/yangencoder/YangVideoEncoderHandle.cpp create mode 100755 libmetartc3/src/yangencoder/lame.h create mode 100755 libmetartc3/src/yangplayer/YangPlayFactory.cpp create mode 100755 libmetartc3/src/yangplayer/YangPlayReceive.cpp create mode 100755 libmetartc3/src/yangplayer/YangPlayerBase.cpp create mode 100755 libmetartc3/src/yangplayer/YangPlayerDecoder.cpp create mode 100755 libmetartc3/src/yangplayer/YangPlayerHandleImpl.cpp create mode 100755 libmetartc3/src/yangplayer/YangPlayerHandleImpl.h create mode 100755 libmetartc3/src/yangplayer/YangPlayerPlay.cpp create mode 100755 libmetartc3/src/yangplayer/YangRtcReceive.cpp create mode 100755 libmetartc3/src/yangplayer/YangRtcReceive.h create mode 100755 libmetartc3/src/yangpush/YangPushCapture.cpp create mode 100755 libmetartc3/src/yangpush/YangPushEncoder.cpp create mode 100755 libmetartc3/src/yangpush/YangPushEncoder.h create mode 100755 libmetartc3/src/yangpush/YangPushFactory.cpp create mode 100755 libmetartc3/src/yangpush/YangPushHandleImpl.cpp create mode 100755 libmetartc3/src/yangpush/YangPushHandleImpl.h create mode 100755 
libmetartc3/src/yangpush/YangPushMessageHandle.cpp create mode 100755 libmetartc3/src/yangpush/YangPushMessageHandle.h create mode 100755 libmetartc3/src/yangpush/YangPushPublish.cpp create mode 100755 libmetartc3/src/yangpush/YangRtcPublish.cpp create mode 100755 libmetartc3/src/yangpush/YangSendVideoImpl.cpp create mode 100755 libmetartc3/src/yangpush/YangSendVideoImpl.h create mode 100755 libmetartc3/src/yangrecord/YangFlvWrite.cpp create mode 100755 libmetartc3/src/yangrecord/YangMp4File.cpp create mode 100755 libmetartc3/src/yangrecord/YangMp4FileApp.cpp create mode 100755 libmetartc3/src/yangrecord/YangRecEncoder.cpp create mode 100755 libmetartc3/src/yangrecord/YangRecord.cpp create mode 100755 libmetartc3/src/yangrecord/YangRecordApp.cpp create mode 100755 libmetartc3/src/yangrecord/YangRecordCapture.cpp create mode 100755 libmetartc3/src/yangrecord/YangRecordHandle.cpp create mode 100755 libmetartc3/src/yangrecord/YangRecordHandle.h create mode 100755 libmetartc3/src/yangrecord/YangRecordMp4.cpp create mode 100755 libmetartc3/src/yangsrt/YangSrtBase.cpp create mode 100755 libmetartc3/src/yangsrt/YangTsBuffer.cpp create mode 100755 libmetartc3/src/yangsrt/YangTsMuxer.cpp create mode 100755 libmetartc3/src/yangsrt/YangTsPacket.cpp create mode 100755 libmetartc3/src/yangsrt/YangTsdemux.cpp create mode 100755 libmetartc3/src/yangsrt/common.cpp create mode 100755 libmetartc3/src/yangsrt/crc.cpp create mode 100755 libmetartc3/src/yangsrt/srt_data.cpp create mode 100755 libmetartc3/src/yangstream/YangStreamHandle.cpp create mode 100755 libmetartc3/src/yangstream/YangStreamManager.cpp create mode 100755 libmetartc3/src/yangstream/YangStreamSrt.cpp create mode 100755 libmetartc3/src/yangstream/YangStreamSrt.h create mode 100755 libmetartc3/src/yangstream/YangSynBuffer.cpp create mode 100755 libmetartc3/src/yangutil/YangAvinfo.cpp create mode 100755 libmetartc3/src/yangutil/YangIniImpl.cpp create mode 100755 libmetartc3/src/yangutil/YangJson.cpp create mode 100755 libmetartc3/src/yangutil/YangLoadLib.cpp create mode 100755 libmetartc3/src/yangutil/YangString.cpp create mode 100755 libmetartc3/src/yangutil/YangSysMessageHandle.cpp create mode 100755 libmetartc3/src/yangutil/YangThread.cpp create mode 100755 libmetartc3/src/yangutil/YangTimer.cpp create mode 100755 libmetartc3/src/yangutil/YangWindowsMouse.cpp create mode 100755 libmetartc3/src/yangutil/buffer/YangAudioBuffer.cpp create mode 100755 libmetartc3/src/yangutil/buffer/YangAudioEncoderBuffer.cpp create mode 100755 libmetartc3/src/yangutil/buffer/YangAudioPlayBuffer.cpp create mode 100755 libmetartc3/src/yangutil/buffer/YangMediaBuffer.cpp create mode 100755 libmetartc3/src/yangutil/buffer/YangVideoBuffer.cpp create mode 100755 libmetartc3/src/yangutil/buffer/YangVideoDecoderBuffer.cpp create mode 100755 libmetartc3/src/yangutil/buffer/YangVideoEncoderBuffer.cpp create mode 100755 libmetartc3/utils.cmake create mode 100755 libmetartccore3/CMakeLists.txt create mode 100755 libmetartccore3/metartccore3.pro create mode 100755 libmetartccore3/src/ffmpeg/Makefile create mode 100755 libmetartccore3/src/ffmpeg/YangMetaConnection.h create mode 100755 libmetartccore3/src/ffmpeg/allformats.c create mode 100755 libmetartccore3/src/ffmpeg/demuxer_list.c create mode 100755 libmetartccore3/src/ffmpeg/ffmpeg.c create mode 100755 libmetartccore3/src/ffmpeg/h264dec.c create mode 100755 libmetartccore3/src/ffmpeg/muxer_list.c create mode 100755 libmetartccore3/src/ffmpeg/protocol_list.c create mode 100755 libmetartccore3/src/ffmpeg/protocols.c create 
mode 100755 libmetartccore3/src/ffmpeg/utils.c create mode 100755 libmetartccore3/src/ffmpeg/webrtc_demuxer.c create mode 100755 libmetartccore3/src/ffmpeg/webrtc_muxer.c create mode 100755 libmetartccore3/src/ffmpeg/webrtc_proto.c create mode 100755 libmetartccore3/src/ffmpeg/yang_h264_initExtra.h create mode 100755 libmetartccore3/src/yangavutil/YangAudioMix.c create mode 100755 libmetartccore3/src/yangavutil/YangAudioUtil.c create mode 100755 libmetartccore3/src/yangavutil/YangConvert.c create mode 100755 libmetartccore3/src/yangavutil/YangMeta.c create mode 100755 libmetartccore3/src/yangavutil/YangNalu.c create mode 100755 libmetartccore3/src/yangavutil/YangPreProcess.c create mode 100755 libmetartccore3/src/yangavutil/YangResample.c create mode 100755 libmetartccore3/src/yangavutil/YangRtcAec.c create mode 100755 libmetartccore3/src/yangavutil/YangYuvUtil.c create mode 100755 libmetartccore3/src/yangcsrs/YangSrsConnection.c create mode 100755 libmetartccore3/src/yangcsrs/YangSrsConnection.h create mode 100755 libmetartccore3/src/yangcsrs/YangSrsRtcHandle.c create mode 100755 libmetartccore3/src/yangcsrs/YangSrsSdp.c create mode 100755 libmetartccore3/src/yangcsrs/YangSrsSdp.h create mode 100755 libmetartccore3/src/yangrtmp/YangRtmp.c create mode 100755 libmetartccore3/src/yangrtmp/YangRtmp.h create mode 100755 libmetartccore3/src/yangrtmp/YangRtmp2.h create mode 100755 libmetartccore3/src/yangrtp/YangPublishNackBuffer.c create mode 100755 libmetartccore3/src/yangrtp/YangPublishNackBuffer.h create mode 100755 libmetartccore3/src/yangrtp/YangReceiveNackBuffer.c create mode 100755 libmetartccore3/src/yangrtp/YangReceiveNackBuffer.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcp.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcp.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpApp.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpApp.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpCommon.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpCommon.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpCompound.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpCompound.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpNack.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpNack.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpPli.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpPli.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpPsfbCommon.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpPsfbCommon.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpRR.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpRR.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpRpsi.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpRpsi.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpSR.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpSR.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpSli.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpSli.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpTWCC.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpTWCC.h create mode 100755 libmetartccore3/src/yangrtp/YangRtcpXr.c create mode 100755 libmetartccore3/src/yangrtp/YangRtcpXr.h create mode 100755 libmetartccore3/src/yangrtp/YangRtp.c create mode 100755 libmetartccore3/src/yangrtp/YangRtp.h create mode 100755 libmetartccore3/src/yangrtp/YangRtpBuffer.c create mode 100755 libmetartccore3/src/yangrtp/YangRtpBuffer.h create mode 100755 
libmetartccore3/src/yangrtp/YangRtpConstant.h create mode 100755 libmetartccore3/src/yangrtp/YangRtpFUAPayload.c create mode 100755 libmetartccore3/src/yangrtp/YangRtpFUAPayload.h create mode 100755 libmetartccore3/src/yangrtp/YangRtpFUAPayload2.c create mode 100755 libmetartccore3/src/yangrtp/YangRtpFUAPayload2.h create mode 100755 libmetartccore3/src/yangrtp/YangRtpHeader.c create mode 100755 libmetartccore3/src/yangrtp/YangRtpHeader.h create mode 100755 libmetartccore3/src/yangrtp/YangRtpPacket.c create mode 100755 libmetartccore3/src/yangrtp/YangRtpPacket.h create mode 100755 libmetartccore3/src/yangrtp/YangRtpRawPayload.c create mode 100755 libmetartccore3/src/yangrtp/YangRtpRawPayload.h create mode 100755 libmetartccore3/src/yangrtp/YangRtpRecvNack.c create mode 100755 libmetartccore3/src/yangrtp/YangRtpRecvNack.h create mode 100755 libmetartccore3/src/yangrtp/YangRtpSTAPPayload.c create mode 100755 libmetartccore3/src/yangrtp/YangRtpSTAPPayload.h create mode 100755 libmetartccore3/src/yangsdp/YangAudioPayload.c create mode 100755 libmetartccore3/src/yangsdp/YangAudioPayload.h create mode 100755 libmetartccore3/src/yangsdp/YangCodecPayload.c create mode 100755 libmetartccore3/src/yangsdp/YangCodecPayload.h create mode 100755 libmetartccore3/src/yangsdp/YangMediaDesc.c create mode 100755 libmetartccore3/src/yangsdp/YangMediaDesc.h create mode 100755 libmetartccore3/src/yangsdp/YangMediaPayloadType.c create mode 100755 libmetartccore3/src/yangsdp/YangMediaPayloadType.h create mode 100755 libmetartccore3/src/yangsdp/YangRedPayload.c create mode 100755 libmetartccore3/src/yangsdp/YangRedPayload.h create mode 100755 libmetartccore3/src/yangsdp/YangRtcSdp.c create mode 100755 libmetartccore3/src/yangsdp/YangRtcSdp.h create mode 100755 libmetartccore3/src/yangsdp/YangRtxPayloadDes.c create mode 100755 libmetartccore3/src/yangsdp/YangRtxPayloadDes.h create mode 100755 libmetartccore3/src/yangsdp/YangSSRCInfo.c create mode 100755 libmetartccore3/src/yangsdp/YangSSRCInfo.h create mode 100755 libmetartccore3/src/yangsdp/YangSdp.c create mode 100755 libmetartccore3/src/yangsdp/YangSdp.h create mode 100755 libmetartccore3/src/yangsdp/YangSdpHandle.c create mode 100755 libmetartccore3/src/yangsdp/YangSdpHandle.h create mode 100755 libmetartccore3/src/yangsdp/YangSdpType.h create mode 100755 libmetartccore3/src/yangstream/YangStream.c create mode 100755 libmetartccore3/src/yangstream/YangStreamCapture.c create mode 100755 libmetartccore3/src/yangstream/YangStreamRtc.c create mode 100755 libmetartccore3/src/yangstream/YangStreamRtc.h create mode 100755 libmetartccore3/src/yangstream/YangStreamRtmp.c create mode 100755 libmetartccore3/src/yangstream/YangStreamRtmp.h create mode 100755 libmetartccore3/src/yangutil/sys/YangAmf.c create mode 100755 libmetartccore3/src/yangutil/sys/YangAvtype.c create mode 100755 libmetartccore3/src/yangutil/sys/YangBuffer.c create mode 100755 libmetartccore3/src/yangutil/sys/YangCLog.c create mode 100755 libmetartccore3/src/yangutil/sys/YangCString.c create mode 100755 libmetartccore3/src/yangutil/sys/YangCTimer.c create mode 100755 libmetartccore3/src/yangutil/sys/YangEndian.c create mode 100755 libmetartccore3/src/yangutil/sys/YangFile.c create mode 100755 libmetartccore3/src/yangutil/sys/YangHttpSocket.c create mode 100755 libmetartccore3/src/yangutil/sys/YangLibHandle.c create mode 100755 libmetartccore3/src/yangutil/sys/YangMath.c create mode 100755 libmetartccore3/src/yangutil/sys/YangSRtp.c create mode 100755 libmetartccore3/src/yangutil/sys/YangSocket.c create 
mode 100755 libmetartccore3/src/yangutil/sys/YangSsl.c create mode 100755 libmetartccore3/src/yangutil/sys/YangSsrc.c create mode 100755 libmetartccore3/src/yangutil/sys/YangTime.c create mode 100755 libmetartccore3/src/yangutil/sys/YangUrl.c create mode 100755 libmetartccore3/src/yangutil/sys/YangVector.c create mode 100755 libmetartccore3/src/yangutil/sys/YangWebsocket.c create mode 100755 libmetartccore3/src/yangwebrtc/YangAec.c create mode 100755 libmetartccore3/src/yangwebrtc/YangH264RecvTrack.c create mode 100755 libmetartccore3/src/yangwebrtc/YangH264RecvTrack.h create mode 100755 libmetartccore3/src/yangwebrtc/YangH264RtpEncode.c create mode 100755 libmetartccore3/src/yangwebrtc/YangH264RtpEncode.h create mode 100755 libmetartccore3/src/yangwebrtc/YangH265RecvTrack.c create mode 100755 libmetartccore3/src/yangwebrtc/YangH265RecvTrack.h create mode 100755 libmetartccore3/src/yangwebrtc/YangH265RtpEncode.c create mode 100755 libmetartccore3/src/yangwebrtc/YangH265RtpEncode.h create mode 100755 libmetartccore3/src/yangwebrtc/YangMetaConnection.c create mode 100755 libmetartccore3/src/yangwebrtc/YangPeerConnection.c create mode 100755 libmetartccore3/src/yangwebrtc/YangRecvTrack.c create mode 100755 libmetartccore3/src/yangwebrtc/YangRecvTrack.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcAudioRecvTrack.c create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcAudioRecvTrack.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcConnection.c create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcConnection.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcContext.c create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcContext.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcContextH.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcDtls.c create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcDtls.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcDtlsH.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcEncodeCommon.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcPlayStream.c create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcPlayStream.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcPublishStream.c create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcPublishStream.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcSession.c create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcSession.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcSessionH.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcStream.h create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcStun.c create mode 100755 libmetartccore3/src/yangwebrtc/YangRtcStun.h create mode 100755 libmetartccore3/src/yangwebrtc/YangStreamHandle.c create mode 100755 libmetartccore3/src/yangwebrtc/YangStreamHandle.h create mode 100755 libmetartccore3/src/yangwebrtc/YangUdpHandle.c create mode 100755 libmetartccore3/src/yangwebrtc/YangUdpHandle.h create mode 100755 libmetartccore3/src/yangwebrtc/YangUdpHandleH.h create mode 100755 libmetartccore3/src/yangwebrtc/YangVideoRecvTrack.c create mode 100755 libmetartccore3/src/yangwebrtc/YangVideoRecvTrack.h create mode 100755 libmetartccore3/utils.cmake diff --git a/include/Yang_Config.h b/include/Yang_Config.h new file mode 100755 index 00000000..051a35c2 --- /dev/null +++ b/include/Yang_Config.h @@ -0,0 +1,33 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANG_CONFIG_H_ +#define INCLUDE_YANG_CONFIG_H_ + +#define Yang_HaveLibva 0 +#define 
Yang_Have10bit 0 + +#define Yang_HaveVr 0 +#define Yang_GPU_Encoding 0 +#define Yang_H265_Encoding 1 + +#ifdef _WIN32 +#define Yang_Using_H264Decoder 0 +#define Yang_Using_H264Decoder_So 0 +#define Yang_Ffmpeg_UsingSo 0 +#else +#define Yang_Using_H264Decoder 1 +#define Yang_Using_H264Decoder_So 1 +#define Yang_Ffmpeg_UsingSo 1 +#endif + +//option lib +#define Yang_HavePicUtilFfmpeg 0 +#define Yang_HaveWebsockets 1 //using libwebsockets +#define Yang_HaveCurl 0 //using libcurl +#define Yang_HaveJson 0 //using jsonlib + +#define Yang_Using_TWCC 0 + +#endif /* INCLUDE_YANG_CONFIG_H_ */ diff --git a/include/yangaudiodev/YangAudioCapture.h b/include/yangaudiodev/YangAudioCapture.h new file mode 100755 index 00000000..079d6082 --- /dev/null +++ b/include/yangaudiodev/YangAudioCapture.h @@ -0,0 +1,44 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGAUDIOCAPTURE_H +#define YANGAUDIOCAPTURE_H + +#include +#include +#include +#include "yangutil/buffer/YangAudioPlayBuffer.h" +#include "yangutil/buffer/YangAudioBuffer.h" +#include +#include "yangutil/sys/YangThread.h" +#include "yangutil/sys/YangLog.h" +using namespace std; + +class YangAudioCapture:public YangThread +{ + public: + YangAudioCapture(); + virtual ~YangAudioCapture(); + public: + + int32_t aIndex; + int32_t m_isStart; + virtual int32_t init()=0; + virtual void setCatureStart()=0; + virtual void setCatureStop()=0; + virtual void setOutAudioBuffer(YangAudioBuffer *pbuffer)=0; + virtual void setPlayAudoBuffer(YangAudioBuffer *pbuffer)=0; + virtual void setAec(YangRtcAec *paec)=0; + virtual void setInAudioBuffer(vector *pbuffer)=0; + virtual void setPreProcess(YangPreProcess *pp)=0; + + void stop(); + protected: + void run(); + YangContext *m_context; + virtual void startLoop()=0; + virtual void stopLoop()=0; + +}; + +#endif // YANGAUDIOCAPTURE_H diff --git a/include/yangaudiodev/YangAudioCaptureData.h b/include/yangaudiodev/YangAudioCaptureData.h new file mode 100755 index 00000000..2125c413 --- /dev/null +++ b/include/yangaudiodev/YangAudioCaptureData.h @@ -0,0 +1,35 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGCAPTURE_WIN_API_YANGCAPTUREAUDIODATA_H_ +#define YANGCAPTURE_WIN_API_YANGCAPTUREAUDIODATA_H_ +#include +#include +#include +#include +#include +#include +class YangAudioCaptureData { +public: + YangAudioCaptureData(); + virtual ~YangAudioCaptureData(); + void initIn(int psample,int pchannel); + void initOut(int psample,int pchannel); + void caputure(YangFrame* audioFrame); + int getOutLength(); + YangCaptureCallback* m_cb; + +private: + uint8_t* m_cache; + int m_cacheLen; + int m_size; + int m_pos; + +private: + YangAudioResample m_res; + YangFrame m_audioFrame; + + void captureData(); +}; + +#endif /* YANGCAPTURE_WIN_API_YANGCAPTUREAUDIODATA_H_ */ diff --git a/include/yangaudiodev/YangAudioDeviceQuery.h b/include/yangaudiodev/YangAudioDeviceQuery.h new file mode 100755 index 00000000..f0625e14 --- /dev/null +++ b/include/yangaudiodev/YangAudioDeviceQuery.h @@ -0,0 +1,31 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGAUDIODEV_YANGAUDIODEVICEQUERY_H_ +#define INCLUDE_YANGAUDIODEV_YANGAUDIODEVICEQUERY_H_ +#include +#include + +using namespace std; +struct YangAudioDeivce{ + string name; + string deviceName; + string subName; + int32_t aIndex; + int32_t aSubIndex; + int32_t aIdx; +}; +class YangAudioDeviceQuery { +public: + YangAudioDeviceQuery(); + virtual ~YangAudioDeviceQuery(); + vector* getCaptureDeviceList(); + vector* getPlayDeviceList(); +private: + vector 
m_captureDeviceList;
+ vector m_playDeviceList;
+ void getDeviceList(int32_t stream,vector* plist);
+
+};
+
+#endif /* INCLUDE_YANGAUDIODEV_YANGAUDIODEVICEQUERY_H_ */
diff --git a/include/yangaudiodev/YangAudioPlay.h b/include/yangaudiodev/YangAudioPlay.h
new file mode 100755
index 00000000..ca3a2fda
--- /dev/null
+++ b/include/yangaudiodev/YangAudioPlay.h
@@ -0,0 +1,61 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YangAudioPlay_H
+#define YangAudioPlay_H
+
+#include
+#include
+#include
+#include "yangutil/buffer/YangAudioPlayBuffer.h"
+#include "yangutil/sys/YangIni.h"
+#include "yangutil/sys/YangThread.h"
+#define YangAudioBufferMaxNum 10
+
+#include
+#include
+
+using namespace std;
+//#define SIZE_AUDIO_FRAME 4096
+
+
+
+class YangAudioPlay:public YangThread
+{
+ public:
+ YangAudioPlay(YangContext* pcontext);
+ virtual ~YangAudioPlay();
+ public:
+
+ int32_t aIndex;
+ virtual int init()=0;
+ void setAudioBuffers(vector *paudioList);
+ void setAudioBuffer(YangAudioPlayBuffer *paudioList);
+ void setAecBase(YangRtcAec* pace);
+ int32_t m_aecInit=0;
+ int32_t m_isStart;
+ void stop();
+ YangAudioRenderData m_audioData;
+ protected:
+ virtual void startLoop()=0;
+ virtual void stopLoop()=0;
+
+ void run();
+ YangContext *m_context;
+ YangRtcAec *m_ace;
+ //YangAudioMix mix;
+ int m_frames;
+ int m_channel;
+ int m_sample;
+
+
+
+
+ private:
+
+
+
+
+};
+
+#endif // YangAudioPlay_H
diff --git a/include/yangaudiodev/YangAudioPlayerSdl.h b/include/yangaudiodev/YangAudioPlayerSdl.h
new file mode 100755
index 00000000..cbd4924e
--- /dev/null
+++ b/include/yangaudiodev/YangAudioPlayerSdl.h
@@ -0,0 +1,63 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YANGPLAYER_INCLUDE_YANGAUDIOPLAYSDL1_H_
+#define YANGPLAYER_INCLUDE_YANGAUDIOPLAYSDL1_H_
+
+#include "yangutil/buffer/YangAudioPlayBuffer.h"
+#include "yangutil/sys/YangIni.h"
+#include "yangutil/sys/YangThread.h"
+#include "yangutil/sys/YangLoadLib.h"
+#include
+
+class YangAudioPlayerSdl:public YangThread{
+public:
+ YangAudioPlayerSdl(YangAudioInfo *pcontext);
+ ~YangAudioPlayerSdl();
+ void init();
+ void* (*yang_SDL_memset)(SDL_OUT_BYTECAP(len) void *dst, int32_t c, size_t len);
+ void (*yang_SDL_MixAudio)(Uint8 * dst, const Uint8 * src, Uint32 len, int32_t volume);
+ void setAudioList(YangAudioPlayBuffer *pal);
+ int32_t m_isStart;
+ void stop();
+
+ int32_t m_frames;
+ int32_t m_channel;
+ int32_t m_sample;
+ static void fill_audio(void *udata, Uint8 *stream, int32_t len);
+protected:
+ void startLoop();
+ void stopLoop();
+ void run();
+ // uint8_t *m_buffer[YangAudioBufferMaxNum];
+private:
+ YangAudioInfo *m_context;
+ YangAudioPlayBuffer *m_in_audioBuffer;
+ static YangAudioPlayerSdl* m_instance;
+ int32_t isInit;
+
+ void closeAudio();
+ int32_t ret;
+ int32_t m_size;
+ int32_t m_loops;
+
+ SDL_AudioSpec wanted;
+ void playSDL(uint8_t *p_data);
+ void initSDL();
+ void startLoopSDL();
+ YangLoadLib m_lib,m_lib1;
+ void loadLib();
+ void unloadLib();
+ int32_t (*yang_SDL_Init)(Uint32 flags);
+ void (*yang_SDL_Delay)(Uint32 ms);
+
+ const char * (*yang_SDL_GetError)(void);
+
+ int32_t (*yang_SDL_OpenAudio)(SDL_AudioSpec * desired, SDL_AudioSpec * obtained);
+
+ void (*yang_SDL_PauseAudio)(int32_t pause_on);
+ void (*yang_SDL_CloseAudio)(void);
+
+};
+
+#endif /* YANGPLAYER_INCLUDE_YANGAUDIOPLAYSDL1_H_ */
diff --git a/include/yangaudiodev/YangAudioRenderData.h b/include/yangaudiodev/YangAudioRenderData.h
new file mode 100755
index 00000000..c3775cfe
--- /dev/null
+++ b/include/yangaudiodev/YangAudioRenderData.h
@@ -0,0 +1,65 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGCAPTURE_YANGRENDERAUDIODATA_H_ +#define INCLUDE_YANGCAPTURE_YANGRENDERAUDIODATA_H_ +#include +#include +#include +#include +#include + +#include +using namespace std; + +class YangAudioRenderData { +public: + YangAudioRenderData(); + virtual ~YangAudioRenderData(); + +public: + vector *m_in_audioBuffers; + YangSynBuffer* m_syn; + YangPreProcess* m_preProcess; + YangAudioResample m_res; + void setAec(); + void setRenderLen(int plen); + + void initRender(int psample,int pchannel); + void initPlay(int psample,int pchannel); + uint8_t* getAudioRef(YangFrame* pframe); + uint8_t* getRenderAudioData(int len); + uint8_t* getAecAudioData(); + void setInAudioBuffer(YangSynBuffer *pal); + void setInAudioBuffers(vector *pal); + +private: + bool m_hasAec; + int m_aecBufLen; + int m_mixPos; + int m_renderLen; + uint8_t* m_aecBuf; + + + uint8_t* m_cache; + int m_cacheLen; + int m_size; + int m_pos; + + uint8_t* m_mixBuf; + + YangFrame m_audioFrame; + + // YangAudioMix m_mix; + +private: + + uint8_t* getAudioData(YangFrame* frame); + + void setAudioData(YangFrame* frame); + bool hasData(); + +}; + +#endif /* INCLUDE_YANGCAPTURE_YANGRENDERAUDIODATA_H_ */ diff --git a/include/yangavutil/audio/YangAudioMix.h b/include/yangavutil/audio/YangAudioMix.h new file mode 100755 index 00000000..75f8b9e2 --- /dev/null +++ b/include/yangavutil/audio/YangAudioMix.h @@ -0,0 +1,22 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGUTIL_AUDIO_YANGAUDIOMIX_H_ +#define YANGUTIL_AUDIO_YANGAUDIOMIX_H_ +#include +#define SIZE_AUDIO_FRAME 4096 +#ifdef __cplusplus +extern "C" { +#endif + +void yang_mixaudio_mix1(short *dst, short *src, int32_t len1, int32_t volume); +void yang_mixaudio_mix2(uint8_t *dst, uint8_t *src,int32_t len,int32_t pvolume); +void yang_mixaudio_mix4(uint8_t *dst, uint8_t *src, int32_t len,int32_t pvolume) ; +void yang_mixaudio_mix3(uint8_t *dst, uint8_t *src, int32_t len,int32_t pvolume); +void yang_mixaudio_mix5(uint8_t *dst, uint8_t *src, int32_t len1,int32_t pvolume); + +#ifdef __cplusplus +} +#endif +#endif /* YANGUTIL_AUDIO_YANGAUDIOMIX_H_ */ diff --git a/include/yangavutil/audio/YangAudioUtil.h b/include/yangavutil/audio/YangAudioUtil.h new file mode 100755 index 00000000..90506165 --- /dev/null +++ b/include/yangavutil/audio/YangAudioUtil.h @@ -0,0 +1,56 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef SRC_YANGUTIL_YANGAUDIOUTIL_H_ +#define SRC_YANGUTIL_YANGAUDIOUTIL_H_ +#include +#include +#include +#include +typedef struct { + int32_t inSample; + int32_t outSample; + int32_t inChannel; + int32_t outChannel; + + int32_t intervalTime; + + int32_t inBytes; + int32_t outBytes; + int32_t inFrames; + int32_t outFrames; + int32_t isInitTrans; + int32_t sampleTrans; + int32_t channelTrans; + int32_t isTrans; + uint8_t *inBuf; + uint8_t *outBuf; + + YangResample res; +} YangAudioResampleContext; +typedef struct { + YangAudioResampleContext* context; + void (*initIn)(YangAudioResampleContext* context,int32_t psample,int32_t pchannel); + void (*initOut)(YangAudioResampleContext* context,int32_t psample,int32_t pchannel); + void (*init)(YangAudioResampleContext* context,int32_t insample,int32_t inchannel,int32_t outsample,int32_t outchannel,int32_t ms); + int32_t (*resample)(YangAudioResampleContext* context,YangFrame* audioFrame); +}YangAudioResample; +#ifdef __cplusplus +extern "C" { +#endif +void yang_create_audioresample(YangAudioResample* res); +void 
yang_destroy_audioresample(YangAudioResample* res);
+int32_t MonoToStereo(int16_t *pData, int16_t *dData, int32_t samples_per_channel);
+int32_t StereoToMono(const int16_t* src_audio,int16_t* dst_audio,int32_t samples_per_channel);
+uint64_t Resample_s16(const int16_t *input, int16_t *output, int32_t inSampleRate, int32_t outSampleRate, uint64_t inputSize,
+		uint32_t channels);
+short FloatS16ToS16(float v);
+int16_t maxAbsValueW16C(const int16_t* vector, size_t length);
+void ComputeLevel(const int16_t* data, size_t length);
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* SRC_YANGUTIL_YANGAUDIOUTIL_H_ */
diff --git a/include/yangavutil/audio/YangMakeWave.h b/include/yangavutil/audio/YangMakeWave.h
new file mode 100755
index 00000000..4acc7b22
--- /dev/null
+++ b/include/yangavutil/audio/YangMakeWave.h
@@ -0,0 +1,41 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef _YangMakeWave_H_
+#define _YangMakeWave_H_
+
+#include
+#include "stdio.h"
+
+typedef short Int16;
+typedef int32_t Int32;
+typedef struct {
+ char fileID[4];
+ Int32 fileleth;
+ char wavTag[4];
+ char FmtHdrID[4];
+ Int32 FmtHdrLeth;
+ Int16 FormatTag;
+ Int16 Channels;
+ Int32 SamplesPerSec;
+ Int32 AvgBytesPerSec;
+ Int16 BlockAlign;
+ Int16 BitsPerSample;
+ char DataHdrID[4];
+ Int32 DataHdrLeth;
+} WaveHdr;
+class YangMakeWave{
+public:
+ YangMakeWave();
+ FILE *waveFile;
+ void init();
+ void start(int32_t pisMono,char * filename);
+ void write(uint8_t *data,int32_t len);
+ void stop();
+ void writeHeader(int32_t isMono,FILE *WavFile,unsigned long len);
+ int32_t isMp3(char* p);
+ //void updateLameTagFrame(lame_global_flags* gfp, FILE* fpStream);
+ int32_t PCMSize;
+ int32_t m_isMono;
+};
+#endif
diff --git a/include/yangavutil/audio/YangPreProcess.h b/include/yangavutil/audio/YangPreProcess.h
new file mode 100755
index 00000000..e1208e05
--- /dev/null
+++ b/include/yangavutil/audio/YangPreProcess.h
@@ -0,0 +1,27 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+
+#ifndef YANGAUDIOPROC_YANGPREPROCESSSPEEX_H_
+#define YANGAUDIOPROC_YANGPREPROCESSSPEEX_H_
+#include
+
+typedef struct{
+ void* context;
+ void (*init)(void* context,int32_t pFrameSize, int32_t sampleRate, int32_t pchannel);
+ void (*state_reset)(void* context);
+ void (*preprocess_run)(void* context,short *pcm);
+ void (*closePreprocess)(void* context);
+}YangPreProcess;
+#ifdef __cplusplus
+extern "C"{
+#endif
+
+void yang_create_preProcess(YangPreProcess* pre);
+void yang_destroy_preProcess(YangPreProcess* pre);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* YANGAUDIOPROC_YANGPREPROCESSSPEEX_H_ */
diff --git a/include/yangavutil/audio/YangResample.h b/include/yangavutil/audio/YangResample.h
new file mode 100755
index 00000000..5c4f71f6
--- /dev/null
+++ b/include/yangavutil/audio/YangResample.h
@@ -0,0 +1,23 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+
+#ifndef YANGAUDIOPROC_YANGRESAMPLE_H_
+#define YANGAUDIOPROC_YANGRESAMPLE_H_
+
+#include
+typedef struct{
+ void* context;
+ void (*init)(void* pcontext,int32_t pchannel,int32_t pinsample,int32_t poutsample);
+ void (*resample)(void* context,const short *pin,uint32_t pinLen,short* pout,uint32_t *poutLen);
+}YangResample;
+#ifdef __cplusplus
+extern "C"{
+#endif
+void yang_create_resample(YangResample* res);
+void yang_destroy_resample(YangResample* res);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* YANGAUDIOPROC_YANGRESAMPLE_H_ */
diff --git a/include/yangavutil/audio/YangRtcAec.h b/include/yangavutil/audio/YangRtcAec.h
new file mode 100755
index 00000000..62bfd14b
--- /dev/null
+++
b/include/yangavutil/audio/YangRtcAec.h @@ -0,0 +1,28 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGAUDIOPROC_YangRtcAec_H_ +#define YANGAUDIOPROC_YangRtcAec_H_ +#include +typedef struct{ + void *context; + void (*init)(void* context, int32_t sampleRate, int32_t pchannel,int32_t pframeSize,int32_t echopath); + void (*closeAec)(void* context); + + void (*echoCapture)(void* context,short *rec, short *out); + void (*preprocessRun)(void* context,short *pcm); + void (*echoStateReset)(void* context); + void (*echoPlayback)(void* context,short *play); + void (*echoCancellation)(void* context,const short *rec, const short *play, + short *out); +}YangRtcAec; +#ifdef __cplusplus +extern "C"{ +#endif +void yang_create_rtcaec(YangRtcAec* aec); +void yang_destroy_rtcaec(YangRtcAec* aec); +#ifdef __cplusplus +} +#endif +#endif /* YANGAUDIOPROC_YANGAECSPEEX_H_ */ diff --git a/include/yangavutil/audio/YangSwResample.h b/include/yangavutil/audio/YangSwResample.h new file mode 100755 index 00000000..e053f419 --- /dev/null +++ b/include/yangavutil/audio/YangSwResample.h @@ -0,0 +1,29 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGSWRESAMPLE_H +#define YANGSWRESAMPLE_H + +#include +#include +#include +#include +class YangSwResample +{ +public: + YangSwResample(); + ~YangSwResample(); + void resample(const uint8_t *pin,uint32_t pinLen,uint8_t* pout,uint32_t *poutLen); + int init(int32_t pchannel,int32_t pinsample,int32_t poutsample,int32_t pframesize); +private: + struct SwrContext* swr_ctx; + int32_t m_inSample; + int32_t m_outSample; + int32_t m_contextt; + int32_t m_channel; + int32_t m_frameSize; + + uint8_t** m_swrData; +}; + +#endif // YANGSWRESAMPLE_H diff --git a/include/yangavutil/video/YangBittype.h b/include/yangavutil/video/YangBittype.h new file mode 100755 index 00000000..f647753a --- /dev/null +++ b/include/yangavutil/video/YangBittype.h @@ -0,0 +1,262 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGbittype_H_ +#define YANGbittype_H_ +#include "stdint.h" +#define Yang_QP_MAX_NUM (51 + 6*6) +typedef struct YangGetBitContext { + uint8_t *buffer, *buffer_end; + int32_t index; + int32_t size_in_bits; + int32_t size_in_bits_plus8; +} YangGetBitContext; +enum YangPictureType { + Yang_PICTURE_TYPE_NONE = 0, ///< Undefined + Yang_PICTURE_TYPE_I, ///< Intra + Yang_PICTURE_TYPE_P, ///< Predicted + Yang_PICTURE_TYPE_B, ///< Bi-dir predicted + Yang_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG-4 + Yang_PICTURE_TYPE_SI, ///< Switching Intra + Yang_PICTURE_TYPE_SP, ///< Switching Predicted + Yang_PICTURE_TYPE_BI, ///< BI type +}; +typedef struct YangPredWeightTable { + int32_t use_weight; + int32_t use_weight_chroma; + int32_t luma_log2_weight_denom; + int32_t chroma_log2_weight_denom; + int32_t luma_weight_flag[2]; ///< 7.4.3.2 luma_weight_lX_flag + int32_t chroma_weight_flag[2]; ///< 7.4.3.2 chroma_weight_lX_flag + // The following 2 can be changed to char but that causes a 10 CPU cycles speed loss + int32_t luma_weight[48][2][2]; + int32_t chroma_weight[48][2][2][2]; + int32_t implicit_weight[48][48][2]; +} YangPredWeightTable; +typedef struct YangNAL { + uint8_t *rbsp_buffer; + + int32_t size; + uint8_t *data; + + /** + * Size, in bits, of just the data, excluding the stop bit and any trailing + * padding. I.e. what HEVC calls SODB. 
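+ * (RBSP = SODB + rbsp_stop_one_bit + zero alignment bits, so size_bits
+ * is normally a little less than 8 * size.)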
+ */ + int32_t size_bits; + + int32_t raw_size; + uint8_t *raw_data; + + YangGetBitContext gb; + + /** + * NAL unit type + */ + int32_t type; + + /** + * HEVC only, nuh_temporal_id_plus_1 - 1 + */ + int32_t temporal_id; + + int32_t skipped_bytes; + int32_t skipped_bytes_pos_size; + int32_t *skipped_bytes_pos; + /** + * H.264 only, nal_ref_idc + */ + int32_t ref_idc; +} YangNAL; + +typedef struct YangRBSP { + uint8_t *rbsp_buffer; + int32_t rbsp_buffer_alloc_size; + int32_t rbsp_buffer_size; +} YangRBSP; + +/* an input packet split into unescaped NAL units */ +typedef struct YangPacket { + YangNAL *nals; + YangRBSP rbsp; + int32_t nb_nals; + int32_t nals_allocated; +} YangPacket; +typedef struct YangRational{ + int32_t num; ///< Numerator + int32_t den; ///< Denominator +} YangRational; +typedef enum Yang_MMCOOpcode { + Yang_MMCO_END = 0, + Yang_MMCO_SHORT2UNUSED, + Yang_MMCO_LONG2UNUSED, + Yang_MMCO_SHORT2LONG, + Yang_MMCO_SET_MAX_LONG, + Yang_MMCO_RESET, + Yang_MMCO_LONG, +} Yang_MMCOOpcode; +typedef struct YangMMCO { + Yang_MMCOOpcode opcode; + int32_t short_pic_num; ///< pic_num without wrapping (pic_num & max_pic_num) + int32_t long_arg; ///< index, pic_num, or num long refs depending on opcode +} YangMMCO; + +enum YangColorTransferCharacteristic { + Yang_TRC_RESERVED0 = 0, + Yang_TRC_BT709 = 1, ///< also ITU-R BT1361 + Yang_TRC_UNSPECIFIED = 2, + Yang_TRC_RESERVED = 3, + Yang_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + Yang_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + Yang_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC + Yang_TRC_SMPTE240M = 7, + Yang_TRC_LINEAR = 8, ///< "Linear transfer characteristics" + Yang_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)" + Yang_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)" + Yang_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4 + Yang_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut + Yang_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC) + Yang_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system + Yang_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system + Yang_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems + Yang_TRC_SMPTEST2084 = Yang_TRC_SMPTE2084, + Yang_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1 + Yang_TRC_SMPTEST428_1 = Yang_TRC_SMPTE428, + Yang_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma" + Yang_TRC_NB ///< Not part of ABI +}; +enum YangColorPrimaries { + Yang_PRI_RESERVED0 = 0, + Yang_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + Yang_PRI_UNSPECIFIED = 2, + Yang_PRI_RESERVED = 3, + Yang_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + + Yang_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + Yang_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + Yang_PRI_SMPTE240M = 7, ///< functionally identical to above + Yang_PRI_FILM = 8, ///< colour filters using Illuminant C + Yang_PRI_BT2020 = 9, ///< ITU-R BT2020 + Yang_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ) + Yang_PRI_SMPTEST428_1 = Yang_PRI_SMPTE428, + Yang_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3 + Yang_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3 + Yang_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors + Yang_PRI_NB ///< Not part of ABI +}; +enum YangColorSpace { + Yang_SPC_RGB = 0, ///< 
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
+ Yang_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
+ Yang_SPC_UNSPECIFIED = 2,
+ Yang_SPC_RESERVED = 3,
+ Yang_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+ Yang_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ Yang_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ Yang_SPC_SMPTE240M = 7, ///< functionally identical to above
+ Yang_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+ Yang_SPC_YCOCG = Yang_SPC_YCGCO,
+ Yang_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
+ Yang_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
+ Yang_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x
+ Yang_SPC_CHROMA_DERIVED_NCL = 12, ///< Chromaticity-derived non-constant luminance system
+ Yang_SPC_CHROMA_DERIVED_CL = 13, ///< Chromaticity-derived constant luminance system
+ Yang_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp
+ Yang_SPC_NB ///< Not part of ABI
+};
+typedef struct Yang_SPS {
+ uint32_t sps_id;
+ int32_t profile_idc;
+ int32_t level_idc;
+ int32_t chroma_format_idc;
+ int32_t transform_bypass; ///< qpprime_y_zero_transform_bypass_flag
+ int32_t log2_max_frame_num; ///< log2_max_frame_num_minus4 + 4
+ int32_t poc_type; ///< pic_order_cnt_type
+ int32_t log2_max_poc_lsb; ///< log2_max_pic_order_cnt_lsb_minus4
+ int32_t delta_pic_order_always_zero_flag;
+ int32_t offset_for_non_ref_pic;
+ int32_t offset_for_top_to_bottom_field;
+ int32_t poc_cycle_length; ///< num_ref_frames_in_pic_order_cnt_cycle
+ int32_t ref_frame_count; ///< num_ref_frames
+ int32_t gaps_in_frame_num_allowed_flag;
+ int32_t mb_width; ///< pic_width_in_mbs_minus1 + 1
+ ///< (pic_height_in_map_units_minus1 + 1) * (2 - frame_mbs_only_flag)
+ int32_t mb_height;
+ int32_t frame_mbs_only_flag;
+ int32_t mb_aff; ///< mb_adaptive_frame_field_flag
+ int32_t direct_8x8_inference_flag;
+ int32_t crop; ///< frame_cropping_flag
+
+ /* those 4 are already in luma samples */
+ uint32_t crop_left; ///< frame_cropping_rect_left_offset
+ uint32_t crop_right; ///< frame_cropping_rect_right_offset
+ uint32_t crop_top; ///< frame_cropping_rect_top_offset
+ uint32_t crop_bottom; ///< frame_cropping_rect_bottom_offset
+ int32_t vui_parameters_present_flag;
+ YangRational sar;
+ int32_t video_signal_type_present_flag;
+ int32_t full_range;
+ int32_t colour_description_present_flag;
+ //enum YangColorPrimaries
+ int32_t color_primaries;
+ //enum YangColorTransferCharacteristic
+ int32_t color_trc;
+ //enum YangColorSpace
+ int32_t colorspace;
+ int32_t timing_info_present_flag;
+ uint32_t num_units_in_tick;
+ uint32_t time_scale;
+ int32_t fixed_frame_rate_flag;
+ short offset_for_ref_frame[256]; // FIXME dyn alloc?
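+ /* A handy relation for the VUI timing fields above (a sketch, not used by
+  * this header): for frame-coded content the frame rate works out to
+  * time_scale / (2 * num_units_in_tick) when fixed_frame_rate_flag is set,
+  * since num_units_in_tick counts field-rate clock ticks. */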
+ int32_t bitstream_restriction_flag; + int32_t num_reorder_frames; + int32_t scaling_matrix_present; + uint8_t scaling_matrix4[6][16]; + uint8_t scaling_matrix8[6][64]; + int32_t nal_hrd_parameters_present_flag; + int32_t vcl_hrd_parameters_present_flag; + int32_t pic_struct_present_flag; + int32_t time_offset_length; + int32_t cpb_cnt; ///< See H.264 E.1.2 + int32_t initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 + 1 + int32_t cpb_removal_delay_length; ///< cpb_removal_delay_length_minus1 + 1 + int32_t dpb_output_delay_length; ///< dpb_output_delay_length_minus1 + 1 + int32_t bit_depth_luma; ///< bit_depth_luma_minus8 + 8 + int32_t bit_depth_chroma; ///< bit_depth_chroma_minus8 + 8 + int32_t residual_color_transform_flag; ///< residual_colour_transform_flag + int32_t constraint_set_flags; ///< constraint_set[0-3]_flag + // uint8_t data[4096]; + // size_t data_size; +} Yang_SPS; + +/** + * Picture parameter set + */ +typedef struct Yang_PPS { + uint32_t sps_id; + int32_t cabac; ///< entropy_coding_mode_flag + int32_t pic_order_present; ///< pic_order_present_flag + int32_t slice_group_count; ///< num_slice_groups_minus1 + 1 + int32_t mb_slice_group_map_type; + uint32_t ref_count[2]; ///< num_ref_idx_l0/1_active_minus1 + 1 + int32_t weighted_pred; ///< weighted_pred_flag + int32_t weighted_bipred_idc; + int32_t init_qp; ///< pic_init_qp_minus26 + 26 + int32_t init_qs; ///< pic_init_qs_minus26 + 26 + int32_t chroma_qp_index_offset[2]; + int32_t deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag + int32_t constrained_intra_pred; ///< constrained_intra_pred_flag + int32_t redundant_pic_cnt_present; ///< redundant_pic_cnt_present_flag + int32_t transform_8x8_mode; ///< transform_8x8_mode_flag + uint8_t scaling_matrix4[6][16]; + uint8_t scaling_matrix8[6][64]; + uint8_t chroma_qp_table[2][Yang_QP_MAX_NUM+1]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table + int32_t chroma_qp_diff; + //uint8_t data[4096]; + // size_t data_size; + + uint32_t dequant4_buffer[6][Yang_QP_MAX_NUM + 1][16]; + uint32_t dequant8_buffer[6][Yang_QP_MAX_NUM + 1][64]; + uint32_t(*dequant4_coeff[6])[16]; + uint32_t(*dequant8_coeff[6])[64]; +} Yang_PPS; +#endif diff --git a/include/yangavutil/video/YangBmp.h b/include/yangavutil/video/YangBmp.h new file mode 100755 index 00000000..660afbca --- /dev/null +++ b/include/yangavutil/video/YangBmp.h @@ -0,0 +1,47 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangBmp_H_ +#define YangBmp_H_ + +#pragma pack(1) +struct Yang_BITMAPFILEHEADER { + unsigned short bfType; + uint32_t bfSize; + unsigned short bfReserved1; + unsigned short bfReserved2; + uint32_t bfOffBits; + +}; + +struct Yang_BITMAPINFOHEADER +{ + uint32_t biSize; + uint32_t biWidth; + uint32_t biHeight; + unsigned short biPlanes; + unsigned short biBitCount; + uint32_t biCompression; + uint32_t biSizeImage; + uint32_t biXPelsPerMeter; + uint32_t biYPelsPerMeter; + uint32_t biClrUsed; + uint32_t biClrImportant; +}; +#pragma pack() + +class YangBmp { +public: + YangBmp(); + virtual ~YangBmp(); +public: + void create_bmpheader(int32_t p_width, int32_t p_height); + void save_bmp(char *p_filename, char *p_addr, int32_t p_len); +protected: + +private: + struct Yang_BITMAPFILEHEADER bfh; + struct Yang_BITMAPINFOHEADER bih; +}; + +#endif diff --git a/include/yangavutil/video/YangCMeta.h b/include/yangavutil/video/YangCMeta.h new file mode 100755 index 00000000..6320ea9f --- /dev/null +++ b/include/yangavutil/video/YangCMeta.h @@ -0,0 
+1,28 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGAVUTIL_VIDEO_YANGCMETA_H_ +#define INCLUDE_YANGAVUTIL_VIDEO_YANGCMETA_H_ + +#include +//#include +#include +void yang_find_start_code(YangVideoCodec pve,uint8_t *buf,int32_t bufLen,int32_t *vpsPos,int32_t *vpsLen,int32_t *spsPos,int32_t *spsLen,int32_t *ppsPos,int32_t *ppsLen); +void yang_createH264Meta( YangVideoMeta* pvmd, YangFrame *videoFrame); +void yang_createH265Meta( YangVideoMeta* pvmd, YangFrame *videoFrame); +void yang_getConfig_Flv_H264( YangH2645Conf *p_264, uint8_t *configBuf,int32_t *p_configLen); +void yang_getConfig_Meta_H264( YangSample* sps, YangSample* pps,uint8_t *configBuf,int32_t *p_configLen); +void yang_getConfig_Flv_H265( YangH2645Conf *p_264, uint8_t *configBuf,int32_t *p_configLen); +void yang_getConfig_Meta_H265( YangSample* vps, YangSample* sps, YangSample* pps, uint8_t *configBuf,int32_t *p_configLen); + +void yang_getH265RtmpHeader(uint8_t *meta, uint8_t *src, int32_t *hLen); +void yang_getH264RtmpHeader(uint8_t *buf, uint8_t *src, int32_t *hLen); + +void yang_decodeMetaH264(uint8_t *configBuf,int32_t p_configLen, YangSample* sps, YangSample* pps); +void yang_decodeMetaH265(uint8_t *configBuf,int32_t p_configLen, YangSample* vps, YangSample* sps, YangSample* pps); + + + + +#endif /* INCLUDE_YANGAVUTIL_VIDEO_YANGCMETA_H_ */ diff --git a/include/yangavutil/video/YangCNalu.h b/include/yangavutil/video/YangCNalu.h new file mode 100755 index 00000000..264e5f55 --- /dev/null +++ b/include/yangavutil/video/YangCNalu.h @@ -0,0 +1,40 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGAVUTIL_VIDEO_YANGCNALU_H_ +#define INCLUDE_YANGAVUTIL_VIDEO_YANGCNALU_H_ + + +#include +//#include +#include +#include + typedef struct { + int spsppsPos; + int keyframePos; +}YangH264NaluData; + typedef struct { + int spsPos; + int ppsPos; + int spsLen; + int ppsLen; + int keyframePos; +}YangH264NaluData2; +int32_t yang_parseH264Nalu(YangFrame* videoFrame, YangH264NaluData* pnalu); +int32_t yang_parseH264Nalu2(YangFrame* videoFrame, YangH264NaluData2* pnalu); +int32_t yang_getH264SpsppseNalu(YangFrame* videoFrame,uint8_t* pnaludata); +int32_t yang_getH264KeyframeNalu(YangFrame* videoFrame); +bool yang_hasH264Pframe(uint8_t* p); + +int32_t yang_getH265SpsppseNalu(YangFrame* videoFrame,uint8_t* pnaludata); +int32_t yang_parseH265Nalu(YangFrame* videoFrame,YangH264NaluData* pnalu); + +/** + * Table 7-1 - NAL unit type codes, syntax element categories, and NAL unit type classes + * ISO_IEC_14496-10-AVC-2012.pdf, page 83. 
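+ * Commonly checked type codes: 1 = non-IDR slice, 5 = IDR slice, 6 = SEI,
+ * 7 = SPS, 8 = PPS; the type is the low five bits of the NALU header byte.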
+ */ + +//std::string yang_avc_nalu2str(YangAvcNaluType nalu_type); + + +#endif /* INCLUDE_YANGAVUTIL_VIDEO_YANGCNALU_H_ */ diff --git a/include/yangavutil/video/YangCYuvUtil.h b/include/yangavutil/video/YangCYuvUtil.h new file mode 100755 index 00000000..4270a244 --- /dev/null +++ b/include/yangavutil/video/YangCYuvUtil.h @@ -0,0 +1,20 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGAVUTIL_VIDEO_YANGCYUVUTIL_H_ +#define INCLUDE_YANGAVUTIL_VIDEO_YANGCYUVUTIL_H_ + +#include "stdint.h" + +void yang_plusNV12(uint8_t* src,uint8_t * dest,int32_t model,int32_t srcWidth,int32_t srcHeight,int32_t destWidth,int32_t destHeight); +void yang_plusI420(uint8_t* src,uint8_t * dest,int32_t model,int32_t srcWidth,int32_t srcHeight,int32_t destWidth,int32_t destHeight); +void yang_plusYuy2(uint8_t* src,uint8_t * dest,int32_t model,int32_t srcWidth,int32_t srcHeight,int32_t destWidth,int32_t destHeight); +void yang_rgbtobgr(uint8_t *rgb,uint8_t *bgr,int32_t srcWidth,int32_t srcHeight); +void yang_zoom4(uint8_t* src,uint8_t* dst,int32_t wid,int32_t hei); + + + + + +#endif /* INCLUDE_YANGAVUTIL_VIDEO_YANGCYUVUTIL_H_ */ diff --git a/include/yangavutil/video/YangGetBits.h b/include/yangavutil/video/YangGetBits.h new file mode 100755 index 00000000..3d0f1d1a --- /dev/null +++ b/include/yangavutil/video/YangGetBits.h @@ -0,0 +1,750 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGUTIL_VIDEO_YANGGETBITS_H_ +#define YANGUTIL_VIDEO_YANGGETBITS_H_ + +#include +#include "stdint.h" +#include "limits.h" + + +class YangGetBits { +public: + YangGetBits(); + virtual ~YangGetBits(); +}; + +//#define BITSTREAM_READER_LE +#ifndef UNCHECKED_BITSTREAM_READER +#define UNCHECKED_BITSTREAM_READER !CONFIG_YANG_BITSTREAM_READER +#endif + +#ifndef CACHED_BITSTREAM_READER +#define CACHED_BITSTREAM_READER 0 +#endif + + + +#define yang_MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) +#define YangERRTAG(a, b, c, d) (-(int)yang_MKTAG(a, b, c, d)) +#define YangERROR_INVALIDDATA YangERRTAG( 'I','N','D','A') +#define Yang_INPUT_BUFFER_PADDING_SIZE 64 +#define YangMAX(a,b) ((a) > (b) ? 
(a) : (b)) +#define Yang_TYPE int16_t +inline unsigned yang_zero_extend(unsigned val, unsigned bits) { + return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits); +} +inline int32_t yang_sign_extend(int32_t val, unsigned bits) { + unsigned shift = 8 * sizeof(int) - bits; + union { + unsigned u; + int32_t s; + } v = { (unsigned) val << shift }; + return v.s >> shift; +} + +inline uint32_t yang_get_bits(YangGetBitContext *s, int32_t n); +inline void yang_skip_bits(YangGetBitContext *s, int32_t n); +inline uint32_t yang_show_bits(YangGetBitContext *s, int32_t n); + +#define Yang_RL32(x) \ + (((uint32_t)((const uint8_t*)(x))[3] << 24) | \ + (((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +# define Yang_RB32(x) \ + (((uint32_t)((const uint8_t*)(x))[0] << 24) | \ + (((const uint8_t*)(x))[1] << 16) | \ + (((const uint8_t*)(x))[2] << 8) | \ + ((const uint8_t*)(x))[3]) + +# define Yang_NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s))) +# define Yang_NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s))) + +#if CACHED_BITSTREAM_READER +# define Yang_MIN_CACHE_BITS 64 +#elif defined LONG_BITSTREAM_READER +# define Yang_MIN_CACHE_BITS 32 +#else +# define Yang_MIN_CACHE_BITS 25 +#endif + +#if !CACHED_BITSTREAM_READER + + #define Yang_OPEN_READER_NOSIZE(name, gb) \ + uint32_t name ## _index = (gb)->index; \ + uint32_t name ## _cache; + + #if UNCHECKED_BITSTREAM_READER + #define Yang_OPEN_READER(name, gb) Yang_OPEN_READER_NOSIZE(name, gb) + + #define Yang_BITS_AVAILABLE(name, gb) 1 + #else + #define Yang_OPEN_READER(name, gb) \ + Yang_OPEN_READER_NOSIZE(name, gb); \ + uint32_t name ## _size_plus8 = (gb)->size_in_bits_plus8 + + #define Yang_BITS_AVAILABLE(name, gb) name ## _index < name ## _size_plus8 + #endif + + #define Yang_CLOSE_READER(name, gb) (gb)->index = name ## _index + + # ifdef LONG_BITSTREAM_READER + + # define Yang_UPDATE_CACHE_LE(name, gb) name ## _cache = \ + Yang_RL64((gb)->buffer + (name ## _index >> 3)) >> (name ## _index & 7) + + # define Yang_UPDATE_CACHE_BE(name, gb) name ## _cache = \ + Yang_RB64((gb)->buffer + (name ## _index >> 3)) >> (32 - (name ## _index & 7)) + + #else + + # define Yang_UPDATE_CACHE_LE(name, gb) name ## _cache = \ + Yang_RL32((gb)->buffer + (name ## _index >> 3)) >> (name ## _index & 7) + + # define Yang_UPDATE_CACHE_BE(name, gb) name ## _cache = \ + Yang_RB32((gb)->buffer + (name ## _index >> 3)) << (name ## _index & 7) + + #endif + + #ifdef BITSTREAM_READER_LE + + # define Yang_UPDATE_CACHE(name, gb) Yang_UPDATE_CACHE_LE(name, gb) + + # define Yang_Yang_SKIP_BITS(name, gb, num) name ## _cache >>= (num) + + #else + + # define Yang_UPDATE_CACHE(name, gb) Yang_UPDATE_CACHE_BE(name, gb) + + # define Yang_Yang_SKIP_BITS(name, gb, num) name ## _cache <<= (num) + + #endif + + #if UNCHECKED_BITSTREAM_READER + # define Yang_SKIP_COUNTER(name, gb, num) name ## _index += (num) + #else + # define Yang_SKIP_COUNTER(name, gb, num) \ + name ## _index = FFMIN(name ## _size_plus8, name ## _index + (num)) + #endif + + #define Yang_BITS_LEFT(name, gb) ((int)((gb)->size_in_bits - name ## _index)) + + #define Yang_SKIP_BITS(name, gb, num) \ + do { \ + Yang_Yang_SKIP_BITS(name, gb, num); \ + Yang_SKIP_COUNTER(name, gb, num); \ + } while (0) + + #define LAST_Yang_SKIP_BITS(name, gb, num) Yang_SKIP_COUNTER(name, gb, num) + + #define Yang_SHOW_UBITS_LE(name, gb, num) yang_zero_extend(name ## _cache, num) + #define Yang_SHOW_SBITS_LE(name, gb, num) yang_sign_extend(name ## _cache, num) + + #define Yang_SHOW_UBITS_BE(name, gb, num) 
Yang_NEG_USR32(name ## _cache, num) + #define Yang_SHOW_SBITS_BE(name, gb, num) Yang_NEG_SSR32(name ## _cache, num) + + #ifdef BITSTREAM_READER_LE + # define Yang_SHOW_UBITS(name, gb, num) Yang_SHOW_UBITS_LE(name, gb, num) + # define Yang_SHOW_SBITS(name, gb, num) Yang_SHOW_SBITS_LE(name, gb, num) + #else + # define Yang_SHOW_UBITS(name, gb, num) Yang_SHOW_UBITS_BE(name, gb, num) + # define Yang_SHOW_SBITS(name, gb, num) Yang_SHOW_SBITS_BE(name, gb, num) + #endif + + #define Yang_GET_CACHE(name, gb) ((uint32_t) name ## _cache) + +#endif + +static int32_t yang_get_bits_count(YangGetBitContext *s) { +#if CACHED_BITSTREAM_READER + return s->index - s->bits_left; +#else + return s->index; +#endif +} + +#if CACHED_BITSTREAM_READER +static void yang_refill_32(YangGetBitContext *s) +{ +#if !UNCHECKED_BITSTREAM_READER + if (s->index >> 3 >= s->buffer_end - s->buffer) + return; +#endif + +#ifdef BITSTREAM_READER_LE + s->cache = (uint64_t)Yang_RL32(s->buffer + (s->index >> 3)) << s->bits_left | s->cache; +#else + s->cache = s->cache | (uint64_t)Yang_RB32(s->buffer + (s->index >> 3)) << (32 - s->bits_left); +#endif + s->index += 32; + s->bits_left += 32; +} + +static void yang_refill_64(YangGetBitContext *s) +{ +#if !UNCHECKED_BITSTREAM_READER + if (s->index >> 3 >= s->buffer_end - s->buffer) + return; +#endif + +#ifdef BITSTREAM_READER_LE + s->cache = Yang_RL64(s->buffer + (s->index >> 3)); +#else + s->cache = Yang_RB64(s->buffer + (s->index >> 3)); +#endif + s->index += 64; + s->bits_left = 64; +} + +static uint64_t yang_get_val(YangGetBitContext *s, unsigned n, int32_t is_le) +{ + uint64_t ret; + av_assert2(n>0 && n<=63); + if (is_le) { + ret = s->cache & ((UINT64_C(1) << n) - 1); + s->cache >>= n; + } else { + ret = s->cache >> (64 - n); + s->cache <<= n; + } + s->bits_left -= n; + return ret; +} + +static unsigned yang_show_val(const YangGetBitContext *s, unsigned n) +{ +#ifdef BITSTREAM_READER_LE + return s->cache & ((UINT64_C(1) << n) - 1); +#else + return s->cache >> (64 - n); +#endif +} +#endif + +/** + * Skips the specified number of bits. + * @param n the number of bits to skip, + * For the UNCHECKED_BITSTREAM_READER this must not cause the distance + * from the start to overflow int32_t. Staying within the bitstream + padding + * is sufficient, too. + */ +static void yang_skip_bits_long(YangGetBitContext *s, int32_t n) { +#if CACHED_BITSTREAM_READER + yang_skip_bits(s, n); +#else +#if UNCHECKED_BITSTREAM_READER + s->index += n; +#else + s->index += av_clip(n, -s->index, s->size_in_bits_plus8 - s->index); +#endif +#endif +} + +#if CACHED_BITSTREAM_READER +static void yang_skip_remaining(YangGetBitContext *s, unsigned n) +{ +#ifdef BITSTREAM_READER_LE + s->cache >>= n; +#else + s->cache <<= n; +#endif + s->bits_left -= n; +} +#endif + +/** + * Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB). 
+ * if MSB not set it is negative + * @param n length in bits + */ +static int32_t yang_get_xbits(YangGetBitContext *s, int32_t n) { +#if CACHED_BITSTREAM_READER + int32_t cache = yang_show_bits(s, 32); + int32_t sign = ~cache >> 31; + yang_skip_remaining(s, n); + + return ((((uint32_t)(sign ^ cache)) >> (32 - n)) ^ sign) - sign; +#else + register int32_t sign; + register int32_t cache; + Yang_OPEN_READER(re, s); + // av_assert2(n>0 && n<=25); + Yang_UPDATE_CACHE(re, s); + cache = Yang_GET_CACHE(re, s); + sign = ~cache >> 31; + LAST_Yang_SKIP_BITS(re, s, n); + Yang_CLOSE_READER(re, s); + return (Yang_NEG_USR32(sign ^ cache, n) ^ sign) - sign; +#endif +} + +#if !CACHED_BITSTREAM_READER +static int32_t yang_get_xbits_le(YangGetBitContext *s, int32_t n) { + register int32_t sign; + register int32_t cache; + Yang_OPEN_READER(re, s); + // av_assert2(n>0 && n<=25); + Yang_UPDATE_CACHE_LE(re, s); + cache = Yang_GET_CACHE(re, s); + sign = yang_sign_extend(~cache, n) >> 31; + LAST_Yang_SKIP_BITS(re, s, n); + Yang_CLOSE_READER(re, s); + return (yang_zero_extend(sign ^ cache, n) ^ sign) - sign; +} +#endif + +static int32_t yang_get_sbits(YangGetBitContext *s, int32_t n) { + register int32_t tmp; +#if CACHED_BITSTREAM_READER + av_assert2(n>0 && n<=25); + tmp = sign_extend(yang_get_bits(s, n), n); +#else + Yang_OPEN_READER(re, s); + //av_assert2(n>0 && n<=25); + Yang_UPDATE_CACHE(re, s); + tmp = Yang_SHOW_SBITS(re, s, n); + LAST_Yang_SKIP_BITS(re, s, n); + Yang_CLOSE_READER(re, s); +#endif + return tmp; +} + +/** + * Read 1-25 bits. + */ +inline uint32_t yang_get_bits(YangGetBitContext *s, int32_t n) { + register int32_t tmp; +#if CACHED_BITSTREAM_READER + + av_assert2(n>0 && n<=32); + if (n > s->bits_left) { + yang_refill_32(s); + if (s->bits_left < 32) + s->bits_left = n; + } + +#ifdef BITSTREAM_READER_LE + tmp = yang_get_val(s, n, 1); +#else + tmp = yang_get_val(s, n, 0); +#endif +#else + Yang_OPEN_READER(re, s); + // av_assert2(n>0 && n<=25); + Yang_UPDATE_CACHE(re, s); + tmp = Yang_SHOW_UBITS(re, s, n); + LAST_Yang_SKIP_BITS(re, s, n); + Yang_CLOSE_READER(re, s); +#endif + return tmp; +} + +/** + * Read 0-25 bits. + */ +inline int32_t yang_get_bitsz(YangGetBitContext *s, int32_t n) { + return n ? yang_get_bits(s, n) : 0; +} + +inline uint32_t yang_get_bits_le(YangGetBitContext *s, int32_t n) { +#if CACHED_BITSTREAM_READER + av_assert2(n>0 && n<=32); + if (n > s->bits_left) { + yang_refill_32(s); + if (s->bits_left < 32) + s->bits_left = n; + } + + return yang_get_val(s, n, 1); +#else + register int32_t tmp; + Yang_OPEN_READER(re, s); + // av_assert2(n>0 && n<=25); + Yang_UPDATE_CACHE_LE(re, s); + tmp = Yang_SHOW_UBITS_LE(re, s, n); + LAST_Yang_SKIP_BITS(re, s, n); + Yang_CLOSE_READER(re, s); + return tmp; +#endif +} + +/** + * Show 1-25 bits. 
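+ * Peeks without consuming: unlike yang_get_bits() the read position is left
+ * untouched, so a caller can probe ahead, e.g. (a sketch):
+ *   if (yang_show_bits(gb, 8) == 0x67) { ... } // looks like an H.264 SPS NALU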
+ */ +inline uint32_t yang_show_bits(YangGetBitContext *s, int32_t n) { + register int32_t tmp; +#if CACHED_BITSTREAM_READER + if (n > s->bits_left) + yang_refill_32(s); + + tmp = yang_show_val(s, n); +#else + Yang_OPEN_READER_NOSIZE(re, s); + // av_assert2(n>0 && n<=25); + Yang_UPDATE_CACHE(re, s); + tmp = Yang_SHOW_UBITS(re, s, n); +#endif + return tmp; +} + +inline void yang_skip_bits(YangGetBitContext *s, int32_t n) { +#if CACHED_BITSTREAM_READER + if (n < s->bits_left) + yang_skip_remaining(s, n); + else { + n -= s->bits_left; + s->cache = 0; + s->bits_left = 0; + + if (n >= 64) { + unsigned skip = (n / 8) * 8; + + n -= skip; + s->index += skip; + } + yang_refill_64(s); + if (n) + yang_skip_remaining(s, n); + } +#else + Yang_OPEN_READER(re, s); + LAST_Yang_SKIP_BITS(re, s, n); + Yang_CLOSE_READER(re, s); +#endif +} + +inline uint32_t yang_get_bits1(YangGetBitContext *s) { +#if CACHED_BITSTREAM_READER + if (!s->bits_left) + yang_refill_64(s); + +#ifdef BITSTREAM_READER_LE + return yang_get_val(s, 1, 1); +#else + return yang_get_val(s, 1, 0); +#endif +#else + uint32_t index = s->index; + uint8_t result = s->buffer[index >> 3]; +#ifdef BITSTREAM_READER_LE + result >>= index & 7; + result &= 1; +#else + result <<= index & 7; + result >>= 8 - 1; +#endif +#if !UNCHECKED_BITSTREAM_READER + if (s->index < s->size_in_bits_plus8) +#endif + index++; + s->index = index; + + return result; +#endif +} + +inline uint32_t yang_show_bits1(YangGetBitContext *s) { + return yang_show_bits(s, 1); +} + +inline void yang_skip_bits1(YangGetBitContext *s) { + yang_skip_bits(s, 1); +} + +/** + * Read 0-32 bits. + */ +inline uint32_t yang_get_bits_long(YangGetBitContext *s, int32_t n) { + //av_assert2(n>=0 && n<=32); + if (!n) { + return 0; +#if CACHED_BITSTREAM_READER + } + return yang_get_bits(s, n); +#else + } else if (n <= Yang_MIN_CACHE_BITS) { + return yang_get_bits(s, n); + } else { +#ifdef BITSTREAM_READER_LE + unsigned ret = yang_get_bits(s, 16); + return ret | (yang_get_bits(s, n - 16) << 16); +#else + unsigned ret = yang_get_bits(s, 16) << (n - 16); + return ret | yang_get_bits(s, n - 16); +#endif + } +#endif +} + +/** + * Read 0-64 bits. + */ +inline uint64_t yang_get_bits64(YangGetBitContext *s, int32_t n) { + if (n <= 32) { + return yang_get_bits_long(s, n); + } else { +#ifdef BITSTREAM_READER_LE + uint64_t ret = yang_get_bits_long(s, 32); + return ret | (uint64_t) yang_get_bits_long(s, n - 32) << 32; +#else + uint64_t ret = (uint64_t) yang_get_bits_long(s, n - 32) << 32; + return ret | yang_get_bits_long(s, 32); +#endif + } +} + +/** + * Read 0-32 bits as a signed integer. + */ +inline int32_t yang__get_sbits_long(YangGetBitContext *s, int32_t n) { + // sign_extend(x, 0) is undefined + if (!n) + return 0; + + return yang_sign_extend(yang_get_bits_long(s, n), n); +} + +/** + * Show 0-32 bits. + */ +inline uint32_t yang_show_bits_long(YangGetBitContext *s, int32_t n) { + if (n <= Yang_MIN_CACHE_BITS) { + return yang_show_bits(s, n); + } else { + YangGetBitContext gb = *s; + return yang_get_bits_long(&gb, n); + } +} + +inline int32_t yang_check_marker(void *logctx, YangGetBitContext *s, + const char *msg) { + int32_t bit = yang_get_bits1(s); + // if (!bit) + // av_log(logctx, AV_LOG_INFO, "Marker bit missing at %d of %d %s\n", yang_get_bits_count(s) - 1, s->size_in_bits, msg); + + return bit; +} + +/** + * Initialize YangGetBitContext. 
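+ * Typical use (a sketch; rbsp/rbsp_bytes stand for any suitably padded input):
+ *   YangGetBitContext gb;
+ *   if (yang_init_get_bits(&gb, rbsp, 8 * rbsp_bytes) == 0)
+ *       profile_idc = yang_get_bits(&gb, 8);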
+ * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes + * larger than the actual read bits because some optimized bitstream + * readers read 32 or 64 bit at once and could read over the end + * @param bit_size the size of the buffer in bits + * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow. + */ +inline int32_t yang_init_get_bits(YangGetBitContext *s, uint8_t *buffer, + int32_t bit_size) { + int32_t buffer_size; + int32_t ret = 0; + + if (bit_size >= INT_MAX - YangMAX(7, Yang_INPUT_BUFFER_PADDING_SIZE*8) + || bit_size < 0 || !buffer) { + bit_size = 0; + buffer = 0; + ret = YangERROR_INVALIDDATA; + } + + buffer_size = (bit_size + 7) >> 3; + + s->buffer = buffer; + s->size_in_bits = bit_size; + s->size_in_bits_plus8 = bit_size + 8; + s->buffer_end = buffer + buffer_size; + s->index = 0; + +#if CACHED_BITSTREAM_READER + yang_refill_64(s); +#endif + + return ret; +} + +/** + * Initialize YangGetBitContext. + * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes + * larger than the actual read bits because some optimized bitstream + * readers read 32 or 64 bit at once and could read over the end + * @param byte_size the size of the buffer in bytes + * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow. + */ +inline int32_t yang_init_get_bits8(YangGetBitContext *s, uint8_t *buffer, + int32_t byte_size) { + if (byte_size > INT_MAX / 8 || byte_size < 0) + byte_size = -1; + return yang_init_get_bits(s, buffer, byte_size * 8); +} + +static uint8_t *align_yang_get_bits(YangGetBitContext *s) { + int32_t n = -yang_get_bits_count(s) & 7; + if (n) + yang_skip_bits(s, n); + return s->buffer + (s->index >> 3); +} + +/** + * If the vlc code is invalid and max_depth=1, then no bits will be removed. + * If the vlc code is invalid and max_depth>1, then the number of bits removed + * is undefined. 
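+ * (Table layout: table[i][0] holds the code value or, when table[i][1] is
+ * negative, the offset of a sub-table; table[i][1] is the length in bits.)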
+ */ +#define Yang_GET_VLC(code, name, gb, table, bits, max_depth) \ + do { \ + int32_t n, nb_bits; \ + uint32_t index; \ + \ + index = Yang_SHOW_UBITS(name, gb, bits); \ + code = table[index][0]; \ + n = table[index][1]; \ + \ + if (max_depth > 1 && n < 0) { \ + LAST_Yang_SKIP_BITS(name, gb, bits); \ + Yang_UPDATE_CACHE(name, gb); \ + \ + nb_bits = -n; \ + \ + index = Yang_SHOW_UBITS(name, gb, nb_bits) + code; \ + code = table[index][0]; \ + n = table[index][1]; \ + if (max_depth > 2 && n < 0) { \ + LAST_Yang_SKIP_BITS(name, gb, nb_bits); \ + Yang_UPDATE_CACHE(name, gb); \ + \ + nb_bits = -n; \ + \ + index = Yang_SHOW_UBITS(name, gb, nb_bits) + code; \ + code = table[index][0]; \ + n = table[index][1]; \ + } \ + } \ + Yang_SKIP_BITS(name, gb, n); \ + } while (0) + +#define Yang_GET_RL_VLC(level, run, name, gb, table, bits, \ + max_depth, need_update) \ + do { \ + int32_t n, nb_bits; \ + uint32_t index; \ + \ + index = Yang_SHOW_UBITS(name, gb, bits); \ + level = table[index].level; \ + n = table[index].len; \ + \ + if (max_depth > 1 && n < 0) { \ + Yang_SKIP_BITS(name, gb, bits); \ + if (need_update) { \ + Yang_UPDATE_CACHE(name, gb); \ + } \ + \ + nb_bits = -n; \ + \ + index = Yang_SHOW_UBITS(name, gb, nb_bits) + level; \ + level = table[index].level; \ + n = table[index].len; \ + if (max_depth > 2 && n < 0) { \ + LAST_Yang_SKIP_BITS(name, gb, nb_bits); \ + if (need_update) { \ + Yang_UPDATE_CACHE(name, gb); \ + } \ + nb_bits = -n; \ + \ + index = Yang_SHOW_UBITS(name, gb, nb_bits) + level; \ + level = table[index].level; \ + n = table[index].len; \ + } \ + } \ + run = table[index].run; \ + Yang_SKIP_BITS(name, gb, n); \ + } while (0) + +/* Return the LUT element for the given bitstream configuration. */ +inline int32_t yang_set_idx(YangGetBitContext *s, int32_t code, int32_t *n, int32_t *nb_bits, +Yang_TYPE (*table)[2]) { + unsigned idx; + + *nb_bits = -*n; + idx = yang_show_bits(s, *nb_bits) + code; + *n = table[idx][1]; + + return table[idx][0]; +} + +/** + * Parse a vlc code. 
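+ * Performs a two-level table walk: a first probe of `bits` bits indexes the
+ * root table, and a negative length entry there redirects into a sub-table.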
+ * @param bits is the number of bits which will be read at once, must be + * identical to nb_bits in init_vlc() + * @param max_depth is the number of times bits bits must be read to completely + * read the longest vlc code + * = (max_vlc_length + bits - 1) / bits + * @returns the code parsed or -1 if no vlc matches + */ +inline int32_t Yang_GET_VLC2(YangGetBitContext *s, Yang_TYPE (*table)[2], int32_t bits, + int32_t max_depth) { +#if CACHED_BITSTREAM_READER + int32_t nb_bits; + unsigned idx = yang_show_bits(s, bits); + int32_t code = table[idx][0]; + int32_t n = table[idx][1]; + + if (max_depth > 1 && n < 0) { + yang_skip_remaining(s, bits); + code = yang_set_idx(s, code, &n, &nb_bits, table); + if (max_depth > 2 && n < 0) { + yang_skip_remaining(s, nb_bits); + code = yang_set_idx(s, code, &n, &nb_bits, table); + } + } + yang_skip_remaining(s, n); + + return code; +#else + int32_t code; + + Yang_OPEN_READER(re, s); + Yang_UPDATE_CACHE(re, s); + + Yang_GET_VLC(code, re, s, table, bits, max_depth); + + Yang_CLOSE_READER(re, s); + + return code; +#endif +} + +inline int32_t yang_decode012(YangGetBitContext *gb) { + int32_t n; + n = yang_get_bits1(gb); + if (n == 0) + return 0; + else + return yang_get_bits1(gb) + 1; +} + +inline int32_t yang_decode210(YangGetBitContext *gb) { + if (yang_get_bits1(gb)) + return 0; + else + return 2 - yang_get_bits1(gb); +} + +inline int32_t yang_get_bits_left(YangGetBitContext *gb) { + return gb->size_in_bits - yang_get_bits_count(gb); +} + +inline int32_t yang_skip_1stop_8data_bits(YangGetBitContext *gb) { + if (yang_get_bits_left(gb) <= 0) + return YangERROR_INVALIDDATA; + + while (yang_get_bits1(gb)) { + yang_skip_bits(gb, 8); + if (yang_get_bits_left(gb) <= 0) + return YangERROR_INVALIDDATA; + } + + return 0; +} + + +#endif /* YANGUTIL_VIDEO_YANGGETBITS_H_ */ diff --git a/include/yangavutil/video/YangGolomb.h b/include/yangavutil/video/YangGolomb.h new file mode 100755 index 00000000..3116d039 --- /dev/null +++ b/include/yangavutil/video/YangGolomb.h @@ -0,0 +1,406 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGGolombH12345_ +#define YANGGolombH12345_ +#include +#include "stdlib.h" +#include "immintrin.h" + +#define FFMIN(a,b) ((a) > (b) ? 
(b) : (a)) +#define INVALID_VLC 0x80000000 + +//#define yang_log2(x) (_bit_scan_reverse((x)|1)) +//#define ff_log2(x) (31 - __builtin_clz((x)|1)) + +const uint8_t yang_golomb_vlc_len[512]={ +19,17,15,15,13,13,13,13,11,11,11,11,11,11,11,11,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, +7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, +5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, +5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 +}; + +const uint8_t yang_ue_golomb_vlc_code[512]={ +32,32,32,32,32,32,32,32,31,32,32,32,32,32,32,32,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30, + 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9,10,10,10,10,11,11,11,11,12,12,12,12,13,13,13,13,14,14,14,14, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + +const char yang_se_golomb_vlc_code[512]={ + 17, 17, 17, 17, 17, 17, 17, 17, 16, 17, 17, 17, 17, 17, 17, 17, 8, -8, 9, -9, 10,-10, 11,-11, 12,-12, 13,-13, 14,-14, 15,-15, + 4, 4, 4, 4, -4, -4, -4, -4, 5, 5, 5, 5, -5, -5, -5, -5, 6, 6, 6, 6, -6, -6, -6, -6, 7, 7, 7, 7, -7, -7, -7, -7, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + + +const uint8_t yang_ue_golomb_len[256]={ + 1, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,11, +11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,13, +13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13, +13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,15, +15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15, +15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15, +15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15, +15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,17, +}; + +const uint8_t yang_interleaved_golomb_vlc_len[256]={ +9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5, +9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5, +9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, +}; + +const uint8_t yang_interleaved_ue_golomb_vlc_code[256]={ + 15,16,7, 7, 17,18,8, 8, 3, 3, 3, 3, 3, 3, 3, 3, + 19,20,9, 9, 21,22,10,10,4, 4, 4, 4, 4, 4, 4, 4, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 23,24,11,11,25,26,12,12,5, 5, 5, 5, 5, 5, 5, 5, + 27,28,13,13,29,30,14,14,6, 6, 6, 6, 6, 6, 6, 6, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +const char yang_interleaved_se_golomb_vlc_code[256]={ + 8, -8, 4, 4, 9, -9, -4, -4, 2, 2, 2, 2, 2, 2, 2, 2, + 10,-10, 5, 5, 11,-11, -5, -5, -2, -2, -2, -2, -2, 
-2, -2, -2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 12,-12, 6, 6, 13,-13, -6, -6, 3, 3, 3, 3, 3, 3, 3, 3,
+ 14,-14, 7, 7, 15,-15, -7, -7, -3, -3, -3, -3, -3, -3, -3, -3,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+const uint8_t yang_interleaved_dirac_golomb_vlc_code[256]={
+0, 1, 0, 0, 2, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+4, 5, 2, 2, 6, 7, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+8, 9, 4, 4, 10,11,5, 5, 2, 2, 2, 2, 2, 2, 2, 2,
+12,13,6, 6, 14,15,7, 7, 3, 3, 3, 3, 3, 3, 3, 3,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,};
+const uint8_t yang_log2_tab[256]={
+ 0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
+ 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
+};
+//#define get_ue_golomb(a) get_ue(a, __FILE__, __func__, __LINE__)
+//#define get_se_golomb(a) get_se(a, __FILE__, __func__, __LINE__)
+//#define get_te_golomb(a, r) get_te(a, r, __FILE__, __func__, __LINE__)
+//#define get_te0_golomb(a, r) get_te(a, r, __FILE__, __func__, __LINE__)
+
+/* integer log2 via the lookup table above */
+inline int32_t yang_log2(uint32_t v)
+{
+    int32_t n = 0;
+    if (v & 0xffff0000) {
+        v >>= 16;
+        n += 16;
+    }
+    if (v & 0xff00) {
+        v >>= 8;
+        n += 8;
+    }
+    n += yang_log2_tab[v];
+
+    return n;
+}
+
+/* read unsigned exp golomb code */
+inline int32_t yang_get_ue_golomb(YangGetBitContext *gb)
+{
+    uint32_t buf;
+
+#if CACHED_BITSTREAM_READER
+    buf = yang_show_bits_long(gb, 32);
+
+    if (buf >= (1 << 27)) {
+        buf >>= 32 - 9;
+        yang_skip_bits_long(gb, yang_golomb_vlc_len[buf]);
+
+        return yang_ue_golomb_vlc_code[buf];
+    } else {
+        int32_t log = 2 * yang_log2(buf) - 31;
+        buf >>= log;
+        buf--;
+        yang_skip_bits_long(gb, 32 - log);
+
+        return buf;
+    }
+#else
+    Yang_OPEN_READER(re, gb);
+    Yang_UPDATE_CACHE(re, gb);
+    buf = Yang_GET_CACHE(re, gb);
+
+    if (buf >= (1 << 27)) {
+        buf >>= 32 - 9;
+        LAST_Yang_SKIP_BITS(re, gb, yang_golomb_vlc_len[buf]);
+        Yang_CLOSE_READER(re, gb);
+
+        return yang_ue_golomb_vlc_code[buf];
+    } else {
+        int32_t log = 2 * yang_log2(buf) - 31;
+        LAST_Yang_SKIP_BITS(re, gb, 32 - log);
+        Yang_CLOSE_READER(re, gb);
+        if (log < 7) {
+            // av_log(NULL, AV_LOG_ERROR, "Invalid UE golomb code\n");
+            return YangERROR_INVALIDDATA;
+        }
+        buf >>= log;
+        buf--;
+
+        return buf;
+    }
+#endif
+}
+
+/* read unsigned exp golomb code; only values 0..30 are supported */
+inline int32_t yang_get_ue_golomb_31(YangGetBitContext *gb)
+{
+    uint32_t buf;
+    Yang_OPEN_READER(re, gb);
+    Yang_UPDATE_CACHE(re, gb);
+    buf = Yang_GET_CACHE(re, gb);
+    buf >>= 32 - 9;
+    LAST_Yang_SKIP_BITS(re, gb, yang_golomb_vlc_len[buf]);
+    Yang_CLOSE_READER(re, gb);
+
+    return yang_ue_golomb_vlc_code[buf];
+}
+
+inline unsigned yang_get_interleaved_ue_golomb(YangGetBitContext *gb)
+{
+    uint32_t buf;
+
+    Yang_OPEN_READER(re, gb);
+    Yang_UPDATE_CACHE(re, gb);
+    buf = Yang_GET_CACHE(re, gb);
+
+    if (buf & 0xAA800000) {
+        buf >>= 32 - 8;
+        LAST_Yang_SKIP_BITS(re, gb, yang_interleaved_golomb_vlc_len[buf]);
+        Yang_CLOSE_READER(re, gb);
+
+        return yang_interleaved_ue_golomb_vlc_code[buf];
+    } else {
+        unsigned ret = 1;
+
+        do {
+            buf >>= 32 - 8;
+            LAST_Yang_SKIP_BITS(re, gb,
+                                FFMIN(yang_interleaved_golomb_vlc_len[buf], 8));
+
+            if (yang_interleaved_golomb_vlc_len[buf] != 9) {
+                ret <<= (yang_interleaved_golomb_vlc_len[buf] - 1) >> 1;
+                ret |= yang_interleaved_dirac_golomb_vlc_code[buf];
+                break;
+            }
+            ret = (ret << 4) | yang_interleaved_dirac_golomb_vlc_code[buf];
+            Yang_UPDATE_CACHE(re, gb);
+            buf = Yang_GET_CACHE(re, gb);
+        } while (ret<0x8000000U && Yang_BITS_AVAILABLE(re, gb));
+
+        Yang_CLOSE_READER(re, gb);
+        return ret - 1;
+    }
+}
+
+/**
+ * read unsigned truncated exp golomb code; range == 1 yields 0 without reading bits.
+ */
+inline int32_t yang_get_te0_golomb(YangGetBitContext *gb, int32_t range)
+{
+    // av_assert2(range >= 1);
+
+    if (range == 1)
+        return 0;
+    else if (range == 2)
+        return yang_get_bits1(gb) ^ 1;
+    else
+        return yang_get_ue_golomb(gb);
+}
+
+/**
+ * read unsigned truncated exp golomb code (no special case for range == 1;
+ * see yang_get_te0_golomb).
+ */
+inline int32_t yang_get_te_golomb(YangGetBitContext *gb, int32_t range)
+{
+    // av_assert2(range >= 1);
+
+    if (range == 2)
+        return yang_get_bits1(gb) ^ 1;
+    else
+        return yang_get_ue_golomb(gb);
+}
+
+/* read signed exp golomb code */
+inline int32_t yang_get_se_golomb(YangGetBitContext *gb)
+{
+    uint32_t buf;
+
+    Yang_OPEN_READER(re, gb);
+    Yang_UPDATE_CACHE(re, gb);
+    buf = Yang_GET_CACHE(re, gb);
+
+    if (buf >= (1 << 27)) {
+        buf >>= 32 - 9;
+        LAST_Yang_SKIP_BITS(re, gb, yang_golomb_vlc_len[buf]);
+        Yang_CLOSE_READER(re, gb);
+
+        return yang_se_golomb_vlc_code[buf];
+    } else {
+        int32_t log = yang_log2(buf), sign;
+        LAST_Yang_SKIP_BITS(re, gb, 31 - log);
+        Yang_UPDATE_CACHE(re, gb);
+        buf = Yang_GET_CACHE(re, gb);
+
+        buf >>= log;
+
+        LAST_Yang_SKIP_BITS(re, gb, 32 - log);
+        Yang_CLOSE_READER(re, gb);
+
+        sign = -(buf & 1);
+        buf = ((buf >> 1) ^ sign) - sign;
+
+        return buf;
+    }
+}
+
+inline unsigned yang_get_ue_golomb_long(YangGetBitContext *gb)
+{
+    unsigned buf, log;
+
+    buf = yang_show_bits_long(gb, 32);
+    log = 31 - yang_log2(buf);
+    yang_skip_bits_long(gb, log);
+
+    return yang_get_bits_long(gb, log + 1) - 1;
+}
+
+inline int32_t yang_get_se_golomb_long(YangGetBitContext *gb)
+{
+    uint32_t buf = yang_get_ue_golomb_long(gb);
+    int32_t sign = (buf & 1) - 1;
+    return ((buf >> 1) ^ sign) + 1;
+}
+
+inline int32_t yang_get_interleaved_se_golomb(YangGetBitContext *gb)
+{
+    uint32_t buf;
+    Yang_OPEN_READER(re, gb);
+    Yang_UPDATE_CACHE(re, gb);
+    buf = Yang_GET_CACHE(re, gb);
+
+    if (buf & 0xAA800000) {
+        buf >>= 32 - 8;
+        LAST_Yang_SKIP_BITS(re, gb, yang_interleaved_golomb_vlc_len[buf]);
+        Yang_CLOSE_READER(re, gb);
+
+        return yang_interleaved_se_golomb_vlc_code[buf];
+    } else {
+        int32_t log;
+        LAST_Yang_SKIP_BITS(re, gb, 8);
+        Yang_UPDATE_CACHE(re, gb);
+        buf |= 1 | (Yang_GET_CACHE(re, gb) >> 8);
+
+        if ((buf & 0xAAAAAAAA) == 0)
+            return INVALID_VLC;
+
+        for (log = 31; (buf & 0x80000000) == 0; log--)
+            buf = (buf << 2) - ((buf << log) >> (log - 1)) + (buf >> 30);
+
+        LAST_Yang_SKIP_BITS(re, gb, 63 - 2 * log - 8);
+        Yang_CLOSE_READER(re, gb);
+
+        return (signed) (((((buf << log) >> log) - 1) ^ -(buf & 0x1)) + 1) >> 1;
+    }
+}
+
+#endif
diff --git a/include/yangavutil/video/YangMeta.h b/include/yangavutil/video/YangMeta.h
new file mode 100755
index 00000000..4ad48409
--- /dev/null
+++ b/include/yangavutil/video/YangMeta.h
@@ -0,0 +1,13 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef INCLUDE_YANGAVUTIL_VIDEO_YANGMETA_H_
+#define INCLUDE_YANGAVUTIL_VIDEO_YANGMETA_H_
+#ifdef __cplusplus
+extern "C"{
+#include
+}
+#else
+#include
+#endif
+#endif /* INCLUDE_YANGAVUTIL_VIDEO_YANGMETA_H_ */
diff --git a/include/yangavutil/video/YangNalu.h b/include/yangavutil/video/YangNalu.h
new file mode 100755
index 00000000..f1e8a9be
--- /dev/null
+++ b/include/yangavutil/video/YangNalu.h
@@ -0,0 +1,14 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef INCLUDE_YANGAVUTIL_VIDEO_YANGNALU_H_
+#define INCLUDE_YANGAVUTIL_VIDEO_YANGNALU_H_
+#ifdef __cplusplus
+extern "C"{
+#include
+}
+#else
+#include
+#endif
+
+#endif /* INCLUDE_YANGAVUTIL_VIDEO_YANGNALU_H_ */
diff --git a/include/yangavutil/video/YangPicConvert.h b/include/yangavutil/video/YangPicConvert.h
new file mode 100755
index 00000000..66950844
--- /dev/null
+++ b/include/yangavutil/video/YangPicConvert.h
@@ -0,0 +1,36 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef _YangConvert_H__
+#define _YangConvert_H__
+
+#include "stdint.h"
+
+//#include "YangResize.h"
+#define u8_t uint8_t
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void yang_pic_zoomIn4(uint8_t *src,uint8_t *dest,int32_t srcWidth,int32_t srcHeight,int32_t destWidth,int32_t destHeight);
+void yang_pic_zoom4Yuy2(uint8_t *src,uint8_t *dest,int32_t srcWidth,int32_t srcHeight,int32_t destWidth,int32_t destHeight);
+//void yang_pic_initNv12_resize(int32_t p_srcWidth,int32_t p_srcHeight,int32_t p_dstWidth,int32_t p_dstHeight);
+void yang_pic_resize_NV12(uint8_t *src,uint8_t *dest,int32_t srcWidth,int32_t srcHeight,int32_t destWidth,int32_t destHeight);
+void yang_pic_resize_Yuy2_NV12(uint8_t *src,uint8_t *dest,int32_t p_srcWidth,int32_t p_srcHeight,int32_t p_destWidth,int32_t p_destHeight);
+void yang_pic_nv12_nearest_scale(uint8_t* src, uint8_t* dst,int32_t srcWidth,int32_t srcHeight,int32_t dstWidth,int32_t dstHeight);
+void yang_pic_YUY2toI420(int32_t inWidth, int32_t inHeight, uint8_t *pSrc, uint8_t *pDest);
+void yang_pic_YUY2toNV12(int32_t inWidth, int32_t inHeight, uint8_t *pSrc, uint8_t *pDest);
+void yang_pic_plusNV12(uint8_t* src,uint8_t * dest,int32_t model,int32_t srcWidth,int32_t srcHeight,int32_t destWidth,int32_t destHeight);
+void yang_pic_plusYuy2(uint8_t* src,uint8_t * dest,int32_t model,int32_t srcWidth,int32_t srcHeight,int32_t destWidth,int32_t destHeight);
+
+void yang_pic_RGB24_TO_YV12(uint8_t* yv12,uint8_t* rgb24,int32_t w,int32_t h);
+void yang_pic_RGB24_To_I420( uint8_t *RGBbuf, uint8_t *YUV, int32_t width, int32_t height );
+void yang_pic_RGB24_To_NV12( uint8_t *RGBbuf, uint8_t *YUV, int32_t width, int32_t height );
+void yang_pic_YUY2_To_RGB24(uint8_t *YUY2buff,uint8_t *RGBbuff,unsigned long dwSize);
+
+uint8_t clip255(long v);
+#ifdef __cplusplus
+}
+#endif
+#endif
+
diff --git a/include/yangavutil/video/YangPicUtilFfmpeg.h b/include/yangavutil/video/YangPicUtilFfmpeg.h
new file mode 100755
index 00000000..3e5a7705
--- /dev/null
+++ 
b/include/yangavutil/video/YangPicUtilFfmpeg.h @@ -0,0 +1,55 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangPicUtilFfmpeg___ +#define YangPicUtilFfmpeg___ +#include "yangutil/sys/YangLoadLib.h" +#include "../../Yang_Config.h" +#if HavePicUtilFfmpeg +//extern "C" { +#include "libswscale/swscale.h" +#include "libavformat/avformat.h" +//} +class YangPicUtilFfmpeg +{ +public: + YangPicUtilFfmpeg(void); + ~YangPicUtilFfmpeg(void); + int32_t inWidth,inHeight,outWidth,outHeight; + void resize(); + void init(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight,AVPixelFormat src,AVPixelFormat dst); + void init(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + void initYuy2(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + void initNv12(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + void init420P(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + void initYuy2_Bgr24(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + void initBgr24_Yuy2(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + void getAddr(uint8_t **p_in,uint8_t **p_out); + void closeAll(); + + struct SwsContext *img_convert_ctx; + AVPicture in_pic, out_pic; +private: + + YangLoadLib m_lib,m_lib1; + void loadLib(); + void unloadLib(); + struct SwsContext *(*yang_sws_getContext)(int32_t srcW, int32_t srcH, enum AVPixelFormat srcFormat, + int32_t dstW, int32_t dstH, enum AVPixelFormat dstFormat, + int32_t flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); + + int32_t (*yang_sws_scale)(struct SwsContext *c, const uint8_t *const srcSlice[], + const int32_t srcStride[], int32_t srcSliceY, int32_t srcSliceH, + uint8_t *const dst[], const int32_t dstStride[]); + void (*yang_sws_freeContext)(struct SwsContext *swsContext); + + + + + int32_t (*yang_avpicture_alloc)(AVPicture *picture, enum AVPixelFormat pix_fmt, int32_t width, int32_t height); + void (*yang_avpicture_free)(AVPicture *picture); +}; +#endif +#endif + diff --git a/include/yangavutil/video/YangResize.h b/include/yangavutil/video/YangResize.h new file mode 100755 index 00000000..e9d99abb --- /dev/null +++ b/include/yangavutil/video/YangResize.h @@ -0,0 +1,46 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGRESIZE_H +#define YANGRESIZE_H + + +class PicUtilBmp +{ +public: + PicUtilBmp(void); + virtual ~PicUtilBmp(void); + //uint8_t *in; + //uint8_t *out; + //int32_t inWidth,inHeight,outWidth,outHeight; + virtual void resize(); + virtual void init(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + virtual void initYuy2(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + virtual void initYuy2tonv12(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + virtual void initNv12(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + virtual void init420P(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight); + virtual void getAddr(uint8_t **p_in,uint8_t **p_out); + virtual void close(); +}; + + + +class YangResize +{ +public: + YangResize(); + virtual ~YangResize(); +public: + + //uint8_t *in; + //uint8_t *out; + PicUtilBmp *pu; + + +protected: + +private: +}; + + +#endif // YANGRESIZE_H diff --git a/include/yangavutil/video/YangVideoEncoderMeta.h b/include/yangavutil/video/YangVideoEncoderMeta.h new file mode 100755 index 00000000..f0fe1b2f --- /dev/null +++ 
b/include/yangavutil/video/YangVideoEncoderMeta.h
@@ -0,0 +1,16 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YANGENCODER_INCLUDE_YANGVIDEOENCODERMETA_H_
+#define YANGENCODER_INCLUDE_YANGVIDEOENCODERMETA_H_
+#include
+#include
+
+class YangVideoEncoderMeta {
+public:
+    YangVideoEncoderMeta();
+    virtual ~YangVideoEncoderMeta();
+    virtual void yang_initVmd(YangVideoMeta *p_vmd, YangVideoInfo *p_config, YangVideoEncInfo *penc)=0;
+};
+
+#endif /* YANGENCODER_INCLUDE_YANGVIDEOENCODERMETA_H_ */
diff --git a/include/yangavutil/video/YangYuvConvert.h b/include/yangavutil/video/YangYuvConvert.h
new file mode 100755
index 00000000..ec18325e
--- /dev/null
+++ b/include/yangavutil/video/YangYuvConvert.h
@@ -0,0 +1,230 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+
+#ifndef YangYuvConvert_H_
+#define YangYuvConvert_H_
+//#include "yangutil/yangavtype.h"
+#include "stdint.h"
+#include "yangutil/sys/YangLoadLib.h"
+#include "libyuv.h"
+using namespace libyuv;
+
+class YangYuvConvert {
+public:
+    YangYuvConvert();
+    virtual ~YangYuvConvert();
+    //void rgbtonv12();
+    int32_t yuy2tonv12(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight);
+    int32_t yuy2toi420(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight);
+    int32_t yuy2toargb(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight);
+    int32_t i420tonv12(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight);
+    int32_t i420tonv21(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight);
+    int32_t bgr24toyuy2(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight);
+    int32_t rgb24toI420(uint8_t* src_rgb24,uint8_t *dst,int32_t width,int32_t height);
+    int32_t rgbatoI420(uint8_t* src_rgba,uint8_t *dst,int32_t width,int32_t height);
+    int32_t bgratoI420(uint8_t* src_bgra,uint8_t *dst,int32_t width,int32_t height);
+    //I420ToRGB24
+    int32_t I420torgb24(uint8_t* src_i420,uint8_t *dst,int32_t width,int32_t height);
+    int32_t nv12torgb24(uint8_t* src_nv12,uint8_t *dst,int32_t width,int32_t height);
+    int32_t nv21torgb24(uint8_t* src_nv21,uint8_t *dst,int32_t width,int32_t height);
+    int32_t argbtorgb24(uint8_t* src_argb, uint8_t *dst,int32_t width,int32_t height);
+    int32_t rgb24toargb(uint8_t *src_rgb24, uint8_t *dst, int32_t width,int32_t height);
+    int32_t scaleNv12(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight,int32_t dstWidth,int32_t dstHeight,int32_t mode=2);
+    int32_t scaleI420(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight,int32_t dstWidth,int32_t dstHeight,int32_t mode=2);
+    int32_t scaleYuy2(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight,int32_t dstWidth,int32_t dstHeight,int32_t mode=2);
+    int32_t scaleRgb(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight,int32_t dstWidth,int32_t dstHeight,int32_t mode=2);
+    int32_t scaleArgb(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight,int32_t dstWidth,int32_t dstHeight,int32_t mode=2);
+
+
+private:
+    YangLoadLib m_lib;
+    void loadLib();
+    void unloadLib();
+    //I420ToNV12
+    int32_t (*yang_YUY2ToNV12)(const uint8_t* src_yuy2,
+                               int32_t src_stride_yuy2,
+                               uint8_t* dst_y,
+                               int32_t dst_stride_y,
+                               uint8_t* dst_uv,
+                               int32_t dst_stride_uv,
+                               int32_t width,
+                               int32_t height);
+    int32_t (*yang_YUY2ToI420)(const uint8_t* src_yuy2,
+                               int32_t src_stride_yuy2,
+                               uint8_t* dst_y,
+                               int32_t dst_stride_y,
+                               uint8_t* dst_u,
+                               int32_t dst_stride_u,
+                               uint8_t* dst_v,
+                               int32_t dst_stride_v,
+                               int32_t width,
+                               int32_t height);
+    int (*yang_YUY2ToARGB)(const uint8_t* src_yuy2,
+                           int src_stride_yuy2,
+ 
uint8_t* dst_argb, + int dst_stride_argb, + int width, + int height); + int32_t (*yang_I420ToNV12)(const uint8_t* src_y, + int32_t src_stride_y, + const uint8_t* src_u, + int32_t src_stride_u, + const uint8_t* src_v, + int32_t src_stride_v, + uint8_t* dst_y, + int32_t dst_stride_y, + uint8_t* dst_uv, + int32_t dst_stride_uv, + int32_t width, + int32_t height); + + int32_t (*yang_I420ToNV21)(const uint8_t* src_y, + int32_t src_stride_y, + const uint8_t* src_u, + int32_t src_stride_u, + const uint8_t* src_v, + int32_t src_stride_v, + uint8_t* dst_y, + int32_t dst_stride_y, + uint8_t* dst_vu, + int32_t dst_stride_vu, + int32_t width, + int32_t height); + + int32_t (*yang_I420ToRGB24)(const uint8_t* src_y, + int32_t src_stride_y, + const uint8_t* src_u, + int32_t src_stride_u, + const uint8_t* src_v, + int32_t src_stride_v, + uint8_t* dst_rgb24, + int32_t dst_stride_rgb24, + int32_t width, + int32_t height); + int32_t (*yang_NV12ToRGB24)(const uint8_t* src_y, + int32_t src_stride_y, + const uint8_t* src_uv, + int32_t src_stride_uv, + uint8_t* dst_rgb24, + int32_t dst_stride_rgb24, + int32_t width, + int32_t height); + int32_t (*yang_NV21ToRGB24)(const uint8_t* src_y, + int32_t src_stride_y, + const uint8_t* src_vu, + int32_t src_stride_vu, + uint8_t* dst_rgb24, + int32_t dst_stride_rgb24, + int32_t width, + int32_t height); + int32_t (*yang_ARGBToRGB24)(const uint8_t* src_argb, + int32_t src_stride_argb, + uint8_t* dst_rgb24, + int32_t dst_stride_rgb24, + int32_t width, + int32_t height); + int32_t (*yang_RGB24ToARGB)(const uint8_t* src_rgb24, + int32_t src_stride_rgb24, + uint8_t* dst_argb, + int32_t dst_stride_argb, + int32_t width, + int32_t height); + int32_t (*yang_RAWToARGB)(const uint8_t* src_raw, + int32_t src_stride_raw, + uint8_t* dst_argb, + int32_t dst_stride_argb, + int32_t width, + int32_t height); + int32_t (*yang_RGB24ToI420)(const uint8_t* src_rgb24, + int32_t src_stride_rgb24, + uint8_t* dst_y, + int32_t dst_stride_y, + uint8_t* dst_u, + int32_t dst_stride_u, + uint8_t* dst_v, + int32_t dst_stride_v, + int32_t width, + int32_t height); + int (*yang_RGBAToI420)(const uint8_t* src_rgba, + int src_stride_rgba, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height); + + int (*yang_BGRAToI420)(const uint8_t* src_bgra, + int src_stride_bgra, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height); + int (*yang_ARGBToI420)(const uint8_t* src_argb, + int src_stride_argb, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height); + int32_t (*yang_NV12Scale)(const uint8_t* src_y, + int32_t src_stride_y, + const uint8_t* src_uv, + int32_t src_stride_uv, + int32_t src_width, + int32_t src_height, + uint8_t* dst_y, + int32_t dst_stride_y, + uint8_t* dst_uv, + int32_t dst_stride_uv, + int32_t dst_width, + int32_t dst_height, + enum FilterMode filtering); + int32_t (*yang_I420Scale)(const uint8_t* src_y, + int32_t src_stride_y, + const uint8_t* src_u, + int32_t src_stride_u, + const uint8_t* src_v, + int32_t src_stride_v, + int32_t src_width, + int32_t src_height, + uint8_t* dst_y, + int32_t dst_stride_y, + uint8_t* dst_u, + int32_t dst_stride_u, + uint8_t* dst_v, + int32_t dst_stride_v, + int32_t dst_width, + int32_t dst_height, + enum FilterMode filtering); + void (*yang_ScalePlane)(const uint8_t* src, + int32_t src_stride, + int32_t 
src_width, + int32_t src_height, + uint8_t* dst, + int32_t dst_stride, + int32_t dst_width, + int32_t dst_height, + enum FilterMode filtering); + int32_t (*yang_ARGBScale)(const uint8_t* src_argb, + int32_t src_stride_argb, + int32_t src_width, + int32_t src_height, + uint8_t* dst_argb, + int32_t dst_stride_argb, + int32_t dst_width, + int32_t dst_height, + enum FilterMode filtering); +}; + +#endif /* YANGYUVUTIL_H_ */ diff --git a/include/yangavutil/video/YangYuvUtil.h b/include/yangavutil/video/YangYuvUtil.h new file mode 100755 index 00000000..142018d0 --- /dev/null +++ b/include/yangavutil/video/YangYuvUtil.h @@ -0,0 +1,14 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGAVUTIL_VIDEO_YANGYUVUTIL_H_ +#define INCLUDE_YANGAVUTIL_VIDEO_YANGYUVUTIL_H_ +#ifdef __cplusplus +extern "C"{ +#include +} +#else +#include +#endif +#endif /* INCLUDE_YANGAVUTIL_VIDEO_YANGYUVUTIL_H_ */ diff --git a/include/yangcapture/YangCaptureFactory.h b/include/yangcapture/YangCaptureFactory.h new file mode 100755 index 00000000..cc3afcab --- /dev/null +++ b/include/yangcapture/YangCaptureFactory.h @@ -0,0 +1,24 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGCAPTURE_YANGCAPTUREFACTORY_H_ +#define INCLUDE_YANGCAPTURE_YANGCAPTUREFACTORY_H_ +#include +#include +#include "YangScreenCapture.h" +//#include "../include/YangLivingVideoCapture.h" + +class YangCaptureFactory { +public: + YangCaptureFactory(); + virtual ~YangCaptureFactory(); + YangAudioCapture *createAudioCapture(YangContext *pcontext); + YangAudioCapture *createRecordAudioCapture(YangContext *pcontext); + //YangMultiVideoCapture *createVideoCapture(YangVideoInfo *pcontext); + YangMultiVideoCapture *createVideoCapture(YangVideoInfo *pcontext); + YangMultiVideoCapture *createRecordVideoCapture(YangVideoInfo *pcontext); + YangScreenCapture *createScreenCapture(YangContext *pcontext); +}; + +#endif /* INCLUDE_YANGCAPTURE_YANGCAPTUREFACTORY_H_ */ diff --git a/include/yangcapture/YangMultiVideoCapture.h b/include/yangcapture/YangMultiVideoCapture.h new file mode 100755 index 00000000..d36b96ff --- /dev/null +++ b/include/yangcapture/YangMultiVideoCapture.h @@ -0,0 +1,27 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangMultiVideoCapture_H +#define YangMultiVideoCapture_H +#include "yangutil/sys/YangThread.h" +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/sys/YangLog.h" +#include "YangVideoCapture.h" +class YangMultiVideoCapture: public YangVideoCapture { +public: + YangMultiVideoCapture(); + virtual ~YangMultiVideoCapture(); +public: + virtual void setLivingOutVideoBuffer(YangVideoBuffer *pbuf)=0; + virtual void setLivingVideoCaptureStart()=0; + virtual void setLivingVideoCaptureStop()=0; + virtual int32_t getLivingVideoCaptureState()=0; + + virtual void setFilmOutVideoBuffer(YangVideoBuffer *pbuf)=0; + virtual void setFilmVideoCaptureStart()=0; + virtual void setFilmVideoCaptureStop()=0; + virtual int32_t getFilmVideoCaptureState()=0; +}; + +#endif // YANGVIDEOCAPTURE_H diff --git a/include/yangcapture/YangScreenCapture.h b/include/yangcapture/YangScreenCapture.h new file mode 100755 index 00000000..2cf0affa --- /dev/null +++ b/include/yangcapture/YangScreenCapture.h @@ -0,0 +1,41 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGCAPTURE_YANGSCREENCAPTURE_H_ +#define INCLUDE_YANGCAPTURE_YANGSCREENCAPTURE_H_ + +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/sys/YangThread.h" +#include 
"YangScreenCaptureHandleI.h" + + +class YangScreenCapture :public YangThread{ +public: + YangScreenCapture(); + virtual ~YangScreenCapture(); + + int32_t m_isStart; + +public: + virtual void setDrawmouse(bool isDraw)=0; + virtual bool getisDrawmouse()=0; + + virtual void setInterval(int32_t pinterval)=0; + virtual int32_t init()=0; + virtual void setVideoCaptureStart()=0; + virtual void setVideoCaptureStop()=0; + virtual YangVideoBuffer* getOutVideoBuffer()=0; + virtual YangVideoBuffer* getPreVideoBuffer()=0; + virtual int32_t getVideoCaptureState()=0; + void stop(); +protected: + void run(); + virtual void startLoop()=0; + virtual void stopLoop()=0; + + + +}; + +#endif /* INCLUDE_YANGCAPTURE_YANGSCREENCAPTURE_H_ */ diff --git a/include/yangcapture/YangScreenCaptureHandleI.h b/include/yangcapture/YangScreenCaptureHandleI.h new file mode 100755 index 00000000..54cc6bf4 --- /dev/null +++ b/include/yangcapture/YangScreenCaptureHandleI.h @@ -0,0 +1,15 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangScreenCaptureI_H_ +#define YangScreenCaptureI_H_ +class YangScreenCaptureHandleI { +public: + YangScreenCaptureHandleI(){} + virtual ~YangScreenCaptureHandleI(){} + int32_t m_width=0,m_height=0; + virtual int32_t init()=0; + virtual int32_t captureFrame(uint8_t* p)=0; +}; + +#endif diff --git a/include/yangcapture/YangVideoCapture.h b/include/yangcapture/YangVideoCapture.h new file mode 100755 index 00000000..44012826 --- /dev/null +++ b/include/yangcapture/YangVideoCapture.h @@ -0,0 +1,38 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGCAPTURE_YANGVIDEOCAPTURE_H_ +#define INCLUDE_YANGCAPTURE_YANGVIDEOCAPTURE_H_ +#include +#include +#include +#include +#include +#include + + +void yang_get_camera_indexs(std::vector *pvs,std::string pcamindex); +class YangVideoCapture :public YangThread { +public: + YangVideoCapture(); + virtual ~YangVideoCapture(); +public: + int32_t cameraIndex; + virtual int32_t init()=0; + virtual void setVideoCaptureStart()=0; + virtual void setVideoCaptureStop()=0; + virtual void setOutVideoBuffer(YangVideoBuffer *pbuf)=0; + virtual void setPreVideoBuffer(YangVideoBuffer *pbuf)=0; + virtual int32_t getVideoCaptureState()=0; + virtual void initstamp()=0; + + int32_t m_isStart; + void stop(); +protected: + void run(); + virtual void startLoop()=0; + virtual void stopLoop()=0; + YangVideoInfo *m_para; +}; + +#endif /* INCLUDE_YANGCAPTURE_YANGVIDEOCAPTURE_H_ */ diff --git a/include/yangdecoder/YangAudioDecoder.h b/include/yangdecoder/YangAudioDecoder.h new file mode 100755 index 00000000..55081dde --- /dev/null +++ b/include/yangdecoder/YangAudioDecoder.h @@ -0,0 +1,28 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGDECODER_INCLUDE_YANGAUDIODECODER_H_ +#define YANGDECODER_INCLUDE_YANGAUDIODECODER_H_ +#include +#include "yangstream/YangStreamType.h" +#include "YangDecoder.h" + + +class YangAudioDecoder{ +public: + YangAudioDecoder(); + virtual ~YangAudioDecoder(); + virtual void init()=0; + virtual int32_t decode(YangFrame* pframe,YangDecoderCallback* pcallback)=0; + int32_t m_uid; +protected: + int32_t m_isInit; + int32_t m_frameSize; + int32_t m_alen; + YangAudioParam *m_context; + uint8_t* m_dstBuffer; + int32_t m_dstLen; +}; + +#endif /* YANGDECODER_INCLUDE_YANGAUDIODECODER_H_ */ diff --git a/include/yangdecoder/YangAudioDecoderHandle.h b/include/yangdecoder/YangAudioDecoderHandle.h new file mode 100755 index 00000000..58a1462b --- /dev/null +++ b/include/yangdecoder/YangAudioDecoderHandle.h @@ -0,0 +1,64 @@ +// +// 
Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangAudioDecoderHandle__ +#define __YangAudioDecoderHandle__ + +#include "stdint.h" +#include + +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/buffer/YangAudioPlayBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/sys/YangThread.h" + +#include "yangdecoder/YangAudioDecoder.h" +using namespace std; +class YangAudioDecoderHandle:public YangThread,public YangDecoderCallback,public YangMediaConfigCallback +{ +public: + YangAudioDecoderHandle(YangContext *pcontext); + ~YangAudioDecoderHandle(void); + int32_t m_isStart; +private: + int32_t m_isInit; + +public: + + YangAudioPlayBuffer* getOutAudioBuffer(); + void setRemoteParam(YangAudioParam* para); + void init(); + void stop(); + void setInAudioBuffer(YangAudioEncoderBuffer *pbuf); + void setOutAudioBuffer(YangAudioPlayBuffer* pbuf); + void removeAudioStream(int32_t puid); + void removeAllStream(); + + void onAudioData(YangFrame* pframe); + void onVideoData(YangFrame* pframe); + void setMediaConfig(int32_t puid, YangAudioParam *audio,YangVideoParam *video); + +protected: + void run(); + void stopLoop(); + void startLoop(); + +void saveFile(char *fileName, uint8_t *pBuffer, long BufferLen); + +private: + YangAudioPlayBuffer* m_out_audioBuffer; + YangAudioDecoder *m_decs; + int32_t m_isConvert; + YangAudioEncoderBuffer *m_in_audioBuffer; + YangContext *m_context; + YangAudioParam* m_param; + + //int32_t m_channel; + bool m_is44100; + uint8_t* m_buf; + int32_t m_size; + YangFrame m_audioFrame; + void decode(int32_t isIframe,uint8_t*src,int32_t p_buflen,uint8_t *dest,int32_t *p_destLen); + +}; +#endif diff --git a/include/yangdecoder/YangAudioDecoderHandles.h b/include/yangdecoder/YangAudioDecoderHandles.h new file mode 100755 index 00000000..85489dc4 --- /dev/null +++ b/include/yangdecoder/YangAudioDecoderHandles.h @@ -0,0 +1,60 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangAudioDecoderHandles__ +#define __YangAudioDecoderHandles__ + +#include "stdint.h" +#include +#include +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/buffer/YangAudioPlayBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/sys/YangThread.h" + +#include "yangdecoder/YangAudioDecoder.h" +using namespace std; +class YangAudioDecoderHandles:public YangThread,public YangDecoderCallback +{ +public: + YangAudioDecoderHandles(YangContext *pcontext); + ~YangAudioDecoderHandles(void); + +private: + int32_t m_isInit; + +public: + YangAudioDecoder *getDecoder(int32_t puid); + YangAudioPlayBuffer* getAudioBuffer(int32_t puid); + int32_t getDecoderIndex(int32_t puid); + void init(); + void stop(); + void setInAudioBuffer(YangAudioEncoderBuffer *pbuf); + void setOutAudioBuffer(vector* pbuf); + void removeAudioStream(int32_t puid); + void removeAllStream(); + int32_t m_isStart; + void onAudioData(YangFrame* pframe); + void onVideoData(YangFrame* pframe); + void setRemoteParam(int32_t puid,YangAudioParam* para); + +protected: + void run(); + void stopLoop(); + void startLoop(); + +void saveFile(char *fileName, uint8_t *pBuffer, long BufferLen); + +private: + std::map m_paramMap; + vector *m_out_audioBuffer; + vector *m_decs; + int32_t m_isConvert; + YangAudioEncoderBuffer *m_in_audioBuffer; + YangContext *m_context; + + int32_t m_frameSize,m_channel; + void decode(int32_t isIframe,uint8_t*src,int32_t p_buflen,uint8_t *dest,int32_t *p_destLen); + +}; +#endif diff --git a/include/yangdecoder/YangDecoder.h b/include/yangdecoder/YangDecoder.h new file 
mode 100755 index 00000000..e1964cee --- /dev/null +++ b/include/yangdecoder/YangDecoder.h @@ -0,0 +1,17 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGDECODER_INCLUDE_YANGDECODER_H_ +#define YANGDECODER_INCLUDE_YANGDECODER_H_ + +class YangDecoderCallback{ +public: + YangDecoderCallback(){}; + virtual ~YangDecoderCallback(){}; + virtual void onAudioData(YangFrame* pframe)=0; + virtual void onVideoData(YangFrame* pframe)=0; +}; + + + +#endif /* YANGDECODER_INCLUDE_YANGDECODER_H_ */ diff --git a/include/yangdecoder/YangDecoderFactory.h b/include/yangdecoder/YangDecoderFactory.h new file mode 100755 index 00000000..a2f47931 --- /dev/null +++ b/include/yangdecoder/YangDecoderFactory.h @@ -0,0 +1,22 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGDECODER_INCLUDE_YANGDECODERFACTORY_H_ +#define YANGDECODER_INCLUDE_YANGDECODERFACTORY_H_ +#include "yangdecoder/YangAudioDecoder.h" +#include "yangdecoder/YangVideoDecoder.h" + + +class YangDecoderFactory { +public: + YangDecoderFactory(); + virtual ~YangDecoderFactory(); + + YangAudioDecoder* createAudioDecoder(YangAudioCodec paet,YangAudioParam *pcontext); + YangAudioDecoder* createAudioDecoder(YangAudioParam *pcontext); + YangVideoDecoder* createVideoDecoder(YangVideoCodec paet,YangVideoInfo *pcontext); + YangVideoDecoder* createFfmpegVideoDecoder(YangVideoCodec paet,YangVideoInfo *pcontext); + YangVideoDecoder* createVideoDecoder(YangVideoInfo *pcontext); +}; + +#endif /* YANGDECODER_INCLUDE_YANGDECODERFACTORY_H_ */ diff --git a/include/yangdecoder/YangVideoDecoder.h b/include/yangdecoder/YangVideoDecoder.h new file mode 100755 index 00000000..c4f9116c --- /dev/null +++ b/include/yangdecoder/YangVideoDecoder.h @@ -0,0 +1,25 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGH264DECODER_H +#define YANGH264DECODER_H +#include +#include "YangDecoder.h" +class YangVideoDecoder { +public: + YangVideoDecoder(){} + virtual ~YangVideoDecoder(){} + virtual void init()=0; + virtual void parseRtmpHeader(uint8_t *p, int32_t pLen, int32_t *pwid, + int32_t *phei, int32_t *pfps)=0; + virtual int32_t decode(YangFrame* pframe,YangYuvType yuvtype,YangDecoderCallback* pcallback)=0; + int32_t m_isInit=0; + int32_t m_uid = -1; + int32_t m_state = 0; +protected: + +private: + +}; + +#endif // YANGH264DECODER_H diff --git a/include/yangdecoder/YangVideoDecoderHandle.h b/include/yangdecoder/YangVideoDecoderHandle.h new file mode 100755 index 00000000..13dead58 --- /dev/null +++ b/include/yangdecoder/YangVideoDecoderHandle.h @@ -0,0 +1,66 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangVideoDecoderHandle__ +#define __YangVideoDecoderHandle__ +#include "stdint.h" + +#include + +#include "yangutil/buffer/YangVideoDecoderBuffer.h" +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/sys/YangThread.h" +#include "YangVideoDecoder.h" +using namespace std; +class YangVideoDecoderHandle:public YangThread,public YangDecoderCallback +{ +public: + YangVideoDecoderHandle(YangContext *pcontext); + ~YangVideoDecoderHandle(void); + +private: + int32_t isInit; + +public: + int32_t m_isStart; + YangVideoDecoder *getDecoder(int32_t puid); + YangVideoBuffer* getOutVideoBuffer(); + //int32_t getDecoderIndex(int32_t puid); + void parseVideoHeader(uint8_t *p,int32_t pind); + void addVideoStream(uint8_t *ps,int32_t pind,int32_t pisAdd); + //void removeAllStream(); + //int32_t fx; + void init(); + void stop(); + void setInVideoBuffer(YangVideoDecoderBuffer *pbuf); + void 
setOutVideoBuffer(YangVideoBuffer* pbuf); + + void onAudioData(YangFrame* pframe); + void onVideoData(YangFrame* pframe); + + + + +protected: + void run(); + void stopLoop(); + void startLoop(); + + +void saveFile(char *fileName, uint8_t *pBuffer, long BufferLen); + +private: + YangVideoDecoderBuffer *m_in_videoBuffer; + YangVideoBuffer *m_out_videoBuffer; + YangVideoDecoder *m_decs; + //vector m_removeList; + int32_t m_isConvert; + YangContext *m_context; + + //void removeStream(); + void decode(int32_t isIframe,uint8_t*src,int32_t p_buflen,uint8_t *dest,int32_t *p_destLen); + + +}; +#endif diff --git a/include/yangdecoder/YangVideoDecoderHandles.h b/include/yangdecoder/YangVideoDecoderHandles.h new file mode 100755 index 00000000..2232233b --- /dev/null +++ b/include/yangdecoder/YangVideoDecoderHandles.h @@ -0,0 +1,67 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangVideoDecoderHandles__ +#define __YangVideoDecoderHandles__ +#include "stdint.h" + +#include + +#include "yangutil/buffer/YangVideoDecoderBuffer.h" +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/sys/YangThread.h" +#include "YangVideoDecoder.h" +using namespace std; +class YangVideoDecoderHandles:public YangThread,public YangDecoderCallback +{ +public: + YangVideoDecoderHandles(YangContext *pcontext); + ~YangVideoDecoderHandles(void); + +private: + int32_t isInit; + +public: + int32_t m_isStart; + YangVideoDecoder *getDecoder(int32_t puid); + YangVideoBuffer* getVideoBuffer(int32_t puid); + int32_t getDecoderIndex(int32_t puid); + void parseVideoHeader(uint8_t *p,int32_t pind); + void addVideoStream(uint8_t *ps,int32_t pind,int32_t pisAdd); + void removeAllStream(); + //int32_t fx; + void init(); + void stop(); + void setInVideoBuffer(YangVideoDecoderBuffer *pbuf); + void setOutVideoBuffer(vector* pbuf); + + void onAudioData(YangFrame* pframe); + void onVideoData(YangFrame* pframe); + + + + +protected: + void run(); + void stopLoop(); + void startLoop(); + + +void saveFile(char *fileName, uint8_t *pBuffer, long BufferLen); + +private: + YangVideoDecoderBuffer *m_in_videoBuffer; + vector *m_out_videoBuffer; + vector *m_decs; + vector m_removeList; + int32_t m_isConvert; + //int32_t m_roomState; + YangContext *m_context; + + void removeStream(); + void decode(int32_t isIframe,uint8_t*src,int32_t p_buflen,uint8_t *dest,int32_t *p_destLen); + + +}; +#endif diff --git a/include/yangencoder/YangAudioEncoder.h b/include/yangencoder/YangAudioEncoder.h new file mode 100755 index 00000000..c4240507 --- /dev/null +++ b/include/yangencoder/YangAudioEncoder.h @@ -0,0 +1,30 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef ___YangAudioEncoderPipe__ +#define ___YangAudioEncoderPipe__ +#include +#include "yangutil/buffer/YangAudioBuffer.h" +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/sys/YangThread.h" +#include "YangEncoder.h" + +class YangAudioEncoder//:public YangThread +{ +public: + YangAudioEncoder(); + virtual ~YangAudioEncoder(void); + virtual void init(YangAudioInfo *pap)=0; + virtual int32_t encoder(YangFrame* pframe,YangEncoderCallback* pcallback)=0; + void stop(); + int32_t m_uid; + +protected: + void setAudioPara(YangAudioInfo *audioInfo); + int32_t m_isInit; + YangAudioInfo m_audioInfo; + //YangAudioFrame m_audioFrame; +}; + +#endif diff --git a/include/yangencoder/YangAudioEncoderHandle.h b/include/yangencoder/YangAudioEncoderHandle.h new file mode 100755 index 00000000..ce5bb705 --- 
/dev/null +++ b/include/yangencoder/YangAudioEncoderHandle.h @@ -0,0 +1,55 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangAudioEncoderrHandle__ +#define __YangAudioEncoderrHandle__ +#include "stdint.h" + +#include + +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/buffer/YangAudioPlayBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/sys/YangThread.h" +#include "yangencoder/YangAudioEncoder.h" +using namespace std; +class YangAudioEncoderHandle:public YangThread,public YangEncoderCallback +{ +public: + YangAudioEncoderHandle(YangAudioInfo *pcontext); + ~YangAudioEncoderHandle(void); + +private: + int32_t m_isInit; + +public: + void init(); + void stop(); + void setInAudioBuffer(YangAudioBuffer *pbuf); + void setOutAudioBuffer(YangAudioEncoderBuffer *pbuf); + + void onVideoData(YangFrame* pframe); + void onAudioData(YangFrame* pframe); + int32_t m_isStart; + int32_t m_uid; + + + +protected: + void run(); + void stopLoop(); + void startLoop(); + +void saveFile(char *fileName, uint8_t *pBuffer, long BufferLen); + +private: + + YangAudioEncoder *m_enc; + int32_t m_isConvert; + YangAudioBuffer *m_in_audioBuffer; + YangAudioEncoderBuffer *m_out_audioBuffer; + YangAudioInfo *m_context; + void Encoder(int32_t isIframe,uint8_t*src,int32_t p_buflen,uint8_t *dest,int32_t *p_destLen); + +}; +#endif diff --git a/include/yangencoder/YangAudioEncoderHandleCb.h b/include/yangencoder/YangAudioEncoderHandleCb.h new file mode 100755 index 00000000..bd3e36e4 --- /dev/null +++ b/include/yangencoder/YangAudioEncoderHandleCb.h @@ -0,0 +1,60 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGENCODER_INCLUDE_YANGAUDIOENCODERHANDLECB_H_ +#define YANGENCODER_INCLUDE_YANGAUDIOENCODERHANDLECB_H_ + +#include "stdint.h" + +#include + +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/buffer/YangAudioPlayBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/sys/YangThread.h" +//#include "yangutil/YangVideoContext.h" +#include "yangencoder/YangAudioEncoder.h" +using namespace std; +class YangAudioEncoderHandleCb:public YangThread +{ +public: + YangAudioEncoderHandleCb(YangAudioInfo *pcontext); + ~YangAudioEncoderHandleCb(void); + +private: + int32_t m_isInit; + +public: + void init(); + void stop(); + void setInAudioBuffer(YangAudioBuffer *pbuf); + //void setOutAudioBuffer(YangAudioEncoderBuffer *pbuf); + void setCallback(YangEncoderCallback* pcb); + //void setRoomState(int32_t pst); + //void onVideoData(uint8_t* p,int32_t plen,int64_t timestamp,int32_t pframetype,int32_t puid); + //void onAudioData(uint8_t* p,int32_t plen,int32_t puid); + int32_t m_isStart; + int32_t m_uid; + + + +protected: + void run(); + void stopLoop(); + void startLoop(); + +void saveFile(char *fileName, uint8_t *pBuffer, long BufferLen); + +private: + YangEncoderCallback* m_cb; + YangAudioEncoder *m_enc; + int32_t m_isConvert; + //int32_t m_roomState; + YangAudioBuffer *m_in_audioBuffer; + //YangAudioEncoderBuffer *m_out_audioBuffer; + YangAudioInfo *m_context; + void Encoder(int32_t isIframe,uint8_t*src,int32_t p_buflen,uint8_t *dest,int32_t *p_destLen); + +}; +#endif /* YANGENCODER_INCLUDE_YANGAUDIOENCODERHANDLECB_H_ */ diff --git a/include/yangencoder/YangAudioEncoderMeta.h b/include/yangencoder/YangAudioEncoderMeta.h new file mode 100755 index 00000000..ec978240 --- /dev/null +++ b/include/yangencoder/YangAudioEncoderMeta.h @@ -0,0 +1,35 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGENCODER_INCLUDE_YANGAUDIOENCODERMETA_H_ 
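+// createMeta() presumably drives the dynamically loaded libfaac entry points declared
+// below to obtain the AAC AudioSpecificConfig (ASC). A rough sketch of that call
+// sequence (44100 Hz / 2 channels are illustrative values, not fixed by this header):
+//   unsigned long inputSamples = 0, maxBytes = 0;
+//   faacEncHandle h = yang_faacEncOpen(44100, 2, &inputSamples, &maxBytes);
+//   uint8_t *asc = NULL; unsigned long ascLen = 0;
+//   yang_faacEncGetDecoderSpecificInfo(h, &asc, &ascLen); // result copied out via pasc/asclen
+//   yang_faacEncClose(h);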
+#define YANGENCODER_INCLUDE_YANGAUDIOENCODERMETA_H_ +#include "faac.h" +#include "yangutil/sys/YangLoadLib.h" + +class YangAudioEncoderMeta{ +public: + YangAudioEncoderMeta(); + ~YangAudioEncoderMeta(); + void createMeta(uint8_t *pasc,int32_t *asclen); +private: + YangLoadLib m_lib; + void loadLib(); + void unloadLib(); + faacEncHandle (*yang_faacEncOpen)(unsigned long sampleRate, uint32_t numChannels, + unsigned long *inputSamples, unsigned long *maxOutputBytes); + int32_t (*yang_faacEncSetConfiguration)(faacEncHandle hEncoder,faacEncConfigurationPtr config); + int32_t (*yang_faacEncEncode)(faacEncHandle hEncoder, int32_t * inputBuffer, uint32_t samplesInput, + uint8_t *outputBuffer, uint32_t bufferSize); + int32_t (*yang_faacEncClose)(faacEncHandle hEncoder); + faacEncConfigurationPtr + (*yang_faacEncGetCurrentConfiguration)(faacEncHandle hEncoder); + + + int32_t (*yang_faacEncGetDecoderSpecificInfo)(faacEncHandle hEncoder, uint8_t **ppBuffer, + unsigned long *pSizeOfDecoderSpecificInfo); +}; + + + +#endif /* YANGENCODER_INCLUDE_YANGAUDIOENCODERMETA_H_ */ diff --git a/include/yangencoder/YangEncoder.h b/include/yangencoder/YangEncoder.h new file mode 100755 index 00000000..53ff1f10 --- /dev/null +++ b/include/yangencoder/YangEncoder.h @@ -0,0 +1,18 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGENCODER_INCLUDE_YANGENCODER_H_ +#define YANGENCODER_INCLUDE_YANGENCODER_H_ +#include +class YangEncoderCallback{ +public: + YangEncoderCallback(){}; + virtual ~YangEncoderCallback(){}; + virtual void onVideoData(YangFrame* pframe)=0; + virtual void onAudioData(YangFrame* pframe)=0; +}; + + + + +#endif /* YANGENCODER_INCLUDE_YANGENCODER_H_ */ diff --git a/include/yangencoder/YangEncoderFactory.h b/include/yangencoder/YangEncoderFactory.h new file mode 100755 index 00000000..5800f400 --- /dev/null +++ b/include/yangencoder/YangEncoderFactory.h @@ -0,0 +1,24 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGENCODER_INCLUDE_YANGVIDEOENCODERFACTORY_H_ +#define YANGENCODER_INCLUDE_YANGVIDEOENCODERFACTORY_H_ +#include +#include "YangVideoEncoder.h" +#include "yangencoder/YangAudioEncoder.h" +#include + + +class YangEncoderFactory { +public: + YangEncoderFactory(); + virtual ~YangEncoderFactory(); + YangAudioEncoder* createAudioEncoder(YangAudioCodec paet,YangAudioInfo *pcontext); + YangAudioEncoder* createAudioEncoder(YangAudioInfo *pcontext); + YangVideoEncoder* createVideoEncoder(YangVideoCodec paet,YangVideoInfo *pcontext); + YangVideoEncoder* createVideoEncoder(YangVideoInfo *pcontext); + YangVideoEncoderMeta* createVideoEncoderMeta(YangVideoInfo *pcontext); +}; + +#endif /* YANGENCODER_INCLUDE_YANGVIDEOENCODERFACTORY_H_ */ diff --git a/include/yangencoder/YangGpuEncoderFactory.h b/include/yangencoder/YangGpuEncoderFactory.h new file mode 100755 index 00000000..ffd795f1 --- /dev/null +++ b/include/yangencoder/YangGpuEncoderFactory.h @@ -0,0 +1,19 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGGPUENCODERFACTORY_H +#define YANGGPUENCODERFACTORY_H + +#include +#include +class YangGpuEncoderFactory +{ +public: + + YangGpuEncoderFactory(); + ~YangGpuEncoderFactory(); + YangVideoEncoder* createGpuEncoder(); + +}; + +#endif // YANGGPUENCODERFACTORY_H diff --git a/include/yangencoder/YangVideoEncoder.h b/include/yangencoder/YangVideoEncoder.h new file mode 100755 index 00000000..87259633 --- /dev/null +++ b/include/yangencoder/YangVideoEncoder.h @@ -0,0 +1,27 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef 
YANGENCODER_INCLUDE_YANGVideoENCODER_H_ +#define YANGENCODER_INCLUDE_YANGVideoENCODER_H_ +#include +#include "stdint.h" +#include "YangEncoder.h" + +class YangVideoEncoder{ +public: + YangVideoEncoder(); + virtual ~YangVideoEncoder(); + virtual int32_t init(YangVideoInfo *pvp,YangVideoEncInfo *penc)=0; + virtual int32_t encode(YangFrame* pframe, YangEncoderCallback* pcallback)=0; + virtual void setVideoMetaData(YangVideoMeta *pvmd)=0; + virtual void sendKeyFrame()=0; +protected: + int32_t m_isInit; + YangVideoInfo m_videoInfo; + YangVideoEncInfo m_enc; + uint8_t* m_vbuffer; + void setVideoPara(YangVideoInfo *pap,YangVideoEncInfo *penc); +}; + +#endif /* YANGENCODER_INCLUDE_YANGENCODER_H_ */ diff --git a/include/yangencoder/YangVideoEncoderHandle.h b/include/yangencoder/YangVideoEncoderHandle.h new file mode 100755 index 00000000..8051ef49 --- /dev/null +++ b/include/yangencoder/YangVideoEncoderHandle.h @@ -0,0 +1,55 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangVideoEncoderHandle__ +#define __YangVideoEncoderHandle__ +#include "YangVideoEncoder.h" +#include "stdint.h" + +#include + +#include "yangutil/buffer/YangVideoEncoderBuffer.h" +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/sys/YangThread.h" + +using namespace std; +class YangVideoEncoderHandle:public YangThread,public YangEncoderCallback +{ +public: + YangVideoEncoderHandle(YangVideoInfo *pcontext,YangVideoEncInfo *enc); + ~YangVideoEncoderHandle(void); + +private: + int32_t m_isInit; + +public: + int32_t m_isStart; + int32_t m_uid; + void init(); + void stop(); + void setOutVideoBuffer(YangVideoEncoderBuffer * pvl); + void setInVideoBuffer(YangVideoBuffer *pvl); + + //void setRoomState(int32_t pst); + void setVideoMetaData(YangVideoMeta *pvmd); + void sendKeyframe(); + void onVideoData(YangFrame* pframe); + void onAudioData(YangFrame* pframe); +protected: + void run(); + void stopLoop(); + void startLoop(); + void saveFile(char *fileName, uint8_t *pBuffer, long BufferLen); + +private: + YangVideoBuffer *m_in_videoBuffer; + YangVideoEncoderBuffer *m_out_videoBuffer; + YangVideoMeta *m_vmd; + int32_t m_isConvert; + YangVideoInfo *m_para; + YangVideoEncInfo *m_enc; + int32_t m_sendKeyframe; + +}; +#endif diff --git a/include/yangplayer/YangPlayFactory.h b/include/yangplayer/YangPlayFactory.h new file mode 100755 index 00000000..efc458ee --- /dev/null +++ b/include/yangplayer/YangPlayFactory.h @@ -0,0 +1,20 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGPLAYER_INCLUDE_YANGPLAYFACTORY_H_ +#define YANGPLAYER_INCLUDE_YANGPLAYFACTORY_H_ +#include +enum YangAudioPlayType{ + Yang_AP_SDL, + Yang_AP_ALSA, +}; + +class YangPlayFactory { +public: + YangPlayFactory(); + virtual ~YangPlayFactory(); + YangAudioPlay* createAudioPlay(YangAudioInfo *pcontext); + YangAudioPlay *createAudioPlay(YangAudioPlayType paet,YangAudioInfo *pcontext); +}; + +#endif /* YANGPLAYER_INCLUDE_YANGPLAYFACTORY_H_ */ diff --git a/include/yangplayer/YangPlayReceive.h b/include/yangplayer/YangPlayReceive.h new file mode 100755 index 00000000..b5a10625 --- /dev/null +++ b/include/yangplayer/YangPlayReceive.h @@ -0,0 +1,42 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangPlayReceive_H +#define YangPlayReceive_H + +#include +#include +#include +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/buffer/YangVideoDecoderBuffer.h" +#include "yangutil/sys/YangThread.h" +using namespace std; +class YangPlayReceive: public YangThread +{ + public: + 
YangPlayReceive(YangContext* pcontext); + virtual ~YangPlayReceive(); + void receiveAudio(YangFrame* audioFrame); + void receiveVideo(YangFrame* videoFrame); + int32_t init(int32_t nettype,string server,int32_t pport,string stream); + void setBuffer(YangAudioEncoderBuffer *al,YangVideoDecoderBuffer *vl); + void disConnect(); + void play(char* pserverStr,char *streamName); + YangStreamHandle *m_recv; + YangReceiveCallback m_recvCallback; + int32_t isReceived; //,isHandled; + int32_t isReceiveConvert; //,isHandleAllInvoke; + int32_t m_isStart; + void stop(); + protected: + void run(); + void startLoop(); + + private: + int32_t m_headLen; + YangAudioEncoderBuffer *m_out_audioBuffer; + YangVideoDecoderBuffer *m_out_videoBuffer; + YangContext* m_context; +}; + +#endif // VIDEOMEETING_H diff --git a/include/yangplayer/YangPlayerBase.h b/include/yangplayer/YangPlayerBase.h new file mode 100755 index 00000000..bd59c4e5 --- /dev/null +++ b/include/yangplayer/YangPlayerBase.h @@ -0,0 +1,34 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef MeetingPlayoBase_H +#define MeetingPlayoBase_H +#include +#include +#include + +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/buffer/YangAudioPlayBuffer.h" +#include "yangutil/buffer/YangVideoDecoderBuffer.h" + +using namespace std; +class YangPlayerBase +{ + public: + YangPlayerBase(); + virtual ~YangPlayerBase(); + YangPlayerDecoder *m_ydb; + YangPlayerPlay *m_ypb; + + void startAudioDecoder(YangAudioEncoderBuffer *prr); + void startVideoDecoder(YangVideoDecoderBuffer *prr); + void init(YangContext* audio); + void startAudioPlay(YangContext* paudio); + void stopAll(); + protected: + + private: + +}; + +#endif // ZBBASE_H diff --git a/include/yangplayer/YangPlayerDecoder.h b/include/yangplayer/YangPlayerDecoder.h new file mode 100755 index 00000000..3e290c92 --- /dev/null +++ b/include/yangplayer/YangPlayerDecoder.h @@ -0,0 +1,46 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGAPP_YANGDECODERAPP_H_ +#define YANGAPP_YANGDECODERAPP_H_ +#include +#include + +#include + + +#include "yangutil/sys/YangIni.h" + +using namespace std; +class YangPlayerDecoder { +public: + YangPlayerDecoder(YangContext* pcontext); + virtual ~YangPlayerDecoder(); + void initAudioDecoder(); + void initVideoDecoder(); + void setInVideoBuffer(YangVideoDecoderBuffer *pvel); + void setInAudioBuffer(YangAudioEncoderBuffer *pael); + YangVideoBuffer* getOutVideoBuffer(); + YangAudioPlayBuffer* getOutAudioBuffer(); + void startAudioDecoder(); + void startVideoDecoder(); + //YangRoomI* getYangRoomI(); + void setRoomState(int32_t pst); + //void setAddSdl(YangSdlAdd *psa); + void stopAll(); + YangVideoDecoderHandle *m_videoDec; + //YangAudioInfo m_audio;//={0}; +private: + YangContext *m_context; + //YangSynBuffer m_syn; + YangVideoBuffer* m_out_videoBuffer; + YangAudioPlayBuffer* m_out_audioBuffer; + YangAudioDecoderHandle *m_audioDec; + //YangVideoInfo m_video;//={0}; + + //void initList(); +}; + + +#endif /* YANGAPP_YANGDECODERAPP_H_ */ diff --git a/include/yangplayer/YangPlayerHandle.h b/include/yangplayer/YangPlayerHandle.h new file mode 100755 index 00000000..d1ba0b90 --- /dev/null +++ b/include/yangplayer/YangPlayerHandle.h @@ -0,0 +1,21 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGPLAYER_YANGPLAYERHANDLE_H_ +#define INCLUDE_YANGPLAYER_YANGPLAYERHANDLE_H_ +#include +#include +#include +#include +class YangPlayerHandle { +public: + YangPlayerHandle(){}; + virtual ~YangPlayerHandle(){}; + virtual YangVideoBuffer* 
getVideoBuffer()=0; + virtual int play(std::string url,int32_t localport)=0; + virtual int32_t playRtc(int32_t puid,std::string localIp,int32_t localPort, std::string server, int32_t pport,std::string app,std::string stream)=0; + virtual void stopPlay()=0; + static YangPlayerHandle* createPlayerHandle(YangContext* pcontext,YangSysMessageI* pmessage); +}; + +#endif /* INCLUDE_YANGPLAYER_YANGPLAYERHANDLE_H_ */ diff --git a/include/yangplayer/YangPlayerPlay.h b/include/yangplayer/YangPlayerPlay.h new file mode 100755 index 00000000..944f4261 --- /dev/null +++ b/include/yangplayer/YangPlayerPlay.h @@ -0,0 +1,38 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGAPP_YANGPLAYAPP_H_ +#define YANGAPP_YANGPLAYAPP_H_ +#include +#include +#include + +#include + + +#include "yangavutil/audio/YangRtcAec.h" +#include "yangutil/buffer/YangAudioPlayBuffer.h" +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/sys/YangIni.h" + +using namespace std; +class YangPlayerPlay { +public: + YangPlayerPlay(); + virtual ~YangPlayerPlay(); + void initAudioPlay(YangContext* paudio); + void startAudioPlay(); + void setInAudioList(YangAudioPlayBuffer *paudioList); + void stopAll(); +private: + #ifdef _WIN32 + YangWinAudioApiRender *m_audioPlay; +#else + YangAudioPlayAlsa *m_audioPlay; +#endif + int32_t vm_audio_player_start; + +}; + +#endif /* YANGAPP_YANGPLAYAPP_H_ */ diff --git a/include/yangplayer/YangWinPlayFactroy.h b/include/yangplayer/YangWinPlayFactroy.h new file mode 100755 index 00000000..31cd0c3c --- /dev/null +++ b/include/yangplayer/YangWinPlayFactroy.h @@ -0,0 +1,44 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGPLAYER_INCLUDE_YANGWINPLAYFACTROY_H_ +#define YANGPLAYER_INCLUDE_YANGWINPLAYFACTROY_H_ +#include + +class YangPainter{ +public: + YangPainter(); + virtual ~YangPainter(); + virtual void draw(void *win,YangRect *prect,YangColor *pcolor)=0; +}; +class YangWinPlay{ + public: + YangWinPlay(); + virtual ~YangWinPlay(); + virtual void init(void* pid)=0; + virtual void initBg(int32_t pwid,int32_t phei)=0; + virtual void initVideo(int32_t pwid,int32_t phei,YangYuvType sdfe)=0; + virtual void initText(char *pname,YangColor *pcolor)=0; + //virtual void initAll(char *pname,YangColor *pcolor,void* pid,int32_t pwid,int32_t phei,YangYuvType sdfe)=0; + + //virtual void reInitBg(int32_t pwid,int32_t phei)=0; + virtual void reInitVideo(int32_t pwid,int32_t phei,YangYuvType sdfe)=0; + virtual void reInitText(char *pname,YangColor *pcolor)=0; + + + virtual void renderPreview(uint8_t* pdata)=0; + virtual void render(uint8_t* pdata)=0; + virtual void render(uint8_t* pdata,int64_t ptimestamp)=0; + virtual void renderBg(YangColor *pcolor)=0; + int32_t m_width,m_height; +}; +class YangWinPlayFactroy { +public: + YangWinPlayFactroy(); + virtual ~YangWinPlayFactroy(); + YangPainter *createPainter(); + YangWinPlay *createWinPlay(); +}; + +#endif /* YANGPLAYER_INCLUDE_YANGWINPLAYFACTROY_H_ */ diff --git a/include/yangpush/YangPushCapture.h b/include/yangpush/YangPushCapture.h new file mode 100755 index 00000000..5b8d5ec7 --- /dev/null +++ b/include/yangpush/YangPushCapture.h @@ -0,0 +1,89 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGAPP_YangPushCapture_H_ +#define YANGAPP_YangPushCapture_H_ +#include +#include +#include +#include +#include +#include +#include +#include +#include +class YangPushCapture :public YangThread { +public: + YangPushCapture(YangContext *pcontext); + virtual ~YangPushCapture(); +public: + void startCamera(); + void startScreen(); + void 
stopCamera(); + void stopScreen(); + void setScreenInterval(int32_t pinterval); + void setDrawmouse(bool isDraw); + void startVideoCaptureState(); + void startScreenCaptureState(); + void stopVideoCaptureState(); + void stopScreenCaptureState(); + int32_t initVideo(); + int32_t initScreen(); + void startVideoCapture(); + void startScreenCapture(); + YangVideoBuffer * getOutVideoBuffer(); + YangVideoBuffer * getPreVideoBuffer(); + + YangVideoBuffer * getScreenOutVideoBuffer(); + YangVideoBuffer * getScreenPreVideoBuffer(); + void stopAll(); + void change(int32_t st); + + + +#if Yang_HaveVr + void addVr(); + void delVr(); +#endif +private: + YangAudioCapture *m_audioCapture; + YangMultiVideoCapture *m_videoCapture; + + YangVideoBuffer *m_out_videoBuffer; + YangVideoBuffer *m_pre_videoBuffer; + + YangVideoBuffer *m_screen_pre_videoBuffer; + YangVideoBuffer *m_screen_out_videoBuffer; + YangScreenCapture* m_screenCapture; + + YangContext *m_context; + YangAudioBuffer *m_out_audioBuffer; + +#if Yang_HaveVr + YangVideoBuffer *m_out_vr_pre_videoBuffer; +#endif +public: + void stop(); + int32_t m_isStart; +protected: + void run(); +#if Yang_HaveVr + void startLoop(); +#else + void startLoop(){}; + #endif + void stopLoop(); + void initVr(); + + int32_t m_isConvert; +public: + int32_t initAudio(YangPreProcess *pp); + void startAudioCapture(); + YangAudioBuffer* getOutAudioBuffer(); + void stopAudioCaptureState(); + void startAudioCaptureState(); + void setAec(YangRtcAec *paec); + void setInAudioBuffer(vector *pbuf); +}; + +#endif /* YANGAPP_YANGCAPTUREAPP_H_ */ diff --git a/include/yangpush/YangPushCommon.h b/include/yangpush/YangPushCommon.h new file mode 100755 index 00000000..588674fb --- /dev/null +++ b/include/yangpush/YangPushCommon.h @@ -0,0 +1,27 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGPUSH_YANGPUSHCOMMON_H_ +#define INCLUDE_YANGPUSH_YANGPUSHCOMMON_H_ + + +#define Yang_VideoSrc_Camera 0 +#define Yang_VideoSrc_Screen 1 +#define Yang_VideoSrc_OutInterface 2 +enum YangPushMessageType { + YangM_Push_StartAudioCapture, + YangM_Push_StartVideoCapture, + YangM_Push_StartScreenCapture, + YangM_Push_StartOutCapture, + YangM_Push_Publish_Start, + YangM_Push_Publish_Stop, + YangM_Push_Record_Start, + YangM_Push_Record_Stop, + YangM_Push_SwitchToCamera, + YangM_Push_SwitchToScreen, + YangM_Sys_Setvr, + YangM_Sys_UnSetvr + +}; + +#endif /* INCLUDE_YANGPUSH_YANGPUSHCOMMON_H_ */ diff --git a/include/yangpush/YangPushFactory.h b/include/yangpush/YangPushFactory.h new file mode 100755 index 00000000..c0eb3c77 --- /dev/null +++ b/include/yangpush/YangPushFactory.h @@ -0,0 +1,19 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGPUSH_YANGPUSHFACTORY_H_ +#define INCLUDE_YANGPUSH_YANGPUSHFACTORY_H_ +#include +#include +#include +class YangPushFactory { +public: + YangPushFactory(); + virtual ~YangPushFactory(); + YangPushHandle* createPushHandle(bool hasAudio,bool initVideo,int pvideotype,YangVideoInfo *screenvideo, YangVideoInfo *outvideo,YangContext* pcontext,YangSysMessageI* pmessage); + YangSysMessageHandle* createPushMessageHandle(bool hasAudio,bool initVideo,int pvideotype,YangVideoInfo *screenvideo, YangVideoInfo *outvideo,YangContext* pcontext,YangSysMessageI* pmessage,YangSysMessageHandleI* pmessagehandle); + YangVideoBuffer* getPreVideoBuffer(YangSysMessageHandle* pmessageHandle); + YangSendVideoI* getSendVideo(YangSysMessageHandle* pmessageHandle); +}; + +#endif /* INCLUDE_YANGPUSH_YANGPUSHFACTORY_H_ */ diff --git a/include/yangpush/YangPushHandle.h 
b/include/yangpush/YangPushHandle.h new file mode 100755 index 00000000..11d46b0d --- /dev/null +++ b/include/yangpush/YangPushHandle.h @@ -0,0 +1,35 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGPUSH_YANGPUSHHANDLE_H_ +#define INCLUDE_YANGPUSH_YANGPUSHHANDLE_H_ +#include +#include +#include +#include +#include +#include +class YangPushHandle { +public: + YangPushHandle(); + virtual ~YangPushHandle(); + virtual int publish(std::string url,std::string localIp,int32_t localport)=0; + virtual void disconnect()=0; + virtual void recordFile(char* filename)=0; + virtual void stopRecord()=0; + virtual void init()=0; + virtual void changeSrc(int videoSrcType,bool pisinit)=0; + + virtual void setScreenVideoInfo(int videoSrcType,YangVideoInfo* pvideo)=0; + virtual void setScreenInterval(int32_t pinterval)=0; + virtual void setDrawmouse(bool isDraw)=0; + virtual YangVideoBuffer* getPreVideoBuffer()=0; + + virtual YangSendVideoI* getSendVideo()=0; + //virtual YangVideoBuffer* getScreenPreVideoBuffer()=0; + +}; + + + +#endif /* INCLUDE_YANGPUSH_YANGPUSHHANDLE_H_ */ diff --git a/include/yangpush/YangPushPublish.h b/include/yangpush/YangPushPublish.h new file mode 100755 index 00000000..d5fd97e6 --- /dev/null +++ b/include/yangpush/YangPushPublish.h @@ -0,0 +1,82 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangPushPublish_H +#define YangPushPublish_H +#include +#include +#include + +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/buffer/YangAudioBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/buffer/YangVideoEncoderBuffer.h" +#include "yangutil/buffer/YangVideoBuffer.h" +class YangPushPublish: public YangSendRequestCallback { +public: + YangPushPublish(YangContext *pcontext); + virtual ~YangPushPublish(); + + void setCaptureType(int pct); + + void startCamera(); + void startScreen(); + void stopCamera(); + void stopScreen(); + void setScreenInterval(int32_t pinterval); + void setDrawmouse(bool isDraw); + void setNetBuffer(YangRtcPublish *prr); + void startPubVideo(); + void startPubAudio(); + void initAudioEncoding(); + void initVideoEncoding(); + void setVideoInfo(YangVideoInfo *pvideo); + int32_t startAudioCapture(); + int32_t startVideoCapture(); + int32_t startScreenCapture(); + + void initVideoMeeting(); + void startAudioEncoding(); + void startVideoEncoding(); + void deleteVideoEncoding(); + void startAudioCaptureState(); + void startVideoCaptureState(); + void startScreenCaptureState(); + void stopAudioCaptureState(); + void stopVideoCaptureState(); + void stopScreenCaptureState(); + YangVideoBuffer* getPreVideoBuffer(); + YangVideoBuffer* getScreenPreVideoBuffer(); + YangVideoBuffer* getScreenOutVideoBuffer(); + YangVideoBuffer* getOutPreVideoBuffer(); + YangVideoBuffer* getOutVideoBuffer(); + void stopAll(); + void setInAudioBuffer(vector *pbuf); + void change(int32_t st); +#if Yang_HaveVr + void addVr(); + void delVr(); +#endif + void sendRequest(int32_t puid, uint32_t ssrc, YangRequestType req); + void sendKeyframe(); + YangPushCapture* getPushCapture(); +protected: + +private: + YangVideoBuffer* m_outVideoBuffer; + YangVideoBuffer* m_outPreVideoBuffer; +private: + YangContext *m_context; + YangPushEncoder *m_encoder; + YangPushCapture *m_capture; + + YangVideoInfo *m_videoInfo; + int32_t isStartAudioCapture, isStartVideoCapture, isStartScreenCapture; + int32_t isStartAudioEncoder, isStartVideoEncoder; + void stopAudioState(); + void stopVideoState(); + void initCapture(); + int m_captureType; +}; + +#endif // 
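+// A plausible publish sequence with this class, inferred from the declarations
+// above (exact call order and ownership are not specified by this header):
+//   YangPushPublish pub(context);             // context: an initialized YangContext*
+//   pub.setCaptureType(Yang_VideoSrc_Camera); // constant from YangPushCommon.h
+//   pub.setNetBuffer(rtcPublish);             // rtcPublish: a YangRtcPublish* (next header)
+//   pub.startVideoCapture();
+//   pub.initVideoEncoding();
+//   pub.startVideoEncoding();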
diff --git a/include/yangpush/YangRtcPublish.h b/include/yangpush/YangRtcPublish.h new file mode 100755 index 00000000..3cfa73cc --- /dev/null +++ b/include/yangpush/YangRtcPublish.h @@ -0,0 +1,55 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef SRC_YANGMEETING_INCLUDE_YangRtcPublish_H_ +#define SRC_YANGMEETING_INCLUDE_YangRtcPublish_H_ +#include +#include +#include +#include +#include +#include +#include +using namespace std; +class YangRtcPublish: public YangThread { +public: + YangRtcPublish(YangContext *pcontext); + virtual ~YangRtcPublish(); + + int32_t init(int32_t nettype, string server,string localIp,int32_t localPort, int32_t pport, + string app,string stream); + int32_t connectServer(int32_t puid); + int32_t connectMediaServer(); + int32_t disConnectMediaServer(); + int32_t reconnectMediaServer(); + void setInVideoMetaData(YangVideoMeta *pvmd); + void setInAudioList(YangAudioEncoderBuffer *pbuf); + void setInVideoList(YangVideoEncoderBuffer *pbuf); + + int32_t stopPublishAudioData(); + int32_t stopPublishVideoData(); + + int32_t m_netState; + int32_t isPublished; + int32_t m_isStart; + void stop(); +protected: + void run(); + void handleError(int32_t perrCode); + void startLoop(); + void startLoop_h265(); + YangContext *m_context; + YangVideoMeta *m_vmd; + YangVideoEncoderBuffer *m_in_videoBuffer; + YangAudioEncoderBuffer *m_in_audioBuffer; + + int32_t m_isConvert; + int32_t m_isInit; + int32_t m_audioEncoderType; + std::vector m_pushs; +private: + int32_t m_transType; + int32_t notifyState; +}; + +#endif /* SRC_YANGMEETING_INCLUDE_YangRtcPublish_H_ */ diff --git a/include/yangpush/YangSendVideoI.h b/include/yangpush/YangSendVideoI.h new file mode 100755 index 00000000..14fe922a --- /dev/null +++ b/include/yangpush/YangSendVideoI.h @@ -0,0 +1,18 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGPUSH_YANGSENDVIDEOI_H_ +#define INCLUDE_YANGPUSH_YANGSENDVIDEOI_H_ +#include +class YangSendVideoI{ +public: + YangSendVideoI(){}; + virtual ~YangSendVideoI(){}; + virtual void putVideoRgba(uint8_t* data,int len,int64_t timestamp)=0; + virtual void putVideoI420(uint8_t* data,int len,int64_t timestamp)=0; +}; + + + + +#endif /* INCLUDE_YANGPUSH_YANGSENDVIDEOI_H_ */ diff --git a/include/yangrecliving/YangLivingHandle.h b/include/yangrecliving/YangLivingHandle.h new file mode 100755 index 00000000..87696d8e --- /dev/null +++ b/include/yangrecliving/YangLivingHandle.h @@ -0,0 +1,29 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGRTMP_INCLUDE_YangRecordLivingHandle_H_ +#define YANGRTMP_INCLUDE_YangRecordLivingHandle_H_ + + +#include +#include +#include "yangutil/buffer/YangVideoBuffer.h" +class YangLivingHandle +{ +public: + YangLivingHandle(); + virtual ~YangLivingHandle(); + + virtual void init()=0; + virtual void recordFile()=0; + virtual void stopRecord()=0; + virtual void pauseRecord()=0; + virtual void resumeRecord()=0; + virtual YangVideoBuffer* getPrebuffer()=0; + + virtual void startVr(char* pbgfile)=0; + virtual void stopVr()=0; + + +}; +#endif diff --git a/include/yangrecliving/YangLivingType.h b/include/yangrecliving/YangLivingType.h new file mode 100755 index 00000000..92c63475 --- /dev/null +++ b/include/yangrecliving/YangLivingType.h @@ -0,0 +1,45 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGRECLIVING_INCLUDE_YANGRECORDTYPE_H_ +#define YANGRECLIVING_INCLUDE_YANGRECORDTYPE_H_ +#include + +struct YangRecordParam{ + char app[20]; + char filePath[128]; + int32_t roomId; + int32_t hzhType; + int32_t isMp4; + int32_t 
fileTimeLen; + int32_t livingModel; + int32_t recordModel; + int32_t mode; +}; +class YangRecordContext:public YangContext{ +public: + YangRecordContext(); + ~YangRecordContext(); +public: + void initExt(void *filename); + YangRecordParam record; + //YangCameraParam camera; + int32_t createFile; + int32_t createRtmp; + + int32_t createFile3; + int32_t createRtmp3; + char filename[50]; + char filenames[50]; + char bgFilename[128]; +}; +enum YangRecodeModuleType{ + Yang_Record_Film=1, + Yang_Record_Hzh, + Yang_Record_Res, + Yang_Record_Both +}; + + +#endif /* YANGRECLIVING_INCLUDE_YANGRECORDTYPE_H_ */ diff --git a/include/yangrecliving/YangRecMessageI.h b/include/yangrecliving/YangRecMessageI.h new file mode 100755 index 00000000..4510e528 --- /dev/null +++ b/include/yangrecliving/YangRecMessageI.h @@ -0,0 +1,38 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGRECLIVING_YANGRECMESSAGEI_H_ +#define INCLUDE_YANGRECLIVING_YANGRECMESSAGEI_H_ +#include "yangutil/sys/YangSysMessageI.h" +enum YangRecMessageType { + YangM_Rec_Shutdown, + YangM_Rec_ConnectServer, + YangM_Rec_DisconnectServer, + YangM_Rec_ConnectServerInterrupt, + + YangM_Rec_PushMediaServerConnect, + YangM_Rec_PlayMediaServerConnect, + YangM_Rec_PushMediaServerError, + YangM_Rec_PlayMediaServerError, + + YangM_Rec_Start, + YangM_Rec_Stop, + + YangM_Rec_Setvr, + YangM_Rec_UnSetvr, + YangM_Rec_Pubaudio, + YangM_Rec_Pubvideo, + YangM_Rec_UnPubaudio, + YangM_Rec_UnPubvideo, + YangM_Rec_Film_Start, + YangM_Rec_Film_Stop, + YangM_Rec_Pg_Start, + YangM_Rec_pg_Stop, + YangM_Rec_Screen_Start, + YangM_Rec_Screen_Stop +}; + + + + +#endif /* INCLUDE_YANGRECLIVING_YANGRECMESSAGEI_H_ */ diff --git a/include/yangrecliving/YangRecordUtilFactory.h b/include/yangrecliving/YangRecordUtilFactory.h new file mode 100755 index 00000000..048d8a2d --- /dev/null +++ b/include/yangrecliving/YangRecordUtilFactory.h @@ -0,0 +1,30 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGRECLIVING_INCLUDE_YANGRECORDUTILFACTORY_H_ +#define YANGRECLIVING_INCLUDE_YANGRECORDUTILFACTORY_H_ +#include +#include "YangVrHandle.h" +#include "YangLivingHandle.h" +#include "YangScreenHandle.h" +#include "yangutil/sys/YangSysMessageHandle.h" + + +class YangRecordUtilFactory { +public: + YangRecordUtilFactory(); + virtual ~YangRecordUtilFactory(); + void createIni(const char* p_filename,YangRecordContext *pcontext); + void createIni(const char* p_filename,YangRecordParam *pcontext); + YangVrHandle* createRecordHandle(YangRecordContext *pcontext); + YangLivingHandle* createLivingHandle(YangRecordContext *pcontext); + //YangScreenHandle* createScreenHandle(YangRecordContext *pcontext); + + YangSysMessageHandle* createVrRecMessageHandle(YangRecordContext *pcontext); + YangSysMessageHandle* createRecMessageHandle(YangRecordContext *pcontext); + + YangVrHandle* getVrHandle(YangSysMessageHandle* pms); + YangLivingHandle* getLivingHandle(YangSysMessageHandle* pms); +}; + +#endif /* YANGRECLIVING_INCLUDE_YANGRECORDUTILFACTORY_H_ */ diff --git a/include/yangrecliving/YangScreenHandle.h b/include/yangrecliving/YangScreenHandle.h new file mode 100755 index 00000000..60a2544b --- /dev/null +++ b/include/yangrecliving/YangScreenHandle.h @@ -0,0 +1,32 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGRECLIVING_YANGSCREENHANDLE_H_ +#define INCLUDE_YANGRECLIVING_YANGSCREENHANDLE_H_ +#include +#include +#include "yangutil/buffer/YangVideoBuffer.h" +class YangScreenHandle +{ +public: + + YangScreenHandle(); + virtual ~YangScreenHandle(); + + 
virtual void init()=0; + virtual void recordFile()=0; + virtual void stopRecord()=0; + //virtual void pauseRecord()=0; + //virtual void resumeRecord()=0; + virtual YangVideoBuffer* getPrebuffer()=0; + + virtual void startScreen()=0; + virtual void stopScreen()=0; + //YangRecordContext *m_para; + +}; + + + + +#endif /* INCLUDE_YANGRECLIVING_YANGSCREENHANDLE_H_ */ diff --git a/include/yangrecliving/YangVrHandle.h b/include/yangrecliving/YangVrHandle.h new file mode 100755 index 00000000..d134f636 --- /dev/null +++ b/include/yangrecliving/YangVrHandle.h @@ -0,0 +1,42 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGRECLIVING_SRC_YANGVRHANDLE_H_ +#define YANGRECLIVING_SRC_YANGVRHANDLE_H_ +#include +#include +#include "yangutil/buffer/YangVideoBuffer.h" + +class YangVrHandle { +public: + YangVrHandle(); + virtual ~YangVrHandle(); + virtual void setVideoCount(int32_t vcount)=0; + virtual void setRecordModule(int32_t mo)=0; + virtual void setHzhtype(int32_t ttype)=0; + virtual void startRecordWave(char* filename)=0; + virtual void stopRecordWave()=0; + virtual void initPara(YangRecordContext *pra)=0; + virtual void initAll()=0; + virtual void reset()=0; + + + virtual void startRecordLiving()=0; + virtual void stopRecordLiving()=0; + virtual void startFilmLiving()=0; + virtual void stopFilmLiving()=0; + virtual void startMultiLiving()=0; + virtual void stopMultiLiving()=0; + + + // virtual void closeAll()=0; + virtual void pause()=0; + virtual void resume()=0; + virtual void change(int32_t st)=0; + virtual int32_t isValid(int32_t p_vtype)=0; + virtual std::vector* getPreVideoBuffer()=0; + + +}; + +#endif /* YANGRECLIVING_SRC_YANGVRHANDLE_H_ */ diff --git a/include/yangrecord/YangFlvWrite.h b/include/yangrecord/YangFlvWrite.h new file mode 100755 index 00000000..b10face6 --- /dev/null +++ b/include/yangrecord/YangFlvWrite.h @@ -0,0 +1,99 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangFlvWriter__ +#define __YangFlvWriter__ +#include "stdlib.h" +#include "stdio.h" +#include +#include + +//#include "flv_types.h" +//#include "as_objects.h" + +#define FLV_CODEC_NONE -1 +#define FLV_CODEC_FLV1 2 // Sorenson H.263 +#define FLV_CODEC_FLV4 4 // On2 VP6 +#define FLV_CODEC_H264 7 // H.264 + +#define FLV_CODEC_MP3 2 // MP3 +#define FLV_CODEC_AAC 10 // AAC + + +//typedef long __int64; +typedef uint8_t BYTE; +class YangFlvWriter { +public: + + int32_t video_codec; + int32_t audio_codec; + + + double video_fps; + int32_t audio_channels; + int32_t audio_samplerate; + + // time helpers + bool is_first; + int64_t time_first_ms; // timestamp of the first packet + int32_t time_last_ms; // timestamp of the last packet + int32_t duration_ms; // calculated duration (for the onMetaData tag) + int32_t video_raw_size; // size of all video packets + int32_t video_frames; // total video frame count + int32_t file_size; + + int32_t last_tag_size; // helper for writing FLV file + long metadatapos; // byte position in the file + + + +public: + YangFlvWriter(char* fileName, YangAudioInfo *paudio,YangVideoInfo *pvideo); + virtual ~YangFlvWriter(); + void setAudioAac(); + // stream config + FILE *file; + int32_t Reset(); + + int32_t Start(); + int32_t Stop(); + void Close(); + //void ConfigAll(); + int32_t WriteVideoPacket(YangFrame* videoFrame); + int32_t WriteAudioPacket(YangFrame* audioFrame); + void WriteVideoInfo(uint8_t* buf1, int32_t buflen); + double framerate, i_bitrate, i_level_idc; + uint32_t vtime,pre_vt; + uint32_t atime,pre_at; + + //int32_t tick, tickinterval, 
tickinterval1; + //int32_t tick; + + double atime1; + double perSt; +private: + YangAudioInfo *m_audio; + YangVideoInfo *m_video; + BYTE prev[4]; + BYTE tag_hdr[20]; + // write the data + int32_t m_Video_Bit_Count, m_Video_Width, m_Video_Height; + + short vcount; + int32_t vtcou; + int32_t WriteMetaData(); + + static int32_t MakeAVCc(char* data, int32_t size, char *output_data, + int32_t output_size); + + char * put_amf_string(char *c, const char *str); + char * put_amf_double(char *c, double d); + //char * put_byte(char *output, uint8_t nVal); + + //int32_t WriteExtradata(GUID type); + //AM_MEDIA_TYPE *amt; + //AM_MEDIA_TYPE *vmt; + //int32_t ConfigStream(int32_t isVideo); +}; + +#endif diff --git a/include/yangrecord/YangMp4File.h b/include/yangrecord/YangMp4File.h new file mode 100755 index 00000000..0445891f --- /dev/null +++ b/include/yangrecord/YangMp4File.h @@ -0,0 +1,118 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangMp4File____ +#define __YangMp4File____ +#include + +#include "faac.h" +#include "yangutil/sys/YangLoadLib.h" +#include "mp4v2/mp4v2.h" + + +class YangMp4File{ + +public : + YangMp4File( char* fileName,YangVideoInfo *pvideo); + ~YangMp4File(); + MP4FileHandle m_MP4hFile ; + MP4TrackId m_mp4Audiotrack ; + MP4TrackId m_mp4Videotrack ; + uint32_t m_ntracks, m_trackno ; + uint32_t m_ndiscs , m_discno ; + const char *m_artist ; + + double m_newtick,m_oldtick,m_newalltick,m_oldalltick,m_tmptick; + int32_t m_tick,m_interval,m_interval1; + uint32_t m_total_samples; + uint32_t m_encoded_samples; + uint32_t m_delay_samples; + uint32_t m_frameSize; + int32_t m_cou; + + + + void init(uint8_t *p_spsBuf, int32_t p_spsLen); + void closeMp4(); + + int32_t WriteVideoPacket(YangFrame* videoFrame); + int32_t WriteAudioPacket(YangFrame* audioFrame); + void WriteVideoInfo(uint8_t *p_vpsBuf,int32_t p_vpsLen,uint8_t *p_spsBuf, int32_t p_spsLen,uint8_t *p_ppsBuf, int32_t p_ppsLen); + void WriteAudioInfo(uint8_t *pasc,unsigned long pasclen,uint8_t* buf1,int32_t buflen); + double m_framerate,m_bitrate, m_level_idc; +private: + YangVideoInfo *m_context; + YangLoadLib m_lib; + void loadLib(); + void unloadLib(); + const MP4Tags* (*yang_MP4TagsAlloc)( void ); + void (*yang_MP4TagsFree)( const MP4Tags* tags ); + MP4FileHandle (*yang_MP4Create)(const char* fileName,uint32_t flags); + bool (*yang_MP4TagsFetch)( const MP4Tags* tags, MP4FileHandle hFile ); + bool (*yang_MP4TagsSetSortArtist) ( const MP4Tags*, const char* ); + bool (*yang_MP4TagsStore)( const MP4Tags* tags, MP4FileHandle hFile ); + bool (*yang_MP4SetTimeScale)( MP4FileHandle hFile, uint32_t value ); + void (*yang_MP4SetVideoProfileLevel)( MP4FileHandle hFile, uint8_t value ); + void (*yang_MP4SetAudioProfileLevel)( MP4FileHandle hFile, uint8_t value ); + MP4TrackId (*yang_MP4AddAudioTrack)( MP4FileHandle hFile, uint32_t timeScale, + MP4Duration sampleDuration, uint8_t audioType ); + MP4TrackId (*yang_MP4AddH264VideoTrack)( + MP4FileHandle hFile, uint32_t timeScale,MP4Duration sampleDuration,uint16_t width,uint16_t height, uint8_t AVCProfileIndication + ,uint8_t profile_compat, uint8_t AVCLevelIndication, uint8_t sampleLenFieldSizeMinusOne ); + bool (*yang_MP4SetTrackESConfiguration)( + MP4FileHandle hFile, + MP4TrackId trackId, + const uint8_t* pConfig, + uint32_t configSize ); + void (*yang_MP4AddH264SequenceParameterSet)( + MP4FileHandle hFile, + MP4TrackId trackId, + const uint8_t* pSequence, + uint16_t sequenceLen ); + void (*yang_MP4AddH264PictureParameterSet)( + MP4FileHandle hFile, + MP4TrackId trackId, + const 
uint8_t* pPict, + uint16_t pictLen ); + bool (*yang_MP4WriteSample)( + MP4FileHandle hFile, + MP4TrackId trackId, + const uint8_t* pBytes, + uint32_t numBytes, + MP4Duration duration, + MP4Duration renderingOffset, + bool isSyncSample); + void (*yang_MP4Close)( + MP4FileHandle hFile, + uint32_t flags); + + MP4TrackId (*yang_MP4AddH265VideoTrack)( + MP4FileHandle hFile, + uint32_t timeScale, + MP4Duration sampleDuration, + uint16_t width, + uint16_t height, + uint8_t isIso); + void (*yang_MP4AddH265SequenceParameterSet)( + MP4FileHandle hFile, + MP4TrackId trackId, + const uint8_t* pSequence, + uint16_t sequenceLen ); + void (*yang_MP4AddH265PictureParameterSet)( + MP4FileHandle hFile, + MP4TrackId trackId, + const uint8_t* pPict, + uint16_t pictLen ); + void (*yang_MP4AddH265VideoParameterSet) (MP4FileHandle hFile, + MP4TrackId trackId, + const uint8_t *pSequence, + uint16_t sequenceLen); + uint32_t (*yang_MP4GetTrackTimeScale)( + MP4FileHandle hFile, + MP4TrackId trackId ); + bool (*yang_MP4SetTrackTimeScale)( + MP4FileHandle hFile, + MP4TrackId trackId, + uint32_t value ); +}; +#endif diff --git a/include/yangrecord/YangMp4FileApp.h b/include/yangrecord/YangMp4FileApp.h new file mode 100755 index 00000000..a6afb097 --- /dev/null +++ b/include/yangrecord/YangMp4FileApp.h @@ -0,0 +1,35 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef SRC_YANGRECORD_SRC_YangMp4FileApp_H_ +#define SRC_YANGRECORD_SRC_YangMp4FileApp_H_ +#include +#include "yangrecord/YangRecEncoder.h" +#include "yangrecord/YangRecordMp4.h" + +class YangMp4FileApp { +public: + YangMp4FileApp(YangAudioInfo *paudio,YangVideoInfo *pvideo,YangVideoEncInfo *penc); + virtual ~YangMp4FileApp(); + + YangRecEncoder *m_enc; + YangRecordMp4 *m_rec; + +public: + void init(); + void startRecordMp4(char *filename0,int32_t p_module,int32_t p_isMp4); + void stopRecordMp4(); + void setInAudioBuffer(YangAudioBuffer *pbuf); + void setInVideoBuffer(YangVideoBuffer *pbuf); + void setFileTimeLen(int32_t ptlen_min); + void pauseRecord(); + void resumeRecord(); +private: + YangAudioInfo *m_audio; + YangVideoInfo *m_video; + YangVideoEncInfo *m_encPara; + int32_t m_isPause; +}; + +#endif /* SRC_YANGRECORD_SRC_YANGRECORDHANDLE_H_ */ diff --git a/include/yangrecord/YangRecEncoder.h b/include/yangrecord/YangRecEncoder.h new file mode 100755 index 00000000..e0bedf35 --- /dev/null +++ b/include/yangrecord/YangRecEncoder.h @@ -0,0 +1,41 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef SRC_YANGRECORD_SRC_YANGRECENCODER_H_ +#define SRC_YANGRECORD_SRC_YANGRECENCODER_H_ +#include "yangencoder/YangAudioEncoderHandle.h" +#include "yangencoder/YangVideoEncoderHandle.h" +#include "yangutil/buffer/YangAudioBuffer.h" +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/buffer/YangVideoEncoderBuffer.h" + +class YangRecEncoder { +public: + YangRecEncoder(YangAudioInfo *paudio,YangVideoInfo *pvideo,YangVideoEncInfo *penc); + virtual ~YangRecEncoder(); +public: + void initVideoEncoder(); + void initAudioEncoder(); + void startAudioEncoder(); + void startVideoEncoder(); + //void setAVList(YangAudioList *pal,YangVideoList *pvl); + void setInAudioBuffer(YangAudioBuffer *pal); + void setInVideoBuffer(YangVideoBuffer *pvl); + YangAudioEncoderBuffer * getOutAudioBuffer(); + YangVideoEncoderBuffer * getOutVideoBuffer(); + YangVideoMeta * getOutVideoMetaData(); + YangVideoEncoderHandle *m_ve=NULL; + YangAudioEncoderHandle *m_ae=NULL; +private: + YangAudioInfo *m_audio; + YangVideoInfo 
*m_video; + YangVideoEncInfo *m_enc; + YangVideoMeta *m_vmd; + YangAudioEncoderBuffer *m_out_auidoBuffer; + YangVideoEncoderBuffer *m_out_videoBuffer; + +}; + +#endif /* SRC_YANGRECORD_SRC_YANGRECENCODER_H_ */ diff --git a/include/yangrecord/YangRecord.h b/include/yangrecord/YangRecord.h new file mode 100755 index 00000000..f1d158ec --- /dev/null +++ b/include/yangrecord/YangRecord.h @@ -0,0 +1,60 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGRECORD_INCLUDE_YANGRECORD_H_ +#define YANGRECORD_INCLUDE_YANGRECORD_H_ + +#include "yangutil/sys/YangTime.h" +#include "YangFlvWrite.h" + +#include "YangMp4File.h" + +struct YangMp4Para { + YangVideoMeta *vmd; + uint8_t asc[10]; + char *fileName; + int32_t ascLen; +}; +class YangRecord { +public: + YangRecord(YangAudioInfo *paudio, YangVideoInfo *pvideo, + YangVideoEncInfo *penc); + ~YangRecord(void); + + int32_t isMp4; + void initPara(YangVideoMeta *p_vmd, char *filename, int32_t p_isMp4); + void setFileTimeLen(int32_t ptlen_min); + void pauseRec(); + void resumeRec(); + void writeVideoData(YangFrame* videoFrame); + void writeAudioData(YangFrame* audioFrame); + void closeRec(); + YangMp4File *mp4; + YangFlvWriter *flv; + YangAudioInfo *m_audio; + YangVideoInfo *m_video; + YangVideoEncInfo *m_enc; + + int64_t oldalltick, alltick; + int32_t curVideoTimestamp; + long videoDestLen; + int64_t preVideotimestamp, basestamp, minusStamp; + +private: + int64_t m_prePauseTime, m_afterPauseTime; + int64_t m_pauseTime; + int64_t m_alltime1; + int32_t m_alltime; + int32_t m_fileTimeLen; + int32_t m_isCreateNewFile; + //YangTime m_time; + YangMp4Para m_mp4Para; + void createNewfile(); + void createFile(char *filename); + void initRecPara(); + int32_t m_fileId; + //YangAudioFrame m_audioFrame; + //YangVideoFrame m_videoFrame; +}; + +#endif /* YANGRECORD_INCLUDE_YANGRECORD_H_ */ diff --git a/include/yangrecord/YangRecordApp.h b/include/yangrecord/YangRecordApp.h new file mode 100755 index 00000000..ae1f5062 --- /dev/null +++ b/include/yangrecord/YangRecordApp.h @@ -0,0 +1,30 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGRECORD_INCLUDE_YANGRECORDAPP_H_ +#define YANGRECORD_INCLUDE_YANGRECORDAPP_H_ +#include +#include +#include "YangMp4FileApp.h" +#include "YangRecordCapture.h" + +class YangRecordApp { +public: + YangRecordApp(YangContext* pcontext); + virtual ~YangRecordApp(); + YangRecordCapture *m_cap; + YangMp4FileApp *m_rec; + void init(); + void recordFile(char* filename); + void stopRecord(); + void pauseRecord(); + void resumeRecord(); + +private: + YangContext* m_context; + + +}; + +#endif /* YANGRECORD_INCLUDE_YANGRECORDAPP_H_ */ diff --git a/include/yangrecord/YangRecordCapture.h b/include/yangrecord/YangRecordCapture.h new file mode 100755 index 00000000..8e19a4c8 --- /dev/null +++ b/include/yangrecord/YangRecordCapture.h @@ -0,0 +1,79 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGAPP_YangReczbCapture_H_ +#define YANGAPP_YangReczbCapture_H_ +#include +#include +#include +#include +#include +#include +#include "yangutil/buffer/YangAudioBuffer.h" +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "yangutil/buffer/YangAudioPlayBuffer.h" +#include "yangutil/sys/YangThread.h" +using namespace std; + +class YangRecordCapture:public YangThread { +public: + YangRecordCapture(YangContext* pcontext); + virtual ~YangRecordCapture(); +public: + void startAudioCaptureState(); + void startVideoCaptureState(); + void stopAudioCaptureState(); + void stopVideoCaptureState(); + 
void startPauseCaptureState(); + void stopPauseCaptureState(); + void initAudio(YangPreProcess *pp); + void initVideo(); + void startAudioCapture(); + void startVideoCapture(); + void stopAudioCapture(); + void stopVideoCapture(); + void setAec(YangRtcAec *paec); + void setInAudioBuffer(vector *pbuf); + + void startVr(char* pbg); + void stopVr(); + + void startScreen(); + void stopScreen(); + + YangAudioBuffer * getOutAudioBuffer(); + YangVideoBuffer * getOutVideoBuffer(); + YangVideoBuffer * getPreVideoBuffer(); + YangAudioCapture *m_audioCapture; + //YangMultiVideoCapture *m_videoCapture; + YangVideoCapture *m_videoCapture; + YangScreenCapture *m_screenCapture; + + int32_t m_isStart; + void stop(); +protected: + void run(); + void startVrLoop(); + void startScreenLoop(); + void stopLoop(); + int32_t m_isConvert; + int32_t m_isScreen; + string m_bgFileName; +private: + //YangAudioInfo *m_audio; + //YangVideoInfo *m_video; + YangContext* m_context; + YangCaptureFactory m_capture; + YangAudioBuffer *m_out_audioBuffer; + YangVideoBuffer *m_out_videoBuffer; + YangVideoBuffer *m_pre_videoBuffer; + + YangVideoBuffer *m_vr_videoBuffer; + YangVideoBuffer *m_screen_videoBuffer; + + +}; + +#endif /* YANGAPP_YANGCAPTUREAPP_H_ */ diff --git a/include/yangrecord/YangRecordMp4.h b/include/yangrecord/YangRecordMp4.h new file mode 100755 index 00000000..bc18abc2 --- /dev/null +++ b/include/yangrecord/YangRecordMp4.h @@ -0,0 +1,78 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef _YangRecordMp4_H +#define _YangRecordMp4_H +#include "yangutil/sys/YangThread.h" +#include "yangutil/sys/YangTime.h" +#include "yangutil/buffer/YangVideoEncoderBuffer.h" +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "YangFlvWrite.h" +#include "YangMp4File.h" +struct YangMp4FilePara{ + YangVideoMeta *vmd; + uint8_t asc[10]; + char *fileName; + int32_t ascLen; +}; +class YangRecordMp4:public YangThread +{ +public: + YangRecordMp4(YangAudioInfo *paudio,YangVideoInfo *pvideo,YangVideoEncInfo *penc); + ~YangRecordMp4(void); + + int32_t isMp4; + int32_t m_isStart; + void setInVideoBuffer(YangVideoEncoderBuffer *pbuf); + void setInAudioBuffer(YangAudioEncoderBuffer *pbuf); + void initPara(YangVideoMeta *p_vmd, char *filename,int32_t p_isMp4); + void setFileTimeLen(int32_t ptlen_min); + void pauseRec(); + void resumeRec(); + void stop(); +protected: + void run(); +private: + void startLoop(); + void stopLoop(); + void closeRec(); + + YangMp4File *mp4; + YangFlvWriter *flv; + YangAudioInfo *m_audio; + YangVideoInfo *m_video; + YangVideoEncInfo *m_enc; + YangAudioEncoderBuffer *m_in_audioBuffer; + YangVideoEncoderBuffer *m_in_videoBuffer; + int64_t oldalltick,alltick; + int32_t curVideoTimestamp; + long videoDestLen; + int32_t frameType; + uint8_t *srcVideoSource; + uint8_t *srcAudioSource; + //int32_t audioBufLen; + int64_t m_videoTimestamp,m_preVideotimestamp,m_startStamp,m_mp4Stamp; + // int32_t videoBufLen; + + int32_t m_isConvert; + void writeVideoData(); + void writeAudioData(); + +private: + int64_t m_prePauseTime,m_afterPauseTime; + int64_t m_pauseTime; + int64_t m_alltime1; + int32_t m_alltime; + int32_t m_fileTimeLen; + int32_t m_isCreateNewFile; + YangFrame m_videoFrame; + YangFrame m_audioFrame; + YangMp4FilePara m_mp4Para; + void createNewfile(); + void createFile(char* filename); + void initRecPara(); + int32_t m_fileId; +}; + +#endif + diff --git a/include/yangrtmp/YangRtmpHandle.h b/include/yangrtmp/YangRtmpHandle.h new file mode 100755 index 00000000..828adb4b --- /dev/null +++ 
b/include/yangrtmp/YangRtmpHandle.h @@ -0,0 +1,60 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGRTMP_YANGRTMPHANDLE_H_ +#define INCLUDE_YANGRTMP_YANGRTMPHANDLE_H_ +#include +#include +#define RTMP_PACKET_TYPE_AUDIO 0x08 +#define RTMP_PACKET_TYPE_VIDEO 0x09 +#define RTMP_PACKET_SIZE_LARGE 0 +#define RTMP_PACKET_SIZE_MEDIUM 1 +#define RTMP_PACKET_SIZE_SMALL 2 +#define RTMP_PACKET_SIZE_MINIMUM 3 +#define RTMP_MAX_HEADER_SIZE 18 +#define RTMP_MAX_BODY_SIZE 512000 +#define RTMPPacket_IsReady(a) ((a)->m_nBytesRead == (a)->m_nBodySize) +typedef struct RTMPChunk { + int32_t c_headerSize; + int32_t c_chunkSize; + char *c_chunk; + char c_header[18]; +}RTMPChunk; + +typedef struct RTMPPacket { + uint8_t m_headerType; + uint8_t m_packetType; + uint8_t m_hasAbsTimestamp; /* timestamp absolute or relative? */ + int32_t m_nChannel; + uint32_t m_nTimeStamp; /* timestamp */ + int32_t m_nInfoField2; /* last 4 bytes in a long header */ + uint32_t m_nBodySize; + uint32_t m_nBytesRead; + RTMPChunk *m_chunk; + char *m_body; +}RTMPPacket; +typedef struct{ + void *context; + void (*initRTMP)(void *context,char *serverIp, char *app, int32_t port); + int32_t (*isConnect)(void *context); + int32_t (*getStreamId)(void* context); + int32_t (*sendPacket)(void *context,RTMPPacket *packet, int32_t queue); + int32_t (*yangReadPacket)(void *context,RTMPPacket *packet, char *pszbody); + + int32_t (*connectRtmpServer)(void *context,YangStreamOptType pisPublish,char *serverIp,char *app,int32_t port); + int32_t (*HandleStream)(void *context,char *psName,YangStreamOptType pisPublish,int32_t puid); + int32_t (*clientPacket)(void *context,RTMPPacket *packet); + void (*RTMP_Close)(void *context); +}YangRtmpHandle; +#ifdef __cplusplus +extern "C"{ +#endif + +void yang_create_rtmp(YangRtmpHandle* rtmp); +void yang_destroy_rtmp(YangRtmpHandle* rtmp); + +#ifdef __cplusplus +} +#endif +#endif /* INCLUDE_YANGRTMP_YANGRTMPHANDLE_H_ */ diff --git a/include/yangsrt/YangSrtBase.h b/include/yangsrt/YangSrtBase.h new file mode 100755 index 00000000..86a1006f --- /dev/null +++ b/include/yangsrt/YangSrtBase.h @@ -0,0 +1,75 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGSRTBASE_H_ +#define YANGSRTBASE_H_ +#include "stdint.h" +#ifdef _WIN32 +#include +#include +#include +#endif +#include +#include "srt/srt.h" +#include "yangutil/YangErrorCode.h" +#include "yangutil/sys/YangLoadLib.h" + +#define YangSrtBase_strlen 128 +#define YangSrtUnitLen 188 +#define Yang_Srt_CacheSize 4096 + +class YangSrtBase { +public: + YangSrtBase(); + virtual ~YangSrtBase(); + int32_t init(char *pserver,int32_t pport); + int32_t publish(char* message,int32_t len); + int32_t receive(char *p,int32_t *len); + //int32_t receive2(char *p,int32_t *len); + void startLoop(); + void closeSrt(); + int32_t initConnect(char *streamId); + int32_t connectServer(); + int32_t getSrtSocketStatus(); +#ifdef _WIN32 + int32_t yang_inet_pton(int32_t af, const char * src, void * dst); +#endif + int64_t get_bitrate(); + int32_t m_errState; + int32_t m_contextt; +private: + int32_t m_port; + int32_t m_sfd; + int32_t m_eid; + char m_server[YangSrtBase_strlen]; + int64_t m_dataCount; + int64_t m_bitRate; + int64_t m_beginTm; + + YangLoadLib m_lib; + void loadLib(); + void unloadLib(); + int32_t (*yang_srt_epoll_create)(void); + int32_t (*yang_srt_epoll_remove_usock)(int32_t eid, SRTSOCKET u); + int32_t (*yang_srt_epoll_release)(int32_t eid); + int32_t (*yang_srt_close) (SRTSOCKET u); + int32_t (*yang_srt_cleanup)(void); + int32_t 
(*yang_srt_startup)(void); + SRT_SOCKSTATUS (*yang_srt_getsockstate)(SRTSOCKET u); + void (*yang_srt_setloglevel)(int32_t ll); + SRTSOCKET (*yang_srt_create_socket)(void); + int32_t (*yang_srt_epoll_set)(int32_t eid, int32_t flags); + int32_t (*yang_srt_epoll_add_usock)(int32_t eid, SRTSOCKET u, const int* events); + int32_t (*yang_srt_setsockopt) (SRTSOCKET u, int32_t level /*ignored*/, SRT_SOCKOPT optname, const void* optval, int32_t optlen); + int32_t (*yang_srt_setsockflag) (SRTSOCKET u, SRT_SOCKOPT opt, const void* optval, int32_t optlen); + int32_t (*yang_srt_connect ) (SRTSOCKET u, const struct sockaddr* name, int32_t namelen); + int32_t (*yang_srt_epoll_wait)(int32_t eid, SRTSOCKET* readfds, int* rnum, SRTSOCKET* writefds, int* wnum, int64_t msTimeOut, + SYSSOCKET* lrfds, int* lrnum, SYSSOCKET* lwfds, int* lwnum); + int32_t (*yang_srt_sendmsg) (SRTSOCKET u, const char* buf, int32_t len, int32_t ttl/* = -1*/, int32_t inorder/* = false*/); + int32_t (*yang_srt_recvmsg) (SRTSOCKET u, char* buf, int32_t len); + int32_t (*yang_srt_getlasterror)(int* errno_loc); + const char* (*yang_srt_getlasterror_str)(void); +}; + +#endif /* YANGSRTBASE_H_ */ diff --git a/include/yangsrt/YangTsBuffer.h b/include/yangsrt/YangTsBuffer.h new file mode 100755 index 00000000..b05ba0d5 --- /dev/null +++ b/include/yangsrt/YangTsBuffer.h @@ -0,0 +1,56 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __SIMPLE_BUFFER_H__ +#define __SIMPLE_BUFFER_H__ + +#include +#include +#include + +// only support little endian +class YangTsBuffer +{ +public: + YangTsBuffer(); + YangTsBuffer(int32_t size, char value); + virtual ~YangTsBuffer(); + +public: + void write_1byte(char val); + void write_2bytes(int16_t val); + void write_3bytes(int32_t val); + void write_4bytes(int32_t val); + void write_8bytes(int64_t val); + void writeBytes(uint8_t* bytes,int32_t size); + void append( uint8_t* bytes, int32_t size); + +public: + char read_1byte(); + int16_t read_2bytes(); + int32_t read_3bytes(); + int32_t read_4bytes(); + int64_t read_8bytes(); + void readBytes(uint8_t *p,int32_t len); + std::string read_string(int32_t len); + +public: + void skip(int32_t size); + bool require(int32_t required_size); + bool empty(); + int32_t size(); + int32_t pos(); + uint8_t *data(); + void clear(); + void set_data(int32_t pos, const uint8_t *data, int32_t len); + +public: + std::string to_string(); + +private: + uint8_t _data[188*2]; + int32_t _pos; + int32_t curPos; +}; + +#endif /* __SIMPLE_BUFFER_H__ */ diff --git a/include/yangsrt/YangTsMuxer.h b/include/yangsrt/YangTsMuxer.h new file mode 100755 index 00000000..ac7d92c5 --- /dev/null +++ b/include/yangsrt/YangTsMuxer.h @@ -0,0 +1,114 @@ +// +// Copyright (c) 2019-2022 yanggaofeng akanchi +// + +#ifndef YANGTS_YANGTSMUXER_H_ +#define YANGTS_YANGTSMUXER_H_ +#include "stdint.h" +#include +#include + +#include "YangTsBuffer.h" +#include "YangTsPacket.h" +#include "YangTsPid.h" +using namespace std; + +/** +static const int32_t DEFAULT_PCR_PID = 4097; +static const int32_t DEFAULT_PMT_PID = 256; + +static const uint8_t SEQUENCE_END_CODE = 0xb7; +static const uint8_t ISO_11172_END_CODE = 0xb9; +static const uint8_t PACK_START_CODE = 0xba; +static const uint8_t SYSTEM_HEADER_START_CODE = 0xbb; +static const uint8_t PES_PROGRAM_STREAM_MAP = 0xbc; +static const uint8_t PES_PRIVATE_DATA1 = 0xbd; +static const uint8_t PADDING_STREAM = 0xbe; +static const uint8_t PES_PRIVATE_DATA2 = 0xbf; +static const uint8_t PROGRAM_STREAM_DIRECTORY = 0xff; + +static const uint8_t PES_AUDIO_ID = 0xc0; 
+static const uint8_t PES_VIDEO_ID = 0xe1; +static const uint8_t PES_VC1_ID = 0xfd; + +static const uint8_t DVB_SUBT_DESCID = 0x59; + +static const uint8_t STREAM_TYPE_VIDEO_MPEG1 = 0x01; +static const uint8_t STREAM_TYPE_VIDEO_MPEG2 = 0x02; +static const uint8_t STREAM_TYPE_PRIVATE_SECTION = 0x05; +static const uint8_t STREAM_TYPE_PRIVATE_DATA = 0x06; +static const uint8_t STREAM_TYPE_VIDEO_MPEG4 = 0x10; +static const uint8_t STREAM_TYPE_VIDEO_H264 = 0x1b; +static const uint8_t STREAM_TYPE_VIDEO_MVC = 0x20; +static const uint8_t STREAM_TYPE_VIDEO_H265 = 0x24; +static const uint8_t STREAM_TYPE_VIDEO_VC1 = 0xea; + +static const uint8_t STREAM_TYPE_AUDIO_MPEG1 = 0x03; +static const uint8_t STREAM_TYPE_AUDIO_MPEG2 = 0x04; +static const uint8_t STREAM_TYPE_AUDIO_AAC = 0x0f; +static const uint8_t STREAM_TYPE_AUDIO_AAC_RAW = 0x11; +static const uint8_t STREAM_TYPE_AUDIO_DTS = 0x82; // 0x8a + +static const uint8_t STREAM_TYPE_AUDIO_LPCM = 0x80; +static const uint8_t STREAM_TYPE_AUDIO_AC3 = 0x81; +static const uint8_t STREAM_TYPE_AUDIO_EAC3 = 0x84; +static const uint8_t STREAM_TYPE_AUDIO_EAC3_ATSC = 0x87; +static const uint8_t STREAM_TYPE_AUDIO_EAC3_TRUE_HD = 0x83; +static const uint8_t STREAM_TYPE_AUDIO_DTS_HD = 0x85; +static const uint8_t STREAM_TYPE_AUDIO_DTS_HD_MASTER_AUDIO = 0x86; + +static const uint8_t STREAM_TYPE_AUDIO_EAC3_SECONDARY = 0xA1; +static const uint8_t STREAM_TYPE_AUDIO_DTS_HD_SECONDARY = 0xA2; + +static const uint8_t STREAM_TYPE_AUDIO_VC9 = 0x88; +static const uint8_t STREAM_TYPE_AUDIO_OPUS = 0x89; +static const uint8_t STREAM_TYPE_SUB_PGS = 0x90; + +static const uint8_t STREAM_TYPE_SUBTITLE_DVB = 0x00; +**/ +#define pat_interval 50 +enum YangTsStream{ + TS_H264, + TS_H265, + TS_AAC, + TS_OPUS, + TS_PRIVATE + +}; +struct YangTsPes{ + uint8_t *data; + int32_t len; + int32_t pos; + uint64_t pts; + uint64_t dts; + uint64_t pcr; + uint8_t stream_type; + uint8_t stream_id; + uint16_t pid; +}; +class YangTsMuxer { +public: + YangTsMuxer(); + virtual ~YangTsMuxer(); + void create_pat(YangTsBuffer *sb, uint16_t pmt_pid, uint8_t cc); + void create_pmt(YangTsBuffer *sb,uint8_t cc); + void create_ts(YangTsPes *frame,vector *sb); + void create_pes(YangTsPes *frame,uint8_t *p,int32_t plen,int32_t frametype,int64_t timestamp,YangTsStream streamType); + void create_pcr(YangTsBuffer *sb); + void create_null(YangTsBuffer *sb); + void encode(uint8_t* p,int32_t plen,int32_t frametype,int64_t timestamp ,YangTsStream streamType,vector *sb); + void encodeWithPmt(uint8_t* p,int32_t plen,int32_t frametype,int64_t timestamp ,YangTsStream streamType,vector *sb); + //void encodePmt(vector *sb); + void encodePmtWithoutData(vector *sb); + std::map m_stream_pid_map; +private: + uint8_t get_cc(uint32_t with_pid); + bool should_create_pat(); + int32_t m_pmt_pid; + + int32_t current_index; +private: + std::map _pid_cc_map; +}; + +#endif /* YANGTS_YANGTSMUXER_H_ */ diff --git a/include/yangsrt/YangTsPacket.h b/include/yangsrt/YangTsPacket.h new file mode 100755 index 00000000..bb0b9546 --- /dev/null +++ b/include/yangsrt/YangTsPacket.h @@ -0,0 +1,191 @@ +// +// Copyright (c) 2019-2022 yanggaofeng akanchi +// + +#include +#include +#include +#include + +class YangTsBuffer; + +class MpegTsStream +{ +public: + static const uint8_t AAC = 0x0f; + static const uint8_t AVC = 0x1b; +}; + +class TsFrame +{ +public: + TsFrame(); + TsFrame(uint8_t st); + virtual ~TsFrame(){}; + +public: + bool empty(); + void reset(); + +public: + std::shared_ptr _data; + uint64_t pts; + uint64_t dts; + uint64_t pcr; + uint8_t 
stream_type; + uint8_t stream_id; + uint16_t pid; + uint16_t expected_pes_packet_length; + bool completed; +}; + +class TsHeader +{ +public: + TsHeader(); + virtual ~TsHeader(); + +public: + void encode(YangTsBuffer *sb); + void decode(YangTsBuffer *sb); + +public: + uint8_t sync_byte; // 8 bits + uint8_t transport_error_indicator; // 1 bit + uint8_t payload_unit_start_indicator; // 1 bit + uint8_t transport_priority; // 1 bit + uint16_t pid; // 13 bits + uint8_t transport_scrambling_control; // 2 bits + uint8_t adaptation_field_control; // 2 bits + uint8_t continuity_counter; // 4 bits +}; + +class PATHeader +{ +public: + PATHeader(); + virtual ~PATHeader(); + +public: + void encode(YangTsBuffer *sb); + void decode(YangTsBuffer *sb); + void print(); + +public: + uint8_t table_id; // 8 bits + uint8_t section_syntax_indicator; // 1 bit + uint8_t b0; // 1 bit + uint8_t reserved0; // 2 bits + uint16_t section_length; // 12 bits + uint16_t transport_stream_id; // 16 bits + uint8_t reserved1; // 2 bits + uint8_t version_number; // 5 bits + uint8_t current_next_indicator; // 1 bit + uint8_t section_number; // 8 bits + uint8_t last_section_number; // 8 bits +}; + +class PMTElementInfo +{ +public: + PMTElementInfo(); + PMTElementInfo(uint8_t st, uint16_t pid); + virtual ~PMTElementInfo(); + +public: + void encode(YangTsBuffer *sb); + void decode(YangTsBuffer *sb); + uint16_t size(); + void print(); + +public: + uint8_t stream_type; // 8 bits + uint8_t reserved0; // 3 bits + uint16_t elementary_PID; // 13 bits + uint8_t reserved1; // 4 bits + uint16_t ES_info_length; // 12 bits + std::string ES_info; +}; + +class PMTHeader +{ +public: + PMTHeader(); + virtual ~PMTHeader(); + +public: + void encode(YangTsBuffer *sb); + void decode(YangTsBuffer *sb); + uint16_t size(); + void print(); + +public: + uint8_t table_id; // 8 bits + uint8_t section_syntax_indicator; // 1 bit + uint8_t b0; // 1 bit + uint8_t reserved0; // 2 bits + uint16_t section_length; // 12 bits + uint16_t program_number; // 16 bits + uint8_t reserved1; // 2 bits + uint8_t version_number; // 5 bits + uint8_t current_next_indicator; // 1 bit + uint8_t section_number; // 8 bits + uint8_t last_section_number; // 8 bits + uint8_t reserved2; // 3 bits + uint16_t PCR_PID; // 13 bits + uint8_t reserved3; // 4 bits + uint16_t program_info_length; // 12 bits + std::vector> infos; +}; + +class AdaptationFieldHeader +{ +public: + AdaptationFieldHeader(); + virtual ~AdaptationFieldHeader(); + +public: + void encode(YangTsBuffer *sb); + void decode(YangTsBuffer *sb); + +public: + uint8_t adaptation_field_length; // 8 bits + uint8_t adaptation_field_extension_flag; // 1 bit + uint8_t transport_private_data_flag; // 1 bit + uint8_t splicing_point_flag; // 1 bit + uint8_t opcr_flag; // 1 bit + uint8_t pcr_flag; // 1 bit + uint8_t elementary_stream_priority_indicator; // 1 bit + uint8_t random_access_indicator; // 1 bit + uint8_t discontinuity_indicator; // 1 bit +}; + +class PESHeader +{ +public: + PESHeader(); + virtual ~PESHeader(); + +public: + void encode(YangTsBuffer *sb); + void decode(YangTsBuffer *sb); + +public: + uint32_t packet_start_code; // 24 bits + uint8_t stream_id; // 8 bits + uint16_t pes_packet_length; // 16 bits + uint8_t original_or_copy; // 1 bit + uint8_t copyright; // 1 bit + uint8_t data_alignment_indicator; // 1 bit + uint8_t pes_priority; // 1 bit + uint8_t pes_scrambling_control; // 2 bits + uint8_t marker_bits; // 2 bits + uint8_t pes_ext_flag; // 1 bit + uint8_t pes_crc_flag; // 1 bit + uint8_t add_copy_info_flag; // 1 bit 
+ uint8_t dsm_trick_mode_flag; // 1 bit + uint8_t es_rate_flag; // 1 bit + uint8_t escr_flag; // 1 bit + uint8_t pts_dts_flags; // 2 bits + uint8_t header_data_length; // 8 bits +}; diff --git a/include/yangsrt/YangTsPid.h b/include/yangsrt/YangTsPid.h new file mode 100755 index 00000000..a1adb614 --- /dev/null +++ b/include/yangsrt/YangTsPid.h @@ -0,0 +1,77 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef SRC_YANGSRT_INCLUDE_YANGTSPID_H_ +#define SRC_YANGSRT_INCLUDE_YANGTSPID_H_ + + +/* mpegts stream type in ts pmt +Value Description +0x00 ITU-T | ISO/IEC Reserved +0x01 ISO/IEC 11172-2 Video (mpeg video v1) +0x02 ITU-T Rec. H.262 | ISO/IEC 13818-2 Video(mpeg video v2)or ISO/IEC 11172-2 constrained parameter video stream +0x03 ISO/IEC 11172-3 Audio (MPEG 1 Audio codec Layer I, Layer II and Layer III audio specifications) +0x04 ISO/IEC 13818-3 Audio (BC Audio Codec) +0x05 ITU-T Rec. H.222.0 | ISO/IEC 13818-1 private_sections +0x06 ITU-T Rec. H.222.0 | ISO/IEC 13818-1 PES packets containing private data +0x07 ISO/IEC 13522 MHEG +0x08 ITU-T Rec. H.222.0 | ISO/IEC 13818-1 Annex A DSM-CC +0x09 ITU-T Rec. H.222.1 +0x0A ISO/IEC 13818-6 type A +0x0B ISO/IEC 13818-6 type B +0x0C ISO/IEC 13818-6 type C +0x0D ISO/IEC 13818-6 type D +0x0E ITU-T Rec. H.222.0 | ISO/IEC 13818-1 auxiliary +0x0F ISO/IEC 13818-7 Audio with ADTS transport syntax +0x10 ISO/IEC 14496-2 Visual +0x11 ISO/IEC 14496-3 Audio with the LATM transport syntax as defined in ISO/IEC 14496-3/Amd.1 +0x12 ISO/IEC 14496-1 SL-packetized stream or FlexMux stream carried in PES packets +0x13 ISO/IEC 14496-1 SL-packetized stream or FlexMux stream carried in ISO/IEC 14496_sections +0x14 ISO/IEC 13818-6 Synchronized Download Protocol +0x15 Metadata carried in PES packets +0x16 Metadata carried in metadata_sections +0x17 Metadata carried in ISO/IEC 13818-6 Data Carousel +0x18 Metadata carried in ISO/IEC 13818-6 Object Carousel +0x19 Metadata carried in ISO/IEC 13818-6 Synchronized Download Protocol +0x1A IPMP stream (defined in ISO/IEC 13818-11, MPEG-2 IPMP) +0x1B AVC video stream as defined in ITU-T Rec. H.264 | ISO/IEC 14496-10 Video (h.264) +0x1C ISO/IEC 14496-3 Audio, without using any additional transport syntax, such as DST, ALS and SLS +0x1D ISO/IEC 14496-17 Text +0x1E Auxiliary video stream as defined in ISO/IEC 23002-3 (AVS) +0x1F-0x7E ITU-T Rec. 
H.222.0 | ISO/IEC 13818-1 Reserved +0x7F IPMP stream 0x80-0xFF User Private +*/ +#define STREAM_TYPE_VIDEO_MPEG1 0x01 +#define STREAM_TYPE_VIDEO_MPEG2 0x02 +#define STREAM_TYPE_AUDIO_MPEG1 0x03 +#define STREAM_TYPE_AUDIO_MPEG2 0x04 +#define STREAM_TYPE_PRIVATE_SECTION 0x05 +#define STREAM_TYPE_PRIVATE_DATA 0x06 +#define STREAM_TYPE_AUDIO_AAC 0x0f +#define STREAM_TYPE_AUDIO_AAC_LATM 0x11 + +#define STREAM_TYPE_VIDEO_MPEG4 0x10 +#define STREAM_TYPE_METADATA 0x15 +#define STREAM_TYPE_VIDEO_H264 0x1b +#define STREAM_TYPE_VIDEO_HEVC 0x24 +#define STREAM_TYPE_VIDEO_CAVS 0x42 +#define STREAM_TYPE_VIDEO_VC1 0xea +#define STREAM_TYPE_VIDEO_DIRAC 0xd1 + +#define STREAM_TYPE_AUDIO_AC3 0x81 +#define STREAM_TYPE_AUDIO_DTS 0x82 +#define STREAM_TYPE_AUDIO_TRUEHD 0x83 +#define STREAM_TYPE_AUDIO_EAC3 0x87 + +#define STREAM_TYPE_AUDIO_OPUS 0x12 +#define PES_AUDIO_ID 0xc0 +#define PES_VIDEO_ID 0xe0 +//#define PES_VIDEO_ID 0xe1 +#define Yang_H264_PID 225 +#define Yang_H265_PID 226 +#define Yang_AAC_PID 192 +#define Yang_OPUS_PID 193 +#define Yang_PRIVATE_PID 200 + +#endif /* SRC_YANGSRT_INCLUDE_YANGTSPID_H_ */ diff --git a/include/yangsrt/YangTsdemux.h b/include/yangsrt/YangTsdemux.h new file mode 100755 index 00000000..fb4acbf3 --- /dev/null +++ b/include/yangsrt/YangTsdemux.h @@ -0,0 +1,185 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGSRT_INCLUDE_YANGTSDEMUX_H_ +#define YANGSRT_INCLUDE_YANGTSDEMUX_H_ +#include +#include +#include +#include +#include +#include "srt_data.hpp" +#include "yangutil/YangErrorCode.h" +#include "YangTsPid.h" + + +typedef struct ts_media_data_callback_I { + void* context; + void (*on_data_callback)(void* context,SRT_DATA_MSG_PTR data_ptr, uint32_t media_type, uint64_t dts, uint64_t pts); +}ts_media_data_callback_I; + +typedef std::shared_ptr TS_DATA_CALLBACK_PTR; + +class adaptation_field { +public: + adaptation_field(){}; + ~adaptation_field(){}; + +public: + uint8_t _adaptation_field_length; + + uint8_t _discontinuity_indicator:1; + uint8_t _random_access_indicator:1; + uint8_t _elementary_stream_priority_indicator:1; + uint8_t _PCR_flag:1; + uint8_t _OPCR_flag:1; + uint8_t _splicing_point_flag:1; + uint8_t _transport_private_data_flag:1; + uint8_t _adaptation_field_extension_flag:1; + + //if(PCR_flag == '1') + unsigned long _program_clock_reference_base;//33 bits + unsigned short _program_clock_reference_extension;//9bits + //if (OPCR_flag == '1') + unsigned long _original_program_clock_reference_base;//33 bits + unsigned short _original_program_clock_reference_extension;//9bits + //if (splicing_point_flag == '1') + uint8_t _splice_countdown; + //if (transport_private_data_flag == '1') + uint8_t _transport_private_data_length; + uint8_t _private_data_byte[256]; + //if (adaptation_field_extension_flag == '1') + uint8_t _adaptation_field_extension_length; + uint8_t _ltw_flag; + uint8_t _piecewise_rate_flag; + uint8_t _seamless_splice_flag; + uint8_t _reserved0; + //if (ltw_flag == '1') + unsigned short _ltw_valid_flag:1; + unsigned short _ltw_offset:15; + //if (piecewise_rate_flag == '1') + uint32_t _piecewise_rate;//22bits + //if (seamless_splice_flag == '1') + uint8_t _splice_type;//4bits + uint8_t _DTS_next_AU1;//3bits + uint8_t _marker_bit1;//1bit + unsigned short _DTS_next_AU2;//15bit + uint8_t _marker_bit2;//1bit + unsigned short _DTS_next_AU3;//15bit +}; + +class ts_header { +public: + ts_header(){} + ~ts_header(){} + +public: + uint8_t _sync_byte; + + unsigned short _transport_error_indicator:1; + unsigned short _payload_unit_start_indicator:1; + 
unsigned short _transport_priority:1; + unsigned short _PID:13; + + uint8_t _transport_scrambling_control:2; + uint8_t _adaptation_field_control:2; + uint8_t _continuity_counter:4; + + adaptation_field _adaptation_field_info; +}; + +typedef struct { + unsigned short _program_number; + unsigned short _pid; + unsigned short _network_id; +} PID_INFO; + +class pat_info { +public: + pat_info(){}; + ~pat_info(){}; + +public: + uint8_t _table_id; + + unsigned short _section_syntax_indicator:1; + unsigned short _reserved0:1; + unsigned short _reserved1:2; + unsigned short _section_length:12; + + unsigned short _transport_stream_id; + + uint8_t _reserved3:2; + uint8_t _version_number:5; + uint8_t _current_next_indicator:1; + + uint8_t _section_number; + uint8_t _last_section_number; + std::vector _pid_vec; +}; + +typedef struct { + uint8_t _stream_type; + unsigned short _reserved1:3; + unsigned short _elementary_PID:13; + unsigned short _reserved:4; + unsigned short _ES_info_length; + uint8_t _dscr[4096]; + uint32_t _crc_32; +} STREAM_PID_INFO; + +class pmt_info { +public: + pmt_info(){}; + ~pmt_info(){}; +public: + uint8_t _table_id; + unsigned short _section_syntax_indicator:1; + unsigned short _reserved1:1; + unsigned short _reserved2:2; + unsigned short _section_length:12; + unsigned short _program_number:16; + uint8_t _reserved:2; + uint8_t _version_number:5; + uint8_t _current_next_indicator:5; + uint8_t _section_number; + uint8_t _last_section_number; + unsigned short _reserved3:3; + unsigned short _PCR_PID:13; + unsigned short _reserved4:4; + unsigned short _program_info_length:12; + uint8_t _dscr[4096]; + + std::unordered_map _pid2steamtype; + std::vector _stream_pid_vec; +}; + +class YangTsdemux { +public: + YangTsdemux(); + virtual ~YangTsdemux(); + int32_t decode(SRT_DATA_MSG_PTR data_ptr, ts_media_data_callback_I* callback); + + private: + int32_t decode_unit(uint8_t* data_p,ts_media_data_callback_I* callback); + bool is_pmt(unsigned short pmt_id); + int32_t pes_parse(uint8_t* p, size_t npos, uint8_t** ret_pp, size_t& ret_size, + uint64_t& dts, uint64_t& pts,int32_t *pesLen); + void insert_into_databuf(uint8_t* data_p, size_t data_size, unsigned short pid); + void on_callback(ts_media_data_callback_I* callback, unsigned short pid, uint64_t dts, uint64_t pts); + + private: + std::string _key_path;//only for srt + + pat_info _pat; + pmt_info _pmt; + std::map> _data_buffer_map; + //std::vector _data_buffer_vec; + size_t _data_total; + unsigned short _last_pid; + uint64_t _last_dts; + uint64_t _last_pts; +}; +typedef std::shared_ptr TS_DEMUX_PTR; +#endif /* YANGSRT_INCLUDE_YANGTSDEMUX_H_ */ diff --git a/include/yangsrt/common.h b/include/yangsrt/common.h new file mode 100755 index 00000000..cdb1e3cb --- /dev/null +++ b/include/yangsrt/common.h @@ -0,0 +1,30 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef MPEGTS_COMMON_H +#define MPEGTS_COMMON_H + +#include +#include "yangutil/YangErrorCode.h" +class MpegTsAdaptationFieldType +{ +public: + // Reserved for future use by ISO/IEC + static const uint8_t reserved = 0x00; + // No adaptation_field, payload only + static const uint8_t payload_only = 0x01; + // Adaptation_field only, no payload + static const uint8_t adaption_only = 0x02; + // Adaptation_field followed by payload + static const uint8_t payload_adaption_both = 0x03; +}; + +class YangTsBuffer; + +extern void write_pcr(YangTsBuffer *sb, uint64_t pcr); +extern void write_pts(YangTsBuffer *sb, uint32_t fb, uint64_t pts); + +extern uint64_t read_pts(YangTsBuffer *sb); +extern 
uint64_t read_pcr(YangTsBuffer *sb); + +#endif //MPEGTS_COMMON_H diff --git a/include/yangsrt/crc.h b/include/yangsrt/crc.h new file mode 100755 index 00000000..13ab1e41 --- /dev/null +++ b/include/yangsrt/crc.h @@ -0,0 +1,54 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include + +static const uint32_t crc_table[256] = { + 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b, + 0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, + 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7, + 0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75, + 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3, + 0x709f7b7a, 0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039, + 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef, + 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d, + 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb, + 0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1, + 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0, + 0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, + 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4, + 0x0808d07d, 0x0cc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde, + 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08, + 0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba, + 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc, + 0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6, + 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7, 0xe4750050, + 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2, + 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34, + 0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, + 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1, + 0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53, + 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5, + 0x3f9b762c, 0x3b5a6b9b, 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, + 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9, + 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b, + 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd, + 0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7, + 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71, + 0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, + 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2, + 0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8, + 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e, + 0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec, + 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a, + 0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0, + 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1, 0xe760d676, + 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4, + 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662, + 0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, + 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4 +}; + +extern uint32_t crc32(const uint8_t *data, int32_t len); 
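The table above is the standard MPEG-2 CRC-32 lookup table (MSB-first, generator polynomial 0x04C11DB7) used to protect PSI sections such as the PAT and PMT. The definition of the declared crc32() lives in the corresponding source file, which this excerpt does not show; a minimal sketch consistent with this table would be:

// MPEG-2 PSI CRC-32: initial value 0xffffffff, no reflection, no final XOR.
uint32_t crc32(const uint8_t *data, int32_t len) {
    uint32_t crc = 0xffffffff;
    for (int32_t i = 0; i < len; i++) {
        // Shift the high byte out and fold in the next input byte, table-driven.
        crc = (crc << 8) ^ crc_table[((crc >> 24) ^ data[i]) & 0xff];
    }
    return crc;
}

A muxer computes this over a PSI section from table_id up to, but not including, the CRC_32 field and appends the 4-byte result; a demuxer that runs the same loop over the whole section including CRC_32 gets 0 back when the data is intact.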
+ diff --git a/include/yangsrt/srt_data.hpp b/include/yangsrt/srt_data.hpp new file mode 100755 index 00000000..535a261a --- /dev/null +++ b/include/yangsrt/srt_data.hpp @@ -0,0 +1,33 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef SRT_DATA_H +#define SRT_DATA_H +#include +#include + +#define SRT_MSG_DATA_TYPE 0x01 +#define SRT_MSG_CLOSE_TYPE 0x02 + +class SRT_DATA_MSG { +public: + SRT_DATA_MSG(); + SRT_DATA_MSG(uint32_t len); + SRT_DATA_MSG(uint8_t* data_p, uint32_t len); + ~SRT_DATA_MSG(); + + uint32_t msg_type(); + uint32_t data_len(); + uint8_t* get_data(); + // std::string get_path(); + +private: + uint32_t _msg_type; + uint32_t _len; + uint8_t* _data_p; + // std::string _key_path; +}; + +typedef std::shared_ptr SRT_DATA_MSG_PTR; + +#endif diff --git a/include/yangstream/YangStream.h b/include/yangstream/YangStream.h new file mode 100755 index 00000000..f50af5e1 --- /dev/null +++ b/include/yangstream/YangStream.h @@ -0,0 +1,46 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGSTREAM_YANGSTREAM_H_ +#define INCLUDE_YANGSTREAM_YANGSTREAM_H_ +#include +#include +#include +#include "YangStreamType.h" +typedef struct{ + int32_t transtype; + int32_t streamInit; + int32_t uid; + int32_t audioStream; + int32_t videoStream; + int32_t netState; + void* context; + YangAVInfo* avinfo; + YangContextStream* stream; + YangReceiveCallback *data; + YangStreamConfig streamconfig; +}YangStreamContext; +typedef struct { + YangStreamContext* context; + //void (*init)(YangStreamContext* context,YangStreamConfig *pconf); + int32_t (*connectServer)(YangStreamContext* context); + int32_t (*disConnectServer)(YangStreamContext* context); + int32_t (*reconnect)(YangStreamContext* context); + int32_t (*receiveData)(YangStreamContext* context,int32_t *plen); + int32_t (*publishVideoData)(YangStreamContext* context,YangStreamCapture *videoFrame); + int32_t (*publishAudioData)(YangStreamContext* context,YangStreamCapture *audioFrame); + int32_t (*getConnectState)(YangStreamContext* context); + int32_t (*isconnected)(YangStreamContext* context); + + +}YangStreamHandle; +#ifdef __cplusplus +extern "C"{ +#endif +void yang_create_stream(int32_t transType,YangStreamHandle* streamHandle,int32_t puid,YangStreamConfig* streamconfig,YangAVInfo* pcontext,YangContextStream* stream,YangReceiveCallback* callback) ; +void yang_destroy_stream(YangStreamHandle* stream); +#ifdef __cplusplus +} +#endif +#endif /* INCLUDE_YANGSTREAM_YANGSTREAM_H_ */ diff --git a/include/yangstream/YangStreamCapture.h b/include/yangstream/YangStreamCapture.h new file mode 100755 index 00000000..455cf02f --- /dev/null +++ b/include/yangstream/YangStreamCapture.h @@ -0,0 +1,43 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGSTREAM_YANGSTREAMCAPTURE_H_ +#define INCLUDE_YANGSTREAM_YANGSTREAMCAPTURE_H_ +#include +#include + +typedef struct{ + void* context; + uint8_t* (*getVideoData)(void* pcontext); + int32_t (*getVideoLen)(void* pcontext); + int32_t (*getVideoFrametype)(void* pcontext); + int64_t (*getVideoTimestamp)(void* pcontext); + + uint8_t* (*getAudioData)(void* pcontext); + int32_t (*getAudioLen)(void* pcontext); + int64_t (*getAudioTimestamp)(void* pcontext); + YangAudioCodec (*getAudioType)(void* pcontext); + + void (*initVideo)(void* pcontext,int32_t transtype); + void (*setVideoData)(void* pcontext,YangFrame *videoFrame, YangVideoCodec videoType); + void (*setVideoMeta)(void* pcontext,uint8_t *p, int32_t plen, YangVideoCodec videoType); + void (*setMetaTimestamp)(void* 
pcontext,int64_t timestamp); + void (*setVideoFrametype)(void* pcontext,int32_t frametype); + + void (*setAudioData)(void* pcontext,YangFrame *audioFrame); + void (*setAudioMetaData)(void* pcontext,uint8_t *p, int32_t plen); + void (*setAudioFrametype)(void* pcontext,int32_t frametype); + void (*initAudio)(void* pcontext,int32_t transType,int32_t sample,int32_t channel,YangAudioCodec audioType); + + +}YangStreamCapture; +#ifdef __cplusplus +extern "C"{ +#endif +void yang_create_streamCapture(YangStreamCapture* stream); +void yang_destroy_streamCapture(YangStreamCapture* stream); +#ifdef __cplusplus +} +#endif +#endif /* INCLUDE_YANGSTREAM_YANGSTREAMCAPTURE_H_ */ diff --git a/include/yangstream/YangStreamHandle.h b/include/yangstream/YangStreamHandle.h new file mode 100755 index 00000000..2639e4a4 --- /dev/null +++ b/include/yangstream/YangStreamHandle.h @@ -0,0 +1,12 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGSTREAM_INCLUDE_YANGSTREAMBASE_H_ +#define YANGSTREAM_INCLUDE_YANGSTREAMBASE_H_ + +#include + +void yang_create_streamHandle(int32_t transType,YangStreamHandle* streamHandle,int32_t puid,YangStreamConfig* streamconfig,YangAVInfo* pcontext,YangContextStream* stream,YangReceiveCallback* callback); +void yang_destroy_streamHandle(YangStreamHandle* stream); + +#endif /* YANGSTREAM_INCLUDE_YANGSTREAMBASE_H_ */ diff --git a/include/yangstream/YangStreamManager.h b/include/yangstream/YangStreamManager.h new file mode 100755 index 00000000..b087a2cb --- /dev/null +++ b/include/yangstream/YangStreamManager.h @@ -0,0 +1,49 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGSTREAM_YANGSTREAMMANAGER1_H_ +#define INCLUDE_YANGSTREAM_YANGSTREAMMANAGER1_H_ +#include +#include +#include +#include + +class YangStreamManager { +public: + YangStreamManager(); + virtual ~YangStreamManager(); + +public: + std::vector* m_playBuffers; + YangSynBuffer* m_playBuffer; + int getIndex(int puid); + YangSynBuffer* getSynBuffer(int puid); +public: + void setMediaConfig(int32_t puid,YangAudioParam* audio,YangVideoParam* video); + void sendRequest(int32_t puid,uint32_t ssrc,YangRequestType req); + void sendRequest(int32_t puid,YangRtcMessageType msg); + void setRtcMessageNotify(int puid,YangRtcMessageNotify* rtcmsg); + void setSendRequestCallback(YangSendRequestCallback* pli); + void setDecoderMediaConfigCallback(YangMediaConfigCallback* dec); + void setRenderMediaConfigCallback(YangMediaConfigCallback* render); + + + int32_t getAudioClock(); + int32_t getVideoClock(); +private: + YangSendRequestCallback* m_sendPli; + YangMediaConfigCallback* m_mediaConfig_dec; + YangMediaConfigCallback* m_mediaConfig_render; + + + int32_t m_videoClock; + int32_t m_audioClock; +private: + YangRtcMessageNotify* m_rtcMsg; + std::map *m_rtcMsgMap; + + + +}; + +#endif /* INCLUDE_YANGSTREAM_YANGSTREAMMANAGER_H_ */ diff --git a/include/yangstream/YangStreamType.h b/include/yangstream/YangStreamType.h new file mode 100755 index 00000000..5ada3cd5 --- /dev/null +++ b/include/yangstream/YangStreamType.h @@ -0,0 +1,33 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangStreamType_YangStreamType_H_ +#define YangStreamType_YangStreamType_H_ + +#include +typedef struct YangReceiveCallback { + void* context; + void (*receiveAudio)(void* context,YangFrame *audioFrame); + void (*receiveVideo)(void* context,YangFrame *videoFrame); +}YangReceiveCallback; +#ifdef __cplusplus + +class YangMediaConfigCallback { +public: + YangMediaConfigCallback() {}; + virtual ~YangMediaConfigCallback() {}; + virtual 
void setMediaConfig(int32_t puid, YangAudioParam *audio, + YangVideoParam *video)=0; +}; + +class YangSendRequestCallback { +public: + YangSendRequestCallback() {}; + virtual ~YangSendRequestCallback() {}; + virtual void sendRequest(int32_t puid, uint32_t ssrc, + YangRequestType req)=0; +}; + +#endif + +#endif diff --git a/include/yangstream/YangSynBuffer.h b/include/yangstream/YangSynBuffer.h new file mode 100755 index 00000000..57ecc4c0 --- /dev/null +++ b/include/yangstream/YangSynBuffer.h @@ -0,0 +1,105 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGPLAYER_INCLUDE_YANGAVSYN_H_ +#define YANGPLAYER_INCLUDE_YANGAVSYN_H_ +#include + +#include +#include +#include +#include + + +enum YangSynType{ + YANG_SYNC_AUDIO_MASTER, /* default choice */ + YANG_SYNC_VIDEO_MASTER, + YANG_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */ +}; +#define Yang_Max_Audio_Intervaltime 25 +#define Yang_Max_Video_Intervaltime 35 +#define Yang_Video_Base_Update_Interval 600000//10*60*1000 //10m +#define Yang_Audio_Base_Update_Interval 5000//10*60*1000 //10m +#define Yang_Video_Cache_time 35 +#define Yang_Audio_Cache_time 5 +class YangSynBuffer //:public YangSynBufferI +{ +public: + YangSynBuffer(); + virtual ~YangSynBuffer(); + int32_t m_uid; + int32_t m_width; + int32_t m_height; + void setTranstype(int transtype); + void setAudioClock(int paudioclock); + void setVideoClock(int pvideoclock); + void setVideoCacheTime(int pctime); + + void resetVideoClock(); + void resetAudioClock(); + +public: + uint8_t* getVideoRef(YangFrame* pframe); + uint8_t* getAudioRef(YangFrame* audioFrame); + int32_t getAudioSize(); + int32_t getVideoSize(); + + //void setAudioBuffers(vector *paudioList); + void setInAudioBuffer(YangAudioPlayBuffer *paudioList); + void setInVideoBuffer(YangVideoBuffer *pbuf); + + + void initClock(); + void setClock(); + + int playAudioFrame(int64_t pts); + int playVideoFrame(YangFrame* frame); + double compute_target_delay(double delay); + void video_refresh(int32_t pisaudio, double *remaining_time); + int32_t m_maxAudioMinus; + int32_t m_maxVideoMinus; +private: + bool m_isFirstVideo; + bool m_isFirstAudio; + int32_t m_video_time_state; + void updateBaseTimestamp(int64_t pts); + void updateVideoBaseTimestamp(int64_t pts); + void updateAudioBaseTimestamp(int64_t pts); +private: + + YangSynType m_synType; + int32_t m_paused; + int64_t m_baseClock; + + int64_t m_audio_startClock; + int32_t m_audioClock; + int64_t m_audioBase; + int64_t m_audioTime; + int32_t m_audioMinus; + int32_t m_audioDelay; + int32_t m_lostAudioCount; + int32_t m_audioNegativeCount; + int32_t m_audioTimeoutCount; + + // int32_t m_keyFrameCount; + int64_t m_videoBase; + int32_t m_videoClock; + int64_t m_videoTime; + int32_t m_videoMinus; + int32_t m_videoNegativeCount; + int32_t m_videoTimeoutCount; + int32_t m_videoCacheTime; + int64_t m_video_startClock; + + int64_t m_pre_audioTime; + int64_t m_pre_videoTime; + + int m_transtype; + + +private: + YangVideoBuffer *m_videoBuffer; + YangAudioPlayBuffer* m_audioBuffer; +}; + +#endif /* YANGPLAYER_INCLUDE_YANGAVSYN_H_ */ diff --git a/include/yangtrace/YangCameraControl.h b/include/yangtrace/YangCameraControl.h new file mode 100755 index 00000000..cecf4d01 --- /dev/null +++ b/include/yangtrace/YangCameraControl.h @@ -0,0 +1,42 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangCameraControl_H +#define YangCameraControl_H +#include +#include +#include + +using namespace std; + +class YangCameraControl +{ +public: + YangCameraControl(void); + 
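+ // Usage sketch (illustrative; not part of the original header — the IP
+ // addresses, port and speed value below are hypothetical). init() binds the
+ // controller to the teacher and student PTZ cameras; the movement calls then
+ // send VISCA payloads through the private sendmsg() helper, with `type`
+ // presumably selecting which camera to drive:
+ //   YangCameraControl cam;
+ //   cam.init("192.168.1.10", 1259, "192.168.1.11", 1259);
+ //   cam.Up("teacher", "6");
+ //   cam.tiltStop("teacher");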
~YangCameraControl(void);
+
+ void init(const char *teacherip,uint32_t teacherport,const char *studentip,uint32_t studentport); //initialize the IPs of the teacher and student tracking cameras respectively
+ void Up(std::string type,const char *speed);
+ void Down(string type,const char *speed);
+ void Left(string type,const char *speed);
+ void Right(string type,const char *speed);
+ void Home(string type); //return to the home position
+ void Tele(string type); //zoom in (telephoto lens)
+ void Wide(string type); //zoom out (wide-angle lens)
+ void tiltStop(string type);
+ void zoomStop(string type);
+
+ void StopAuto();
+ void StartAuto();
+
+private:
+
+ void sendmsg(string type,char *visca,int32_t len);
+
+ YangTraceUdp m_studentudp; //manual control of the student camera
+ YangTraceUdp m_teacherudp; //manual control of the teacher camera
+
+ int32_t m_isManual;
+};
+
+#endif
diff --git a/include/yangtrace/YangTraceHandle.h b/include/yangtrace/YangTraceHandle.h
new file mode 100755
index 00000000..c98a4661
--- /dev/null
+++ b/include/yangtrace/YangTraceHandle.h
@@ -0,0 +1,66 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YangTraceHandle_H
+#define YangTraceHandle_H
+#include
+#include
+#include
+#include
+#include
+#define Yang_Trace_WhiteBoardStart "08090802"
+#define Yang_Trace_Teacher "08090801"
+#define Yang_Trace_Student "08090803"
+#define Command_Trace_Start "Auto|END"
+#define Command_Trace_Stop "Manual|END"
+#define Command_Mouse_Move "MouseMove|END"
+#define Command_Mouse_Stop "MouseStop|END"
+#define Command_Teacher_Close "08090801" //close-up
+#define Command_Whiteboard_Start "08090802"
+#define Command_Student_Close "08090803" //close-up
+#define Command_Whiteboard_Stop "08090804"
+#define Command_Student_Down "08090805" //student sits down
+#define Command_Teacher_Move "08090806"
+#define Command_Student_Multi_Up "08090807"
+#define Command_Teacher_Disappear "08090808"
+#define Command_Teacher_Full "08090809"
+#define Command_Teacher_Move_Full "08090810"
+
+enum YangTraceType{
+ Yang_Trace_Start,
+ Yang_Trace_Stop,
+ Yang_Trace_Teacher_Move,
+ Yang_Trace_Teacher_Pause,
+ Yang_Trace_Teacher_Full,
+ Yang_Trace_Teacher_Disappear,
+ Yang_Trace_Student_Single_Up,
+ Yang_Trace_Student_Single_Down,
+ Yang_Trace_Student_Multi_Up,
+ Yang_Trace_Student_Multi_Down,
+ Yang_Trace_Whiteboard_Start,
+ Yang_Trace_Whiteboard_Stop
+};
+class YangTraceHandle
+{
+public:
+ YangTraceHandle();
+ virtual ~YangTraceHandle();
+ YangCameraControl *m_camCtl;
+ void init(const char *ip,const int32_t port);
+ void startTrace();
+ void stopTrace();
+ void proc(const char* command);
+ void procCommand(YangTraceType evt);
+ void manualCyc(string str);
+private:
+ void change(const char *s);
+ int32_t m_isStartTrace;
+ YangTraceUdp m_udp;
+
+ int32_t m_whiteBoStat;
+ int32_t m_studentStat;
+
+ std::string m_ip;
+};
+
+#endif // AUTOTRACEI_H
diff --git a/include/yangtrace/YangTraceIni.h b/include/yangtrace/YangTraceIni.h
new file mode 100755
index 00000000..fa7f6d21
--- /dev/null
+++ b/include/yangtrace/YangTraceIni.h
@@ -0,0 +1,21 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef INCLUDE_YANGTRACE_YANGTRACEINI_H_
+#define INCLUDE_YANGTRACE_YANGTRACEINI_H_
+#include
+struct YangTraceIni{
+ std::string traceIp;
+ std::string luboIp;
+ std::string studentIp;
+ std::string teacherIp;
+ int32_t studentPort;
+ int32_t teacherPort;
+ int32_t localPort;
+ int32_t luboPort;
+
+};
+
+
+
+#endif /* INCLUDE_YANGTRACE_YANGTRACEINI_H_ */
diff --git a/include/yangtrace/YangTraceServer.h b/include/yangtrace/YangTraceServer.h
new file mode 100755
index 00000000..1c176aad
--- /dev/null
+++ b/include/yangtrace/YangTraceServer.h
@@ -0,0 +1,32 @@
+//
+// Copyright (c) 2019-2022
yanggaofeng +// +#ifndef TRACE_YangTraceServer_H_ +#define TRACE_YangTraceServer_H_ + +#include +#include "YangCameraControl.h" +#include "YangTraceHandle.h" +#include "yangutil/sys/YangThread.h" +#include "YangTraceIni.h" +class YangTraceServer:public YangThread +{ +public: + YangTraceServer(YangTraceIni* pcontext); + virtual ~YangTraceServer(void); + + void init(); + void startLoop(); + + int32_t m_isStart; + int32_t m_isConvert; + void stop(); + void run(); +private: + YangTraceHandle *m_trace; + YangCameraControl m_camCtl; + YangTraceIni* m_context; + +}; + +#endif diff --git a/include/yangtrace/YangTraceUdp.h b/include/yangtrace/YangTraceUdp.h new file mode 100755 index 00000000..36432b75 --- /dev/null +++ b/include/yangtrace/YangTraceUdp.h @@ -0,0 +1,30 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef TRACE_YangTraceUdp_H_ +#define TRACE_YangTraceUdp_H_ +#ifdef _WIN32 +//#include +#include + +//#define SOCKADDR_IN +#else +#include +#include +#include +#endif +#include +class YangTraceUdp +{ +public: + YangTraceUdp(void); + ~YangTraceUdp(void); + void init(const char *ip,int32_t port); + int32_t sendUdp(const char* data,int32_t len); + void setSockSrv(const char *ip,int32_t port); +private: + int32_t sockSrv; + struct sockaddr_in m_addrClient; +}; + +#endif diff --git a/include/yangutil/YangErrorCode.h b/include/yangutil/YangErrorCode.h new file mode 100755 index 00000000..42347e82 --- /dev/null +++ b/include/yangutil/YangErrorCode.h @@ -0,0 +1,110 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGUTIL_YANGERRORCODE_H_ +#define INCLUDE_YANGUTIL_YANGERRORCODE_H_ + +#define Yang_Ok 0 +#define Yang_LeaveRoom 1 + + +#define ERROR_SYS_NoAudioDevice 110 +#define ERROR_SYS_NoVideoDevice 111 +#define ERROR_SYS_NoAudioCaptureDevice 112 +#define ERROR_SYS_NoAudioPlayDevice 113 +#define ERROR_SYS_AudioCapture 114 +#define ERROR_SYS_AudioRender 115 + +#define ERROR_SYS_Linux_VideoDeveceOpenFailure 120 +#define ERROR_SYS_Linux_NoVideoDriver 122 +#define ERROR_SYS_Linux_NoVideoCatpureInterface 124 + +#define ERROR_SYS_Win_VideoDeveceOpenFailure 150 +#define ERROR_SYS_Win_NoVideoDriver 151 +#define ERROR_SYS_Win_NoVideoCatpureInterface 152 + +#define ERROR_SYS_Linux_ScreenDeviceOpenFailure 163 +#define ERROR_SYS_Linux_ScreenCaptureFailure 164 +#define ERROR_SYS_Win_ScreenCaptureFailure 165 + + +#define ERROR_SOCKET 201 +#define ERROR_SOCKET_Timeout 202 +#define ERROR_SOCKET_Close 203 +#define ERROR_SOCKET_Close_Wr 204 +#define ERROR_SOCKET_Negotiation 205 +#define ERROR_CONNECT_Handshaked 206 +#define ERROR_CONNECT_Fail 207 +#define ERROR_REQUEST_Close 208 +#define ERROR_SERVER_Reject 209 +#define ERROR_SERVER_Logout 210 +#define ERROR_SERVER_ConnectFailure 211 + +#define ERROR_SSL 300 + + + + +#define ERROR_RTMP_ConnectFailure 2100 +#define ERROR_RTMP_PubFailure 2101 +#define ERROR_RTMP_UnPubFailure 2102 +#define ERROR_RTMP_PlayFailure 2103 +#define ERROR_RTMP_UnPlayFailure 2104 +#define ERROR_RTMP_SendCommandFailure 2105 +#define ERROR_RTMP_UserInfosResult 2106 + +#define Yang_SRTS_SocketBase 3000 +#define ERROR_SRT_NotInit 3001 +#define Yang_SRTS_CONNECTING 3004 +#define Yang_SRTS_BROKEN 3006 +#define Yang_SRTS_CLOSING 3007 +#define Yang_SRTS_CLOSED 3008 +#define Yang_SRTS_NONEXIST 3009 +#define Yang_SRTS_NONSRTSERVER 3010 +#define ERROR_SRT_PushFailure 3100 +#define ERROR_SRT_PullFailure 3101 +#define ERROR_SRT_StreamIdSetFailure 3102 +#define ERROR_SRT_EpollCreateFailure 3103 +#define ERROR_SRT_EpollSetFailure 3104 +#define ERROR_SRT_EpollSelectFailure 3105 
+#define ERROR_SRT_SocketConnectCreate 3201 +#define ERROR_SRT_SocketConnect 3202 +#define ERROR_SRT_WriteSocket 3210 +#define ERROR_SRT_ReadSocket 3211 + +#define ERROR_RTC_PORT 5000 +#define ERROR_RTP_PACKET_CREATE 5001 +#define ERROR_OpenSslCreateSSL 5002 +#define ERROR_OpenSslBIOReset 5003 +#define ERROR_OpenSslBIOWrite 5004 +#define ERROR_OpenSslBIONew 5005 +#define ERROR_RTC_RTP 5006 +#define ERROR_RTC_RTCP 5007 +#define ERROR_RTC_STUN 5008 +#define ERROR_RTC_DTLS 5009 +#define ERROR_RTC_UDP 5010 +#define ERROR_RTC_RTP_MUXER 5011 +#define ERROR_RTC_SDP_DECODE 5012 +#define ERROR_RTC_SRTP_INIT 5013 +#define ERROR_RTC_SRTP_PROTECT 5014 +#define ERROR_RTC_SRTP_UNPROTECT 5015 +#define ERROR_RTC_RTCP_CHECK 5016 +#define ERROR_RTC_SOURCE_CHECK 5017 +#define ERROR_RTC_SDP_EXCHANGE 5018 +#define ERROR_RTC_API_BODY 5019 +#define ERROR_RTC_SOURCE_BUSY 5020 +#define ERROR_RTC_DISABLED 5021 +#define ERROR_RTC_NO_SESSION 5022 +#define ERROR_RTC_INVALID_PARAMS 5023 +#define ERROR_RTC_DUMMY_BRIDGER 5024 +#define ERROR_RTC_STREM_STARTED 5025 +#define ERROR_RTC_TRACK_CODEC 5026 +#define ERROR_RTC_NO_PLAYER 5027 +#define ERROR_RTC_NO_PUBLISHER 5028 +#define ERROR_RTC_DUPLICATED_SSRC 5029 +#define ERROR_RTC_NO_TRACK 5030 +#define ERROR_RTC_RTCP_EMPTY_RR 5031 + + +#endif /* INCLUDE_YANGUTIL_YANGERRORCODE_H_ */ diff --git a/include/yangutil/buffer/YangAudioBuffer.h b/include/yangutil/buffer/YangAudioBuffer.h new file mode 100755 index 00000000..272f16c9 --- /dev/null +++ b/include/yangutil/buffer/YangAudioBuffer.h @@ -0,0 +1,24 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef ___YangAudioBuffer__ +#define ___YangAudioBuffer__ +#include + +#include +class YangAudioBuffer:public YangMediaBuffer +{ +public: + YangAudioBuffer(int32_t pcacheNum); + ~YangAudioBuffer(void); + void putAudio(YangFrame* pframe); + int32_t getAudio(YangFrame* pframe); + uint8_t *getAudioRef(YangFrame* pframe); + void reset(); + +private: + + int32_t m_bufLen; + +}; +#endif diff --git a/include/yangutil/buffer/YangAudioEncoderBuffer.h b/include/yangutil/buffer/YangAudioEncoderBuffer.h new file mode 100755 index 00000000..45bbb620 --- /dev/null +++ b/include/yangutil/buffer/YangAudioEncoderBuffer.h @@ -0,0 +1,25 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangAudioEncoderBuffer_H +#define YangAudioEncoderBuffer_H + +#include + +class YangAudioEncoderBuffer: public YangMediaBuffer { +public: + YangAudioEncoderBuffer(int32_t paudioCacheNum); + ~YangAudioEncoderBuffer(void); + void reset(); + + void putAudio(YangFrame* audioFrame); + void getAudio(YangFrame* audioFrame); + uint8_t* getAudioRef(YangFrame* pframe); + void putPlayAudio(YangFrame* pframe); + void getPlayAudio(YangFrame* audioFrame); + +private: + +}; + +#endif diff --git a/include/yangutil/buffer/YangAudioPlayBuffer.h b/include/yangutil/buffer/YangAudioPlayBuffer.h new file mode 100755 index 00000000..a381a14c --- /dev/null +++ b/include/yangutil/buffer/YangAudioPlayBuffer.h @@ -0,0 +1,27 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef ___YangAudioPlayBuffer1__ +#define ___YangAudioPlayBuffer1__ + +#include + + +class YangAudioPlayBuffer:public YangMediaBuffer{ + +public: + YangAudioPlayBuffer(); + ~YangAudioPlayBuffer(void); + + void putAudio(YangFrame* pframe); + void getAudio(YangFrame* pframe); + uint8_t *getAudios(YangFrame* pframe); + int32_t getFrameTimestamp(int64_t *timestamp); + int64_t getNextTimestamp(); + //int32_t playFrame(); + void reset(); +private: + int32_t m_bufLen; + +}; +#endif diff --git 
a/include/yangutil/buffer/YangBuffer.h b/include/yangutil/buffer/YangBuffer.h new file mode 100755 index 00000000..2981242a --- /dev/null +++ b/include/yangutil/buffer/YangBuffer.h @@ -0,0 +1,19 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangBuffer_YANGRTCUTIL_H_ +#define YangBuffer_YANGRTCUTIL_H_ + +#include + +#ifdef __cplusplus +extern "C"{ +#include +} + +#else +#include +#endif + + +#endif diff --git a/include/yangutil/buffer/YangCBuffer.h b/include/yangutil/buffer/YangCBuffer.h new file mode 100755 index 00000000..c052f257 --- /dev/null +++ b/include/yangutil/buffer/YangCBuffer.h @@ -0,0 +1,70 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_BUFFER_YANGCBUFFER_H_ +#define INCLUDE_YANGUTIL_BUFFER_YANGCBUFFER_H_ + +#include +typedef struct{ + // current position at bytes. + char* head; + // the bytes data for buffer to read or write. + char* data; + // the total number of bytes. + int32_t size; +}YangBuffer; + +void yang_init_buffer(YangBuffer* buf,char* b, int32_t nn); +void yang_destroy_buffer(YangBuffer* buf); +int32_t yang_buffer_pos(YangBuffer* buf); + // Left bytes in buffer, total size() minus the current pos(). + int32_t yang_buffer_left(YangBuffer* buf); + // Whether buffer is empty. + int32_t yang_buffer_empty(YangBuffer* buf); + // Whether buffer is able to supply required size of bytes. + // @remark User should check buffer by require then do read/write. + // @remark Assert the required_size is not negative. + int32_t yang_buffer_require(YangBuffer* buf,int32_t required_size); + void yang_buffer_skip(YangBuffer* buf,int32_t size); +// Write 1bytes char to buffer. +void yang_write_1bytes(YangBuffer* buf,char value); +// Write 2bytes int32_t to buffer. +void yang_write_2bytes(YangBuffer* buf,int16_t value); +void yang_write_le2bytes(YangBuffer* buf,int16_t value); +// Write 4bytes int32_t to buffer. +void yang_write_4bytes(YangBuffer* buf,int32_t value); +void yang_write_le4bytes(YangBuffer* buf,int32_t value); +// Write 3bytes int32_t to buffer. +void yang_write_3bytes(YangBuffer* buf,int32_t value); +void yang_write_le3bytes(YangBuffer* buf,int32_t value); +// Write 8bytes int32_t to buffer. +void yang_write_8bytes(YangBuffer* buf,int64_t value); +void yang_write_le8bytes(YangBuffer* buf,int64_t value); +// Write string to buffer +//void yang_write_string(YangBuffer* buf,std::string value); +// Write bytes to buffer +void yang_write_bytes(YangBuffer* buf,char* data, int32_t size); +void yang_write_cstring(YangBuffer* buf,char* data); + +// Read 1bytes char from buffer. + char yang_read_1bytes(YangBuffer* buf); + // Read 2bytes int32_t from buffer. + int16_t yang_read_2bytes(YangBuffer* buf); + int16_t yang_read_le2bytes(YangBuffer* buf); + // Read 3bytes int32_t from buffer. + int32_t yang_read_3bytes(YangBuffer* buf); + int32_t yang_read_le3bytes(YangBuffer* buf); + // Read 4bytes int32_t from buffer. + int32_t yang_read_4bytes(YangBuffer* buf); + int32_t yang_read_le4bytes(YangBuffer* buf); + // Read 8bytes int32_t from buffer. + int64_t yang_read_8bytes(YangBuffer* buf); + int64_t yang_read_le8bytes(YangBuffer* buf); + // Read string from buffer, length specifies by param len. + // std::string yang_read_string(YangBuffer* buf,int32_t len); + // Read bytes from buffer, length specifies by param len. 
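+ // Usage sketch (illustrative; not part of the original header). A write/read
+ // round trip over caller-owned memory; rewinding by resetting `head` and the
+ // truthiness of yang_buffer_require() are assumptions based on the comments above:
+ //   char mem[16];
+ //   YangBuffer b;
+ //   yang_init_buffer(&b, mem, sizeof(mem));
+ //   yang_write_2bytes(&b, 0x1234);        // big-endian; the *_le* variants write little-endian
+ //   b.head = b.data;                      // rewind to the start for reading
+ //   if (yang_buffer_require(&b, 2)) {
+ //       int16_t v = yang_read_2bytes(&b); // v == 0x1234
+ //   }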
+ void yang_read_bytes(YangBuffer* buf,char* data, int32_t size); + + + +#endif /* INCLUDE_YANGUTIL_BUFFER_YANGCBUFFER_H_ */ diff --git a/include/yangutil/buffer/YangMediaBuffer.h b/include/yangutil/buffer/YangMediaBuffer.h new file mode 100755 index 00000000..18366e4d --- /dev/null +++ b/include/yangutil/buffer/YangMediaBuffer.h @@ -0,0 +1,42 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGUTIL_BUFFER_YANGBUFFER_H_ +#define YANGUTIL_BUFFER_YANGBUFFER_H_ +#include +#include +#include +#define yang_reindex(p) if(p!=NULL) p->resetIndex(); +#define Yang_MediaBuffer_Maxsize 2000 +class YangMediaBuffer { +public: + YangMediaBuffer(); + virtual ~YangMediaBuffer(); +public: + int32_t m_mediaType; + int32_t m_uid; + void resetIndex(); + int32_t size(); +protected: + uint32_t m_putIndex; + uint32_t m_getIndex; + uint32_t m_cache_num; + uint32_t m_size; + uint32_t m_nextIndex; + int32_t m_ret; + YangFrame** m_frames; + YangBufferManager* m_bufferManager; +protected: + void initFrames(int pnum,int unitsize); + + void putFrame(YangFrame* pframe); + void getFrame(YangFrame* pframe); + YangFrame* getCurFrameRef(); + int64_t getNextFrameTimestamp(); + uint8_t* getFrameRef(YangFrame* pframe); +private: + pthread_mutex_t m_lock; +}; + +#endif /* YANGUTIL_BUFFER_YANGBUFFER_H_ */ diff --git a/include/yangutil/buffer/YangVideoBuffer.h b/include/yangutil/buffer/YangVideoBuffer.h new file mode 100755 index 00000000..eec153aa --- /dev/null +++ b/include/yangutil/buffer/YangVideoBuffer.h @@ -0,0 +1,47 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangVideoBuffer__ +#define __YangVideoBuffer__ +#include +#include + + +#define yang_get_videoBuffer(x) new YangVideoBuffer(x->width,x->height,x->videoCaptureFormat==YangYuy2?16:12,x->bitDepth==8?1:2) +class YangVideoBuffer:public YangMediaBuffer +{ +public: + YangVideoBuffer(int32_t pBitDepthLen); + + YangVideoBuffer(int32_t pwid,int32_t phei,YangYuvType ptype,int32_t pBitDepthLen); + ~YangVideoBuffer(void); + void initTemp(); + void init(int32_t pwid,int32_t phei,YangYuvType ptype); + + + void reset(); + //YangAvsynI* m_syn; + + int32_t isPreview=0; + int32_t m_width; + int32_t m_height; + int32_t m_length; + int32_t m_frames; + int32_t m_bitDepthLen; + void putVideo(YangFrame* pframe); + + void getVideo(YangFrame* pframe); + uint8_t * getVideoRef(YangFrame* pframe); + int64_t getTimestamp(int64_t *timestamp); + int64_t getNextTimestamp(); + YangFrame* getCurVideoFrame(); + //int32_t playFrame(); +protected: + + +private: + int32_t m_headerLen; + + +}; +#endif diff --git a/include/yangutil/buffer/YangVideoDecoderBuffer.h b/include/yangutil/buffer/YangVideoDecoderBuffer.h new file mode 100755 index 00000000..00991fd7 --- /dev/null +++ b/include/yangutil/buffer/YangVideoDecoderBuffer.h @@ -0,0 +1,21 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangVideoDecoderBuffer__ +#define __YangVideoDecoderBuffer__ +#include +#include +#include +class YangVideoDecoderBuffer:public YangMediaBuffer +{ +public: + YangVideoDecoderBuffer(int num=8); + ~YangVideoDecoderBuffer(void); + + void putEVideo(YangFrame* pframe); + void getEVideo(YangFrame* pframe); +private: + + +}; +#endif diff --git a/include/yangutil/buffer/YangVideoEncoderBuffer.h b/include/yangutil/buffer/YangVideoEncoderBuffer.h new file mode 100755 index 00000000..dbc93449 --- /dev/null +++ b/include/yangutil/buffer/YangVideoEncoderBuffer.h @@ -0,0 +1,22 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangVideoEncoderBuffer__ +#define 
__YangVideoEncoderBuffer__ +#include +#include + +class YangVideoEncoderBuffer:public YangMediaBuffer +{ +public: + YangVideoEncoderBuffer(int32_t pcachenum); + ~YangVideoEncoderBuffer(void); + + + void getEVideo(YangFrame* pframe); + uint8_t * getEVideoRef(YangFrame* frame); + void putEVideo(YangFrame* pframe); +private: + +}; +#endif diff --git a/include/yangutil/sys/YangAmf.h b/include/yangutil/sys/YangAmf.h new file mode 100755 index 00000000..9d4c183a --- /dev/null +++ b/include/yangutil/sys/YangAmf.h @@ -0,0 +1,143 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangAMF_H__ +#define __YangAMF_H__ +#include +#include +#include +#include +#ifndef TRUE +#define TRUE 1 +#define FALSE 0 +#endif + + + + typedef enum AMFDataType + { AMF_NUMBER = 0, AMF_BOOLEAN, AMF_STRING, AMF_OBJECT, + AMF_MOVIECLIP, /* reserved, not used */ + AMF_NULL, AMF_UNDEFINED, AMF_REFERENCE, AMF_ECMA_ARRAY, AMF_OBJECT_END,//9 + AMF_STRICT_ARRAY, AMF_DATE, AMF_LONG_STRING, AMF_UNSUPPORTED, + AMF_RECORDSET, /* reserved, not used */ + AMF_XML_DOC, AMF_TYPED_OBJECT, + AMF_AVMPLUS, /* switch to AMF3 */ + AMF_INVALID = 0xff + } AMFDataType; + + typedef enum AMF3DataType + { AMF3_UNDEFINED = 0, AMF3_NULL, AMF3_FALSE, AMF3_TRUE, + AMF3_INTEGER, AMF3_DOUBLE, AMF3_STRING, AMF3_XML_DOC, AMF3_DATE, + AMF3_ARRAY, AMF3_OBJECT, AMF3_XML, AMF3_BYTE_ARRAY + } AMF3DataType; + + typedef struct AVal + { + char *av_val; + int32_t av_len; + }AVal; +#define AVC(str) {(char*)str,sizeof(str)-1} +#define AVMATCH(a1,a2) ((a1)->av_len == (a2)->av_len && !memcmp((a1)->av_val,(a2)->av_val,(a1)->av_len)) + + struct AMFObjectProperty; + + typedef struct AMFObject + { + int32_t o_num; + struct AMFObjectProperty *o_props; + } AMFObject; + + typedef struct AMFObjectProperty + { + struct AVal p_name; + enum AMFDataType p_type; + union + { + double p_number; + struct AVal p_aval; + struct AMFObject p_object; + } p_vu; + int16_t p_UTCoffset; + } AMFObjectProperty; + + + + + + char *AMF_EncodeString(char *output, char *outend, const struct AVal * str); + char *AMF_EncodeNumber(char *output, char *outend, double dVal); + char *AMF_EncodeInt16(char *output, char *outend, short nVal); + char *AMF_EncodeInt24(char *output, char *outend, int32_t nVal); + char *AMF_EncodeInt32(char *output, char *outend, int32_t nVal); + char *AMF_EncodeBoolean(char *output, char *outend, int32_t bVal); + + /* Shortcuts for AMFProp_Encode */ + char *AMF_EncodeNamedString(char *output, char *outend, const struct AVal * name, const struct AVal * value); + char *AMF_EncodeNamedNumber(char *output, char *outend, const struct AVal * name, double dVal); + char *AMF_EncodeNamedBoolean(char *output, char *outend, const struct AVal * name, int32_t bVal); + + unsigned short AMF_DecodeInt16(const char *data); + uint32_t AMF_DecodeInt24(const char *data); + uint32_t AMF_DecodeInt32(const char *data); + void AMF_DecodeString(const char *data,struct AVal * str); + void AMF_DecodeLongString(const char *data,struct AVal * str); + int32_t AMF_DecodeBoolean(const char *data); + double AMF_DecodeNumber(const char *data); + + char *AMF_Encode(struct AMFObject * obj, char *pBuffer, char *pBufEnd); + char *AMF_EncodeEcmaArray(struct AMFObject *obj, char *pBuffer, char *pBufEnd); + char *AMF_EncodeArray(struct AMFObject *obj, char *pBuffer, char *pBufEnd); + + int32_t AMF_Decode(struct AMFObject * obj, const char *pBuffer, int32_t nSize, + int32_t bDecodeName); + int32_t AMF_DecodeArray(struct AMFObject * obj, const char *pBuffer, int32_t nSize, + int32_t nArrayLen, int32_t 
bDecodeName); + int32_t AMF3_Decode(struct AMFObject * obj, const char *pBuffer, int32_t nSize, + int32_t bDecodeName); + void AMF_Dump(struct AMFObject * obj); + void AMF_Dump1(struct AMFObject * obj); + void AMF_Reset(struct AMFObject * obj); + + void AMF_AddProp(struct AMFObject * obj, const struct AMFObjectProperty * prop); + int32_t AMF_CountProp(struct AMFObject * obj); + struct AMFObjectProperty *AMF_GetProp(struct AMFObject * obj, const struct AVal * name, + int32_t nIndex); + + enum AMFDataType AMFProp_GetType(struct AMFObjectProperty * prop); + void AMFProp_SetNumber(struct AMFObjectProperty * prop, double dval); + void AMFProp_SetBoolean(struct AMFObjectProperty * prop, int32_t bflag); + void AMFProp_SetString(struct AMFObjectProperty * prop, struct AVal * str); + void AMFProp_SetObject(struct AMFObjectProperty * prop, struct AMFObject * obj); + + void AMFProp_GetName(struct AMFObjectProperty * prop, struct AVal * name); + void AMFProp_SetName(struct AMFObjectProperty * prop, struct AVal * name); + double AMFProp_GetNumber(struct AMFObjectProperty * prop); + int32_t AMFProp_GetBoolean(struct AMFObjectProperty * prop); + void AMFProp_GetString(struct AMFObjectProperty * prop, struct AVal * str); + void AMFProp_GetObject(struct AMFObjectProperty * prop, struct AMFObject * obj); + + int32_t AMFProp_IsValid(struct AMFObjectProperty * prop); + + char *AMFProp_Encode(struct AMFObjectProperty * prop, char *pBuffer, char *pBufEnd); + int32_t AMF3Prop_Decode(struct AMFObjectProperty * prop, const char *pBuffer, + int32_t nSize, int32_t bDecodeName); + int32_t AMFProp_Decode(struct AMFObjectProperty * prop, const char *pBuffer, + int32_t nSize, int32_t bDecodeName); + + void AMFProp_Dump(struct AMFObjectProperty * prop); + void AMFProp_Dump1(struct AMFObjectProperty * prop); + void AMFProp_Reset(struct AMFObjectProperty * prop); + + typedef struct AMF3ClassDef + { + struct AVal cd_name; + char cd_externalizable; + char cd_dynamic; + int32_t cd_num; + struct AVal *cd_props; + } AMF3ClassDef; + + void AMF3CD_AddProp(struct AMF3ClassDef * cd, struct AVal * prop); + struct AVal *AMF3CD_GetProp(struct AMF3ClassDef * cd, int32_t idx); + +#endif /* __AMF_H__ */ diff --git a/include/yangutil/sys/YangBits.h b/include/yangutil/sys/YangBits.h new file mode 100755 index 00000000..3706859e --- /dev/null +++ b/include/yangutil/sys/YangBits.h @@ -0,0 +1,111 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_SYS_YANGBITS_H_ +#define INCLUDE_YANGUTIL_SYS_YANGBITS_H_ +#include +#ifdef _WIN32 +static __inline uint16_t +uint16_identity (uint16_t __x) +{ + return __x; +} +#define __bswap_constant_16(x) \ + ((__uint16_t) ((((x) >> 8) & 0xff) | (((x) & 0xff) << 8))) + +static __inline uint32_t +__uint32_identity (uint32_t __x) +{ + return __x; +} + +static __inline uint64_t +__uint64_identity (uint64_t __x) +{ + return __x; +} +#define __bswap_constant_64(x) \ + ((((x) & 0xff00000000000000ull) >> 56) \ + | (((x) & 0x00ff000000000000ull) >> 40) \ + | (((x) & 0x0000ff0000000000ull) >> 24) \ + | (((x) & 0x000000ff00000000ull) >> 8) \ + | (((x) & 0x00000000ff000000ull) << 8) \ + | (((x) & 0x0000000000ff0000ull) << 24) \ + | (((x) & 0x000000000000ff00ull) << 40) \ + | (((x) & 0x00000000000000ffull) << 56)) + + static __inline uint64_t +__bswap_64 (uint64_t __bsx) +{ +//#if __GNUC_PREREQ (4, 3) + return __builtin_bswap64 (__bsx); +//#else + //return __bswap_constant_64 (__bsx); +//#endif +} +/* Swap bytes in 32-bit value. 
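   For example, __bswap_constant_32(0x11223344) evaluates to 0x44332211,
   and __bswap_constant_16(0x1122) to 0x2211.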
*/ +#define __bswap_constant_32(x) \ + ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) \ + | (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24)) + +#define __bswap_constant_16(x) \ + ((__uint16_t) ((((x) >> 8) & 0xff) | (((x) & 0xff) << 8))) +static __inline uint16_t +__bswap_16 (uint16_t __bsx) +{ +//#if __GNUC_PREREQ (4, 8) + return __builtin_bswap16 (__bsx); +//#else + // return __bswap_constant_16 (__bsx); +//#endif +} +#define __bswap_32(x) ((unsigned int)__builtin_bswap32(x)) +#define __bswap_constant_32(x) \ + ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) \ + | (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24)) + +//static inline uint32_t __bswap_32(uint32_t x) +//{ + // x = ((x << 8) &0xFF00FF00) | ((x >> 8) &0x00FF00FF); + // return (x >> 16) | (x << 16); +//} +#endif +// Convert srs_utime_t as ms. +#define srsu2ms(us) ((us) / YANG_UTIME_MILLISECONDS) +#define srsu2msi(us) int((us) / YANG_UTIME_MILLISECONDS) +# if __BYTE_ORDER == __LITTLE_ENDIAN +# define htobe16(x) __bswap_16 (x) +# define htole16(x) __uint16_identity (x) +# define be16toh(x) __bswap_16 (x) +# define le16toh(x) __uint16_identity (x) + +# define htobe32(x) __bswap_32 (x) +# define htole32(x) __uint32_identity (x) +# define be32toh(x) __bswap_32 (x) +# define le32toh(x) __uint32_identity (x) + +# define htobe64(x) __bswap_64 (x) +# define htole64(x) __uint64_identity (x) +# define be64toh(x) __bswap_64 (x) +# define le64toh(x) __uint64_identity (x) + +# else +# define htobe16(x) __uint16_identity (x) +# define htole16(x) __bswap_16 (x) +# define be16toh(x) __uint16_identity (x) +# define le16toh(x) __bswap_16 (x) + +# define htobe32(x) __uint32_identity (x) +# define htole32(x) __bswap_32 (x) +# define be32toh(x) __uint32_identity (x) +# define le32toh(x) __bswap_32 (x) + +# define htobe64(x) __uint64_identity (x) +# define htole64(x) __bswap_64 (x) +# define be64toh(x) __uint64_identity (x) +# define le64toh(x) __bswap_64 (x) +# endif + + + +#endif /* INCLUDE_YANGUTIL_SYS_YANGBITS_H_ */ diff --git a/include/yangutil/sys/YangCString.h b/include/yangutil/sys/YangCString.h new file mode 100755 index 00000000..6dc14da3 --- /dev/null +++ b/include/yangutil/sys/YangCString.h @@ -0,0 +1,32 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_SYS_YANGCSTRING_H_ +#define INCLUDE_YANGUTIL_SYS_YANGCSTRING_H_ + +#include +#include +#include + +typedef struct { + int32_t capacity; + int32_t vsize; + char **str; +}YangStrings; +#ifdef __cplusplus +extern "C"{ +#endif +void yang_cstr_random(int32_t len,char* data); +int32_t yang_cstr_split(char *src, char *delim, YangStrings* istr); +void yang_cstr_replace(char *str,char* dst, char *orig, char *rep); +void yang_destroy_strings(YangStrings* strs); +int32_t yang_cstr_userfindindex(char* p,char c); +int32_t yang_cstr_userfindupindex(char* p,char c,int32_t n); +int32_t yang_cstr_isnumber(char* p,int32_t n); +int32_t yang_strcmp(char* str1,char* str2); +void yang_itoa(int32_t num,char* data,int32_t n); +#ifdef __cplusplus +} +#endif + +#endif /* INCLUDE_YANGUTIL_SYS_YANGCSTRING_H_ */ diff --git a/include/yangutil/sys/YangCTime.h b/include/yangutil/sys/YangCTime.h new file mode 100755 index 00000000..b64e281c --- /dev/null +++ b/include/yangutil/sys/YangCTime.h @@ -0,0 +1,32 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGUTIL_SYS_YANGCTIME_H_ +#define INCLUDE_YANGUTIL_SYS_YANGCTIME_H_ +#include +int64_t yang_get_system_micro_time(); +int64_t yang_get_system_micro_time(); + +int64_t 
yang_get_milli_time();//milliseconds
+int64_t yang_get_micro_time();//microseconds
+
+#define yang_get_milli_tick yang_get_milli_time
+#define yang_get_micro_tick yang_get_micro_time
+
+
+#ifdef _WIN32
+#include
+int gettimeofday(struct timeval *tp, void *tzp);
+#endif
+int64_t yang_get_nano_tick();//nanoseconds
+typedef struct YangNtp{
+ uint64_t system_ms;
+ uint64_t ntp;
+ uint32_t ntp_second;
+ uint32_t ntp_fractions;
+}YangNtp;
+void yang_ntp_from_time_ms(YangNtp* ntp,uint64_t ms);
+void yang_ntp_to_time_ms(YangNtp* pntp,uint64_t ntp);
+
+#endif /* INCLUDE_YANGUTIL_SYS_YANGCTIME_H_ */
diff --git a/include/yangutil/sys/YangCTimer.h b/include/yangutil/sys/YangCTimer.h
new file mode 100755
index 00000000..1459bcac
--- /dev/null
+++ b/include/yangutil/sys/YangCTimer.h
@@ -0,0 +1,42 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef SRC_YANGUTIL_SRC_YANGCTIMER_H_
+#define SRC_YANGUTIL_SRC_YANGCTIMER_H_
+#include
+#include
+#ifdef _WIN32
+#define Yang_Using_Phtread 1
+#else
+#define Yang_Using_Phtread 0
+#endif
+
+typedef struct YangCTimer{
+ int32_t taskId;
+ int32_t timerfd;
+ int32_t efd;
+ int32_t isStart;
+ int32_t isloop;
+ int32_t waitState;
+ int32_t waitTime;
+ pthread_t threadId;
+#if Yang_Using_Phtread
+ pthread_mutex_t t_lock;
+ pthread_cond_t t_cond_mess;
+#endif
+ void (*doTask)(int32_t taskId,void* user);
+ void* user;
+}YangCTimer;
+#ifdef __cplusplus
+extern "C"{
+#endif
+void yang_create_timer(YangCTimer* timer,void* user,int32_t taskId,int32_t waitTime);
+void yang_destroy_timer(YangCTimer* timer);
+void yang_timer_start(YangCTimer* timer);
+void yang_timer_stop(YangCTimer* timer);
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* SRC_YANGUTIL_SRC_YANGTIMER_H_ */
diff --git a/include/yangutil/sys/YangCUrl.h b/include/yangutil/sys/YangCUrl.h
new file mode 100755
index 00000000..ceee61a7
--- /dev/null
+++ b/include/yangutil/sys/YangCUrl.h
@@ -0,0 +1,27 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef INCLUDE_YANGUTIL_SYS_YANGCURL_H_
+#define INCLUDE_YANGUTIL_SYS_YANGCURL_H_
+
+#include
+
+#define Yang_Websocket_Ws 0
+#define Yang_Websocket_Wss 1
+#define Yang_Websocket_Http 2
+#define Yang_Websocket_Https 3
+typedef struct YangUrlData{
+ int32_t netType;
+ int32_t port;
+ char server[30];
+ char app[20];
+ char stream[20];
+}YangUrlData;
+//webrtc://host[:port]/app/stream
+int32_t yang_srs_url_parse(char* purl, YangUrlData* data);
+//http://host:port/path ws://host:port/path wss://host:port/path
+int32_t yang_ws_url_parse(char* purl, YangUrlData* data);
+
+
+
+#endif /* INCLUDE_YANGUTIL_SYS_YANGCURL_H_ */
diff --git a/include/yangutil/sys/YangEndian.h b/include/yangutil/sys/YangEndian.h
new file mode 100755
index 00000000..383db7e9
--- /dev/null
+++ b/include/yangutil/sys/YangEndian.h
@@ -0,0 +1,24 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+
+#ifndef YANGUTIL_VIDEO_YANGEndian_H_
+#define YANGUTIL_VIDEO_YANGEndian_H_
+#include
+#ifdef __cplusplus
+extern "C"{
+#endif
+char* yang_put_amf_string(char *c, const char *str);
+char* yang_put_amf_double(char *c, double d);
+char* yang_put_byte(char *output, uint8_t nVal);
+char* yang_put_be16(char *output, uint16_t nVal);
+char* yang_put_be24(char *output, uint32_t nVal);
+char* yang_put_be32(char *output, uint32_t nVal);
+char* yang_put_be64(char *output, uint64_t nVal);
+
+uint32_t yang_get_be32(uint8_t *output);
+uint16_t yang_get_be16(uint8_t *output);
+#ifdef __cplusplus
+}
+#endif
+#endif /* YANGUTIL_VIDEO_YANGCOMMON_H_ */
diff --git a/include/yangutil/sys/YangFile.h b/include/yangutil/sys/YangFile.h
new file mode
100755 index 00000000..1fe56533 --- /dev/null +++ b/include/yangutil/sys/YangFile.h @@ -0,0 +1,16 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_SYS_YANGFILE_H_ +#define INCLUDE_YANGUTIL_SYS_YANGFILE_H_ +#include +#ifdef __cplusplus +extern "C"{ +#endif +int32_t yang_getCurpath(char* path); +int32_t yang_getLibpath(char* path); +int32_t yang_getCaFile(char* pem,char* key); +#ifdef __cplusplus +} +#endif +#endif /* INCLUDE_YANGUTIL_SYS_YANGFILE_H_ */ diff --git a/include/yangutil/sys/YangHttp.h b/include/yangutil/sys/YangHttp.h new file mode 100755 index 00000000..3771bf91 --- /dev/null +++ b/include/yangutil/sys/YangHttp.h @@ -0,0 +1,34 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_SYS_YANGHTTP_H_ +#define INCLUDE_YANGUTIL_SYS_YANGHTTP_H_ +#include +#if Yang_HaveCurl +#include +#include +#include + +class YangHttp { +public: + YangHttp(); + virtual ~YangHttp(); + static size_t WriteFunction(void* input, size_t uSize, size_t uCount, void* avg); + int32_t queryPost(char* purl, std::string psdp,std::string &outsdp); + int32_t querySslPost(char* purl, std::string psdp,std::string &outsdp); + + private: + YangLoadLib m_lib; + void loadLib(); + void unloadLib(); + CURLcode (*yang_curl_global_init)(long flags); + CURL* (*yang_curl_easy_init)(void); + CURLcode (*yang_curl_easy_setopt)(CURL *curl, CURLoption option, ...); + struct curl_slist* (*yang_curl_slist_append)(struct curl_slist *, + const char *); + CURLcode (*yang_curl_easy_perform)(CURL *curl); + void (*yang_curl_easy_cleanup)(CURL *curl); + void (*yang_curl_global_cleanup)(void); +}; +#endif +#endif /* INCLUDE_YANGUTIL_SYS_YANGHTTP_H_ */ diff --git a/include/yangutil/sys/YangHttpSocket.h b/include/yangutil/sys/YangHttpSocket.h new file mode 100755 index 00000000..6a46f50c --- /dev/null +++ b/include/yangutil/sys/YangHttpSocket.h @@ -0,0 +1,15 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef SRC_YANGUSERDATA_INCLUDE_YangHttpSocket_H_ +#define SRC_YANGUSERDATA_INCLUDE_YangHttpSocket_H_ +#include +#ifdef __cplusplus +extern "C"{ +#endif + int32_t yang_http_post(char* rets,char* ip,int32_t port,char* api,uint8_t *p, int32_t plen); +#ifdef __cplusplus +} +#endif +#endif /* SRC_YANGUSERDATA_INCLUDE_YangHttpSocket_H_ */ diff --git a/include/yangutil/sys/YangIni.h b/include/yangutil/sys/YangIni.h new file mode 100755 index 00000000..2befd9cb --- /dev/null +++ b/include/yangutil/sys/YangIni.h @@ -0,0 +1,31 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YYangIni__ +#define __YYangIni__ +#include + +class YangIni { +public: + YangIni(); + ~YangIni(); +public: + void init(const char *p_filename); + int32_t IniReadValue(const char *section, const char *key, char *val); + int32_t readStringValue(const char *section, const char *key, char *val, + const char *p_defaultStr); + int32_t readIntValue(const char *section, const char *key, int32_t p_defaultInt); + void initVideo(YangVideoInfo *video); + void initAudio(YangAudioInfo *audio); + void initSys(YangSysInfo *sys); + void initEnc(YangVideoEncInfo *enc); + void initRtc(YangRtcInfo *rtc); + +protected: + +private: + char *m_file; + int32_t readStringValue1(const char *section, const char *key, char *val); + +}; +#endif diff --git a/include/yangutil/sys/YangJson.h b/include/yangutil/sys/YangJson.h new file mode 100755 index 00000000..f21bcc63 --- /dev/null +++ b/include/yangutil/sys/YangJson.h @@ -0,0 +1,15 @@ +#ifndef INCLUDE_YANGUTIL_SYS_YANGJSON_H_ +#define INCLUDE_YANGUTIL_SYS_YANGJSON_H_ +#include +#include 
+#include +using namespace std; +struct YangJsonData{ + string key; + string value; +}; + +void yang_gen_jsonstr(vector &jsons,string &outstr); + + +#endif /* INCLUDE_YANGUTIL_SYS_YANGJSON_H_ */ diff --git a/include/yangutil/sys/YangLibHandle.h b/include/yangutil/sys/YangLibHandle.h new file mode 100755 index 00000000..3be28448 --- /dev/null +++ b/include/yangutil/sys/YangLibHandle.h @@ -0,0 +1,25 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGUTIL_SYS_YANGLIBHANDLE_H_ +#define INCLUDE_YANGUTIL_SYS_YANGLIBHANDLE_H_ + + typedef struct{ + + void* context; + void* (*loadObject)(void* context,const char *sofile); + void* (*loadSysObject)(void* context,const char *sofile); + void* (*loadFunction)(void* context,const char *name); + void (*unloadObject)(void* context); + char* (*getError)(); + }YangLibHandle; +#ifdef __cplusplus +extern "C"{ +#endif +void yang_create_libhandle(YangLibHandle* handle); +void yang_destroy_libhandle(YangLibHandle* handle); +#ifdef __cplusplus +} +#endif +#endif /* INCLUDE_YANGUTIL_SYS_YANGLIBHANDLE_H_ */ diff --git a/include/yangutil/sys/YangLoadLib.h b/include/yangutil/sys/YangLoadLib.h new file mode 100755 index 00000000..aa1b17a2 --- /dev/null +++ b/include/yangutil/sys/YangLoadLib.h @@ -0,0 +1,30 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGUTIL_SYS_YANGLOADLIB_H_ +#define YANGUTIL_SYS_YANGLOADLIB_H_ +#include +#ifdef _WIN32 +#include +#endif +class YangLoadLib{ +public: + YangLoadLib(); + ~YangLoadLib(); + +void *loadObject(const char *sofile); +void *loadSysObject(const char *sofile); +void *loadFunction(const char *name); +void unloadObject(); + +#ifdef _WIN32 +HMODULE m_handle; +char *dlerror(); +#else +void *m_handle; +#endif + +}; + +#endif /* YANGUTIL_SYS_YANGLOADLIB_H_ */ diff --git a/include/yangutil/sys/YangLog.h b/include/yangutil/sys/YangLog.h new file mode 100755 index 00000000..f3f0f153 --- /dev/null +++ b/include/yangutil/sys/YangLog.h @@ -0,0 +1,50 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangLOG_H__ +#define __YangLOG_H__ +#include +#include +#include +#include + + +#include "yangutil/YangErrorCode.h" +#include +#define YANG_LOG_FATAL 0 +#define YANG_LOG_ERROR 1 +#define YANG_LOG_WARNING 2 +#define YANG_LOG_INFO 3 +#define YANG_LOG_DEBUG 4 +#define YANG_LOG_TRACE 5 +#ifdef __cplusplus +extern "C"{ +#endif +int32_t yang_error_wrap(int32_t errcode, const char *fmt, ...); +void yang_clog(int32_t level, const char *fmt, ...); +void yang_clogf(int32_t level, const char *fmt, ...); +void yang_setCLogFile(int32_t isSetLogFile); +void yang_closeCLogFile(); +void yang_setCLogLevel(int32_t plevel); +#ifdef __cplusplus +} +#endif + + + +#define yang_fatal( fmt, ...) yang_clog(0,fmt, ##__VA_ARGS__) +#define yang_error( fmt, ...) yang_clog(1,fmt, ##__VA_ARGS__) +#define yang_warn( fmt, ...) yang_clog(2,fmt, ##__VA_ARGS__) +#define yang_info( fmt, ...) yang_clog(3,fmt, ##__VA_ARGS__) +#define yang_debug( fmt, ...) yang_clog(4,fmt, ##__VA_ARGS__) + + +#define yang_debug2( fmt, ...) yang_clogf(4,fmt, ##__VA_ARGS__) +#define yang_info2( fmt, ...) yang_clogf(3,fmt, ##__VA_ARGS__) +#define yang_trace( fmt, ...) 
yang_clogf(5,fmt, ##__VA_ARGS__) + +#define yang_setLogLevle(x) yang_setCLogLevel(x) +#define yang_setLogFile(x) yang_setCLogFile(x) + +#define yang_closeLogFile yang_closeCLogFile +#endif diff --git a/include/yangutil/sys/YangMath.h b/include/yangutil/sys/YangMath.h new file mode 100755 index 00000000..32dea094 --- /dev/null +++ b/include/yangutil/sys/YangMath.h @@ -0,0 +1,27 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGUTIL_SYS_YANGMATH_H_ +#define INCLUDE_YANGUTIL_SYS_YANGMATH_H_ +#include +static inline int16_t yang_rtp_seq_distance(const uint16_t prev_value, const uint16_t value) +{ + return (int16_t)(value - prev_value); +} +#ifdef __cplusplus +extern "C"{ +#endif +// The "distance" between two uint16 number, for example: +// distance(prev_value=3, value=5) === (int16_t)(uint16_t)((uint16_t)3-(uint16_t)5) === -2 +// distance(prev_value=3, value=65534) === (int16_t)(uint16_t)((uint16_t)3-(uint16_t)65534) === 5 +// distance(prev_value=65532, value=65534) === (int16_t)(uint16_t)((uint16_t)65532-(uint16_t)65534) === -2 +// For RTP sequence, it's only uint16 and may flip back, so 3 maybe 3+0xffff. +// @remark Note that yang_rtp_seq_distance(0, 32768)>0 is TRUE but for WebRTC jitter buffer it's FALSE and we follow it. +// @remark For srs_rtp_seq_distance(32768, 0)>0, it's FALSE definitely. +uint64_t yang_random(); +int32_t yang_insert_uint16(uint16_t a[],uint16_t value,uint32_t* alen); +#ifdef __cplusplus +} +#endif +#endif /* INCLUDE_YANGUTIL_SYS_YANGMATH_H_ */ diff --git a/include/yangutil/sys/YangSRtp.h b/include/yangutil/sys/YangSRtp.h new file mode 100755 index 00000000..a63d4f3f --- /dev/null +++ b/include/yangutil/sys/YangSRtp.h @@ -0,0 +1,25 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_SYS_YANGSRTP_H_ +#define INCLUDE_YANGUTIL_SYS_YANGSRTP_H_ +#include +#include +typedef struct{ + srtp_t recvCtx; + srtp_t sendCtx; +}YangSRtp; +#ifdef __cplusplus +extern "C"{ +#endif + int32_t yang_create_srtp(YangSRtp* srtp,char* recv_key,int precvkeylen, char* send_key,int psendkeylen); + int32_t yang_destroy_srtp(YangSRtp* srtp); + int32_t yang_enc_rtp(YangSRtp* srtp,void* packet, int* nb_cipher); + int32_t yang_enc_rtcp(YangSRtp* srtp,void* packet, int* nb_cipher); + int32_t yang_dec_rtp(YangSRtp* srtp,void* packet, int* nb_plaintext); + int32_t yang_dec_rtcp(YangSRtp* srtp,void* packet, int* nb_plaintext); +#ifdef __cplusplus +} +#endif + +#endif /* INCLUDE_YANGUTIL_SYS_YANGSRTP_H_ */ diff --git a/include/yangutil/sys/YangSocket.h b/include/yangutil/sys/YangSocket.h new file mode 100755 index 00000000..975774cd --- /dev/null +++ b/include/yangutil/sys/YangSocket.h @@ -0,0 +1,15 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGSOCKETUTIL_H_ +#define YANGSOCKETUTIL_H_ +#include +#ifdef __cplusplus +extern "C"{ +#endif +int32_t yang_getLocalInfo(char* p); +void yang_getIp( char* domain, char* ip); +#ifdef __cplusplus +} +#endif +#endif /* YANGSOCKETUTIL_H_ */ diff --git a/include/yangutil/sys/YangSsl.h b/include/yangutil/sys/YangSsl.h new file mode 100755 index 00000000..a96f288c --- /dev/null +++ b/include/yangutil/sys/YangSsl.h @@ -0,0 +1,32 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_SYS_YANGSSL_H_ +#define INCLUDE_YANGUTIL_SYS_YANGSSL_H_ + +#include +#include +#include +typedef struct{ + char fingerprint[128]; + int32_t ecdsa_mode; + X509* dtls_cert; + EVP_PKEY* dtls_pkey; + EC_KEY* eckey; +}YangCertificate; +#ifdef __cplusplus +extern "C"{ +#endif +int32_t hmac_encode(const char* algo, 
const char* key, const int key_length, + const char* input, const int32_t input_length, char* output, unsigned int* output_length); + +int32_t yang_create_certificate(YangCertificate* cer); +void yang_destroy_certificate(YangCertificate* cer); +void g_yang_create_srtp(); +void g_yang_destroy_srtp(); +#ifdef __cplusplus +} +#endif + + +#endif /* INCLUDE_YANGUTIL_SYS_YANGSSL_H_ */ diff --git a/include/yangutil/sys/YangSsrc.h b/include/yangutil/sys/YangSsrc.h new file mode 100755 index 00000000..d86e2e7b --- /dev/null +++ b/include/yangutil/sys/YangSsrc.h @@ -0,0 +1,19 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGWEBRTC_YANGRTCUTIL_H_ +#define YANGWEBRTC_YANGRTCUTIL_H_ +#include +#ifdef __cplusplus +extern "C"{ +#endif + +uint32_t yang_generate_ssrc(); +uint32_t yang_crc32_ieee(const void* buf, int32_t size, uint32_t previous); + +#ifdef __cplusplus +} +#endif + + +#endif /* YANGWEBRTC_YANGRTCUTIL_H_ */ diff --git a/include/yangutil/sys/YangString.h b/include/yangutil/sys/YangString.h new file mode 100755 index 00000000..52d0670a --- /dev/null +++ b/include/yangutil/sys/YangString.h @@ -0,0 +1,22 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_SYS_YANGSTRING_H_ +#define INCLUDE_YANGUTIL_SYS_YANGSTRING_H_ +#include +#include +#include +#include +#include +using namespace std; + +vector yang_split(string s, char ch); +vector yang_split_first(string s, char ch); +std::vector yang_splits(const std::string& str, const std::string& delim); +std::string yang_int2str(int64_t value); +std::string yang_random_str(int32_t len); +void yang_replace(std::string& strBig, const std::string& strsrc, const std::string& strdst); +void skip_first_spaces(std::string& str); +std::string yang_read_string(YangBuffer* buf,int32_t len); +void yang_write_string(YangBuffer* buf,std::string value); +#endif /* INCLUDE_YANGUTIL_SYS_YANGSTRING_H_ */ diff --git a/include/yangutil/sys/YangSysMessageHandle.h b/include/yangutil/sys/YangSysMessageHandle.h new file mode 100755 index 00000000..b5557a61 --- /dev/null +++ b/include/yangutil/sys/YangSysMessageHandle.h @@ -0,0 +1,42 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_SYS_YANGSYSMESSAGEHANDLE_H_ +#define INCLUDE_YANGUTIL_SYS_YANGSYSMESSAGEHANDLE_H_ +#include +#include +#include + + +using namespace std; + +class YangSysMessageHandle :public YangThread{ +public: + YangSysMessageHandle(); + virtual ~YangSysMessageHandle(); + virtual void handleMessage(YangSysMessage* mss)=0; + virtual void initAll()=0; + virtual void deleteAll()=0; + void putMessage(YangSysMessageI *handle,int32_t pst, int32_t puid, int32_t handleState,void* user=NULL); + // void putMessage(YangSysMessageI *handle,int32_t pst, int32_t puid, int32_t handleState,void* user); + static YangSysMessageHandle* m_instance; + int32_t m_isStart; + int32_t m_loop; + void stop(); +protected: + void run(); + void startLoop(); + void stopLoop(); + +private: + + vector m_sysMessages; + pthread_mutex_t m_lock; + pthread_cond_t m_cond_mess; + + int32_t m_waitState; + YangSysMessageHandleI* m_receive; + +}; + +#endif /* INCLUDE_YANGUTIL_SYS_YANGSYSMESSAGEHANDLE_H_ */ diff --git a/include/yangutil/sys/YangSysMessageI.h b/include/yangutil/sys/YangSysMessageI.h new file mode 100755 index 00000000..1dbd0460 --- /dev/null +++ b/include/yangutil/sys/YangSysMessageI.h @@ -0,0 +1,32 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_SYS_YANGMESSAGEI_H_ +#define INCLUDE_YANGUTIL_SYS_YANGMESSAGEI_H_ +#include +#include + +class 
YangSysMessageI { +public: + YangSysMessageI(){}; + virtual ~YangSysMessageI(){}; + virtual void success()=0; + virtual void failure(int32_t errcode)=0; +}; +struct YangSysMessage { + int32_t uid; + int32_t messageId; + int32_t handleState; + YangSysMessageI *handle; + void* user; +}; +class YangSysMessageHandleI { +public: + YangSysMessageHandleI() {}; + virtual ~YangSysMessageHandleI() {}; + virtual void receiveSysMessage(YangSysMessage *psm, int32_t phandleRet)=0; +}; +void yang_post_message(int32_t st, int32_t uid,YangSysMessageI *mhandle,void* user=NULL); +//void yang_post_userMessage(int32_t st, int32_t uid,YangSysMessageI *mhandle,void* user); +void yang_post_state_message(int32_t st, int32_t uid, int32_t handleState,YangSysMessageI *mhandle); +#endif /* INCLUDE_YANGUTIL_SYS_YANGMESSAGEI_H_ */ diff --git a/include/yangutil/sys/YangThread.h b/include/yangutil/sys/YangThread.h new file mode 100755 index 00000000..ebd0310e --- /dev/null +++ b/include/yangutil/sys/YangThread.h @@ -0,0 +1,34 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangThread_H__ +#define YangThread_H__ +#include +#include +#include + +#define yang_stop_thread(x) if(x){while(x->m_isStart) yang_usleep(1000);} +#define yang_stop(x) if(x&&x->m_isStart){x->stop();} +class YangThread { +public: + YangThread(); + virtual ~YangThread(); + + int32_t start(); + void* join(); + int32_t detach(); + int32_t equals(YangThread *t); + void exitThread(void *value_ptr); + int32_t cancel(); + pthread_t getThread(); + virtual void stop()=0; +protected: + virtual void run() = 0; + +private: + static void* go(void *obj); + pthread_t m_thread; + +}; + +#endif diff --git a/include/yangutil/sys/YangTime.h b/include/yangutil/sys/YangTime.h new file mode 100755 index 00000000..b2d7a3f5 --- /dev/null +++ b/include/yangutil/sys/YangTime.h @@ -0,0 +1,37 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANG_TIME_H__ +#define YANG_TIME_H__ + + + + +#include +#ifdef __cplusplus +extern "C"{ +#include +} +#else +#include +#endif + +#define yang_get_system_time yang_get_system_micro_time +#define yang_update_system_time yang_get_system_micro_time +#ifdef __cplusplus + + +class YangWallClock +{ +public: + YangWallClock(); + virtual ~YangWallClock(); +public: + /** + * Current time in get_system_milli_time. 
+ */ + virtual int64_t now(); +}; + +#endif +#endif diff --git a/include/yangutil/sys/YangTimer.h b/include/yangutil/sys/YangTimer.h new file mode 100755 index 00000000..9b9d1155 --- /dev/null +++ b/include/yangutil/sys/YangTimer.h @@ -0,0 +1,78 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef SRC_YANGUTIL_SRC_YANGTIMER_H_ +#define SRC_YANGUTIL_SRC_YANGTIMER_H_ +#include +#include + + +#define Yang_Using_Phtread 1 + + +#if !Yang_Using_Phtread +#ifdef _WIN32 +#include +#endif +#endif + +//#include +class YangTimerTask { +public: + YangTimerTask() {}; + virtual ~YangTimerTask() {}; + virtual void doTask(int32_t taskId)=0; +}; + + +class YangTimer: public YangThread { +public: + YangTimer(); + virtual ~YangTimer(); + int32_t m_isStart; + int32_t m_loop; + int32_t m_waitState; + int32_t m_waitTime; + + void setTask(YangTimerTask *ptask); + void setTimelen(int32_t ptimelen);//hao miao + + + void setTaskId(int32_t ptaskId); + void stop(); + +#if !Yang_Using_Phtread +#ifdef _WIN32 + static void CALLBACK TimeEvent(PVOID lpParam, BOOLEAN TimerOrWaitFired); +#endif +#endif + +protected: + void run(); + void startLoop(); + void stopLoop(); +private: + YangTimerTask *m_task; + int32_t m_taskId; + // std::condition_variable_any m_threadCon; + // std::mutex m_ThreadLock; + + +#if Yang_Using_Phtread + pthread_mutex_t m_lock; + pthread_cond_t m_cond_mess; +#else +#ifdef _WIN32 + void startWindowsEventTime(int pwaitTime,DWORD_PTR duser); + HANDLE m_hTimerQueue; + HANDLE m_hTimerQueueTimer; + HANDLE m_winEvent; +#else + int m_timerfd; + int m_efd; +#endif +#endif + +}; + +#endif /* SRC_YANGUTIL_SRC_YANGTIMER_H_ */ diff --git a/include/yangutil/sys/YangUrl.h b/include/yangutil/sys/YangUrl.h new file mode 100755 index 00000000..17dd5137 --- /dev/null +++ b/include/yangutil/sys/YangUrl.h @@ -0,0 +1,14 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_SYS_YANGURL_H_ +#define INCLUDE_YANGUTIL_SYS_YANGURL_H_ +#ifdef __cplusplus +extern "C"{ +#include +} +#else +#include +#endif + +#endif /* INCLUDE_YANGUTIL_SYS_YANGURL_H_ */ diff --git a/include/yangutil/sys/YangVector.h b/include/yangutil/sys/YangVector.h new file mode 100755 index 00000000..769ef58f --- /dev/null +++ b/include/yangutil/sys/YangVector.h @@ -0,0 +1,76 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGUTIL_SYS_YANGVECTOR_H_ +#define INCLUDE_YANGUTIL_SYS_YANGVECTOR_H_ + +#include +#include +typedef struct{ + int32_t capacity; + int32_t vsize; + char** payload; +}YangStringVector; + + + + +void yang_insert_stringVector(YangStringVector* vec,char* str); +void yang_insert_stringVector2(YangStringVector* vec,char* str,int plen); +void yang_create_stringVector(YangStringVector* vec); +void yang_destroy_stringVector(YangStringVector* vec); +void yang_clear_stringVector(YangStringVector* vec); + +#define yang_vector_declare(x) \ +typedef struct{ \ + int32_t capacity; \ + int32_t vsize; \ + x* payload; \ +}x##Vector; \ +void yang_create_##x##Vector(x##Vector* vec);\ +void yang_destroy_##x##Vector(x##Vector* vec);\ +void yang_clear_##x##Vector(x##Vector* vec);\ +void yang_insert_##x##Vector(x##Vector* vec,x* value);\ +void yang_remove_##x##Vector(x##Vector* vec,int32_t index);\ + + +#define yang_vector_impl(x) \ +void yang_insert_##x##Vector(x##Vector* vec,x* value){ \ + if(vec==NULL) return;\ + if(vec->vsize>=vec->capacity){\ + x* tmp=(x*)calloc(sizeof(x)*(vec->capacity+5),1);\ + memcpy(tmp,vec->payload,sizeof(x)*vec->vsize);\ + yang_free(vec->payload);\ + vec->payload=tmp;\ + vec->capacity+=5;\ 
+ }\
+ if(value)\
+ memcpy(&vec->payload[vec->vsize++],value,sizeof(x));\
+ else\
+ memset(&vec->payload[vec->vsize++],0,sizeof(x));\
+}\
+void yang_create_##x##Vector(x##Vector* vec){\
+ vec->capacity=5;\
+ vec->payload=(x*)calloc(vec->capacity*sizeof(x),1);\
+ vec->vsize=0;\
+}\
+void yang_destroy_##x##Vector(x##Vector* vec){\
+ vec->vsize=0;\
+ vec->capacity=0;\
+ yang_free(vec->payload);\
+}\
+void yang_clear_##x##Vector(x##Vector* vec){\
+ memset(vec->payload,0,vec->capacity*sizeof(x));\
+ vec->vsize=0;\
+}\
+void yang_remove_##x##Vector(x##Vector* vec,int32_t index){\
+ if(vec==NULL||vec->vsize==0||index>=vec->vsize) return;\
+ if(vec->vsize==1) {yang_clear_##x##Vector(vec);return;}\
+ memmove(vec->payload+index,vec->payload+(index+1),sizeof(x)*(vec->vsize-index-1));/* payload is x*, so pointer arithmetic already scales by sizeof(x) */\
+ vec->vsize--;\
+}\
+
+
+
+#endif /* INCLUDE_YANGUTIL_SYS_YANGVECTOR_H_ */
diff --git a/include/yangutil/sys/YangWebsocket.h b/include/yangutil/sys/YangWebsocket.h
new file mode 100755
index 00000000..f28e78d2
--- /dev/null
+++ b/include/yangutil/sys/YangWebsocket.h
@@ -0,0 +1,33 @@
+#ifndef YANGRTP_YANGWEBSOCKET_H_
+#define YANGRTP_YANGWEBSOCKET_H_
+#include
+#include
+#include
+
+
+typedef struct YangWebsocketData{
+ uint8_t payload[1024];
+ int nb;
+}YangWebsocketData;
+
+typedef struct YangWebsocketCallback{
+ void* context;
+ int (*receive)(void* context,YangSample* data);
+}YangWebsocketCallback;
+
+typedef struct{
+ void* context;
+ int32_t (*connectServer)(void* context,char* url);
+ int32_t (*disconnectServer)(void* context);
+ int32_t (*sendData)(void* context,uint8_t* p,int nb);
+}YangWebsocket;
+#ifdef __cplusplus
+extern "C"{
+#endif
+void yang_create_websocket(YangWebsocket* web,YangWebsocketCallback* callback);
+void yang_destroy_websocket(YangWebsocket* web);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* YANGRTP_YANGWEBSOCKET_H_ */
diff --git a/include/yangutil/sys/YangWindowsMouse.h b/include/yangutil/sys/YangWindowsMouse.h
new file mode 100755
index 00000000..bb282f3e
--- /dev/null
+++ b/include/yangutil/sys/YangWindowsMouse.h
@@ -0,0 +1,58 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef INCLUDE_YANGUTIL_YangWindowsMouse_
+#define INCLUDE_YANGUTIL_YangWindowsMouse_
+#ifdef _WIN32
+#include
+#endif
+#include
+#include
+using namespace std;
+struct YangScreenKeyEvent{
+ uint8_t key;
+ int x;
+ int y;
+ int wheel;
+ string direction;
+ string event;
+};
+
+class YangWindowsMouse
+{
+public:
+ YangWindowsMouse();
+ ~YangWindowsMouse();
+
+public:
+ void moveTo(int x, int y);
+ void relativeMove(int cx, int cy);
+ void setPos();
+ void restorePos();
+
+ void lockMouse();
+ void unlockMouse();
+
+ void leftBClick();
+ void leftbDClick();
+ void leftBDown();
+ void leftBUp();
+
+ void middleBClick();
+ void middleBDbClick();
+ void middleBDown();
+ void middleBUp();
+ void middleBRoll(int px,int py,int ch);
+
+
+ void rightBClick();
+ void rightBDbClick();
+ void rightBDown();
+ void rightBUp();
+private:
+ #ifdef _WIN32
+ POINT p;
+#endif
+
+};
+#endif
diff --git a/include/yangutil/yang_unistd.h b/include/yangutil/yang_unistd.h
new file mode 100755
index 00000000..c65616c7
--- /dev/null
+++ b/include/yangutil/yang_unistd.h
@@ -0,0 +1,22 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef INCLUDE_YANGUTIL_YANG_UNISTD_H_
+#define INCLUDE_YANGUTIL_YANG_UNISTD_H_
+
+#ifdef _MSC_VER
+#include
+#define yang_usleep(x) Sleep(x>1000?x/1000:1)
+//void yang_usleep(int ns){
+ // Sleep(ns>1000?ns/1000:1);
+//}
+#define yang_sleep(x) Sleep(1000*x)
+#else
+
+
+#include
+#define
yang_usleep usleep +#define yang_sleep sleep +#endif + +#endif /* INCLUDE_YANGUTIL_YANG_UNISTD_H_ */ diff --git a/include/yangutil/yangavctype.h b/include/yangutil/yangavctype.h new file mode 100755 index 00000000..162cebf2 --- /dev/null +++ b/include/yangutil/yangavctype.h @@ -0,0 +1,78 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGUTIL_YANGAVCTYPE_H_ +#define INCLUDE_YANGUTIL_YANGAVCTYPE_H_ +#include +#define YANG_Frametype_Spspps 9 +#define YANG_Frametype_I 1 +#define YANG_Frametype_P 0 +typedef enum { + Yang_Stream_Play, Yang_Stream_Publish, Yang_Stream_Both +}YangStreamOptType; +typedef enum YangAudioCodec{ + Yang_AED_AAC, + Yang_AED_MP3, + Yang_AED_SPEEX, + Yang_AED_OPUS +}YangAudioCodec; +typedef enum YangVideoCodec{ + Yang_VED_264, + Yang_VED_265, + Yang_VED_AV1, + Yang_VED_VP9 + +}YangVideoCodec; + +typedef struct { + enum YangAudioCodec encode; + int32_t sample; + int32_t channel; + int32_t audioClock; +}YangAudioParam; + +typedef struct { + enum YangVideoCodec encode; + int32_t videoClock; +}YangVideoParam; +typedef struct { + char serverIp[30]; + char localIp[20]; + char app[20]; + char stream[20]; + char url[50]; + int32_t localPort; + int32_t serverPort; + int32_t uid; + YangStreamOptType streamOptType; +}YangStreamConfig; +typedef struct{ + int32_t mediaType; + int32_t uid; + int32_t frametype; + int32_t nb; + int64_t pts; + int64_t dts; + uint8_t* payload; +}YangFrame; + +typedef enum YangRequestType { + Yang_Req_Sendkeyframe, Yang_Req_Connected, Yang_Req_Disconnected +}YangRequestType; + +typedef enum YangRtcMessageType{ + YangRTC_Decoder_Input +}YangRtcMessageType; +typedef struct YangRtcMessageNotify{ + void* context; + int (*notify)(void* context,int puid,YangRtcMessageType mess); +}YangRtcMessageNotify; +typedef struct{ + void* context; + void (*setRtcMessageNotify)(void* context,int puid,YangRtcMessageNotify *rtcmsg); + void (*setMediaConfig)(void* context,int32_t puid,YangAudioParam* audio,YangVideoParam* video); + void (*sendRequest)(void* context,int32_t puid,uint32_t ssrc,YangRequestType req); +}YangContextStream; + +#endif /* INCLUDE_YANGUTIL_YANGAVCTYPE_H_ */ diff --git a/include/yangutil/yangavctype_H.h b/include/yangutil/yangavctype_H.h new file mode 100755 index 00000000..4d7df47f --- /dev/null +++ b/include/yangutil/yangavctype_H.h @@ -0,0 +1,14 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef INCLUDE_YANGUTIL_YANGAVCTYPE_H_H_ +#define INCLUDE_YANGUTIL_YANGAVCTYPE_H_H_ + +void yang_frame_copy(YangFrame* src, YangFrame* dst); +void yang_frame_copy_buffer(YangFrame* src, YangFrame* dst); +void yang_frame_copy_nobuffer(YangFrame* src, YangFrame* dst); + + + +#endif /* INCLUDE_YANGUTIL_YANGAVCTYPE_H_H_ */ diff --git a/include/yangutil/yangavinfotype.h b/include/yangutil/yangavinfotype.h new file mode 100755 index 00000000..eebc02b7 --- /dev/null +++ b/include/yangutil/yangavinfotype.h @@ -0,0 +1,152 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGUTIL_YANGAVINFOTYPE_H_ +#define YANGUTIL_YANGAVINFOTYPE_H_ +#include +#include + +//#include + +/** + struct YangEncoderParam { + int32_t width; + int32_t height; + int32_t fps; + int32_t bitrate; + int32_t keyint_max; + int32_t level_idc; + int32_t profile; + }; + **/ +typedef struct YangAudioInfo { + int32_t sample; + int32_t frameSize; + int32_t bitrate; + int32_t channel; + + int32_t usingMono; + int32_t hasAec; + int32_t echoPath; + + int32_t aecBufferFrames; + int32_t audioCacheSize; + int32_t audioCacheNum; + int32_t audioPlayCacheNum; + + int32_t 
hasAudioHeader; + int32_t audioEncoderType; + int audioDecoderType; + int32_t audioPlayType; + + int32_t aIndex; + int32_t aSubIndex; +}YangAudioInfo; + +typedef struct YangVideoInfo { + int32_t width; //= 800 + int32_t height; //= 600 + int32_t outWidth; + int32_t outHeight; + int32_t rate; // 512 + int32_t frame; //25 + //int32_t bitcount; // 16 + int32_t bitDepth; + + int32_t videoCacheNum; + int32_t evideoCacheNum; + int32_t videoPlayCacheNum; + + enum YangYuvType videoCaptureFormat; + enum YangYuvType videoEncoderFormat; + enum YangYuvType videoDecoderFormat; + + int32_t videoEncoderType; + int32_t videoDecoderType; + int32_t videoEncHwType; + int32_t videoDecHwType; + int32_t vIndex; +}YangVideoInfo; +typedef struct YangVideoEncInfo { + int32_t preset; + int32_t level_idc; + int32_t profile; + int32_t keyint_max; + int32_t enc_threads; + int32_t gop; + bool createMeta; + + +}YangVideoEncInfo; +typedef struct YangSysInfo { + int32_t isMultCamera; + int32_t transType; + int32_t usingDataServer; + int32_t rtmpPort; + int32_t srtPort; + int32_t rtcPort; + int32_t rtcLocalPort; + int32_t httpPort; + int32_t dataPort; + int32_t hasLogFile; + int32_t logLevel; + int32_t cameraCount; + + char cameraIndexs[50]; + char rtmpServerIP[20]; + char srtServerIP[20]; + char rtcServerIP[20]; + char httpServerIP[20]; + char dataServerIP[20]; +}YangSysInfo; + +typedef struct YangRtcInfo { + int32_t sendTwcc; + int32_t mixAvqueue; + int32_t audioQueueCount; + int32_t videoQueueCount; + +}YangRtcInfo; + +typedef struct YangAVInfo{ + YangAudioInfo audio; + YangVideoInfo video; + YangVideoEncInfo enc; + YangSysInfo sys; + YangRtcInfo rtc; +}YangAVInfo; + + + +#ifdef __cplusplus +#include +class YangContext { +public: + YangContext(); + virtual ~YangContext(); + void init(char *filename); + void init(); + //YangCertificate* getCertificate(); + virtual void initExt(void *filename); + virtual void initExt(); +public: + YangAVInfo avinfo; + YangContextStream stream; +#if Yang_HaveVr + char bgFilename[256]; +#endif + + YangStreamManager streams; +private: + //YangCertificate* m_certificate; + +}; +extern "C"{ +void yang_init_avinfo(YangAVInfo* avinfo); +} +#else +void yang_init_avinfo(YangAVInfo* avinfo); +#endif +//void yang_init_context(YangContext* context,char* filename); +#endif /* YANGUTIL_YANGTYPE_H_ */ diff --git a/include/yangutil/yangavtype.h b/include/yangutil/yangavtype.h new file mode 100755 index 00000000..bf418456 --- /dev/null +++ b/include/yangutil/yangavtype.h @@ -0,0 +1,214 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_YANGAVTYPE_H_ +#define INCLUDE_YANGUTIL_YANGAVTYPE_H_ +#include +#include + + // 8 = audio +#define YangFrameTypeAudio 8 + // 9 = video +#define YangFrameTypeVideo 9 + // 18 = script data +#define kNalTypeMask 0x1F +enum YangYuvType{ + YangYuy2, + YangI420, + YangNv12, + YangYv12, + YangRgb, + YangArgb, + YangBgra, + YangP010, + YangP016 +}; + +/** + * the level for avc/h.264. + * @see Annex A Profiles and levels, ISO_IEC_14496-10-AVC-2003.pdf, page 207. + */ +enum YangAvcLevel +{ + YangAvcLevelReserved = 0, + + YangAvcLevel_1 = 10, + YangAvcLevel_11 = 11, + YangAvcLevel_12 = 12, + YangAvcLevel_13 = 13, + YangAvcLevel_2 = 20, + YangAvcLevel_21 = 21, + YangAvcLevel_22 = 22, + YangAvcLevel_3 = 30, + YangAvcLevel_31 = 31, + YangAvcLevel_32 = 32, + YangAvcLevel_4 = 40, + YangAvcLevel_41 = 41, + YangAvcLevel_5 = 50, + YangAvcLevel_51 = 51, +}; + +/** + * Table 7-6 – Name association to slice_type + * ISO_IEC_14496-10-AVC-2012.pdf, page 105. 
+ */ +enum YangAvcSliceType +{ + YangAvcSliceTypeP = 0, + YangAvcSliceTypeB = 1, + YangAvcSliceTypeI = 2, + YangAvcSliceTypeSP = 3, + YangAvcSliceTypeSI = 4, + YangAvcSliceTypeP1 = 5, + YangAvcSliceTypeB1 = 6, + YangAvcSliceTypeI1 = 7, + YangAvcSliceTypeSP1 = 8, + YangAvcSliceTypeSI1 = 9, +}; + + +typedef enum +{ + // Unspecified + YangAvcNaluTypeReserved = 0, + YangAvcNaluTypeForbidden = 0, + + // Coded slice of a non-IDR picture slice_layer_without_partitioning_rbsp( ) + YangAvcNaluTypeNonIDR = 1, + // Coded slice data partition A slice_data_partition_a_layer_rbsp( ) + YangAvcNaluTypeDataPartitionA = 2, + // Coded slice data partition B slice_data_partition_b_layer_rbsp( ) + YangAvcNaluTypeDataPartitionB = 3, + // Coded slice data partition C slice_data_partition_c_layer_rbsp( ) + YangAvcNaluTypeDataPartitionC = 4, + // Coded slice of an IDR picture slice_layer_without_partitioning_rbsp( ) + YangAvcNaluTypeIDR = 5, + // Supplemental enhancement information (SEI) sei_rbsp( ) + YangAvcNaluTypeSEI = 6, + // Sequence parameter set seq_parameter_set_rbsp( ) + YangAvcNaluTypeSPS = 7, + // Picture parameter set pic_parameter_set_rbsp( ) + YangAvcNaluTypePPS = 8, + // Access unit delimiter access_unit_delimiter_rbsp( ) + YangAvcNaluTypeAccessUnitDelimiter = 9, + // End of sequence end_of_seq_rbsp( ) + YangAvcNaluTypeEOSequence = 10, + // End of stream end_of_stream_rbsp( ) + YangAvcNaluTypeEOStream = 11, + // Filler data filler_data_rbsp( ) + YangAvcNaluTypeFilterData = 12, + // Sequence parameter set extension seq_parameter_set_extension_rbsp( ) + YangAvcNaluTypeSPSExt = 13, + // Prefix NAL unit prefix_nal_unit_rbsp( ) + YangAvcNaluTypePrefixNALU = 14, + // Subset sequence parameter set subset_seq_parameter_set_rbsp( ) + YangAvcNaluTypeSubsetSPS = 15, + // Coded slice of an auxiliary coded picture without partitioning slice_layer_without_partitioning_rbsp( ) + YangAvcNaluTypeLayerWithoutPartition = 19, + // Coded slice extension slice_layer_extension_rbsp( ) + YangAvcNaluTypeCodedSliceExt = 20, +}YangAvcNaluType; + +enum YangAvcProfile +{ + YangAvcProfileReserved = 0, + + // @see ffmpeg, libavcodec/avcodec.h:2713 + YangAvcProfileBaseline = 66, + // FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag + // FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED) + YangAvcProfileConstrainedBaseline = 578, + YangAvcProfileMain = 77, + YangAvcProfileExtended = 88, + YangAvcProfileHigh = 100, + YangAvcProfileHigh10 = 110, + YangAvcProfileHigh10Intra = 2158, + YangAvcProfileHigh422 = 122, + YangAvcProfileHigh422Intra = 2170, + YangAvcProfileHigh444 = 144, + YangAvcProfileHigh444Predictive = 244, + YangAvcProfileHigh444Intra = 2192, +}; +struct YangRect{ + short x; + short y; + short w; + short h; +}; +struct YangColor{ + uint8_t r; + uint8_t g; + uint8_t b; +}; +typedef struct { + int32_t vpsLen; + int32_t spsLen; + int32_t ppsLen; + uint8_t vps[128]; + uint8_t sps[128]; + uint8_t pps[64]; +}YangH2645Conf; +typedef struct { + uint8_t buffer[128]; + int32_t bufLen; +}YangRtmpMeta; +typedef struct { + int32_t isInit; + YangH2645Conf mp4Meta; + //YangRtmpMeta flvMeta; + YangRtmpMeta livingMeta; +}YangVideoMeta; + + +enum YangVideoHwType{ + Yang_Hw_Soft, + YangV_Hw_Intel, + YangV_Hw_Nvdia, + YangV_Hw_Android +}; + + + +//void yang_frame_init(YangFrame* frame); + +struct YangMessage{ + int32_t mediaType; + int32_t nb; + int64_t timestamp; + + char* payload; + + +}; +typedef struct { + int32_t nb; + char* bytes; +}YangSample; + YangSample* yang_sample_copy(YangSample* src); + + + 
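+/* Usage sketch (illustrative): the H.264 NALU type is carried in the low
+ * five bits of the first NALU byte, which is what kNalTypeMask extracts:
+ *
+ *   YangAvcNaluType t = (YangAvcNaluType)(nalu[0] & kNalTypeMask);
+ *   if (t == YangAvcNaluTypeIDR) { ... handle keyframe ... }
+ */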
+#include +#ifdef __cplusplus +extern "C"{ +#include +} +#else +#include +#endif +#ifdef __cplusplus +class YangBufferManager{ +public: + YangBufferManager(); + YangBufferManager(int32_t num,int32_t bufsize); + virtual ~YangBufferManager(); + void init(int32_t num,int32_t bufsize); + uint8_t* getBuffer(); +private: + uint8_t* m_cache; + int32_t m_curindex; + int32_t m_unitsize; + int32_t m_size; +}; +#endif +#endif /* INCLUDE_YANGUTIL_YANGAVTYPE_H_ */ diff --git a/include/yangutil/yangavtype_h265.h b/include/yangutil/yangavtype_h265.h new file mode 100755 index 00000000..138aa247 --- /dev/null +++ b/include/yangutil/yangavtype_h265.h @@ -0,0 +1,72 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_YANGAVTYPE_H265_H_ +#define INCLUDE_YANGUTIL_YANGAVTYPE_H265_H_ + +typedef enum YangHevcNaluType +{ + YANG_NAL_UNIT_CODED_SLICE_TRAIL_N = 0, + YANG_NAL_UNIT_CODED_SLICE_TRAIL_R, //1 + YANG_NAL_UNIT_CODED_SLICE_TSA_N, //2 + YANG_NAL_UNIT_CODED_SLICE_TLA, //3 + YANG_NAL_UNIT_CODED_SLICE_STSA_N, //4 + YANG_NAL_UNIT_CODED_SLICE_STSA_R, //5 + YANG_NAL_UNIT_CODED_SLICE_RADL_N, //6 + YANG_NAL_UNIT_CODED_SLICE_DLP, //7 + YANG_NAL_UNIT_CODED_SLICE_RASL_N, //8 + YANG_NAL_UNIT_CODED_SLICE_TFD, //9 + YANG_NAL_UNIT_RESERVED_10, + YANG_NAL_UNIT_RESERVED_11, + YANG_NAL_UNIT_RESERVED_12, + YANG_NAL_UNIT_RESERVED_13, + YANG_NAL_UNIT_RESERVED_14, + YANG_NAL_UNIT_RESERVED_15, + YANG_NAL_UNIT_CODED_SLICE_BLA, //16 + YANG_NAL_UNIT_CODED_SLICE_BLANT, //17 + YANG_NAL_UNIT_CODED_SLICE_BLA_N_LP, //18 + YANG_NAL_UNIT_CODED_SLICE_IDR, //19 + YANG_NAL_UNIT_CODED_SLICE_IDR_N_LP, //20 + YANG_NAL_UNIT_CODED_SLICE_CRA, //21 + YANG_NAL_UNIT_VPS=32, //32 + YANG_NAL_UNIT_SPS, // 33 + YANG_NAL_UNIT_PPS, //34 + YANG_NAL_UNIT_ACCESS_UNIT_DELIMITER, //35 + YANG_NAL_UNIT_EOS, //36 + YANG_NAL_UNIT_EOB, //37 + YANG_NAL_UNIT_FILLER_DATA, //38 + YANG_NAL_UNIT_SEI , //39Prefix SEI + YANG_NAL_UNIT_SEI_SUFFIX, //40Suffix SEI + YANG_NAL_UNIT_RESERVED_41, + YANG_NAL_UNIT_RESERVED_42, + YANG_NAL_UNIT_RESERVED_43, + YANG_NAL_UNIT_RESERVED_44, + YANG_NAL_UNIT_RESERVED_45, + YANG_NAL_UNIT_RESERVED_46, + YANG_NAL_UNIT_RESERVED_47, + YANG_NAL_UNIT_UNSPECIFIED_48, + YANG_NAL_UNIT_UNSPECIFIED_49, + YANG_NAL_UNIT_UNSPECIFIED_50, + YANG_NAL_UNIT_UNSPECIFIED_51, + YANG_NAL_UNIT_UNSPECIFIED_52, + YANG_NAL_UNIT_UNSPECIFIED_53, + YANG_NAL_UNIT_UNSPECIFIED_54, + YANG_NAL_UNIT_UNSPECIFIED_55, + YANG_NAL_UNIT_UNSPECIFIED_56, + YANG_NAL_UNIT_UNSPECIFIED_57, + YANG_NAL_UNIT_UNSPECIFIED_58, + YANG_NAL_UNIT_UNSPECIFIED_59, + YANG_NAL_UNIT_UNSPECIFIED_60, + YANG_NAL_UNIT_UNSPECIFIED_61, + YANG_NAL_UNIT_UNSPECIFIED_62, + YANG_NAL_UNIT_UNSPECIFIED_63, + YANG_NAL_UNIT_INVALID, +}YangHevcNaluType; + +//for nalu data first byte +#define YANG_HEVC_NALU_TYPE(code) (YangHevcNaluType)((code & 0x7E)>>1) +#define H265_kFuA 49 +#define H265_kStapA 48 + + +#endif /* INCLUDE_YANGUTIL_YANGAVTYPE_H265_H_ */ diff --git a/include/yangutil/yangtype.h b/include/yangutil/yangtype.h new file mode 100755 index 00000000..d7c08e09 --- /dev/null +++ b/include/yangutil/yangtype.h @@ -0,0 +1,61 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef INCLUDE_YANGUTIL_YANGTYPE_H_ +#define INCLUDE_YANGUTIL_YANGTYPE_H_ +#include +#include +#include +#include +#include "YangErrorCode.h" +#include "../Yang_Config.h" + + +#define yang_delete(a) {if( (a)) {delete (a); (a) = NULL;}} +#define yang_deleteA(a) {if( (a)) {delete[] (a); (a) = NULL;}} +#define yang_free(a) {if( (a)) {free((a)); (a) = NULL;}} +#define yang_min(a, b) (((a) < (b))? 
(a) : (b))
+#define yang_max(a, b)	(((a) < (b))? (b) : (a))
+
+#define Yang_Rtmp 0
+#define Yang_Srt 1
+#define Yang_Webrtc 2
+
+#define YANG_VIDEO_ENCODE_BUFFER_LEN (1024 * 1024)
+#ifdef __cplusplus
+#define YangAutoFree(className, instance) \
+impl_YangAutoFree<className> _auto_free_##instance(&instance, false)
+#define YangAutoFreeA(className, instance) \
+impl_YangAutoFree<className> _auto_free_array_##instance(&instance, true)
+template<typename T>
+class impl_YangAutoFree
+{
+private:
+	T** ptr;
+	bool is_array;
+public:
+	impl_YangAutoFree(T** p, bool array) {
+		ptr = p;
+		is_array = array;
+	}
+
+	virtual ~impl_YangAutoFree() {
+		if (ptr == NULL || *ptr == NULL) {
+			return;
+		}
+
+		if (is_array) {
+			delete[] *ptr;
+		} else {
+			delete *ptr;
+		}
+
+		*ptr = NULL;
+	}
+};
+#else
+#define bool int32_t
+#define true 1
+#define false 0
+#endif
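+/* Usage sketch (illustrative) for the scope guards above: the guard frees
+ * the owned pointer when the enclosing scope exits, then nulls it:
+ *
+ *   uint8_t* buf = new uint8_t[YANG_VIDEO_ENCODE_BUFFER_LEN];
+ *   YangAutoFreeA(uint8_t, buf);  // impl_YangAutoFree<uint8_t> guard, delete[] on exit
+ *   // ... use buf freely; no manual delete[] needed
+ */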
+#endif /* INCLUDE_YANGUTIL_YANGTYPE_H_ */
diff --git a/include/yangwebrtc/YangAVContext.h b/include/yangwebrtc/YangAVContext.h
new file mode 100755
index 00000000..8949bef3
--- /dev/null
+++ b/include/yangwebrtc/YangAVContext.h
@@ -0,0 +1,23 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+
+#ifndef INCLUDE_YANGWEBRTC_YANGAVCONTEXT_H_
+#define INCLUDE_YANGWEBRTC_YANGAVCONTEXT_H_
+#include
+#include
+#include
+typedef struct{
+	void (*receiveAudio)(YangFrame *audioFrame, void *user);
+	void (*receiveVideo)(YangFrame *videoFrame, void *user);
+} YangRtcReceiveCallback;
+
+typedef struct{
+	YangAVInfo *avinfo;
+	void (*sendRequest)(int32_t puid, uint32_t ssrc, YangRequestType req,void* user);
+	void (*setPlayMediaConfig)( YangAudioParam *remote_audio,YangVideoParam *remote_video,void* user);
+	YangRtcReceiveCallback recvcb;
+} YangAVContext;
+
+
+#endif /* INCLUDE_YANGWEBRTC_YANGAVCONTEXT_H_ */
diff --git a/include/yangwebrtc/YangMetaConnection.h b/include/yangwebrtc/YangMetaConnection.h
new file mode 100755
index 00000000..a90af3d4
--- /dev/null
+++ b/include/yangwebrtc/YangMetaConnection.h
@@ -0,0 +1,42 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef INCLUDE_YANGWEBRTC_YANGMETACONNECTION_H_
+#define INCLUDE_YANGWEBRTC_YANGMETACONNECTION_H_
+#include
+typedef struct{
+	void *context;
+	void (*init)(void* context,int32_t sample,int32_t channel,int32_t echopath);
+	void (*closeAec)(void* context);
+
+	void (*echoCapture)(void* context,short *rec, short *out);
+	void (*preprocessRun)(void* context,short *pcm);
+	void (*echoStateReset)(void* context);
+	void (*echoPlayback)(void* context,short *play);
+	void (*echoCancellation)(void* context,const short *rec, const short *play,
+			short *out);
+}YangAec;
+typedef struct{
+	void (*receiveAudio)(YangFrame *audioFrame, void *user);
+	void (*receiveVideo)(YangFrame *videoFrame, void *user);
+	void (*sendRequest)(int32_t puid, uint32_t ssrc, YangRequestType req,void* user);
+	void (*setPlayMediaConfig)( YangAudioParam *remote_audio,YangVideoParam *remote_video,void* user);
+} YangMetaRtcCallback;
+typedef struct {
+	void* context;
+	void (*init)(void* context,YangMetaRtcCallback* callback,void* user);
+	int32_t (*initParam)(void* context,char* url,YangStreamOptType opt);
+	void (*parseHeader)(YangVideoCodec codec,uint8_t *buf, uint8_t *src, int32_t *hLen);
+	int32_t (*connectServer)(void* context);
+	int32_t (*disconnectServer)(void* context);
+	void (*setExtradata)(void* context,YangVideoCodec codec,uint8_t *extradata,int32_t extradata_size);
+	int32_t (*publishAudio)(void* context,YangFrame* audioFrame);
+	int32_t (*publishVideo)(void* context,YangFrame* videoFrame);
+	int32_t (*getState)(void* context);
+	int32_t (*recvvideoNotify)(void* context, YangRtcMessageType mess);
+}YangMetaConnection;
+void yang_create_metaConnection(YangMetaConnection* metaconn);
+void yang_destroy_metaConnection(YangMetaConnection* metaconn);
+void yang_create_aec(YangAec* aec);
+void yang_destroy_aec(YangAec* aec);
+#endif /* INCLUDE_YANGWEBRTC_YANGMETACONNECTION_H_ */
diff --git a/include/yangwebrtc/YangPeerConnection.h b/include/yangwebrtc/YangPeerConnection.h
new file mode 100755
index 00000000..d467e9a2
--- /dev/null
+++ b/include/yangwebrtc/YangPeerConnection.h
@@ -0,0 +1,33 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+
+#ifndef INCLUDE_YANGWEBRTC_YANGPEERCONNECTION_H_
+#define INCLUDE_YANGWEBRTC_YANGPEERCONNECTION_H_
+
+#include
+typedef struct{
+	void* session;
+	YangAVContext* avcontext;
+	YangStreamConfig* streamconfig;
+	void* user;
+}YangPeer;
+typedef struct {
+	YangPeer peer;
+	void (*init)(YangPeer* peer);
+	int32_t (*initParam)(char* url,YangStreamConfig* stream,YangAVContext* context,YangStreamOptType opt);
+	void (*parseHeader)(YangVideoCodec codec,uint8_t *buf, uint8_t *src, int32_t *hLen);
+	int32_t (*connectServer)(YangPeer* peer);
+	int32_t (*disconnectServer)(YangPeer* peer);
+	int32_t (*publishAudio)(YangPeer* peer,YangFrame* audioFrame);
+	int32_t (*publishVideo)(YangPeer* peer,YangFrame* videoFrame);
+	int32_t (*getState)(YangPeer* peer);
+	int32_t (*recvvideo_notify)(YangPeer* peer, YangRtcMessageType mess);
+}YangPeerConnection;
+void yang_create_peerConnection(YangPeerConnection* peerconn);
+void yang_destroy_peerConnection(YangPeerConnection* peerconn);
+
+
+
+#endif /* INCLUDE_YANGWEBRTC_YANGPEERCONNECTION_H_ */
diff --git a/include/yangwebrtc/YangRtcHandle.h b/include/yangwebrtc/YangRtcHandle.h
new file mode 100755
index 00000000..2ae11abb
--- /dev/null
+++ b/include/yangwebrtc/YangRtcHandle.h
@@ -0,0 +1,31 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef INCLUDE_YANGWEBRTC_YANGRTCHANDLE_H_
+#define INCLUDE_YANGWEBRTC_YANGRTCHANDLE_H_
+#include
+#include
+#include
+typedef struct{
+	void* context;
+	void (*init)(void* handle,void* context,YangStreamConfig* pconf);
+
+	int32_t (*connectRtcServer)(void* context);
+	int32_t (*disconnectServer)(void* context);
+	int32_t (*getState)(void* context);
+	int32_t (*publishVideo)(void* context,YangStreamCapture* videoFrame);
+	int32_t (*publishAudio)(void* context,YangStreamCapture* audioFrame);
+	YangReceiveCallback* recvcb;
+	YangStreamConfig *streamconfig;
+	YangContextStream* stream;
+}YangRtcHandle;
+#ifdef __cplusplus
+extern "C"{
+#endif
+void yang_create_rtcstream_handle(YangRtcHandle* handle,YangAVInfo* avinfo,YangContextStream* stream);
+void yang_destroy_rtcstream_handle(YangRtcHandle* handle);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* INCLUDE_YANGWEBRTC_YANGRTCHANDLE_H_ */
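+/* The C interfaces above (meta connection, peer connection, rtc handle)
+ * share a create/use/destroy pattern. A minimal sketch for
+ * YangPeerConnection (illustrative; error handling and callback wiring
+ * elided, call order assumed from the declarations above):
+ *
+ *   YangPeerConnection pc;
+ *   memset(&pc,0,sizeof(pc));
+ *   yang_create_peerConnection(&pc);
+ *   pc.init(&pc.peer);
+ *   if(pc.connectServer(&pc.peer)==Yang_Ok)
+ *       pc.publishVideo(&pc.peer,&videoFrame);
+ *   pc.disconnectServer(&pc.peer);
+ *   yang_destroy_peerConnection(&pc);
+ */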
diff --git a/libmetartc3/CMakeLists.txt b/libmetartc3/CMakeLists.txt
new file mode 100755
index 00000000..643196ad
--- /dev/null
+++ b/libmetartc3/CMakeLists.txt
@@ -0,0 +1,39 @@
+cmake_minimum_required(VERSION 2.8)
+project(libmetartc3)
+
+add_definitions(-D__STDC_FORMAT_MACROS)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu11 -ffunction-sections -fdata-sections")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++11 -ffunction-sections -fdata-sections")
+set(HOME_BASE "../")
+
+# header include directories
+include_directories(${HOME_BASE}/include)
+include_directories(${HOME_BASE}/thirdparty/include)
+include_directories(${HOME_BASE}/thirdparty/user_include)
+include_directories(${HOME_BASE}/thirdparty/user_include/ffmpeg)
+include_directories(${HOME_BASE}/libmetartc3/src)
+
+
+# collect the source files in these directories
+aux_source_directory(./src/yangaudiodev DIR_SRCS)
+aux_source_directory(./src/yangaudiodev/linux DIR_SRCS)
+aux_source_directory(./src/yangutil DIR_SRCS)
+aux_source_directory(./src/yangutil/buffer DIR_SRCS)
+aux_source_directory(./src/yangavutil DIR_SRCS)
+aux_source_directory(./src/yangstream DIR_SRCS)
+aux_source_directory(./src/yangcapture DIR_SRCS)
+aux_source_directory(./src/yangencoder DIR_SRCS)
+aux_source_directory(./src/yangdecoder DIR_SRCS)
+aux_source_directory(./src/yangplayer DIR_SRCS)
+aux_source_directory(./src/yangpush DIR_SRCS)
+aux_source_directory(./src/yangrecord DIR_SRCS)
+aux_source_directory(./src/yangrtmp DIR_SRCS)
+aux_source_directory(./src/yangsrs DIR_SRCS)
+aux_source_directory(./src/yangsrt DIR_SRCS)
+
+include(utils.cmake)
+# files excluded from the build
+
+
+add_library(metartc3 ${DIR_SRCS})
diff --git a/libmetartc3/metartc3.pro b/libmetartc3/metartc3.pro
new file mode 100755
index 00000000..dfc88e57
--- /dev/null
+++ b/libmetartc3/metartc3.pro
@@ -0,0 +1,243 @@
+CONFIG -= qt
+
+TEMPLATE = lib
+CONFIG += staticlib
+
+CONFIG += c++14
+
+# The following define makes your compiler emit warnings if you use
+# any Qt feature that has been marked deprecated (the exact warnings
+# depend on your compiler). Please consult the documentation of the
+# deprecated API in order to know how to port your code away from it.
+DEFINES += QT_DEPRECATED_WARNINGS
+DEFINES += __STDC_FORMAT_MACROS
+HOME_BASE=../
+INCLUDEPATH += $$HOME_BASE/include
+INCLUDEPATH += $$HOME_BASE/thirdparty/include
+INCLUDEPATH += $$HOME_BASE/thirdparty/user_include
+INCLUDEPATH += $$HOME_BASE/thirdparty/user_include/ffmpeg
+INCLUDEPATH += $$HOME_BASE/libmetartc3/src
+
+unix{
+	CONFIG(debug, debug|release) {
+		DESTDIR += $$HOME_BASE/bin/lib_debug
+	}else{
+
+		DESTDIR += $$HOME_BASE/bin/lib_release
+	}
+}
+win32{
+	DEFINES += _AMD64_
+	INCLUDEPATH += $$HOME_BASE\thirdparty\include\win
+	CONFIG(debug, debug|release) {
+		DESTDIR += $$HOME_BASE\bin\lib_win_debug
+	}else{
+		DESTDIR += $$HOME_BASE\bin\lib_win_release
+	}
+
+	msvc{
+		QMAKE_CFLAGS += /utf-8
+		QMAKE_CXXFLAGS += /utf-8
+		# QMAKE_CXXFLAGS += /source-charset:utf-8 /execution-charset:utf-8
+		DEFINES +=HAVE_STRUCT_TIMESPEC
+		DEFINES +=WIN32_LEAN_AND_MEAN
+		INCLUDEPATH += $$HOME_BASE\thirdparty\include\win\include
+	}
+
+}
+# You can also make your code fail to compile if it uses deprecated APIs.
+# In order to do so, uncomment the following line.
+# You can also select to disable deprecated APIs only up to a certain version of Qt.
+#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000 # disables all the APIs deprecated before Qt 6.0.0 + +SOURCES += \ + src/yangaudiodev/YangAudioCapture.cpp \ + src/yangaudiodev/YangAudioCaptureData.cpp \ + src/yangaudiodev/YangAudioCaptureHandle.cpp \ + src/yangaudiodev/YangAudioPlay.cpp \ + src/yangaudiodev/YangAudioRenderData.cpp \ + src/yangaudiodev/linux/YangAlsaDeviceHandle.cpp \ + src/yangaudiodev/linux/YangAlsaHandle.cpp \ + src/yangaudiodev/linux/YangAudioCaptureImpl.cpp \ + src/yangaudiodev/linux/YangAudioPlayAlsa.cpp \ + src/yangaudiodev/win/YangRecAudioCaptureHandle.cpp \ + src/yangaudiodev/win/YangWinAudioApi.cpp \ + src/yangaudiodev/win/YangWinAudioApiAec.cpp \ + src/yangaudiodev/win/YangWinAudioApiCapture.cpp \ + src/yangaudiodev/win/YangWinAudioApiDevice.cpp \ + src/yangaudiodev/win/YangWinAudioApiRender.cpp \ + src/yangaudiodev/win/YangWinAudioCapture.cpp \ + src/yangaudiodev/win/YangWinAudioCaptureHandle.cpp \ + src/yangaudiodev/win/YangWinRecordAudioCapture.cpp \ + src/yangavutil/YangImageConvert.cpp \ + src/yangavutil/YangMakeWave.cpp \ + src/yangavutil/YangPicUtilFfmpeg.cpp \ + src/yangavutil/YangSwResample.cpp \ + src/yangavutil/YangVideoEncoderMeta.cpp \ + src/yangavutil/YangYuvConvert.cpp \ + src/yangcapture/YangAudioDeviceQuery.cpp \ + src/yangcapture/YangCaptureFactory.cpp \ + src/yangcapture/YangDXGIManager.cpp \ + src/yangcapture/YangScreenCaptureImpl.cpp \ + src/yangcapture/YangScreenShare.cpp \ + src/yangcapture/YangVideoCapture.cpp \ + src/yangcapture/YangVideoCaptureHandle.cpp \ + src/yangcapture/YangVideoCaptureImpl.cpp \ + src/yangcapture/YangVideoDeviceQuery.cpp \ + src/yangcapture/win/YangVideoSrc.cpp \ + src/yangcapture/win/YangWinVideoCapture.cpp \ + src/yangcapture/win/YangWinVideoCaptureHandle.cpp \ + src/yangdecoder/YangAudioDecoder.cpp \ + src/yangdecoder/YangAudioDecoderAac.cpp \ + src/yangdecoder/YangAudioDecoderHandle.cpp \ + src/yangdecoder/YangAudioDecoderHandles.cpp \ + src/yangdecoder/YangAudioDecoderOpus.cpp \ + src/yangdecoder/YangAudioDecoderSpeex.cpp \ + src/yangdecoder/YangDecoderFactory.cpp \ + src/yangdecoder/YangH2645VideoDecoderFfmpeg.cpp \ + src/yangdecoder/YangH264DecoderSoft.cpp \ + src/yangdecoder/YangH264Header.cpp \ + src/yangdecoder/YangHeaderParseFfmpeg.cpp \ + src/yangdecoder/YangVideoDecoderHandle.cpp \ + src/yangdecoder/YangVideoDecoderHandles.cpp \ + src/yangdecoder/YangVideoDecoderIntel.cpp \ + src/yangencoder/YangAudioEncoder.cpp \ + src/yangencoder/YangAudioEncoderAac.cpp \ + src/yangencoder/YangAudioEncoderHandle.cpp \ + src/yangencoder/YangAudioEncoderHandleCb.cpp \ + src/yangencoder/YangAudioEncoderMeta.cpp \ + src/yangencoder/YangAudioEncoderMp3.cpp \ + src/yangencoder/YangAudioEncoderOpus.cpp \ + src/yangencoder/YangAudioEncoderSpeex.cpp \ + src/yangencoder/YangEncoderFactory.cpp \ + src/yangencoder/YangFfmpegEncoderMeta.cpp \ + src/yangencoder/YangH264EncHeader.cpp \ + src/yangencoder/YangH264EncoderIntel.cpp \ + src/yangencoder/YangH264EncoderMeta.cpp \ + src/yangencoder/YangH264EncoderSoft.cpp \ + src/yangencoder/YangH265EncoderMeta.cpp \ + src/yangencoder/YangH265EncoderSoft.cpp \ + src/yangencoder/YangVideoEncoder.cpp \ + src/yangencoder/YangVideoEncoderFfmpeg.cpp \ + src/yangencoder/YangVideoEncoderHandle.cpp \ + src/yangplayer/YangPlayFactory.cpp \ + src/yangplayer/YangPlayReceive.cpp \ + src/yangplayer/YangPlayerBase.cpp \ + src/yangplayer/YangPlayerDecoder.cpp \ + src/yangplayer/YangPlayerHandleImpl.cpp \ + src/yangplayer/YangPlayerPlay.cpp \ + src/yangplayer/YangRtcReceive.cpp \ + 
src/yangpush/YangPushCapture.cpp \ + src/yangpush/YangPushEncoder.cpp \ + src/yangpush/YangPushFactory.cpp \ + src/yangpush/YangPushHandleImpl.cpp \ + src/yangpush/YangPushMessageHandle.cpp \ + src/yangpush/YangPushPublish.cpp \ + src/yangpush/YangRtcPublish.cpp \ + src/yangpush/YangSendVideoImpl.cpp \ + src/yangrecord/YangFlvWrite.cpp \ + src/yangrecord/YangMp4File.cpp \ + src/yangrecord/YangMp4FileApp.cpp \ + src/yangrecord/YangRecEncoder.cpp \ + src/yangrecord/YangRecord.cpp \ + src/yangrecord/YangRecordApp.cpp \ + src/yangrecord/YangRecordCapture.cpp \ + src/yangrecord/YangRecordHandle.cpp \ + src/yangrecord/YangRecordMp4.cpp \ + src/yangsrt/YangSrtBase.cpp \ + src/yangsrt/YangTsBuffer.cpp \ + src/yangsrt/YangTsMuxer.cpp \ + src/yangsrt/YangTsPacket.cpp \ + src/yangsrt/YangTsdemux.cpp \ + src/yangsrt/common.cpp \ + src/yangsrt/crc.cpp \ + src/yangsrt/srt_data.cpp \ + src/yangstream/YangStreamHandle.cpp \ + src/yangstream/YangStreamManager.cpp \ + src/yangstream/YangStreamSrt.cpp \ + src/yangstream/YangSynBuffer.cpp \ + src/yangutil/YangAvinfo.cpp \ + src/yangutil/YangIniImpl.cpp \ + src/yangutil/YangJson.cpp \ + src/yangutil/YangLoadLib.cpp \ + src/yangutil/YangString.cpp \ + src/yangutil/YangSysMessageHandle.cpp \ + src/yangutil/YangThread.cpp \ + src/yangutil/YangTimer.cpp \ + src/yangutil/YangWindowsMouse.cpp \ + src/yangutil/buffer/YangAudioBuffer.cpp \ + src/yangutil/buffer/YangAudioEncoderBuffer.cpp \ + src/yangutil/buffer/YangAudioPlayBuffer.cpp \ + src/yangutil/buffer/YangMediaBuffer.cpp \ + src/yangutil/buffer/YangVideoBuffer.cpp \ + src/yangutil/buffer/YangVideoDecoderBuffer.cpp \ + src/yangutil/buffer/YangVideoEncoderBuffer.cpp + + +HEADERS += \ + src/yangaudiodev/YangAudioCaptureHandle.h \ + src/yangaudiodev/YangCaptureCallback.h \ + src/yangaudiodev/linux/YangAlsaDeviceHandle.h \ + src/yangaudiodev/linux/YangAlsaHandle.h \ + src/yangaudiodev/linux/YangAudioCaptureImpl.h \ + src/yangaudiodev/linux/YangAudioPlayAlsa.h \ + src/yangaudiodev/win/YangAudioApiCapture.h \ + src/yangaudiodev/win/YangRecAudioCaptureHandle.h \ + src/yangaudiodev/win/YangWinAudioApi.h \ + src/yangaudiodev/win/YangWinAudioApiAec.h \ + src/yangaudiodev/win/YangWinAudioApiCapture.h \ + src/yangaudiodev/win/YangWinAudioApiDevice.h \ + src/yangaudiodev/win/YangWinAudioApiRender.h \ + src/yangaudiodev/win/YangWinAudioCapture.h \ + src/yangaudiodev/win/YangWinAudioCaptureHandle.h \ + src/yangaudiodev/win/YangWinAudioDevice.h \ + src/yangaudiodev/win/YangWinRecordAudioCapture.h \ + src/yangavutil/YangImageConvert.h \ + src/yangcapture/YangDXGIManager.h \ + src/yangcapture/YangScreenCaptureImpl.h \ + src/yangcapture/YangScreenShare.h \ + src/yangcapture/YangVideoCaptureHandle.h \ + src/yangcapture/YangVideoCaptureImpl.h \ + src/yangcapture/YangVideoDeviceQuery.h \ + src/yangcapture/win/YangVideoSrc.h \ + src/yangcapture/win/YangWinVideoCapture.h \ + src/yangcapture/win/YangWinVideoCaptureHandle.h \ + src/yangdecoder/YangAudioDecoderAac.h \ + src/yangdecoder/YangAudioDecoderOpus.h \ + src/yangdecoder/YangAudioDecoderSpeex.h \ + src/yangdecoder/YangH2645VideoDecoderFfmpeg.h \ + src/yangdecoder/YangH264Dec.h \ + src/yangdecoder/YangH264DecoderSoft.h \ + src/yangdecoder/YangH264DecoderSoftFactory.h \ + src/yangdecoder/YangH264Header.h \ + src/yangdecoder/YangH264Header1.h \ + src/yangdecoder/YangHeaderParseFfmpeg.h \ + src/yangdecoder/YangVideoDecoderIntel.h \ + src/yangencoder/YangAudioEncoderAac.h \ + src/yangencoder/YangAudioEncoderMp3.h \ + src/yangencoder/YangAudioEncoderOpus.h \ + 
src/yangencoder/YangAudioEncoderSpeex.h \ + src/yangencoder/YangFfmpegEncoderMeta.h \ + src/yangencoder/YangH264EncHeader.h \ + src/yangencoder/YangH264EncoderIntel.h \ + src/yangencoder/YangH264EncoderIntel1.h \ + src/yangencoder/YangH264EncoderMeta.h \ + src/yangencoder/YangH264EncoderSoft.h \ + src/yangencoder/YangH265EncoderMeta.h \ + src/yangencoder/YangH265EncoderSoft.h \ + src/yangencoder/YangVideoEncoderFfmpeg.h \ + src/yangencoder/lame.h \ + src/yangplayer/YangPlayerHandleImpl.h \ + src/yangplayer/YangRtcReceive.h \ + src/yangpush/YangPushEncoder.h \ + src/yangpush/YangPushHandleImpl.h \ + src/yangpush/YangPushMessageHandle.h \ + src/yangpush/YangSendVideoImpl.h \ + src/yangrecord/YangRecordHandle.h \ + src/yangstream/YangStreamSrt.h + +# Default rules for deployment. +unix { + target.path = $$[QT_INSTALL_PLUGINS]/generic +} +!isEmpty(target.path): INSTALLS += target diff --git a/libmetartc3/src/yangaudiodev/YangAudioCapture.cpp b/libmetartc3/src/yangaudiodev/YangAudioCapture.cpp new file mode 100755 index 00000000..342acb2e --- /dev/null +++ b/libmetartc3/src/yangaudiodev/YangAudioCapture.cpp @@ -0,0 +1,42 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "stdlib.h" +#include +#include +#include "yangutil/sys/YangLog.h" +#include "memory.h" +#include "malloc.h" + + + +YangAudioCapture::YangAudioCapture() +{ + + aIndex=0; + m_isStart=0; + m_context=NULL; + //m_resample=NULL; + +} + +YangAudioCapture::~YangAudioCapture() +{ + + m_context=NULL; + + +} + + +void YangAudioCapture::run(){ + m_isStart=1; + startLoop(); + m_isStart=0; +} +void YangAudioCapture::stop(){ + stopLoop(); + //m_isStart=0; +} + + diff --git a/libmetartc3/src/yangaudiodev/YangAudioCaptureData.cpp b/libmetartc3/src/yangaudiodev/YangAudioCaptureData.cpp new file mode 100755 index 00000000..65f88cb9 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/YangAudioCaptureData.cpp @@ -0,0 +1,74 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include + +YangAudioCaptureData::YangAudioCaptureData() { + m_cacheLen = 1024 * 4 * 4; + m_cache = new uint8_t[m_cacheLen]; + m_size = 0; + m_pos = 0; + m_cb = NULL; + memset(&m_res,0,sizeof(YangAudioResample)); + yang_create_audioresample(&m_res); + m_res.init(m_res.context,48000, 2, 48000, 2, 20); + memset(&m_audioFrame,0,sizeof(YangFrame)); +} + +YangAudioCaptureData::~YangAudioCaptureData() { + yang_deleteA(m_cache); + m_cb = NULL; + yang_destroy_audioresample(&m_res); +} + +void YangAudioCaptureData::initIn(int psample, int pchannel) { + m_res.initIn(m_res.context,psample, pchannel); + +} + +void YangAudioCaptureData::initOut(int psample, int pchannel) { + m_res.initOut(m_res.context,psample, pchannel); +} + +void YangAudioCaptureData::caputure(YangFrame *audioFrame) { + if(m_size + audioFrame->nb > m_cacheLen) return; + if ((m_pos + m_size + audioFrame->nb) >= m_cacheLen) { + memmove(m_cache, m_cache + m_pos, m_size); + m_pos = 0; + } + + if (audioFrame->payload && audioFrame->nb > 0) { + + memcpy(m_cache + m_pos + m_size, audioFrame->payload, audioFrame->nb); + m_size += audioFrame->nb; + } + + while(m_size >= m_res.context->inBytes) { + if (m_pos + m_res.context->inBytes >= m_cacheLen) { + memmove(m_cache, m_cache + m_pos, m_size); + m_pos = 0; + } + captureData(); + + m_pos += m_res.context->inBytes; + m_size -= m_res.context->inBytes; + } + +} + +int YangAudioCaptureData::getOutLength(){ + return m_res.context->outBytes; +} + +void YangAudioCaptureData::captureData() { + m_audioFrame.payload=m_cache + m_pos; + 
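+	// m_audioFrame aliases the cache at the current read offset; resample()
+	// below converts one input block (inBytes) to the configured output
+	// sample rate/channels before it reaches the capture callback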
m_audioFrame.nb=m_res.context->inBytes; + m_res.resample(m_res.context,&m_audioFrame); + + if (m_cb) + m_cb->caputureAudioData(&m_audioFrame); + +} + diff --git a/libmetartc3/src/yangaudiodev/YangAudioCaptureHandle.cpp b/libmetartc3/src/yangaudiodev/YangAudioCaptureHandle.cpp new file mode 100755 index 00000000..a04fc872 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/YangAudioCaptureHandle.cpp @@ -0,0 +1,124 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include "yangutil/yang_unistd.h" + + +YangAudioCaptureHandle::YangAudioCaptureHandle(YangContext *pcontext) +{ + + isBuf=0; + m_audioList=NULL; + m_aec=NULL; + m_aecPlayBuffer=NULL; + pcm=new short[4096/2]; + hasPlayData=1; + isFirst=1; + m_aecBufferFrames=pcontext->avinfo.audio.aecBufferFrames; + memset(&m_audioFrame,0,sizeof(YangFrame)); + +} +YangAudioCaptureHandle::~YangAudioCaptureHandle(void) +{ + m_aec=NULL; + m_aecPlayBuffer=NULL; + m_audioList=NULL; + yang_deleteA(pcm); +} + + + +void YangAudioCaptureHandle::startRecordWave(char* filename){ + //wavRecord.start(1,filename); + //isRecordAudio=1; +} +void YangAudioCaptureHandle::stopRecordWave(){ + //isRecordAudio=0; + //wavRecord.stop(); + +} + +void YangAudioCaptureHandle::setOutAudioBuffer(YangAudioBuffer *pbuf) +{ + m_audioList=pbuf; +} + +//int32_t tcou=0; + void YangAudioCaptureHandle::putBuffer(uint8_t *pBuffer,int32_t plen) + { + if(!isBuf) return; + if(m_aec) { + if(hasPlayData) { + m_aec->echoCapture(m_aec->context,(short*)pBuffer,pcm); + m_aec->preprocessRun(m_aec->context,pcm); + } + if(m_aecPlayBuffer&&m_aecPlayBuffer->size()>m_aecBufferFrames){ + uint8_t* tmp=m_aecPlayBuffer->getAudioRef(&m_audioFrame); + if(tmp) + m_aec->echoPlayback(m_aec->context,(short*)tmp); + //printf("%d,",m_aecPlayBuffer->size()); + if(isFirst){ + if(m_audioList) m_audioList->resetIndex(); + isFirst=0; + } + hasPlayData=1; + //m_aec->echo_cancellation((short*)pBuffer,(short*)m_aecPlayBuffer->getAudio(),pcm); + //speex_echo_cancellation() + }else + hasPlayData=0; + + + if(hasPlayData){ + if(m_audioList) { + m_audioFrame.payload=(uint8_t *)pcm; + m_audioFrame.nb=plen; + m_audioList->putAudio(&m_audioFrame); + } + }else{ + if(m_audioList) { + m_audioFrame.payload=pBuffer; + m_audioFrame.nb=plen; + m_audioList->putAudio(&m_audioFrame); + } + } + }else{ + + if(m_audioList) { + m_audioFrame.payload=pBuffer; + m_audioFrame.nb=plen; + m_audioList->putAudio(&m_audioFrame); + } + } + + + } + void YangAudioCaptureHandle::putEchoPlay(short* pbuf,int32_t plen){ + if(!isBuf) return; + if(m_aec) m_aec->echoPlayback(m_aec->context,pbuf); + } + void YangAudioCaptureHandle::putEchoBuffer( uint8_t *pBuffer,int32_t plen){ + if(!isBuf) return; + if(m_aec) { + m_aec->echoCapture(m_aec->context,(short*)pBuffer,pcm); + m_aec->preprocessRun(m_aec->context,pcm); + + if(m_audioList){ + m_audioFrame.payload=pBuffer; + m_audioFrame.nb=plen; + m_audioList->putAudio(&m_audioFrame); + } + } + + } + void YangAudioCaptureHandle::putBuffer1( uint8_t *pBuffer,int32_t plen){ + + if(!isBuf) return; + if(m_audioList) { + m_audioFrame.payload=pBuffer; + m_audioFrame.nb=plen; + m_audioList->putAudio(&m_audioFrame); + } + + } + diff --git a/libmetartc3/src/yangaudiodev/YangAudioCaptureHandle.h b/libmetartc3/src/yangaudiodev/YangAudioCaptureHandle.h new file mode 100755 index 00000000..85806c74 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/YangAudioCaptureHandle.h @@ -0,0 +1,38 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef ___YangAudioCaptureHandle__ +#define ___YangAudioCaptureHandle__ +//#ifdef _WIN32 
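+// Capture-side AEC flow (see YangAudioCaptureHandle.cpp): near-end PCM is
+// passed through m_aec->echoCapture()/preprocessRun() when AEC is active,
+// far-end playback frames arrive via putEchoPlay()/putEchoBuffer(), and the
+// processed frames are queued on the buffer set with setOutAudioBuffer().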
+#include + +#include +#include + +class YangAudioCaptureHandle { +public: + YangAudioCaptureHandle(YangContext *pcontext); + virtual ~YangAudioCaptureHandle(void); + YangRtcAec *m_aec; + +public: + void putBuffer(uint8_t *pBuffer,int32_t plen); + void putBuffer1(uint8_t *pBuffer,int32_t plen); + void putEchoBuffer(uint8_t *pBuffer,int32_t plen); + void putEchoPlay(short *pbuf,int32_t plen); + void startRecordWave(char *filename); + void stopRecordWave(); + void setOutAudioBuffer(YangAudioBuffer *plist); + YangAudioBuffer *m_aecPlayBuffer; + int32_t isBuf; +private: + + int32_t hasPlayData; + int32_t m_aecBufferFrames; + int32_t isFirst; + short *pcm; + YangFrame m_audioFrame; + YangAudioBuffer *m_audioList; +}; +//#endif +#endif diff --git a/libmetartc3/src/yangaudiodev/YangAudioPlay.cpp b/libmetartc3/src/yangaudiodev/YangAudioPlay.cpp new file mode 100755 index 00000000..0e65a10a --- /dev/null +++ b/libmetartc3/src/yangaudiodev/YangAudioPlay.cpp @@ -0,0 +1,55 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#include +#include +#include + +#include +#include +YangAudioPlay::YangAudioPlay(YangContext* pcontext) { //m_rc=0; + + m_context = pcontext; + m_ace = NULL; + aIndex = 0; + m_frames = 0; + m_channel = pcontext->avinfo.audio.channel; + m_isStart = 0; + + m_sample = pcontext->avinfo.audio.sample; + m_isStart=0; + m_audioData.setInAudioBuffer(pcontext->streams.m_playBuffer); + m_audioData.setInAudioBuffers(pcontext->streams.m_playBuffers); + m_audioData.initPlay(pcontext->avinfo.audio.sample,pcontext->avinfo.audio.channel); +} + +YangAudioPlay::~YangAudioPlay() { + m_context = NULL; + m_ace = NULL; +} +void YangAudioPlay::run() { + m_isStart=1; + startLoop(); + m_isStart=0; +} +void YangAudioPlay::stop() { + stopLoop(); +} +void YangAudioPlay::setAudioBuffers(vector *pal) { + // if(m_audioData.m_in_audioBuffers) m_audioData.m_in_audioBuffers->setInAudioBuffers(pal); + //m_in_audioBuffer = pal; +} +void YangAudioPlay::setAudioBuffer(YangAudioPlayBuffer* pal) { + //m_buf=pal; + //if(m_audioData.m_syn) m_audioData.m_syn->setInAudioBuffer(pal); + //m_in_audioBuffer = pal; +} +void YangAudioPlay::setAecBase(YangRtcAec *pace) { + if (pace != NULL) + m_aecInit = 1; + m_ace = pace; +} + diff --git a/libmetartc3/src/yangaudiodev/YangAudioRenderData.cpp b/libmetartc3/src/yangaudiodev/YangAudioRenderData.cpp new file mode 100755 index 00000000..c77d7446 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/YangAudioRenderData.cpp @@ -0,0 +1,180 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include +#include +#include +using namespace std; +#define Yang_Mix_BUF_Len 4096 +YangAudioRenderData::YangAudioRenderData() { + m_cacheLen = 1024 * 4 * 10; + m_cache = new uint8_t[m_cacheLen]; + m_size = 0; + m_pos = 0; + + m_aecBufLen = 0; + m_renderLen=0; + + m_syn = NULL; + m_in_audioBuffers=NULL; + + m_hasAec=false; + m_aecBuf=NULL; + m_mixBuf=NULL; + + m_preProcess=NULL; + m_mixPos=0; + memset(&m_res,0,sizeof(YangAudioResample)); + yang_create_audioresample(&m_res); + m_res.init(m_res.context,48000, 2, 48000, 2, 20); +} + +YangAudioRenderData::~YangAudioRenderData() { + yang_deleteA(m_cache); + yang_deleteA(m_mixBuf); + m_syn = NULL; + m_in_audioBuffers=NULL; + + m_preProcess=NULL; + yang_destroy_audioresample(&m_res); +} + +void YangAudioRenderData::setInAudioBuffer(YangSynBuffer *pal) { + m_syn = pal; +} + +void YangAudioRenderData::initRender(int psample, int pchannel) { + m_res.initOut(m_res.context,psample, pchannel); +} + +void YangAudioRenderData::initPlay(int 
psample, int pchannel) {
+	m_res.initIn(m_res.context,psample, pchannel);
+}
+
+
+
+void YangAudioRenderData::setAec(){
+
+	if(m_aecBuf==NULL) m_aecBuf=new uint8_t[Yang_Mix_BUF_Len];
+	m_hasAec=true;
+}
+void YangAudioRenderData::setInAudioBuffers(std::vector<YangSynBuffer*> *pal) {
+	if(m_in_audioBuffers) return;
+	m_in_audioBuffers = pal;
+	if(m_mixBuf==NULL){
+		m_mixBuf=new uint8_t[960*8];
+	}
+}
+bool YangAudioRenderData::hasData() {
+	if (!m_in_audioBuffers)
+		return false;
+	for (int32_t i = 0; i < (int) m_in_audioBuffers->size(); i++) {
+
+		if (m_in_audioBuffers->at(i)->getAudioSize() > 0)
+			return true;
+	}
+
+	return false;
+}
+
+
+uint8_t* YangAudioRenderData::getAudioRef(YangFrame* pframe){
+	if(m_syn) {
+
+		return m_syn->getAudioRef(pframe);
+	}
+	if(m_in_audioBuffers&&hasData()){
+		uint8_t *tmp = NULL;
+		for (size_t i = 0; i < m_in_audioBuffers->size(); i++) {
+			if (m_in_audioBuffers->at(i) && m_in_audioBuffers->at(i)->getAudioSize() > 0) {
+				//YangFrame* frame=m_in_audioBuffer->at(i)->getAudios();
+
+				tmp = m_in_audioBuffers->at(i)->getAudioRef(pframe);
+				if (tmp) {
+					//	if (m_preProcess) m_preProcess->preprocess_run((short*) tmp);
+					if (i == 0) {
+						memcpy(m_mixBuf, tmp, pframe->nb);
+					} else {
+						yang_mixaudio_mix1((short*)m_mixBuf, (short*)tmp, pframe->nb, 128);
+					}
+				}
+				tmp = NULL;
+
+				//	if (m_in_audioBuffer->at(i)->size() > m_audioPlayCacheNum) m_in_audioBuffer->at(i)->resetIndex();
+			}
+		}
+		return m_mixBuf;
+	}
+	return NULL;
+}
+
+uint8_t* YangAudioRenderData::getAecAudioData(){
+	if(m_renderLen<m_res.context->inBytes||m_aecBufLen<m_res.context->inBytes) return NULL;
+
+	if((m_mixPos+m_res.context->inBytes)>=Yang_Mix_BUF_Len) {
+		memmove(m_aecBuf,m_aecBuf+m_mixPos,m_aecBufLen);
+		m_mixPos=0;
+	}
+
+	uint8_t* p=m_aecBuf+m_mixPos;
+	m_aecBufLen-=m_res.context->inBytes;
+	m_mixPos+=m_res.context->inBytes;
+	m_renderLen=0;
+	return p;
+}
+void YangAudioRenderData::setRenderLen(int plen){
+	m_renderLen+=(m_res.context->inBytes*plen)/m_res.context->outBytes;
+}
+uint8_t* YangAudioRenderData::getAudioData(YangFrame* frame){
+
+
+	uint8_t* tmp=getAudioRef(frame);
+
+	if(m_hasAec&&tmp){
+		if((m_mixPos+m_aecBufLen+m_res.context->inBytes)>=Yang_Mix_BUF_Len) {
+			memmove(m_aecBuf,m_aecBuf+m_mixPos,m_aecBufLen);
+			m_mixPos=0;
+		}
+
+		memcpy(m_aecBuf+m_mixPos+m_aecBufLen,tmp,m_res.context->inBytes);
+		m_aecBufLen+=m_res.context->inBytes;
+	}
+
+	return tmp;
+}
+
+void YangAudioRenderData::setAudioData(YangFrame* frame){
+	uint8_t *buf =getAudioData(frame);
+	frame->payload=buf;
+	m_res.resample(m_res.context,frame);
+}
+
+uint8_t* YangAudioRenderData::getRenderAudioData(int len) {
+	if ((m_pos + m_size + len) >= m_cacheLen) {
+		memmove(m_cache, m_cache + m_pos, m_size);
+		m_pos = 0;
+	}
+
+	m_audioFrame.payload=NULL;
+	m_audioFrame.nb=0;
+
+	if((m_size+(len<<1))<m_cacheLen) setAudioData(&m_audioFrame);
+	if(m_audioFrame.payload&&m_audioFrame.nb>0) {
+		if ((m_pos + m_size + m_audioFrame.nb) >= m_cacheLen) {
+			memmove(m_cache, m_cache + m_pos, m_size);
+			m_pos = 0;
+		}
+		memcpy(m_cache + m_pos + m_size, m_audioFrame.payload, m_audioFrame.nb);
+		m_size += m_audioFrame.nb;
+	}
+
+	if (len > m_size) return NULL;
+	uint8_t *p = m_cache + m_pos;
+	m_pos += len;
+	m_size -= len;
+	return p;
+
+}
+
diff --git a/libmetartc3/src/yangaudiodev/YangCaptureCallback.h b/libmetartc3/src/yangaudiodev/YangCaptureCallback.h
new file mode 100755
index 00000000..92b2052c
--- /dev/null
+++ b/libmetartc3/src/yangaudiodev/YangCaptureCallback.h
@@ -0,0 +1,17 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YANGCAPTURE_WIN_API_YANGCAPTURECALLBACK_H_
+#define YANGCAPTURE_WIN_API_YANGCAPTURECALLBACK_H_
+#include
+class 
YangCaptureCallback{ +public: + YangCaptureCallback(){}; + virtual ~YangCaptureCallback(){}; + virtual void caputureAudioData(YangFrame* audioFrame)=0; +}; + + + + +#endif /* YANGCAPTURE_WIN_API_YANGCAPTURECALLBACK_H_ */ diff --git a/libmetartc3/src/yangaudiodev/linux/YangAlsaDeviceHandle.cpp b/libmetartc3/src/yangaudiodev/linux/YangAlsaDeviceHandle.cpp new file mode 100755 index 00000000..09b36cc6 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/linux/YangAlsaDeviceHandle.cpp @@ -0,0 +1,582 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#ifndef _WIN32 +#include +#include +YangAlsaDeviceHandle::YangAlsaDeviceHandle(YangContext *pcontext) { + m_context = pcontext; + m_ahandle = new YangAudioCaptureHandle(pcontext); + m_audioPlayCacheNum = m_context->avinfo.audio.audioPlayCacheNum; + aIndex = 0; + m_ret = 0; + m_size = 0; + m_loops = 0; + + m_buffer = NULL; + m_isInit = 0; + m_dev = NULL; + + m_frames = 1024; + m_channel = pcontext->avinfo.audio.channel; + m_sample = pcontext->avinfo.audio.sample; + m_preProcess = NULL; + m_audioData.initPlay(m_sample,m_channel); + m_audioData.initRender(m_sample,m_channel); + m_audioData.setInAudioBuffers(pcontext->streams.m_playBuffers); + +} +YangAlsaDeviceHandle::~YangAlsaDeviceHandle() { +// m_in_audioBuffer = NULL; + if (m_loops) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + alsa_device_close(); + m_preProcess = NULL; + yang_delete(m_buffer); + yang_delete(m_ahandle); +} +void YangAlsaDeviceHandle::setCatureStart() { + m_ahandle->isBuf = 1; +} +void YangAlsaDeviceHandle::setCatureStop() { + m_ahandle->isBuf = 0; +} +void YangAlsaDeviceHandle::setOutAudioBuffer(YangAudioBuffer *pbuffer) { + m_ahandle->setOutAudioBuffer(pbuffer); +} +void YangAlsaDeviceHandle::setPlayAudoBuffer(YangAudioBuffer *pbuffer) { + m_ahandle->m_aecPlayBuffer = pbuffer; +} +void YangAlsaDeviceHandle::setAec(YangRtcAec *paec) { + m_ahandle->m_aec = paec; +} +void YangAlsaDeviceHandle::setPreProcess(YangPreProcess *pp) { + m_preProcess = pp; + m_audioData.m_preProcess=pp; +} +int32_t YangAlsaDeviceHandle::alsa_device_open(char *device_name, + uint32_t rate, int32_t channels, int32_t period) { + int32_t dir; + int32_t err; + snd_pcm_hw_params_t *hw_params; + snd_pcm_sw_params_t *sw_params; + snd_pcm_uframes_t period_size = period; + snd_pcm_uframes_t buffer_size = 2 * period; + static snd_output_t *jcd_out; + m_dev = (YangAlsaDevice*) malloc( + (unsigned long) sizeof(YangAlsaDevice)); + if (!m_dev) + return ERROR_SYS_NoAudioDevice; + m_dev->device_name = (char*) malloc(1 + strlen(device_name)); + if (!m_dev->device_name) { + free(m_dev); + return ERROR_SYS_NoAudioDevice; + } + strcpy(m_dev->device_name, device_name); + m_dev->channels = channels; + m_dev->period = period; + err = snd_output_stdio_attach(&jcd_out, stdout, 0); + + if ((err = snd_pcm_open(&m_dev->capture_handle, m_dev->device_name, + SND_PCM_STREAM_CAPTURE, 0)) < 0) { + + yang_error("cannot open audio device %s (%s)", m_dev->device_name, + snd_strerror(err)); + catpureDeviceState = 0; + //_exit(1); + } + if (catpureDeviceState) { + if ((err = snd_pcm_hw_params_malloc(&hw_params)) < 0) { + + yang_error("cannot allocate hardware parameter structure (%s)", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_any(m_dev->capture_handle, hw_params)) + < 0) { + + yang_error("cannot initialize hardware parameter structure (%s)", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_access(m_dev->capture_handle, + hw_params, SND_PCM_ACCESS_RW_INTERLEAVED)) 
< 0) { + + yang_error("cannot set access type (%s)", snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_format(m_dev->capture_handle, + hw_params, SND_PCM_FORMAT_S16_LE)) < 0) { + + yang_error("cannot set sample format (%s)", snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_rate_near(m_dev->capture_handle, + hw_params, &rate, 0)) < 0) { + + yang_error("cannot set sample rate (%s)", snd_strerror(err)); + _exit(1); + } + /* yang_error( "rate = %d", rate);*/ + + if ((err = snd_pcm_hw_params_set_channels(m_dev->capture_handle, + hw_params, channels)) < 0) { + + yang_error("cannot set channel count (%s)", snd_strerror(err)); + _exit(1); + } + + period_size = period; + dir = 0; + if ((err = snd_pcm_hw_params_set_period_size_near(m_dev->capture_handle, + hw_params, &period_size, &dir)) < 0) { + + yang_error("cannot set period size (%s)", snd_strerror(err)); + _exit(1); + } + +// if ((err = snd_pcm_hw_params_set_periods(m_dev->capture_handle, hw_params,2, 0)) < 0) { +// yang_error( "cannot set number of periods (%s)",snd_strerror(err)); +// _exit(1); +// } + + buffer_size = period_size * 2; + dir = 0; + if ((err = snd_pcm_hw_params_set_buffer_size_near(m_dev->capture_handle, + hw_params, &buffer_size)) < 0) { + + yang_error("cannot set buffer time (%s)", snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params(m_dev->capture_handle, hw_params)) < 0) { + + yang_error("cannot set capture parameters (%s)", snd_strerror(err)); + _exit(1); + } + /*snd_pcm_dump_setup(dev->capture_handle, jcd_out);*/ + snd_pcm_hw_params_free(hw_params); + + if ((err = snd_pcm_sw_params_malloc(&sw_params)) < 0) { + + yang_error("cannot allocate software parameters structure (%s)", + snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_sw_params_current(m_dev->capture_handle, sw_params)) + < 0) { + + yang_error("cannot initialize software parameters structure (%s)", + snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_sw_params_set_avail_min(m_dev->capture_handle, + sw_params, period)) < 0) { + + yang_error("cannot set minimum available count (%s)", + snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_sw_params(m_dev->capture_handle, sw_params)) < 0) { + + yang_error("cannot set software parameters (%s)", + snd_strerror(err)); + _exit(1); + } + } + + + + if ((err = snd_pcm_open(&m_dev->playback_handle, m_dev->device_name, + SND_PCM_STREAM_PLAYBACK, 0)) < 0) { + + yang_error("cannot open audio device %s (%s)", m_dev->device_name, snd_strerror(err)); + playDeviceState=0; + } + if(playDeviceState){ + if ((err = snd_pcm_hw_params_malloc(&hw_params)) < 0) { + + yang_error("cannot allocate hardware parameter structure (%s)", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_any(m_dev->playback_handle, hw_params)) < 0) { + + yang_error("cannot initialize hardware parameter structure (%s)", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_access(m_dev->playback_handle, hw_params, + SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) { + + yang_error("cannot set access type (%s)", snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_format(m_dev->playback_handle, hw_params, + SND_PCM_FORMAT_S16_LE)) < 0) { + + yang_error("cannot set sample format (%s)", snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_rate_near(m_dev->playback_handle, + hw_params, &rate, 0)) < 0) { + + yang_error("cannot set sample rate (%s)", snd_strerror(err)); + _exit(1); + } + /* yang_error( "rate = %d", rate);*/ 
+ + if ((err = snd_pcm_hw_params_set_channels(m_dev->playback_handle, hw_params, + channels)) < 0) { + + yang_error("cannot set channel count (%s)", snd_strerror(err)); + _exit(1); + } + + period_size = period; + dir = 0; + if ((err = snd_pcm_hw_params_set_period_size_near(m_dev->playback_handle, + hw_params, &period_size, &dir)) < 0) { + + yang_error("cannot set period size (%s)", snd_strerror(err)); + _exit(1); + } +// if ((err = snd_pcm_hw_params_set_periods(m_dev->playback_handle, hw_params, 2, 0)) < 0) { +// yang_error( "cannot set number of periods (%s)", snd_strerror(err)); +// _exit(1); +// } + buffer_size = period_size * 2; + dir = 0; + if ((err = snd_pcm_hw_params_set_buffer_size_near(m_dev->playback_handle, + hw_params, &buffer_size)) < 0) { + + yang_error("cannot set buffer time (%s)", snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params(m_dev->playback_handle, hw_params)) < 0) { + + yang_error("cannot set playback parameters (%s)", snd_strerror(err)); + _exit(1); + } + + /*snd_pcm_dump_setup(dev->playback_handle, jcd_out);*/ + snd_pcm_hw_params_free(hw_params); + + if ((err = snd_pcm_sw_params_malloc(&sw_params)) < 0) { + + yang_error("cannot allocate software parameters structure (%s)", + snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_sw_params_current(m_dev->playback_handle, sw_params)) + < 0) { + + yang_error("cannot initialize software parameters structure (%s)", + snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_sw_params_set_avail_min(m_dev->playback_handle, + sw_params, period)) < 0) { + + yang_error("cannot set minimum available count (%s)", + snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_sw_params_set_start_threshold(m_dev->playback_handle, + sw_params, period)) < 0) { + + yang_error("cannot set start mode (%s)", snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_sw_params(m_dev->playback_handle, sw_params)) < 0) { + + yang_error("cannot set software parameters (%s)", snd_strerror(err)); + _exit(1); + } + + snd_pcm_link(m_dev->capture_handle, m_dev->playback_handle); + if ((err = snd_pcm_prepare(m_dev->capture_handle)) < 0) { + + yang_error("cannot prepare audio interface for use (%s)", + snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_prepare(m_dev->playback_handle)) < 0) { + + yang_error("cannot prepare audio interface for use (%s)", + snd_strerror(err)); + _exit(1); + } + } + + if(catpureDeviceState){ + m_dev->readN = snd_pcm_poll_descriptors_count(m_dev->capture_handle); + m_dev->read_fd = (pollfd*) malloc(m_dev->readN * sizeof(*m_dev->read_fd)); + if (snd_pcm_poll_descriptors(m_dev->capture_handle, m_dev->read_fd, + m_dev->readN) != m_dev->readN) { + + yang_error("cannot obtain capture file descriptors (%s)", + snd_strerror(err)); + _exit(1); + } + } + if(playDeviceState){ + m_dev->writeN = snd_pcm_poll_descriptors_count(m_dev->playback_handle); + m_dev->write_fd = (pollfd*) malloc(m_dev->writeN * sizeof(*m_dev->read_fd)); + if (snd_pcm_poll_descriptors(m_dev->playback_handle, m_dev->write_fd, + m_dev->writeN) != m_dev->writeN) { + + yang_error("cannot obtain playback file descriptors (%s)", + snd_strerror(err)); + _exit(1); + } + } + + + if(!catpureDeviceState&&!playDeviceState){ + return ERROR_SYS_NoAudioDevice; + }else if(!catpureDeviceState){ + return ERROR_SYS_NoAudioCaptureDevice; + }else if(!playDeviceState){ + return ERROR_SYS_NoAudioPlayDevice; + } + + + return Yang_Ok; +} + +void YangAlsaDeviceHandle::alsa_device_close() { + if (m_dev) { + snd_pcm_close(m_dev->capture_handle); + 
snd_pcm_close(m_dev->playback_handle); + free(m_dev->device_name); + free(m_dev); + m_dev = NULL; + } +} + +int32_t YangAlsaDeviceHandle::alsa_device_read(short *pcm, int32_t len) { + + if ((m_ret = snd_pcm_readi(m_dev->capture_handle, pcm, len)) != len) { + if (m_ret < 0) { + if (m_ret == -EPIPE) { + + yang_error("An overrun has occured, reseting capture"); + } else { + + yang_error("read from audio interface failed (%s)", + snd_strerror(m_ret)); + //m_ret = snd_pcm_recover(m_dev->capture_handle, m_ret, 0); + } + if ((m_ret = snd_pcm_prepare(m_dev->capture_handle)) < 0) { + + yang_error("cannot prepare audio interface for use (%s)", + snd_strerror(m_ret)); + } + if ((m_ret = snd_pcm_start(m_dev->capture_handle)) < 0) { + + yang_error("cannot prepare audio interface for use (%s)", + snd_strerror(m_ret)); + } + + } else { + + yang_error( + "Couldn't read as many samples as I wanted (%d instead of %d)", + m_ret, len); + } + return 1; + } + return Yang_Ok; +} + +int32_t YangAlsaDeviceHandle::alsa_device_write(const short *pcm, int32_t len) { + + if ((m_ret = snd_pcm_writei(m_dev->playback_handle, pcm, len)) != len) { + if (m_ret < 0) { + if (m_ret == -EPIPE) { + // yang_usleep(1000); + yang_error("An underrun has occured, reseting playback, len=%d",len); + } else { + yang_error("write to audio interface failed (%s)", + snd_strerror(m_ret)); + } + if ((m_ret = snd_pcm_prepare(m_dev->playback_handle)) < 0) { + yang_error("cannot prepare audio interface for use (%s)", + snd_strerror(m_ret)); + } + } else { + yang_error( + "Couldn't write as many samples as I wanted (%d instead of %d)", + m_ret, len); + } + + return 1; + } + return Yang_Ok; +} + +int32_t YangAlsaDeviceHandle::alsa_device_capture_ready(struct pollfd *pfds, + uint32_t nfds) { + unsigned short revents = 0; + + if ((m_ret = snd_pcm_poll_descriptors_revents(m_dev->capture_handle, pfds, + m_dev->readN, &revents)) < 0) { + + yang_error("error in alsa_device_capture_ready: %s", + snd_strerror(m_ret)); + return pfds[0].revents & POLLIN; + } + + return revents & POLLIN; +} + +int32_t YangAlsaDeviceHandle::alsa_device_playback_ready(struct pollfd *pfds, + uint32_t nfds) { + unsigned short revents = 0; + //int32_t err; + if ((m_ret = snd_pcm_poll_descriptors_revents(m_dev->playback_handle, + pfds + m_dev->readN, m_dev->writeN, &revents)) < 0) { + yang_error("error in alsa_device_playback_ready: %s", + snd_strerror(m_ret)); + return pfds[1].revents & POLLOUT; + } + //cerr << (revents & POLLERR) << endl; + return revents & POLLOUT; +} + +int32_t YangAlsaDeviceHandle::alsa_device_nfds() { + return m_dev->writeN + m_dev->readN; +} + +void YangAlsaDeviceHandle::alsa_device_getfds(struct pollfd *pfds, + uint32_t nfds) { + int32_t i; + //assert(nfds >= m_dev->writeN + m_dev->readN); + for (i = 0; i < m_dev->readN; i++) + pfds[i] = m_dev->read_fd[i]; + for (i = 0; i < m_dev->writeN; i++) + pfds[i + m_dev->readN] = m_dev->write_fd[i]; +} +void YangAlsaDeviceHandle::setInAudioBuffer(vector *pal) { + //m_in_audioBuffer = pal; +} +void YangAlsaDeviceHandle::stopLoop() { + m_loops = 0; +} + +void YangAlsaDeviceHandle::run() { + startLoop(); +} + + + +int32_t YangAlsaDeviceHandle::init() { + if (m_isInit) + return Yang_Ok; + if (m_context->avinfo.audio.usingMono) { + m_channel = 1; + m_sample = 16000; + } + + m_frames=m_sample*m_channel/50; + if (m_preProcess) { + m_preProcess->init(m_preProcess->context,m_frames, m_sample, m_channel); + } + + int32_t ret = alsa_device_open((char*)"default", m_sample, m_channel, m_frames); + + m_size = m_frames * 2 * 
m_channel; // 2 bytes/sample, 2 channels + m_buffer = (uint8_t*) malloc(m_size); + m_isInit = 1; + return ret; +} + +void YangAlsaDeviceHandle::startLoop() { + + m_loops = 1; + int32_t nfds = alsa_device_nfds(); + + + pollfd *pfds = (pollfd*) malloc(sizeof(*pfds) * nfds); + alsa_device_getfds(pfds, nfds); + int32_t audiolen = m_frames * m_channel * 2; + + short* pcm_short=new short[audiolen/2]; + + uint8_t *pcm_write = (uint8_t*)pcm_short;//new uint8_t[audiolen]; + uint8_t *tmp = NULL; + + int32_t readStart = 0; + YangFrame frame; + memset(&frame,0,sizeof(YangFrame)); + while (m_loops) { + + poll(pfds, nfds, -1); + if (playDeviceState&&alsa_device_playback_ready(pfds, nfds)) { + + tmp=m_audioData.getRenderAudioData(audiolen); + if(tmp){ + memcpy(pcm_write, tmp, audiolen); + if (!readStart) readStart = 1; + }else{ + memset(pcm_write, 0, audiolen); + } + /** + if (m_in_audioBuffer && hasData()) { + for (size_t i = 0; i < m_in_audioBuffer->size(); i++) { + if (m_in_audioBuffer->at(i) + && m_in_audioBuffer->at(i)->size() > 0) { + tmp = m_in_audioBuffer->at(i)->getAudios(&frame); + if (tmp) { + if (m_preProcess) + m_preProcess->preprocess_run((short*) tmp); + if (i == 0) { + memcpy(pcm_write, tmp, audiolen); + } else { + m_mix.yangMix1(pcm_write, tmp, audiolen, 128); + } + } + tmp = NULL; + + if (m_in_audioBuffer->at(i)->size() + > m_audioPlayCacheNum) + m_in_audioBuffer->at(i)->resetIndex(); + } + } + if (!readStart) readStart = 1; + }**/ + + alsa_device_write( pcm_short, m_frames); + if (readStart) m_ahandle->putEchoPlay(pcm_short,audiolen); + } + if (catpureDeviceState&&alsa_device_capture_ready(pfds, nfds)) { + alsa_device_read((short*) m_buffer, m_frames); + if (readStart) + m_ahandle->putEchoBuffer(m_buffer,audiolen); + else + m_ahandle->putBuffer1(m_buffer,audiolen); + + } + } + free(pfds); + pfds = NULL; + yang_deleteA(pcm_short); + pcm_write=NULL; + +} +#endif diff --git a/libmetartc3/src/yangaudiodev/linux/YangAlsaDeviceHandle.h b/libmetartc3/src/yangaudiodev/linux/YangAlsaDeviceHandle.h new file mode 100755 index 00000000..1e7ef944 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/linux/YangAlsaDeviceHandle.h @@ -0,0 +1,82 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef Yang_ALSA_DEVICE_H1 +#define Yang_ALSA_DEVICE_H1 + +#ifndef _WIN32 +#include +#include +//#include +#include +//#include +#include +#include +#include +struct YangAlsaDevice { + char *device_name; + int32_t channels; + int32_t period; + snd_pcm_t *capture_handle; + snd_pcm_t *playback_handle; + int32_t readN, writeN; + struct pollfd *read_fd, *write_fd; +}; + +class YangAlsaDeviceHandle: public YangAudioCapture { +public: + YangAlsaDeviceHandle(YangContext *pcontext); + ~YangAlsaDeviceHandle(); + int32_t alsa_device_open(char *device_name, uint32_t rate, + int32_t channels, int32_t period); +public: + YangAudioCaptureHandle *m_ahandle; + int32_t init(); + void setInAudioBuffer(vector *pal); + void setPreProcess(YangPreProcess *pp); + void setCatureStart(); + void setCatureStop(); + void setOutAudioBuffer(YangAudioBuffer *pbuffer); + void setPlayAudoBuffer(YangAudioBuffer *pbuffer); + void setAec(YangRtcAec *paec); + +protected: + void run(); + + void startLoop(); + + void stopLoop(); + YangAudioRenderData m_audioData; +private: + //YangResample *m_resample; + //vector *m_in_audioBuffer; + uint8_t *m_buffer; + int32_t m_isInit; + int32_t m_ret; + //YangAudioMix m_mix; + YangPreProcess *m_preProcess; + //int32_t hasData(); + + void alsa_device_close(); + int32_t alsa_device_read(short *pcm, int32_t len); + 
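+	// pcm helpers: len is a frame count (snd_pcm_readi()/snd_pcm_writei() semantics), not bytes.
+	// typical flow (see startLoop()): poll() the descriptors from alsa_device_getfds(), then write
+	// one period when alsa_device_playback_ready() and read one when alsa_device_capture_ready().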
int32_t alsa_device_write(const short *pcm, int32_t len); + int32_t alsa_device_capture_ready(struct pollfd *pfds, uint32_t nfds); + int32_t alsa_device_playback_ready(struct pollfd *pfds, uint32_t nfds); + //void alsa_device_start(); + int32_t alsa_device_nfds(); + void alsa_device_getfds(struct pollfd *pfds, uint32_t nfds); + +private: + YangAlsaDevice *m_dev; + int32_t playDeviceState=1; + int32_t catpureDeviceState = 1; + int32_t m_size; + int32_t m_loops; + int32_t m_channel; + uint32_t m_sample; + snd_pcm_uframes_t m_frames; + int32_t m_audioPlayCacheNum; +}; + +#endif +#endif diff --git a/libmetartc3/src/yangaudiodev/linux/YangAlsaHandle.cpp b/libmetartc3/src/yangaudiodev/linux/YangAlsaHandle.cpp new file mode 100755 index 00000000..2fc84672 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/linux/YangAlsaHandle.cpp @@ -0,0 +1,358 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include +#ifndef _WIN32 + + +#include "stdlib.h" +#include +#include "memory.h" +#include "malloc.h" +YangAlsaHandle::YangAlsaHandle(YangContext *pcontext) //:YangAudioCapture(pcontext) + { + + m_context = pcontext; + m_ahandle = new YangAudioCaptureHandle(pcontext); + m_writeInit = 0, m_writeRet = 0; + m_readInit = 0; + aIndex = 0; + m_size = 0; + m_loops = 0; + m_readHandle = NULL; + m_writeHandle = NULL; + m_in_audioBuffer = NULL; + m_buffer = NULL; + m_frames = 1024; + m_channel = 2; + m_sample = 44100; +} + +YangAlsaHandle::~YangAlsaHandle() { + + + yang_delete(m_buffer); + yang_delete(m_ahandle); +} +void YangAlsaHandle::setCatureStart(){ + m_ahandle->isBuf=1; +} +void YangAlsaHandle::setCatureStop(){ + m_ahandle->isBuf=0; +} +void YangAlsaHandle::setOutAudioBuffer(YangAudioBuffer *pbuffer){ + m_ahandle->setOutAudioBuffer(pbuffer); +} +void YangAlsaHandle::setPlayAudoBuffer(YangAudioBuffer *pbuffer){ + m_ahandle->m_aecPlayBuffer=pbuffer; +} +void YangAlsaHandle::setAec(YangRtcAec *paec){ + m_ahandle->m_aec=paec; +} +void YangAlsaHandle::setPreProcess(YangPreProcess *pp) { + //m_preProcess = pp; +} + +int32_t YangAlsaHandle::init() { + if (m_context->avinfo.audio.usingMono) { + m_frames = 320; + m_channel = 1; + m_sample = 16000; + } else { + m_frames = 1024; + } + initRead(); + initWrite(); + return Yang_Ok; +} + +void YangAlsaHandle::setInAudioBuffer(vector *pal) { + m_in_audioBuffer = pal; +} +void YangAlsaHandle::initWrite() { + if (m_writeInit == 1) + return; + + uint32_t val = 0; + int32_t dir = 0; + snd_pcm_hw_params_t *hw_params; + int32_t err = 0; + if ((err = snd_pcm_open(&m_writeHandle, "default", SND_PCM_STREAM_PLAYBACK, + 0)) < 0) { + + yang_error( "unable to open pcm device: %s\n", + snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_hw_params_malloc(&hw_params)) < 0) { + yang_error("cannot allocate hardware parameter structure (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_any(m_writeHandle, hw_params)) < 0) { + + yang_error( + "cannot initialize hardware parameter structure (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_access(m_writeHandle, hw_params, + SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) { + + yang_error( "cannot set access type (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_format(m_writeHandle, hw_params, + SND_PCM_FORMAT_S16_LE)) < 0) { + + yang_error( "cannot set sample format (%s)\n", + snd_strerror(err)); + _exit(1); + } + + val = m_sample; + if ((err = snd_pcm_hw_params_set_rate_near(m_writeHandle, hw_params, &val, + 0)) < 0) { + + yang_error( "cannot set sample 
rate (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_channels(m_writeHandle, hw_params, + m_channel)) < 0) { + + yang_error( "cannot set channel count (%s)\n", + snd_strerror(err)); + _exit(1); + } + + //m_frames = m_frames; + if ((err = snd_pcm_hw_params_set_period_size_near(m_writeHandle, hw_params, + &m_frames, &dir)) < 0) { + + yang_error( "cannot set period size (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params(m_writeHandle, hw_params)) < 0) { + + yang_error( "cannot set write parameters (%s)\n", + snd_strerror(err)); + _exit(1); + } + + snd_pcm_hw_params_free(hw_params); + hw_params=NULL; + m_size = m_frames * 2 * m_channel; // 2 bytes/sample, 2 channels +//mw1.start("/home/yang/bmp/out1.wav"); + m_writeInit = 1; + +} +void YangAlsaHandle::initRead() { + if (m_readInit == 1) + return; + int32_t dir = 0; + snd_pcm_hw_params_t *hw_params; + int32_t err = 0; + if ((err = snd_pcm_open(&m_readHandle, "default", SND_PCM_STREAM_CAPTURE, 0)) + < 0) { + + yang_error( "unable to open pcm device: %s\n", + snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_hw_params_malloc(&hw_params)) < 0) { + + yang_error( + "cannot allocate hardware parameter structure (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_any(m_readHandle, hw_params)) < 0) { + + yang_error( + "cannot initialize hardware parameter structure (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_access(m_readHandle, hw_params, + SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) { + + yang_error( "cannot set access type (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_format(m_readHandle, hw_params, + SND_PCM_FORMAT_S16_LE)) < 0) { + + yang_error( "cannot set sample format (%s)\n", + snd_strerror(err)); + _exit(1); + } + + // val=t_sample;//44100; + if ((err = snd_pcm_hw_params_set_rate_near(m_readHandle, hw_params, + &m_sample, 0)) < 0) { + + yang_error( "cannot set sample rate (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_channels(m_readHandle, hw_params, + m_channel)) < 0) { + + yang_error( "cannot set channel count (%s)\n", + snd_strerror(err)); + _exit(1); + } + + // m_frames=1024; + if ((err = snd_pcm_hw_params_set_period_size_near(m_readHandle, hw_params, + &m_frames, &dir)) < 0) { + + yang_error( "cannot set period size (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params(m_readHandle, hw_params)) < 0) { + + yang_error( "cannot set read parameters (%s)\n", + snd_strerror(err)); + _exit(1); + } + //int32_t dir=0; + // snd_pcm_uframes_t t1=0,t2=0; + //snd_pcm_hw_params_get_period_size(hw_params, &t1, &dir); + //snd_pcm_hw_params_get_buffer_size(hw_params, &t2); + + snd_pcm_hw_params_free(hw_params); + hw_params=NULL; + m_size = m_frames * 2 * m_channel; // 2 bytes/sample, 2 channels + m_buffer = (uint8_t*) malloc(m_size); + m_readInit = 1; +} +//CMyMakeWave mw1; +void YangAlsaHandle::startLoop() { + // loops = 5000000 / val; + m_loops = 1; + int32_t status = 0; + // mw1.start("/home/yang/bmp/ttt.wav"); + int32_t audiolen = m_frames * m_channel * 2; + + if ((status = snd_pcm_prepare(m_readHandle)) < 0) { + + yang_error( + "cannot prepare audio interface for use (%s)\n", + snd_strerror(status)); + _exit(1); + } + uint8_t *pcm = new uint8_t[audiolen]; + uint8_t *tmp = NULL; + int32_t readLen = 0; + + YangFrame frame; + memset(&frame,0,sizeof(YangFrame)); + while (m_loops == 1) { + readLen = 0; + if (hasData() > 0) 
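+			// mix every pending input buffer into one period before writing it to the playback device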
{ + + memset(pcm, 0, audiolen); + for (size_t i = 0; i < m_in_audioBuffer->size(); i++) { + tmp = m_in_audioBuffer->at(i)->getAudios(&frame); + if (tmp) + yang_mixaudio_mix5(pcm, tmp, audiolen, 128); + } + status = snd_pcm_writei(m_writeHandle, pcm, m_frames); + m_ahandle->putEchoPlay((short*) pcm,audiolen); + readLen = audiolen; + if (status < 0) { + if (status == -EPIPE) { + // EPIPE means overrun + + yang_usleep(1000); + snd_pcm_prepare(m_writeHandle); + continue; + //snd_pcm_prepare(m_handle); + } + status = snd_pcm_recover(m_writeHandle, status, 0); + if (status < 0) { + + yang_error( + "ALSA write failed (unrecoverable): %s\n", + snd_strerror(status)); + } + + } + + } + if ((status = snd_pcm_readi(m_readHandle, m_buffer, m_frames)) + != m_frames) { + + yang_error( + "read from audio interface failed (%s)\n", + snd_strerror(status)); + } + if (status == -EAGAIN) { + snd_pcm_wait(m_readHandle, 2 * m_channel); + status = 0; + } else if (status < 0) { + status = snd_pcm_recover(m_readHandle, status, 0); + if (status < 0) { + + yang_error( "ALSA read failed (unrecoverable): %s\n", + snd_strerror(status)); + } + continue; + } + if (readLen > 0) + m_ahandle->putEchoBuffer(m_buffer,audiolen); + else + m_ahandle->putBuffer1(m_buffer,audiolen); + + // rc = write(1, m_buffer, m_size); + //zm_ahandle->putBuffer(m_buffer); + //mw1.write(m_buffer,4096); + + } + //mw1.stop(); + printf("\n********************AudioCapture stop.......\n"); + snd_pcm_drain(m_readHandle); + snd_pcm_close(m_readHandle); + snd_pcm_drain(m_writeHandle); + snd_pcm_close(m_writeHandle); + free(m_buffer); + m_readHandle = NULL; + m_writeHandle = NULL; + //m_params=NULL; + m_buffer = NULL; +} + +void YangAlsaHandle::stopLoop() { + m_loops = 0; +} + +void YangAlsaHandle::run() { + startLoop(); +} + +int32_t YangAlsaHandle::hasData() { + if(!m_in_audioBuffer) return 0; + for (int32_t i = 0; i < (int) m_in_audioBuffer->size(); i++) { + if (m_in_audioBuffer->at(i)->size() > 0) + return 1; + } + return 0; +} +#endif diff --git a/libmetartc3/src/yangaudiodev/linux/YangAlsaHandle.h b/libmetartc3/src/yangaudiodev/linux/YangAlsaHandle.h new file mode 100755 index 00000000..4e23feec --- /dev/null +++ b/libmetartc3/src/yangaudiodev/linux/YangAlsaHandle.h @@ -0,0 +1,63 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGCAPTURE_SRC_YANGALSAHANDLE_H_ +#define YANGCAPTURE_SRC_YANGALSAHANDLE_H_ + + +#ifndef _WIN32 +#include +#include "yangavutil/audio/YangAudioMix.h" +#include "yangavutil/audio/YangPreProcess.h" +#include "yangutil/sys/YangThread.h" +#include +#include +#include + + +class YangAlsaHandle: public YangAudioCapture { +public: + YangAlsaHandle(YangContext *pcontext); + virtual ~YangAlsaHandle(); +public: + + int32_t aIndex; + YangAudioCaptureHandle *m_ahandle; + void initRead(); + void initWrite(); + //void stop(); + void setPreProcess(YangPreProcess *pp); + int32_t init(); + void setInAudioBuffer(vector *pal); + void setCatureStart(); + void setCatureStop(); + void setOutAudioBuffer(YangAudioBuffer *pbuffer); + void setPlayAudoBuffer(YangAudioBuffer *pbuffer); + void setAec(YangRtcAec *paec); + //Int32 audio_setparams(Alsa_Env *pEnv); +protected: + void run(); + void startLoop(); + void stopLoop(); +private: + //YangContext *m_context; + int32_t m_readInit; + int32_t m_writeInit, m_writeRet; + vector *m_in_audioBuffer; + + int32_t m_size; + int32_t m_loops; + int32_t m_channel; + uint32_t m_sample; + snd_pcm_uframes_t m_frames; + uint8_t *m_buffer; + snd_pcm_t *m_readHandle; + snd_pcm_t *m_writeHandle; + //YangLog 
*m_log; + //YangAudioMix m_mix; + int32_t hasData(); + //snd_pcm_hw_params_t *m_params; +}; +#endif +#endif /* YANGCAPTURE_SRC_YANGALSAHANDLE_H_ */ diff --git a/libmetartc3/src/yangaudiodev/linux/YangAudioCaptureImpl.cpp b/libmetartc3/src/yangaudiodev/linux/YangAudioCaptureImpl.cpp new file mode 100755 index 00000000..c9c10e12 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/linux/YangAudioCaptureImpl.cpp @@ -0,0 +1,227 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef _WIN32 +#include +#include +#include +#include "stdlib.h" +#include +#include "memory.h" +#include "malloc.h" + +YangAudioCaptureImpl::YangAudioCaptureImpl(YangContext *pcontext) //:YangAudioCapture(pcontext) + { + + m_context = pcontext; + m_ahandle = new YangAudioCaptureHandle(pcontext); + aIndex = 0; + m_size = 0; + m_loops = 0; + m_handle = NULL; + m_buffer = NULL; + + m_channel = pcontext->avinfo.audio.channel; + m_sample = pcontext->avinfo.audio.sample; + if(pcontext->avinfo.audio.audioEncoderType==Yang_AED_AAC){ + m_frames = 1024; + }else{ + m_frames=m_sample/50; + } + + + onlySupportSingle = 0; + +} + +YangAudioCaptureImpl::~YangAudioCaptureImpl() { + if (m_loops) { + stop(); + while (m_isStart) { + yang_usleep(500); + } + } + if (m_handle) { + //snd_pcm_drain(m_handle); + snd_pcm_close(m_handle); + m_handle = NULL; + } + //m_context = NULL; + //m_log = NULL; + yang_delete(m_buffer); + yang_delete(m_ahandle); +} +void YangAudioCaptureImpl::setCatureStart() { + m_ahandle->isBuf = 1; +} +void YangAudioCaptureImpl::setCatureStop() { + m_ahandle->isBuf = 0; +} +void YangAudioCaptureImpl::setOutAudioBuffer(YangAudioBuffer *pbuffer) { + m_ahandle->setOutAudioBuffer(pbuffer); +} +void YangAudioCaptureImpl::setPlayAudoBuffer(YangAudioBuffer *pbuffer) { + m_ahandle->m_aecPlayBuffer = pbuffer; +} +void YangAudioCaptureImpl::setAec(YangRtcAec *paec) { + m_ahandle->m_aec = paec; +} +void YangAudioCaptureImpl::setInAudioBuffer(vector *pal) { + +} +void YangAudioCaptureImpl::setPreProcess(YangPreProcess *pp) { + +} + + +int32_t YangAudioCaptureImpl::init() { + int32_t dir = 0; + snd_pcm_hw_params_t *hw_params; + int32_t err = 0; + char device_name[64] = { 0 }; + if (m_context->avinfo.audio.aIndex > -1) + sprintf(device_name, "hw:%d,%d", m_context->avinfo.audio.aIndex, m_context->avinfo.audio.aSubIndex); + //printf("\nindex==%d,audioHw===%s", m_para->aIndex, device_name); + if ((err = snd_pcm_open(&m_handle, + m_context->avinfo.audio.aIndex == -1 ? 
"default" : device_name, + SND_PCM_STREAM_CAPTURE, 0)) < 0) { + + yang_error("unable to open pcm device: %s\n", snd_strerror(err)); + _exit(1); + } + if ((err = snd_pcm_hw_params_malloc(&hw_params)) < 0) { + + yang_error("cannot allocate hardware parameter structure (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_any(m_handle, hw_params)) < 0) { + + yang_error("cannot initialize hardware parameter structure (%s)\n", + snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_access(m_handle, hw_params, + SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) { + + yang_error("cannot set access type (%s)\n", snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_format(m_handle, hw_params, + SND_PCM_FORMAT_S16_LE)) < 0) { + + yang_error("cannot set sample format (%s)\n", snd_strerror(err)); + _exit(1); + } + + // val=t_sample;//44100; + if ((err = snd_pcm_hw_params_set_rate_near(m_handle, hw_params, &m_sample, + 0)) < 0) { + + yang_error("cannot set sample rate (%s)\n", snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params_set_channels(m_handle, hw_params, m_channel)) + < 0) { + yang_error("cannot set double channel (%s)\n", snd_strerror(err)); + err = snd_pcm_hw_params_set_channels(m_handle, hw_params, 1); + if (err < 0) { + yang_error("cannot set single channel (%s)\n", snd_strerror(err)); + _exit(1); + } + onlySupportSingle = 1; + //_exit(1); + } + + if ((err = snd_pcm_hw_params_set_period_size_near(m_handle, hw_params, + &m_frames, &dir)) < 0) { + + yang_error("cannot set period size (%s)\n", snd_strerror(err)); + _exit(1); + } + + if ((err = snd_pcm_hw_params(m_handle, hw_params)) < 0) { + + yang_error("cannot set parameters (%s)\n", snd_strerror(err)); + _exit(1); + } + //int32_t dir=0; + // snd_pcm_uframes_t t1=0,t2=0; + //snd_pcm_hw_params_get_period_size(hw_params, &t1, &dir); + //snd_pcm_hw_params_get_buffer_size(hw_params, &t2); + + snd_pcm_hw_params_free(hw_params); + m_size = m_frames * 2 * m_channel; // 2 bytes/sample, 2 channels + m_buffer = (uint8_t*) malloc(m_size); + return Yang_Ok; +} + +void YangAudioCaptureImpl::startLoop() { + // loops = 5000000 / val; + m_loops = 1; + unsigned long status = 0; +// YangMakeWave mw1; +// mw1.start(0,"/home/yang/bmp/rec1.wav"); + + uint8_t *tmp = NULL; + if (onlySupportSingle) { + tmp = new uint8_t[m_frames * 2 * 2]; + } + if ((status = snd_pcm_prepare(m_handle)) < 0) { + + yang_error("cannot prepare audio interface for use (%s)\n", + snd_strerror(status)); + _exit(1); + } + int32_t audiolen = m_frames * m_channel * 2; + while (m_loops == 1) { + if ((status = snd_pcm_readi(m_handle, m_buffer, m_frames)) + != m_frames) { + + yang_error("read from audio interface failed (%s)\n", + snd_strerror(status)); + // exit (1); + } + if (status == -EAGAIN) { + //snd_pcm_wait(m_handle, 2 * m_channel); + yang_error("An overrun has occured: %s\n", snd_strerror(status)); + status = 0; + } else if (status < 0) { + status = snd_pcm_recover(m_handle, status, 0); + if (status < 0) { + yang_error("ALSA read failed (unrecoverable): %s\n", + snd_strerror(status)); + //return; + } + continue; + } + + if (onlySupportSingle) { + MonoToStereo((int16_t*) m_buffer, (int16_t*) tmp, m_frames); + m_ahandle->putBuffer1(tmp,audiolen); + } else { + m_ahandle->putBuffer1(m_buffer,audiolen); + } + //mw1.write(m_buffer,4096); + + } + //mw1.stop(); + + //snd_pcm_drain(m_handle); + + snd_pcm_close(m_handle); + yang_deleteA(tmp); + free(m_buffer); + m_handle = NULL; + m_buffer = NULL; + +} + +void 
+void YangAudioCaptureImpl::stopLoop() {
+	m_loops = 0;
+}
+#endif
diff --git a/libmetartc3/src/yangaudiodev/linux/YangAudioCaptureImpl.h b/libmetartc3/src/yangaudiodev/linux/YangAudioCaptureImpl.h
new file mode 100755
index 00000000..262417e7
--- /dev/null
+++ b/libmetartc3/src/yangaudiodev/linux/YangAudioCaptureImpl.h
@@ -0,0 +1,49 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YANGCAPTURE_SRC_YANGAUDIOCAPTUREIMPL_H_
+#define YANGCAPTURE_SRC_YANGAUDIOCAPTUREIMPL_H_
+#include
+#include
+#include "yangavutil/audio/YangPreProcess.h"
+#ifndef _WIN32
+#include
+#include
+using namespace std;
+//#define REQ_BUF_NUM 4 // number of buffers to request (5 at most); too few buffers may cause gaps in the picture
+
+class YangAudioCaptureImpl: public YangAudioCapture {
+public:
+	YangAudioCaptureImpl(YangContext *pcontext);
+	~YangAudioCaptureImpl();
+public:
+	YangAudioCaptureHandle *m_ahandle;
+	int32_t init();
+	void setPreProcess(YangPreProcess *pp);
+	void setCatureStart();
+	void setCatureStop();
+	void setOutAudioBuffer(YangAudioBuffer *pbuffer);
+	void setPlayAudoBuffer(YangAudioBuffer *pbuffer);
+	void setInAudioBuffer(vector *pal);
+	void setAec(YangRtcAec *paec);
+
+protected:
+	void startLoop();
+	void stopLoop();
+	//int32_t single2Double(short *pData, short* dData,int32_t nSize);
+
+private:
+	//YangContext *m_context;
+	int32_t m_size;
+	int32_t m_loops;
+	int32_t m_channel;
+	uint32_t m_sample;
+	snd_pcm_uframes_t m_frames;
+	uint8_t *m_buffer;
+	snd_pcm_t *m_handle;
+	int32_t onlySupportSingle;
+
+};
+#endif
+#endif /* YANGCAPTURE_SRC_YANGAUDIOCAPTUREIMPL_H_ */
diff --git a/libmetartc3/src/yangaudiodev/linux/YangAudioPlayAlsa.cpp b/libmetartc3/src/yangaudiodev/linux/YangAudioPlayAlsa.cpp
new file mode 100755
index 00000000..5574765c
--- /dev/null
+++ b/libmetartc3/src/yangaudiodev/linux/YangAudioPlayAlsa.cpp
@@ -0,0 +1,224 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include
+#ifndef _WIN32
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+YangAudioPlayAlsa::YangAudioPlayAlsa(YangContext *pcontext):YangAudioPlay(pcontext){
+	m_handle = NULL;
+	m_loops = 0;
+	ret = 0;
+	m_frames = 0;
+	m_isStart = 0;
+	m_contextt = 0;
+}
+
+YangAudioPlayAlsa::~YangAudioPlayAlsa() {
+	closeAudio();
+}
+
+int YangAudioPlayAlsa::init() {
+	if (m_contextt == 1)
+		return Yang_Ok;
+
+	m_frames = m_context->avinfo.audio.sample / 50;
+
+	uint32_t val = 0;
+	int32_t dir = 0;
+	snd_pcm_hw_params_t *hw_params;
+	snd_pcm_sw_params_t *sw_params;
+	int32_t err = 0;
+	if ((err = snd_pcm_open(&m_handle, "default", SND_PCM_STREAM_PLAYBACK, 0))
+			< 0) {
+		fprintf(stderr, "unable to open pcm device: %s\n", snd_strerror(err));
+		exit(1);
+	}
+	if ((err = snd_pcm_hw_params_malloc(&hw_params)) < 0) {
+		fprintf(stderr, "cannot allocate hardware parameter structure (%s)\n",
+				snd_strerror(err));
+		exit(1);
+	}
+
+	if ((err = snd_pcm_hw_params_any(m_handle, hw_params)) < 0) {
+		fprintf(stderr, "cannot initialize hardware parameter structure (%s)\n",
+				snd_strerror(err));
+		exit(1);
+	}
+
+	if ((err = snd_pcm_hw_params_set_access(m_handle, hw_params,
+			SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) {
+		fprintf(stderr, "cannot set access type (%s)\n", snd_strerror(err));
+		exit(1);
+	}
+
+	if ((err = snd_pcm_hw_params_set_format(m_handle, hw_params,
+			SND_PCM_FORMAT_S16_LE)) < 0) {
+		fprintf(stderr, "cannot set sample format (%s)\n", snd_strerror(err));
+		exit(1);
+	}
+
+	val = m_sample;
+	if ((err = snd_pcm_hw_params_set_rate_near(m_handle, hw_params, &val, 0))
+			< 0) {
+		fprintf(stderr, "cannot set sample rate (%s)\n", snd_strerror(err));
+		exit(1);
+	}
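+	// snd_pcm_hw_params_set_rate_near() may substitute the closest rate the
+	// hardware supports; val carries back the rate that was actually chosen.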
+
+	if ((err = snd_pcm_hw_params_set_channels(m_handle, hw_params, m_channel))
+			< 0) {
+		fprintf(stderr, "cannot set channel count (%s)\n", snd_strerror(err));
+		exit(1);
+	}
+
+	//m_frames = m_frames;
+	if ((err = snd_pcm_hw_params_set_period_size_near(m_handle, hw_params,
+			&m_frames, &dir)) < 0) {
+		fprintf(stderr, "cannot set period size (%s)\n", snd_strerror(err));
+		exit(1);
+	}
+	snd_pcm_uframes_t buffer_size = m_frames * 2;
+	dir = 0;
+	if ((err = snd_pcm_hw_params_set_buffer_size_near(m_handle,
+			hw_params, &buffer_size)) < 0) {
+		yang_error("cannot set buffer size (%s)", snd_strerror(err));
+		_exit(1);
+	}
+	if ((err = snd_pcm_hw_params(m_handle, hw_params)) < 0) {
+		fprintf(stderr, "cannot set parameters (%s)\n", snd_strerror(err));
+		exit(1);
+	}
+
+	snd_pcm_hw_params_free(hw_params);
+	//m_size = m_frames * 2 * m_channel; // 2 bytes/sample, 2 channels
+	if ((err = snd_pcm_sw_params_malloc(&sw_params)) < 0) {
+		yang_error("cannot allocate software parameters structure (%s)",
+				snd_strerror(err));
+		_exit(1);
+	}
+	if ((err = snd_pcm_sw_params_current(m_handle, sw_params)) < 0) {
+		yang_error("cannot initialize software parameters structure (%s)",
+				snd_strerror(err));
+		_exit(1);
+	}
+	if ((err = snd_pcm_sw_params_set_avail_min(m_handle,
+			sw_params, m_frames)) < 0) {
+		yang_error("cannot set minimum available count (%s)",
+				snd_strerror(err));
+		_exit(1);
+	}
+	if ((err = snd_pcm_sw_params_set_start_threshold(m_handle,
+			sw_params, m_frames)) < 0) {
+		yang_error("cannot set start mode (%s)", snd_strerror(err));
+		_exit(1);
+	}
+	if ((err = snd_pcm_sw_params(m_handle, sw_params)) < 0) {
+		yang_error("cannot set software parameters (%s)", snd_strerror(err));
+		_exit(1);
+	}
+	m_audioData.initRender(m_sample,m_channel);
+	m_contextt = 1;
+	return Yang_Ok;
+}
+
+void YangAudioPlayAlsa::closeAudio() {
+	if (m_handle) {
+		//snd_pcm_drain(m_handle);
+		snd_pcm_close(m_handle);
+		m_handle = NULL;
+	}
+}
+
+void YangAudioPlayAlsa::stopLoop() {
+	m_loops = 0;
+}
+
+void YangAudioPlayAlsa::startLoop() {
+	m_loops = 1;
+	snd_pcm_sframes_t status = 0;
+	uint8_t *pcm = new uint8_t[4096*2];
+	memset(pcm,0,4096*2);
+	YangAutoFreeA(uint8_t,pcm);
+	uint8_t *tmp = NULL;
+	if ((status = snd_pcm_prepare(m_handle)) < 0) {
+		fprintf(stderr, "cannot prepare audio interface for use (%s)\n",
+				snd_strerror(status));
+		exit(1);
+	}
+	YangFrame frame;
+	memset(&frame,0,sizeof(YangFrame));
+	int err = 0;
+	struct pollfd *ufds;
+	int count = snd_pcm_poll_descriptors_count(m_handle);
+	ufds = (struct pollfd *)malloc(sizeof(struct pollfd) * count);
+	if ((err = snd_pcm_poll_descriptors(m_handle, ufds, count)) < 0) {
+		yang_error("Unable to obtain poll descriptors for playback: %s\n",
+				snd_strerror(err));
+		free(ufds);
+		return;
+	}
+	int32_t audiolen = m_frames * m_channel * 2;
+	while (m_loops == 1) {
+		unsigned short revents = 0;
+		status=0;
+		//int32_t err;
+		err = snd_pcm_poll_descriptors_revents(m_handle, ufds, count, &revents);
+		if (err < 0) {
+			yang_error("error in alsa_device_playback_ready: %s",snd_strerror(err));
+			free(ufds);
+			return;
+		}
+		if (revents & POLLOUT) {
+			frame.nb=0;
+			frame.payload=NULL;
+			tmp =m_audioData.getRenderAudioData(audiolen);
+
+			if (tmp){
+				status = snd_pcm_writei(m_handle, tmp, m_frames);
+			}else{
+				status = snd_pcm_writei(m_handle, pcm, m_frames);
+			}
+
+			if (status != (snd_pcm_sframes_t) m_frames) {
+				if (status == -EPIPE) {
+					// EPIPE means underrun on playback
+					yang_warn("underrun occurred");
+					yang_usleep(1000);
+					snd_pcm_prepare(m_handle);
+					continue;
+					//snd_pcm_prepare(m_handle);
+				}
+				status =
snd_pcm_recover(m_handle, status, 0); + if (status < 0) { + yang_error("ALSA write failed (unrecoverable): %s", snd_strerror(status)); + } + + } + }//endif + + } + + //snd_pcm_drain(m_handle); + snd_pcm_close(m_handle); + tmp = NULL; + + if(ufds) free(ufds); + m_handle = NULL; + +} +#endif diff --git a/libmetartc3/src/yangaudiodev/linux/YangAudioPlayAlsa.h b/libmetartc3/src/yangaudiodev/linux/YangAudioPlayAlsa.h new file mode 100755 index 00000000..65744ddc --- /dev/null +++ b/libmetartc3/src/yangaudiodev/linux/YangAudioPlayAlsa.h @@ -0,0 +1,41 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGPLAYER_SRC_YANGAUDIOPLAYALSA_H_ +#define YANGPLAYER_SRC_YANGAUDIOPLAYALSA_H_ + +#ifndef _WIN32 +#include + + +#include + +class YangAudioPlayAlsa:public YangAudioPlay{ +public: + YangAudioPlayAlsa(YangContext *pcontext); + ~YangAudioPlayAlsa(); + + int init(); + + + +protected: + void startLoop(); + void stopLoop(); + +private: + int32_t m_loops; + +private: + + int32_t m_contextt; + snd_pcm_t *m_handle; + snd_pcm_uframes_t m_frames; + + + void closeAudio(); + int32_t ret; + +}; +#endif +#endif /* YANGPLAYER_SRC_YANGAUDIOPLAYALSA_H_ */ diff --git a/libmetartc3/src/yangaudiodev/win/YangAudioApiCapture.h b/libmetartc3/src/yangaudiodev/win/YangAudioApiCapture.h new file mode 100755 index 00000000..cf5c5607 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangAudioApiCapture.h @@ -0,0 +1,25 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef SRC_YANGCAPTURE_WIN_API_YANGAUDIOAPICAPTURE_H_ +#define SRC_YANGCAPTURE_WIN_API_YANGAUDIOAPICAPTURE_H_ +#include +#include +#include + +class YangAudioApiCapture:public YangThread{ +public: + YangAudioApiCapture(){m_isStart=0;}; + virtual ~YangAudioApiCapture(){}; + virtual void setCaptureCallback(YangCaptureCallback* cb)=0; + virtual int initCapture()=0; + virtual int startCpature()=0; + virtual int stopCapture()=0; + virtual void captureThread()=0; + virtual int getAudioOutLength()=0; + int m_isStart; +}; + + + +#endif /* SRC_YANGCAPTURE_WIN_API_YANGAUDIOAPICAPTURE_H_ */ diff --git a/libmetartc3/src/yangaudiodev/win/YangRecAudioCaptureHandle.cpp b/libmetartc3/src/yangaudiodev/win/YangRecAudioCaptureHandle.cpp new file mode 100755 index 00000000..d5835bdb --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangRecAudioCaptureHandle.cpp @@ -0,0 +1,72 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "yangutil/yang_unistd.h" +#ifdef _WIN32 +#include +#include "YangRecAudioCaptureHandle.h" + +YangRecAudioCaptureHandle::YangRecAudioCaptureHandle(YangContext *pcontext):YangAudioCaptureHandle(pcontext) +{ + readStart=0; + + if (pcontext->avinfo.audio.usingMono) { + m_len=320*2; + } else { + m_len = 1024*4; + } + if(pcontext->avinfo.audio.sample==48000) m_len=960*6; + m_buf=new uint8_t[m_len]; + //m_preProcess=NULL; + m_aec=NULL; + +} +YangRecAudioCaptureHandle::~YangRecAudioCaptureHandle(void) +{ + + if(m_buf) delete[] m_buf; + m_buf=NULL; +} + + + STDMETHODIMP_(ULONG) YangRecAudioCaptureHandle::AddRef() { return 1; } + STDMETHODIMP_(ULONG) YangRecAudioCaptureHandle::Release() { return 2; } + + STDMETHODIMP YangRecAudioCaptureHandle::QueryInterface(REFIID riid, void **ppvObject) + { + // printf("*********************************\n"); + if (NULL == ppvObject) return E_POINTER; + if (riid == __uuidof(IUnknown)) + { + *ppvObject = static_cast(this); + return S_OK; + } + if (riid == IID_ISampleGrabber) + { + *ppvObject = static_cast(this); + return S_OK; + } + return E_NOTIMPL; + } + + STDMETHODIMP 
YangRecAudioCaptureHandle::SampleCB(double Time, IMediaSample *pSample)
+	{
+		//printf(".len=%d..\n",pSample->GetSize());
+		//list->putAudio(
+		//printf("")
+		return E_NOTIMPL;
+	}
+
+	STDMETHODIMP YangRecAudioCaptureHandle::BufferCB(double Time, BYTE *pBuffer, long BufferLen)
+	{
+		//printf()
+		//printf("a%d,",BufferLen);
+		memset(m_buf,0,m_len);
+		putBuffer1(pBuffer,BufferLen);
+
+		return E_NOTIMPL;
+	}
+//int32_t tcou=0;
+
+#endif
diff --git a/libmetartc3/src/yangaudiodev/win/YangRecAudioCaptureHandle.h b/libmetartc3/src/yangaudiodev/win/YangRecAudioCaptureHandle.h
new file mode 100755
index 00000000..549c298c
--- /dev/null
+++ b/libmetartc3/src/yangaudiodev/win/YangRecAudioCaptureHandle.h
@@ -0,0 +1,32 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef ___YangRecAudioCaptureHandlee__
+#define ___YangRecAudioCaptureHandlee__
+#ifdef _WIN32
+#include "qedit.h"
+
+#include "yangavutil/audio/YangMakeWave.h"
+#include "yangutil/buffer/YangAudioBuffer.h"
+#include "../YangAudioCaptureHandle.h"
+
+class YangRecAudioCaptureHandle: public ISampleGrabberCB,public YangAudioCaptureHandle
+{
+public:
+	YangRecAudioCaptureHandle(YangContext *pcontext);
+	virtual ~YangRecAudioCaptureHandle(void);
+	int32_t readStart;
+
+	STDMETHODIMP_(ULONG) AddRef();
+	STDMETHODIMP_(ULONG) Release();
+	STDMETHODIMP QueryInterface(REFIID riid, void **ppvObject);
+	STDMETHODIMP SampleCB(double Time, IMediaSample *pSample);
+	STDMETHODIMP BufferCB(double Time, BYTE *pBuffer, long BufferLen);
+private:
+	uint8_t *m_buf;
+	int32_t m_len;
+
+};
+#endif
+#endif
diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioApi.cpp b/libmetartc3/src/yangaudiodev/win/YangWinAudioApi.cpp
new file mode 100755
index 00000000..541c32c1
--- /dev/null
+++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioApi.cpp
@@ -0,0 +1,183 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include
+#ifdef _WIN32
+#include
+
+#include
+#include
+#include
+#include
+#define SAFE_RELEASE(x) if(x){x->Release();x=NULL;}
+
+#define Yang_Release(x) if(x){x->Release();x=NULL;}
+#define EXIT_ON_ERROR(hres) \
+	do { \
+		if (FAILED(hres)) \
+			goto Exit; \
+	} while (0)
+
+YangWinAudioApi::YangWinAudioApi()
+{
+	m_enum=NULL;
+	CoInitialize(NULL); // initialize the COM library
+	CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
+			__uuidof(IMMDeviceEnumerator),
+			reinterpret_cast<void**>(&m_enum));
+}
+YangWinAudioApi::~YangWinAudioApi()
+{
+	Yang_Release(m_enum);
+}
+
+int YangWinAudioApi::getListDevice(IMMDeviceEnumerator* penum,EDataFlow dir,int index,IMMDevice** ppDevice) {
+	HRESULT hr(S_OK);
+	IMMDeviceCollection* pCollection = NULL;
+
+	hr = penum->EnumAudioEndpoints(
+			dir,
+			DEVICE_STATE_ACTIVE, // only active endpoints are OK
+			&pCollection);
+	if (FAILED(hr)) {
+		SAFE_RELEASE(pCollection);
+		return 1;
+	}
+
+	hr = pCollection->Item(index, ppDevice);
+	if (FAILED(hr)) {
+		SAFE_RELEASE(pCollection);
+		return 1;
+	}
+
+	SAFE_RELEASE(pCollection);
+	return 0;
+}
+int YangWinAudioApi::getDeviceListCount(IMMDeviceCollection* pcollection,EDataFlow dir){
+	HRESULT hr = S_OK;
+	UINT count = 0;
+
+	hr = pcollection->GetCount(&count);
+
+	return static_cast<int>(count);
+}
+int YangWinAudioApi::getDeviceID(IMMDevice* pDevice, LPWSTR pszBuffer,int bufferLen) {
+
+	static const WCHAR szDefault[] = L"";
+
+	HRESULT hr = E_FAIL;
+	LPWSTR pwszID = NULL;
+
+	assert(pszBuffer != NULL);
+	assert(bufferLen > 0);
+
+	if (pDevice != NULL) {
+		hr = pDevice->GetId(&pwszID);
+	}
+
+	if (hr == S_OK) {
+		// Found the device ID.
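+		// IMMDevice::GetId() returns a COM-allocated endpoint ID string;
+		// it is copied out here and released below with CoTaskMemFree().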
+		wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE);
+	} else {
+		// Failed to find the device ID.
+		wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
+	}
+
+	CoTaskMemFree(pwszID);
+	return 0;
+}
+int YangWinAudioApi::getDefaultDeviceID( IMMDeviceEnumerator* _ptrEnumerator,EDataFlow dir,ERole role,LPWSTR szBuffer,int bufferLen) {
+
+	HRESULT hr = S_OK;
+	IMMDevice* pDevice = NULL;
+
+	hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice);
+
+	if (FAILED(hr)) {
+		SAFE_RELEASE(pDevice);
+		return 1;
+	}
+
+	int32_t res = getDeviceID(pDevice, szBuffer, bufferLen);
+	SAFE_RELEASE(pDevice);
+	return res;
+}
+
+int YangWinAudioApi::getDefaultDeviceIndex(IMMDeviceEnumerator* penum,EDataFlow dir, ERole role,int* index) {
+	HRESULT hr = S_OK;
+
+	WCHAR szDefaultDeviceID[MAX_PATH] = {0};
+	WCHAR szDeviceID[MAX_PATH] = {0};
+
+	const size_t kDeviceIDLength = sizeof(szDeviceID) / sizeof(szDeviceID[0]);
+	assert(kDeviceIDLength ==
+			sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0]));
+
+	if (getDefaultDeviceID(penum,dir, role, szDefaultDeviceID, kDeviceIDLength)) {
+		yang_error("getDefaultDeviceID failed hr==%ld",hr);
+		return 1;
+	}
+
+	IMMDeviceCollection* collection = NULL;
+	hr = penum->EnumAudioEndpoints(dir, DEVICE_STATE_ACTIVE,
+			&collection);
+
+	if (!collection) {
+		yang_error("Device collection not valid");
+		return 1;
+	}
+
+	UINT count = 0;
+	hr = collection->GetCount(&count);
+	if (FAILED(hr)) {
+		yang_error("..........collection GetCount failed================hr==%ld",hr);
+		SAFE_RELEASE(collection);
+		return 1;
+	}
+
+	*index = -1;
+	for (UINT i = 0; i < count; i++) {
+		memset(szDeviceID, 0, sizeof(szDeviceID));
+
+		IMMDevice* device = NULL;
+		hr = collection->Item(i, &device);
+		if (FAILED(hr) || device == NULL) {
+			yang_error("..........collection Item failed==================hr==%ld",hr);
+			SAFE_RELEASE(collection);
+			return 1;
+		}
+
+		if (getDeviceID(device, szDeviceID, kDeviceIDLength)) {
+			yang_error("..........getDeviceID failed====================hr==%ld",hr);
+			SAFE_RELEASE(device);
+			SAFE_RELEASE(collection);
+			return 1;
+		}
+		SAFE_RELEASE(device);
+
+		if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0) {
+			// Found a match.
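+			// WASAPI identifies the default endpoint only by its ID string, so the
+			// active-endpoint collection is scanned to map that ID back to an index.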
+ *index = i; + break; + } + } + + if (*index == -1) { + yang_error( "Unable to find collection index for default device=hr==%ld",hr); + return 1; + } + SAFE_RELEASE(collection); + + + return 0; +} +#endif diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioApi.h b/libmetartc3/src/yangaudiodev/win/YangWinAudioApi.h new file mode 100755 index 00000000..16e459d8 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioApi.h @@ -0,0 +1,28 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangWinAudioApi_H +#define YangWinAudioApi_H +#ifdef _WIN32 +#include "windows.h" +#include + +#include + +class YangWinAudioApi +{ +public: + YangWinAudioApi(); +virtual ~YangWinAudioApi(); +protected: + HRESULT enumEndpointDevicesAll(IMMDeviceEnumerator* penum,EDataFlow dataFlow); + int getDefaultDeviceID( IMMDeviceEnumerator* penum,EDataFlow dir,ERole role,LPWSTR szBuffer,int bufferLen); + int getDeviceID(IMMDevice* pDevice, LPWSTR pszBuffer,int bufferLen); + int getDefaultDeviceIndex(IMMDeviceEnumerator* penum,EDataFlow dir, ERole role, int* index); + int getDeviceListCount(IMMDeviceCollection* pcollection,EDataFlow dir); + int getListDevice(IMMDeviceEnumerator* penum,EDataFlow dir,int index,IMMDevice** ppDevice); +protected: + IMMDeviceEnumerator* m_enum; +}; +#endif +#endif // YangWinAudioApi_H diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioApiAec.cpp b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiAec.cpp new file mode 100755 index 00000000..67d7d092 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiAec.cpp @@ -0,0 +1,363 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#ifdef _MSC_VER +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#pragma comment (lib,"wmcodecdspuuid.lib") +#pragma comment (lib,"dmoguids.lib") +#pragma comment (lib,"msdmo.lib") +#define Yang_Release(x) if(x){x->Release();x=NULL;} + +class MediaBufferImpl final : public IMediaBuffer { +public: + explicit MediaBufferImpl(BYTE* p,DWORD maxLength) + : _data(p), + _length(0), + _maxLength(maxLength), + _refCount(0) { + + + } + void init(BYTE* p){ + _data=p; + _length=0; + } + // IMediaBuffer methods. + STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength)) { + // if (!ppBuffer || !pcbLength) { + // return E_POINTER; + // } + + if(ppBuffer) *ppBuffer = _data; + if(pcbLength) *pcbLength = _length; + + return S_OK; + } + + STDMETHOD(GetMaxLength(DWORD* pcbMaxLength)) { + if (!pcbMaxLength) { + return E_POINTER; + } + + *pcbMaxLength = _maxLength; + return S_OK; + } + + STDMETHOD(SetLength(DWORD cbLength)) { + if (cbLength > _maxLength) { + return E_INVALIDARG; + } + + _length = cbLength; + return S_OK; + } + + // IUnknown methods. 
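+	// minimal COM reference counting; Release() deletes the buffer once the count reaches zero.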
+ STDMETHOD_(ULONG, AddRef()) { return InterlockedIncrement(&_refCount); } + + STDMETHOD(QueryInterface(REFIID riid, void** ppv)) { + if (!ppv) { + return E_POINTER; + } else if (riid != IID_IMediaBuffer && riid != IID_IUnknown) { + return E_NOINTERFACE; + } + + *ppv = static_cast(this); + AddRef(); + return S_OK; + } + + STDMETHOD_(ULONG, Release()) { + LONG refCount = InterlockedDecrement(&_refCount); + if (refCount == 0) { + delete this; + } + + return refCount; + } + +private: + ~MediaBufferImpl() {_data=NULL; } + + BYTE* _data; + DWORD _length; + const DWORD _maxLength; + LONG _refCount; +}; +YangWinAudioApiAec::YangWinAudioApiAec() { + m_isStart=0; + m_loops=0; + //m_cb=NULL; + m_dmo=NULL; + // m_mediaBuffer=NULL; + m_micIndex=0; + m_ps=NULL; + m_dataBuf=new BYTE[640]; + CoCreateInstance(CLSID_CWMAudioAEC, NULL, CLSCTX_INPROC_SERVER, + IID_IMediaObject, reinterpret_cast(&m_dmo)); + if(m_dmo) m_dmo->QueryInterface(IID_IPropertyStore, reinterpret_cast(&m_ps)); + m_dwStatus=0; + memset(&m_audioFrame,0,sizeof(YangFrame)); + m_audioData.initIn(16000, 1); + m_audioData.initOut(16000, 1); +} + +YangWinAudioApiAec::~YangWinAudioApiAec() { + + Yang_Release(m_ps); + + Yang_Release(m_dmo); + delete[] m_dataBuf; + +} +void YangWinAudioApiAec::setCaptureCallback(YangCaptureCallback* cb){ + m_audioData.m_cb=cb; + +} +int YangWinAudioApiAec::initCapture() { + getDefaultDeviceIndex(m_enum,eCapture,eConsole,&m_micIndex); + int ret=initRecordingDMO(); + + if(ret) return yang_error_wrap(ret,"int recording dmo fail.."); + + return Yang_Ok; + +} + + +void YangWinAudioApiAec::captureThread(){ + run(); +} + int YangWinAudioApiAec::startCpature(){ + return Yang_Ok; + } + int YangWinAudioApiAec::stopCapture(){ + return Yang_Ok; + } + void YangWinAudioApiAec::stop(){ + stopLoop(); + } + + void YangWinAudioApiAec::stopLoop() { + m_loops = 0; + + + } + + void YangWinAudioApiAec::run() { + m_isStart = 1; + startLoop(); + m_isStart = 0; + } + + + + + int YangWinAudioApiAec::setVtI4Property(IPropertyStore* ptrPS,REFPROPERTYKEY key,LONG value) { + PROPVARIANT pv; + PropVariantInit(&pv); + pv.vt = VT_I4; + pv.lVal = value; + HRESULT hr = ptrPS->SetValue(key, pv); + PropVariantClear(&pv); + if (FAILED(hr)) { + // _TraceCOMError(hr); + return yang_error_wrap(1,"setVtI4Property error.....key="); + + } + return 0; + } + int YangWinAudioApiAec::setBoolProperty(IPropertyStore* ptrPS, REFPROPERTYKEY key,VARIANT_BOOL value) { + PROPVARIANT pv; + PropVariantInit(&pv); + pv.vt = VT_BOOL; + pv.boolVal = value; + HRESULT hr = ptrPS->SetValue(key, pv); + PropVariantClear(&pv); + if (FAILED(hr)) { + return yang_error_wrap(1,"setVtI4Property error.....key="); + } + return 0; + } + + int YangWinAudioApiAec::setDMOProperties() { + HRESULT hr = S_OK; + if(m_dmo == NULL) return 1; + + + + + if (FAILED(hr) || m_ps == NULL) { + return yang_error_wrap(hr,"get IID_IPropertyStore COM failed!......."); + } + // Set the AEC system mode. + // SINGLE_CHANNEL_AEC - AEC processing only. + if (setVtI4Property(m_ps, MFPKEY_WMAAECMA_SYSTEM_MODE, SINGLE_CHANNEL_AEC)) { + yang_error("set SINGLE_CHANNEL_AEC COM failed!......."); + return 1; + } + + // Set the AEC source mode. + // VARIANT_TRUE - Source mode (we poll the AEC for captured data). + if (setBoolProperty(m_ps, MFPKEY_WMAAECMA_DMO_SOURCE_MODE, VARIANT_TRUE)) { + yang_error("set MFPKEY_WMAAECMA_DMO_SOURCE_MODE COM failed!......."); + return 1; + } + + // Enable the feature mode. + // This lets us override all the default processing settings below. 
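+	// with feature mode on, the AGC/NS overrides below take effect; in source mode the
+	// DMO is polled via ProcessOutput() for AEC-processed capture data (see startLoop()).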
+ if (setBoolProperty(m_ps, MFPKEY_WMAAECMA_FEATURE_MODE, VARIANT_TRUE) ) { + yang_error("set MFPKEY_WMAAECMA_FEATURE_MODE COM failed!......."); + return 1; + } + + // Disable analog AGC (default enabled). + if (setBoolProperty(m_ps, MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER, VARIANT_FALSE) ) { + yang_error("set MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER COM failed!......."); + return 1; + } + + // Disable noise suppression (default enabled). + // 0 - Disabled, 1 - Enabled + if (setVtI4Property(m_ps, MFPKEY_WMAAECMA_FEATR_NS, 0)) { + yang_error("set MFPKEY_WMAAECMA_FEATR_NS COM failed!......."); + return 1; + } + + // search for the device index. + int inDevIndex = m_micIndex; + + int outDevIndex =0; + this->getDefaultDeviceIndex(m_enum,eRender,eConsole,&outDevIndex);//0; + + DWORD devIndex = static_cast(outDevIndex << 16) + + static_cast(0x0000ffff & inDevIndex); + // yang_error( "Capture device index: " << inDevIndex << ", render device index: " << outDevIndex; + if (setVtI4Property(m_ps, MFPKEY_WMAAECMA_DEVICE_INDEXES, devIndex)) { + + yang_error("set MFPKEY_WMAAECMA_FEATR_NS COM failed!......."); + return 1; + } + + return 0; + } + + int YangWinAudioApiAec::initRecordingDMO() { + + + if(m_dmo == NULL) return 1; + + if (setDMOProperties()) { + yang_error("..........setDMOProperties failed===================="); + return 1; + } + + DMO_MEDIA_TYPE mt = {}; + HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX)); + if (FAILED(hr)) { + MoFreeMediaType(&mt); + yang_error("..........MoInitMediaType failed===================="); + return 1; + } + + mt.majortype = MEDIATYPE_Audio; + mt.subtype = MEDIASUBTYPE_PCM; + mt.formattype = FORMAT_WaveFormatEx; + + WAVEFORMATEX* ptrWav = reinterpret_cast(mt.pbFormat); + ptrWav->wFormatTag = WAVE_FORMAT_PCM; + ptrWav->nChannels = 1; + ptrWav->nSamplesPerSec = 16000; + ptrWav->nAvgBytesPerSec = 32000; + ptrWav->nBlockAlign = 2; + ptrWav->wBitsPerSample = 16; + ptrWav->cbSize = 0; + + // Set the VoE format equal to the AEC output format. + + + // Set the DMO output format parameters. 
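+	// 16 kHz mono S16 gives nBlockAlign = nChannels * wBitsPerSample / 8 = 2 and
+	// nAvgBytesPerSec = nSamplesPerSec * nBlockAlign = 32000, matching the values above.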
+ hr = m_dmo->SetOutputType(0, &mt, 0); + MoFreeMediaType(&mt); + if (FAILED(hr)) { + yang_error("..........SetOutputType failed===================="); + return 1; + } + + hr = m_dmo->AllocateStreamingResources(); + if (FAILED(hr)) { + return 1; + } + return 0; + } + + + void YangWinAudioApiAec::startLoop(){ + DWORD taskIndex(0); + HANDLE hMmTask = AvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex); + if (hMmTask) { + if (FALSE == AvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL)) { + yang_warn( "failed to boost wincaecapture-thread using MMCSS"); + } + yang_trace("wincaecapture thread is now registered with MMCSS (taskIndex=%d)", + taskIndex ); + } else { + yang_error( "failed to enable MMCSS on wincaecapture thread (err=%lu", GetLastError() ); + + } + m_loops = 1; + uint8_t* tmp=new uint8_t[1024*4]; + YangAutoFreeA(uint8_t,tmp); + + MediaBufferImpl* m_mediaBuffer = new MediaBufferImpl(tmp,4096); + DMO_OUTPUT_DATA_BUFFER OutputBufferStruct; + OutputBufferStruct.pBuffer = m_mediaBuffer; + OutputBufferStruct.pBuffer->AddRef(); + YangFrame audioFrame; + memset(&audioFrame,0,sizeof(YangFrame)); + unsigned long bufLen=0; + while(m_loops){ + yang_usleep(1000*10); + m_mediaBuffer->SetLength(0); + bufLen=0; + do{ + OutputBufferStruct.dwStatus = 0; + HRESULT hr = m_dmo->ProcessOutput(0, 1, &OutputBufferStruct, &m_dwStatus); + + if (hr == S_FALSE) { + bufLen=0; + } else { + hr = m_mediaBuffer->GetBufferAndLength(NULL, &bufLen); + + } + + } while (OutputBufferStruct.dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE); + // yang_trace("a%d,",bufLen); + if(bufLen>0){ + audioFrame.payload=tmp; + audioFrame.nb=bufLen; + m_audioData.caputure(&audioFrame); + + } + + + } + Yang_Release(m_mediaBuffer); + } + +#endif diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioApiAec.h b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiAec.h new file mode 100755 index 00000000..3b2e6685 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiAec.h @@ -0,0 +1,55 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGCAPTURE_WIN_API_YANGWINAUDIOAPIAEC_H_ +#define YANGCAPTURE_WIN_API_YANGWINAUDIOAPIAEC_H_ +#ifdef _MSC_VER +#include +#include +#include +#include +#include +#include +#include + +class YangWinAudioApiAec:public YangAudioApiCapture,public YangWinAudioApi { +public: + YangWinAudioApiAec(); + virtual ~YangWinAudioApiAec(); + void setCaptureCallback(YangCaptureCallback* cb); + int initCapture(); + int startCpature(); + int stopCapture(); + void captureThread(); + int getAudioOutLength(){return 640;} + + + int initRecordingDMO(); + int setDMOProperties(); + int getRenderIndex(); + // int getBuffer(uint8_t* p,unsigned long* pbufLen); + YangAudioCaptureData m_audioData; + + void stop(); +protected: + void run(); + void startLoop(); + void stopLoop(); + int setBoolProperty(IPropertyStore* ptrPS, REFPROPERTYKEY key,VARIANT_BOOL value); + int setVtI4Property(IPropertyStore* ptrPS,REFPROPERTYKEY key,LONG value); + int m_loops; + //YangCaptureCallback* m_cb; +private: + IMediaObject* m_dmo; + + + DWORD m_dwStatus; + + int m_micIndex; + BYTE* m_dataBuf; + IPropertyStore* m_ps; + // YangRecData m_recData; + YangFrame m_audioFrame; +}; +#endif +#endif /* YANGCAPTURE_WIN_API_YANGWINAUDIOAPIAEC_H_ */ diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioApiCapture.cpp b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiCapture.cpp new file mode 100755 index 00000000..8ea1ef83 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiCapture.cpp @@ -0,0 +1,372 @@ +// +// Copyright 
(c) 2019-2022 yanggaofeng
+//
+#include
+#ifdef _WIN32
+#include
+#include
+#include
+#include
+
+#define Yang_Release(x) if(x){x->Release();x=NULL;}
+
+const float MAX_MICROPHONE_VOLUME = 255.0f;
+const float MIN_MICROPHONE_VOLUME = 0.0f;
+YangWinAudioApiCapture::YangWinAudioApiCapture(YangContext *pcontext) {
+	m_inputDeviceIndex = 0;
+	m_isStart = 0;
+	m_loops = 0;
+	m_bufferLength = 0;
+	m_blockSize = 480;
+	m_frameSize=4;
+	m_captureCollection = NULL;
+	m_deviceIn = NULL;
+	m_clientIn = NULL;
+	m_captureClient = NULL;
+	m_captureVolume = NULL;
+	m_samplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+	m_audioData.initOut(pcontext->avinfo.audio.sample, pcontext->avinfo.audio.channel);
+
+	m_channlList[0] = 2; // stereo is prio 1
+	m_channlList[1] = 1; // mono is prio 2
+	m_channlList[2] = 4; // quad is prio 3
+
+	m_blank = new uint8_t[960 * 8];
+	memset(m_blank, 0, 960 * 8);
+}
+YangWinAudioApiCapture::~YangWinAudioApiCapture() {
+	yang_stop(this);
+	yang_stop_thread(this);
+	if (NULL != m_samplesReadyEvent) {
+		CloseHandle(m_samplesReadyEvent);
+		m_samplesReadyEvent = NULL;
+	}
+	Yang_Release(m_captureCollection);
+	Yang_Release(m_deviceIn);
+	Yang_Release(m_clientIn);
+	Yang_Release(m_captureClient);
+	Yang_Release(m_captureVolume);
+	yang_deleteA(m_blank);
+}
+
+void YangWinAudioApiCapture::setCaptureCallback(YangCaptureCallback *cb) {
+	m_audioData.m_cb = cb;
+}
+
+int YangWinAudioApiCapture::setMicrophoneVolume(int volume) {
+	if (volume < static_cast<int>(MIN_MICROPHONE_VOLUME)
+			|| volume > static_cast<int>(MAX_MICROPHONE_VOLUME)) {
+		return 1;
+	}
+
+	HRESULT hr = S_OK;
+	// scale input volume to valid range (0.0 to 1.0)
+	const float fLevel = static_cast<float>(volume) / MAX_MICROPHONE_VOLUME;
+
+	// m_lock.lock();
+	hr = m_captureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
+	// m_lock.unlock();
+	if (FAILED(hr))
+		return 1;
+
+	return 0;
+}
+int YangWinAudioApiCapture::getMicrophoneVolume(int &volume) {
+	HRESULT hr = S_OK;
+	float fLevel(0.0f);
+	volume = 0;
+	// m_lock.lock();
+	hr = m_captureVolume->GetMasterVolumeLevelScalar(&fLevel);
+	// m_lock.unlock();
+	if (FAILED(hr))
+		return 1;
+
+	// scale input volume range [0.0,1.0] to valid output range
+	volume = static_cast<int>(fLevel * MAX_MICROPHONE_VOLUME);
+
+	return 0;
+}
+
+int YangWinAudioApiCapture::setMicrophoneMute(bool enable) {
+	if (m_deviceIn == NULL) {
+		return 1;
+	}
+
+	HRESULT hr = S_OK;
+	IAudioEndpointVolume *pVolume = NULL;
+
+	// Set the microphone system mute state.
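+	// IAudioEndpointVolume controls the device-wide volume/mute shared with
+	// other sessions; it is activated on demand here rather than cached.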
+ hr = m_deviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, + reinterpret_cast(&pVolume)); + if (FAILED(hr)) + return 1; + + const BOOL mute(enable); + hr = pVolume->SetMute(mute, NULL); + if (FAILED(hr)) + return 1; + + Yang_Release(pVolume); + return 0; + +} + +int YangWinAudioApiCapture::initMicrophone() { + //eCommunications eConsole + int ret = getListDevice(m_enum, eCapture, m_inputDeviceIndex, &m_deviceIn); + if (ret != 0 || (m_deviceIn == NULL)) { + Yang_Release(m_deviceIn); + return yang_error_wrap(ERROR_SYS_AudioCapture, "get capture devicein fail..."); + } + + Yang_Release(m_captureVolume); + ret = m_deviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, + reinterpret_cast(&m_captureVolume)); + if (ret != 0 || m_captureVolume == NULL) { + Yang_Release(m_captureVolume); + return 1; + } + + return 0; + // if(m_deviceIn==NULL) getDefaultDevice(eCapture,eCommunications,&m_deviceIn); +} +int YangWinAudioApiCapture::initCapture() { + getDefaultDeviceIndex(m_enum, eCapture, eConsole, &m_inputDeviceIndex); + + if (initMicrophone()) { + return yang_error_wrap(ERROR_SYS_AudioCapture, "init microphone fail..."); + } + if (m_deviceIn == NULL) { + yang_error("..............initCapture failed"); + return 1; + } + + HRESULT hr = S_OK; + WAVEFORMATEX *pWfxIn = NULL; + WAVEFORMATEXTENSIBLE Wfx = WAVEFORMATEXTENSIBLE(); + WAVEFORMATEX *pWfxClosestMatch = NULL; + + // Create COM object with IAudioClient interface. + Yang_Release(m_clientIn); + hr = m_deviceIn->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, + (void**) &m_clientIn); + // qDebug()<<"m_clientIn....."<GetMixFormat(&pWfxIn); + if (SUCCEEDED(hr)) { + + } + + // Set wave format + Wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; + Wfx.Format.wBitsPerSample = 16; + Wfx.Format.cbSize = 22; + Wfx.dwChannelMask = 0; + Wfx.Samples.wValidBitsPerSample = Wfx.Format.wBitsPerSample; + Wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; + + const int freqs[6] = { 48000, 44100, 16000, 96000, 32000, 8000 }; + hr = S_FALSE; + + // Iterate over frequencies and channels, in order of priority + for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); + freq++) { + for (unsigned int chan = 0; + chan < sizeof(m_channlList) / sizeof(m_channlList[0]); chan++) { + Wfx.Format.nChannels = m_channlList[chan]; + Wfx.Format.nSamplesPerSec = freqs[freq]; + Wfx.Format.nBlockAlign = Wfx.Format.nChannels + * Wfx.Format.wBitsPerSample / 8; + Wfx.Format.nAvgBytesPerSec = Wfx.Format.nSamplesPerSec + * Wfx.Format.nBlockAlign; + // If the method succeeds and the audio endpoint device supports the + // specified stream format, it returns S_OK. If the method succeeds and + // provides a closest match to the specified format, it returns S_FALSE. + hr = m_clientIn->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, + (WAVEFORMATEX*) &Wfx, &pWfxClosestMatch); + if (hr == S_OK) { + break; + } else { + if (pWfxClosestMatch) { + CoTaskMemFree(pWfxClosestMatch); + pWfxClosestMatch = NULL; + } else { + yang_error(" is not supported. No closest match."); + } + } + } + if (hr == S_OK) + break; + } + + if (hr == S_OK) { + m_frameSize= Wfx.Format.nBlockAlign; + m_blockSize = Wfx.Format.nSamplesPerSec / 100; + + m_audioData.initIn(Wfx.Format.nSamplesPerSec, Wfx.Format.nChannels); + yang_trace("\ncapture: sample==%d,channle==%d,nBlockAlign==%d,blockSize==%d\n",Wfx.Format.nSamplesPerSec,Wfx.Format.nChannels,m_frameSize,m_blockSize); + } + + + // Create a capturing stream. 
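+	// event-driven shared mode: AUDCLNT_STREAMFLAGS_EVENTCALLBACK makes the engine signal
+	// m_samplesReadyEvent (registered via SetEventHandle() below) whenever a capture packet
+	// is ready, so the capture loop can block in WaitForMultipleObjects() instead of polling.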
+	hr = m_clientIn->Initialize(AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications
+			AUDCLNT_STREAMFLAGS_EVENTCALLBACK |	// processing of the audio buffer by
+												// the client will be event driven
+			AUDCLNT_STREAMFLAGS_NOPERSIST,	// volume and mute settings for an
+											// audio session will not persist
+											// across system restarts
+			0,	// required for event-driven shared mode
+			0,	// periodicity
+			(WAVEFORMATEX*) &Wfx,	// selected wave format
+			NULL);
+
+	if (FAILED(hr))
+		return yang_error_wrap(ERROR_SYS_AudioCapture, " capture audioclient Initialize fail...");
+
+	hr = m_clientIn->GetBufferSize(&m_bufferLength);
+//	yang_trace("\ncapture bufferLength==%d..............", m_bufferLength);
+	if (FAILED(hr))
+		return yang_error_wrap(ERROR_SYS_AudioCapture, " capture audioclient GetBufferSize fail...");
+	hr = m_clientIn->SetEventHandle(m_samplesReadyEvent);
+	if (FAILED(hr))
+		return yang_error_wrap(ERROR_SYS_AudioCapture, " capture audioclient SetEventHandle fail...");
+	// Get an IAudioCaptureClient interface.
+	Yang_Release(m_captureClient);
+	hr = m_clientIn->GetService(__uuidof(IAudioCaptureClient),
+			(void**) &m_captureClient);
+	if (FAILED(hr))
+		return yang_error_wrap(ERROR_SYS_AudioCapture, " captureClient GetService fail...");
+
+	CoTaskMemFree(pWfxIn);
+	CoTaskMemFree(pWfxClosestMatch);
+
+	return 0;
+
+}
+int YangWinAudioApiCapture::startCpature() {
+
+	if (m_clientIn) {
+		HRESULT hr = m_clientIn->Start();
+		if (FAILED(hr))
+			return yang_error_wrap(ERROR_SYS_AudioCapture, "capture clientIn start fail...");
+		return Yang_Ok;
+	}
+	return yang_error_wrap(ERROR_SYS_AudioCapture, "capture clientIn is null");
+
+}
+int YangWinAudioApiCapture::stopCapture() {
+	if (m_clientIn) {
+		m_clientIn->Stop();
+	}
+	return Yang_Ok;
+}
+int YangWinAudioApiCapture::captureFrame(YangFrame *audioFrame) {
+	audioFrame->nb = 0;
+	UINT32 packetLength = 0;
+	BYTE *pData = 0;
+	UINT32 framesAvailable = 0;
+	DWORD flags = 0;
+	UINT64 recTime = 0;
+	UINT64 recPos = 0;
+	HRESULT hr = m_captureClient->GetNextPacketSize(&packetLength);
+	while (packetLength != 0) {
+		hr = m_captureClient->GetBuffer(&pData,	// packet which is ready to be read by the user
+				&framesAvailable,	// #frames in the captured packet (can be zero)
+				&flags,		// support flags (check)
+				&recPos,	// device position of first audio frame in data packet
+				&recTime);	// value of performance counter at the time of recording
+		if (SUCCEEDED(hr)) {
+			if (AUDCLNT_S_BUFFER_EMPTY == hr) {
+				// Buffer was empty => start waiting for a new capture notification
+				// event
+				return 1;
+			}
+
+			if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+				pData = NULL;
+			}
+
+			if (pData) {
+				audioFrame->payload = pData;
+				audioFrame->nb = framesAvailable * m_frameSize;
+
+			} else {
+				audioFrame->payload = m_blank;
+				audioFrame->nb = framesAvailable * m_frameSize;
+			}
+			m_audioData.caputure(audioFrame);
+
+			hr = m_captureClient->ReleaseBuffer(framesAvailable);
+		}
+		m_captureClient->GetNextPacketSize(&packetLength);
+	}
+
+	return 0;
+}
+int YangWinAudioApiCapture::getAudioOutLength() {
+	return m_audioData.getOutLength();
+}
+void YangWinAudioApiCapture::run() {
+	m_isStart = 1;
+	startLoop();
+	m_isStart = 0;
+}
+void YangWinAudioApiCapture::stop() {
+	stopLoop();
+}
+void YangWinAudioApiCapture::stopLoop() {
+	m_loops = 0;
+	stopCapture();
+}
+
+void YangWinAudioApiCapture::startLoop() {
+	DWORD taskIndex(0);
+	HANDLE hMmTask = AvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+	// ERROR_INVALID_TASK_INDEX
+	if (hMmTask) {
+		if (FALSE == AvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL)) {
+			yang_warn("failed to boost wincapture thread using MMCSS");
+		}
+		yang_trace("wincapture thread is now registered with MMCSS (taskIndex=%d)",
+				taskIndex);
+	} else {
+		yang_error("failed to enable MMCSS on wincapture thread (err=%lu)", GetLastError());
+
+	}
+	m_loops = 1;
+	HRESULT hr = m_clientIn->GetBufferSize(&m_bufferLength);
+	if (FAILED(hr)) yang_error("capture clientIn GetBufferSize fail...");
+	yang_trace("\ncapture bufferLength==%d..............", m_bufferLength);
+	if (startCpature()) {
+		yang_error("start capture fail");
+		return;
+	}
+
+	HANDLE waitArray[1] = { m_samplesReadyEvent };
+	YangFrame audioFrame;
+	memset(&audioFrame, 0, sizeof(YangFrame));
+	while (m_loops == 1) {
+		DWORD waitResult = WaitForMultipleObjects(1, waitArray, FALSE, 500);
+		if (waitResult == (WAIT_OBJECT_0 + 0)) {
+			audioFrame.payload = NULL;
+			audioFrame.nb = 0;
+			captureFrame(&audioFrame);
+		}
+
+	}
+
+}
+void YangWinAudioApiCapture::captureThread() {
+	run();
+}
+
+#endif
diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioApiCapture.h b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiCapture.h
new file mode 100755
index 00000000..01426ed9
--- /dev/null
+++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiCapture.h
@@ -0,0 +1,63 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YangWinAudioApiCapture_H1
+#define YangWinAudioApiCapture_H1
+
+#ifdef _WIN32
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+class YangWinAudioApiCapture:public YangAudioApiCapture,public YangWinAudioApi
+{
+public:
+	YangWinAudioApiCapture(YangContext* pcontext);
+	virtual ~YangWinAudioApiCapture();
+	void setCaptureCallback(YangCaptureCallback *cb);
+	void captureThread();
+	int initCapture();
+	int startCpature();
+	int stopCapture();
+	int getAudioOutLength();
+	int getMicrophoneVolume(int& volume);
+	int setMicrophoneVolume(int volume);
+	int setMicrophoneMute(bool enable);
+
+	IMMDeviceCollection* m_captureCollection;
+	IMMDevice* m_deviceIn;
+	IAudioClient* m_clientIn;
+
+	IAudioCaptureClient* m_captureClient;
+	IAudioEndpointVolume* m_captureVolume;
+
+	int m_inputDeviceIndex;
+	int captureFrame(YangFrame* paudioFrame);
+
+	void stop();
+protected:
+	virtual void startLoop();
+	void stopLoop();
+	void run();
+	int m_loops;
+private:
+	int initMicrophone();
+	UINT32 m_bufferLength;
+	int m_blockSize;
+	int m_frameSize;
+
+	uint16_t m_channlList[3];
+	YangAudioCaptureData m_audioData;
+	HANDLE m_samplesReadyEvent;
+	uint8_t* m_blank;
+
+};
+#endif
+#endif // YangWinAudioApiCapture_H1
diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioApiDevice.cpp b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiDevice.cpp
new file mode 100755
index 00000000..17f6fed2
--- /dev/null
+++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiDevice.cpp
@@ -0,0 +1,137 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include
+#ifdef _WIN32
+#include
+#include
+#include
+#include
+
+YangWinAudioApiDevice::YangWinAudioApiDevice(YangContext *pcontext, bool isRecord,
+		bool usingBuiltinAec) {
+	m_usingBuiltinAec = usingBuiltinAec;
+	m_isStart = 0;
+	m_isInit = 0;
+	m_loops = 0;
+	m_player = NULL;
+	m_context = pcontext;
+#ifdef _MSC_VER
+	if (m_usingBuiltinAec && !isRecord)
+		m_capture = new YangWinAudioApiAec();
+	else {
+#endif
+		m_capture = new YangWinAudioApiCapture(pcontext);
+#ifdef _MSC_VER
+	}
+#else
+	m_usingBuiltinAec = false;
+#endif
+	m_outLen = 640;
+	m_ahandle = new YangAudioCaptureHandle(pcontext);
+	if (!isRecord) m_player = new YangWinAudioApiRender(pcontext);
+
+}
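The capture path above is the standard WASAPI event-driven pattern: initialize the client with AUDCLNT_STREAMFLAGS_EVENTCALLBACK, register an event via SetEventHandle, then drain every pending packet after each signal. The following is a minimal self-contained sketch of that same pattern, not this library's API; error handling is trimmed and the consumer step is a placeholder comment.

#include <windows.h>
#include <mmdeviceapi.h>
#include <audioclient.h>

int captureSketch() {
    CoInitializeEx(NULL, COINIT_MULTITHREADED);
    IMMDeviceEnumerator* en = NULL;
    CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
                     __uuidof(IMMDeviceEnumerator), (void**)&en);
    IMMDevice* dev = NULL;
    en->GetDefaultAudioEndpoint(eCapture, eConsole, &dev);
    IAudioClient* client = NULL;
    dev->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**)&client);
    WAVEFORMATEX* wfx = NULL;
    client->GetMixFormat(&wfx);
    client->Initialize(AUDCLNT_SHAREMODE_SHARED,
                       AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST,
                       0, 0, wfx, NULL);           // event driven: engine picks the period
    HANDLE evt = CreateEvent(NULL, FALSE, FALSE, NULL);
    client->SetEventHandle(evt);
    IAudioCaptureClient* cap = NULL;
    client->GetService(__uuidof(IAudioCaptureClient), (void**)&cap);
    client->Start();
    for (int i = 0; i < 100; i++) {                // ~100 engine periods, then stop
        if (WaitForSingleObject(evt, 500) != WAIT_OBJECT_0) continue;
        UINT32 pkt = 0;
        cap->GetNextPacketSize(&pkt);
        while (pkt != 0) {                         // drain every pending packet
            BYTE* data = NULL; UINT32 frames = 0; DWORD flags = 0;
            cap->GetBuffer(&data, &frames, &flags, NULL, NULL);
            // consume frames * wfx->nBlockAlign bytes here
            // (treat as silence if AUDCLNT_BUFFERFLAGS_SILENT is set)
            cap->ReleaseBuffer(frames);
            cap->GetNextPacketSize(&pkt);
        }
    }
    client->Stop();
    CoTaskMemFree(wfx);
    cap->Release(); client->Release(); dev->Release(); en->Release();
    CloseHandle(evt);
    CoUninitialize();
    return 0;
}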
+ +YangWinAudioApiDevice::~YangWinAudioApiDevice() { + + + yang_stop(m_player); + yang_stop(m_capture); + yang_stop_thread(m_player); + yang_stop_thread(m_capture); + yang_delete(m_ahandle); + yang_delete(m_player); + yang_delete(m_capture); + + +} + +void YangWinAudioApiDevice::setCatureStart() { + if(m_ahandle) m_ahandle->isBuf = 1; +} +void YangWinAudioApiDevice::setCatureStop() { + if(m_ahandle) m_ahandle->isBuf = 0; +} +void YangWinAudioApiDevice::setOutAudioBuffer(YangAudioBuffer *pbuffer) { + if(m_ahandle) m_ahandle->setOutAudioBuffer(pbuffer); +} +void YangWinAudioApiDevice::setPlayAudoBuffer(YangAudioBuffer *pbuffer) { + //m_player->m_audioData.setAudioList(pbuffer); + if(m_ahandle) m_ahandle->m_aecPlayBuffer = pbuffer; +} +void YangWinAudioApiDevice::setAec(YangRtcAec *paec) { + if(!m_usingBuiltinAec){ + if(m_ahandle&&m_player){ + m_ahandle->m_aec = paec; + m_player->setAec(paec); + } + + } + +} +void YangWinAudioApiDevice::setPreProcess(YangPreProcess *pp) { +if(!m_usingBuiltinAec&&m_player) m_player->m_audioData.m_preProcess = pp; + +} + +void YangWinAudioApiDevice::setInAudioBuffer( + vector *pal) { + //if(m_player&&m_player->m_audioData.m_syn) m_player->m_audioData.m_syn->setInAudioBuffer(pal); + +} +void YangWinAudioApiDevice::caputureAudioData(YangFrame *audioFrame) { + //printf("%d-%d,",m_usingBuiltinAec,m_player->m_hasRenderEcho); +#ifdef _MSC_VER + if (m_usingBuiltinAec){ + if(m_ahandle) m_ahandle->putBuffer1(audioFrame->payload, m_outLen); + }else { +#endif + if (m_player&&m_player->m_hasRenderEcho){ + if(m_ahandle) m_ahandle->putEchoBuffer(audioFrame->payload, m_outLen); //aec filter render data + }else{ + if(m_ahandle) m_ahandle->putBuffer1(audioFrame->payload, m_outLen); + } +#ifdef _MSC_VER + } +#endif +} + +void YangWinAudioApiDevice::stopLoop() { + m_loops = 0; + if(m_capture) m_capture->stop(); +} + + + +int YangWinAudioApiDevice::init() { + if (m_isInit) return Yang_Ok; + + if(m_capture) { + m_capture->initCapture(); + m_capture->setCaptureCallback(this); + m_outLen=m_capture->getAudioOutLength(); + } + if(m_player) m_player->init(); + m_isInit = 1; + return Yang_Ok; +} +void YangWinAudioApiDevice::startLoop() { + + + + m_loops = 1; + if (m_player) m_player->start(); + if(m_capture){ + m_capture->captureThread(); + } + if (m_player) + m_player->stop(); + if (m_capture){ + m_capture->stop(); + } + + +} + +#endif diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioApiDevice.h b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiDevice.h new file mode 100755 index 00000000..516c4661 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiDevice.h @@ -0,0 +1,54 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef SRC_YANGCAPTURE_SRC_WIN_YANGWINAUDIOAPI_H_ +#define SRC_YANGCAPTURE_SRC_WIN_YANGWINAUDIOAPI_H_ + +#ifdef _WIN32 +#include +#include +#include +#include +#include +#include + +#include "YangWinAudioCaptureHandle.h" +#include +#include +#include + + + +class YangWinAudioApiDevice: public YangAudioCapture,public YangCaptureCallback { +public: + YangWinAudioApiDevice(YangContext *pcontext,bool isRecord,bool usingBuiltinAec=false); + ~YangWinAudioApiDevice(); + int init(); + void setInAudioBuffer(std::vector *pal); + void setPreProcess(YangPreProcess *pp); + void setCatureStart(); + void setCatureStop(); + void setOutAudioBuffer(YangAudioBuffer *pbuffer); + void setPlayAudoBuffer(YangAudioBuffer *pbuffer); + void setAec(YangRtcAec *paec); + void caputureAudioData(YangFrame* audioFrame); + YangAudioCaptureHandle *m_ahandle; + +protected: + 
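caputureAudioData above decides, per captured frame, whether the microphone signal takes the echo path (once the renderer has produced a far-end reference) or goes straight into the output buffer. A condensed sketch of that decision follows; the helper name is hypothetical, while putBuffer1/putEchoBuffer are the handle methods used in the code above.

// Hypothetical routing helper mirroring caputureAudioData above.
void routeCapturedFrame(bool builtinAec, bool hasRenderEcho,
                        uint8_t* pcm, int len,
                        YangAudioCaptureHandle* handle) {
    if (builtinAec) {
        handle->putBuffer1(pcm, len);      // the capture DMO already cancelled echo
    } else if (hasRenderEcho) {
        handle->putEchoBuffer(pcm, len);   // software AEC: filter against the far end
    } else {
        handle->putBuffer1(pcm, len);      // no far-end reference yet: pass through
    }
}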
void startLoop(); + void stopLoop(); + +private: + int m_loops; + int m_isInit; + int m_outLen; + + +private: + bool m_usingBuiltinAec; + YangAudioApiCapture *m_capture; + YangWinAudioApiRender* m_player; +}; + +#endif /* SRC_YANGCAPTURE_SRC_WIN_YANGWINAUDIOAPI_H_ */ +#endif diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioApiRender.cpp b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiRender.cpp new file mode 100755 index 00000000..50a9c9f4 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiRender.cpp @@ -0,0 +1,512 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#ifdef _WIN32 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#define Yang_Release(x) if(x){x->Release();x=NULL;} +#define ROUND(x) ((x) >= 0 ? (int)((x) + 0.5) : (int)((x)-0.5)) + + +#define CONTINUE_ON_ERROR(x) if(x!=S_OK){continue;} + +const float MAX_SPEAKER_VOLUME = 255.0f; +const float MIN_SPEAKER_VOLUME = 0.0f; + +YangWinAudioApiRender::YangWinAudioApiRender(YangContext *pcontext):YangAudioPlay(pcontext) { + m_outputDeviceIndex = 0; + m_renderCollection = NULL; + m_deviceOut = NULL; + m_clientOut = NULL; + m_renderClient = NULL; + m_renderSimpleVolume = NULL; + m_dataBufP = NULL; + m_contextt = 0; + + m_resTmp=NULL; + m_resBuf=NULL; + m_aec=NULL; + + m_bufferLength = 0; + keepPlaying = false; + flags = 0; + padding = 0; + framesAvailable = 0; + m_audioPlayCacheNum = pcontext->avinfo.audio.audioPlayCacheNum; + m_audioData.initPlay(pcontext->avinfo.audio.sample, pcontext->avinfo.audio.channel); + m_sample=pcontext->avinfo.audio.sample; + + m_size = (pcontext->avinfo.audio.sample / 50) * pcontext->avinfo.audio.channel * 2; + + + m_loops=0; + m_isStart=0; + m_samplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL); + + m_bufLen=0; + m_hasRenderEcho=0; + m_blockSize=4; + memset(&m_audioFrame,0,sizeof(YangFrame)); + +} +YangWinAudioApiRender::~YangWinAudioApiRender() { + if (NULL != m_samplesReadyEvent) { + CloseHandle(m_samplesReadyEvent); + m_samplesReadyEvent = NULL; + } + Yang_Release(m_renderCollection); + Yang_Release(m_deviceOut); + Yang_Release(m_clientOut); + Yang_Release(m_renderClient); + Yang_Release(m_renderSimpleVolume); + yang_deleteA(m_resTmp); + yang_deleteA(m_resBuf); + m_aec=NULL; +} + +//void YangWinAudioApiRender::setAudioList(YangAudioPlayBuffer *pal) { + // if(m_audioData.m_syn) m_audioData.m_syn->setAudioList(pal) ; +//} + +void YangWinAudioApiRender::setAec(YangRtcAec *paec){ + + m_aec=paec; + m_audioData.setAec(); +} +int YangWinAudioApiRender::setSpeakerVolume(int volume) { + if (m_deviceOut == NULL) { + return 1; + } + if (volume < (int) MIN_SPEAKER_VOLUME + || volume > (int) MAX_SPEAKER_VOLUME) { + return 1; + } + + HRESULT hr = S_OK; + + // scale input volume to valid range (0.0 to 1.0) + const float fLevel = (float) volume / MAX_SPEAKER_VOLUME; + // m_lock.lock(); + hr = m_renderSimpleVolume->SetMasterVolume(fLevel, NULL); + // m_lock.unlock(); + if (FAILED(hr)) + return 1; + return 0; + +} +int YangWinAudioApiRender::getSpeakerVolume(int &volume) { + if (m_deviceOut == NULL) { + return 1; + } + + HRESULT hr = S_OK; + float fLevel(0.0f); + + //m_lock.lock(); + hr = m_renderSimpleVolume->GetMasterVolume(&fLevel); + // m_lock.unlock(); + if (FAILED(hr)) + return 1; + + // scale input volume range [0.0,1.0] to valid output range + volume = static_cast(fLevel * MAX_SPEAKER_VOLUME); + + return 0; + +} +int YangWinAudioApiRender::getSpeakerMute(bool &enabled) { + + if (m_deviceOut == NULL) 
{ + return 1; + } + + HRESULT hr = S_OK; + IAudioEndpointVolume *pVolume = NULL; + + // Query the speaker system mute state. + hr = m_deviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, + reinterpret_cast(&pVolume)); + if (FAILED(hr)) + return 1; + + BOOL mute; + hr = pVolume->GetMute(&mute); + if (FAILED(hr)) + return 1; + + enabled = (mute == TRUE) ? true : false; + + Yang_Release(pVolume); + + return 0; + +} +int YangWinAudioApiRender::setSpeakerMute(bool enable) { + if (m_deviceOut == NULL) { + return 1; + } + + HRESULT hr = S_OK; + IAudioEndpointVolume *pVolume = NULL; + + // Set the speaker system mute state. + hr = m_deviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, + reinterpret_cast(&pVolume)); + if (FAILED(hr)) + return 1; + + const BOOL mute(enable); + hr = pVolume->SetMute(mute, NULL); + if (FAILED(hr)) + return 1; + + Yang_Release(pVolume); + + return 0; + +} +int YangWinAudioApiRender::initSpeaker(int pind) { + // int nDevices = playoutDeviceCount(); + // if(pind>=nDevices) return 1; + // getListDevice(m_enum, eRender, pind, &m_deviceOut); + IAudioSessionManager *pManager = NULL; + int ret = m_deviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, + NULL, (void**) &pManager); + if (ret != 0 || pManager == NULL) { + Yang_Release(pManager); + return 1; + } + Yang_Release(m_renderSimpleVolume); + ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &m_renderSimpleVolume); + if (ret != 0 || m_renderSimpleVolume == NULL) { + Yang_Release(pManager); + Yang_Release(m_renderSimpleVolume); + return 1; + } + Yang_Release(pManager); + return 0; +} + +int YangWinAudioApiRender::initPlay(int pind) { + if (m_deviceOut == NULL) { + yang_error("1_Init play failed device is null"); + return 1; + } + + // Initialize the speaker (devices might have been added or removed) + if (initSpeaker(pind)) { + yang_error("InitSpeaker() failed"); + } + + // Ensure that the updated rendering endpoint device is valid + if (m_deviceOut == NULL) { + yang_error("2_Init play failed device is null"); + return 1; + } + + HRESULT hr = S_OK; + WAVEFORMATEX *pWfxOut = NULL; + WAVEFORMATEX Wfx = WAVEFORMATEX(); + WAVEFORMATEX *pWfxClosestMatch = NULL; + + // Create COM object with IAudioClient interface. + Yang_Release(m_clientOut); + hr = m_deviceOut->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, + (void**) &m_clientOut); + if (FAILED(hr)) return yang_error_wrap(ERROR_SYS_AudioRender,"create IAudioClient fail ..."); + + + hr = m_clientOut->GetMixFormat(&pWfxOut); + // Set wave format + Wfx.wFormatTag = WAVE_FORMAT_PCM; + Wfx.wBitsPerSample = 16; + Wfx.cbSize = 0; + const int freqs[] = { 48000, 44100, 16000, 96000, 32000, 8000 }; + hr = S_FALSE; + int _playChannelsPrioList[2]; + _playChannelsPrioList[0] = 2; // stereo is prio 1 + _playChannelsPrioList[1] = 1; // mono is prio 2 + // Iterate over frequencies and channels, in order of priority + for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); + freq++) { + for (unsigned int chan = 0; + chan + < sizeof(_playChannelsPrioList) + / sizeof(_playChannelsPrioList[0]); chan++) { + Wfx.nChannels = _playChannelsPrioList[chan]; + Wfx.nSamplesPerSec = freqs[freq]; + Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8; + Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign; + // If the method succeeds and the audio endpoint device supports the + // specified stream format, it returns S_OK. If the method succeeds and + // provides a closest match to the specified format, it returns S_FALSE. 
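The call that follows probes the engine with IsFormatSupported until it finds a shared-mode PCM format the endpoint accepts, preferring 48 kHz stereo. The same negotiation, extracted as a standalone helper sketch (assumes a valid IAudioClient; error handling trimmed):

#include <windows.h>
#include <audioclient.h>

// Returns true and fills 'wfx' with the first shared-mode PCM format the
// endpoint accepts, trying sample rates and channel counts in priority order.
bool negotiatePcmFormat(IAudioClient* client, WAVEFORMATEX& wfx) {
    const DWORD rates[] = { 48000, 44100, 16000, 96000, 32000, 8000 };
    const WORD  chans[] = { 2, 1 };              // stereo first, mono second
    wfx = WAVEFORMATEX();
    wfx.wFormatTag = WAVE_FORMAT_PCM;
    wfx.wBitsPerSample = 16;
    for (DWORD rate : rates) {
        for (WORD ch : chans) {
            wfx.nChannels = ch;
            wfx.nSamplesPerSec = rate;
            wfx.nBlockAlign = ch * wfx.wBitsPerSample / 8;
            wfx.nAvgBytesPerSec = rate * wfx.nBlockAlign;
            WAVEFORMATEX* closest = NULL;
            HRESULT hr = client->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
                                                   &wfx, &closest);
            if (closest) CoTaskMemFree(closest); // S_FALSE handed back a near match
            if (hr == S_OK) return true;         // endpoint takes this format as-is
        }
    }
    return false;
}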
+			hr = m_clientOut->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &Wfx,
+					&pWfxClosestMatch);
+			if (hr == S_OK) {
+				break;
+			} else {
+				if (pWfxClosestMatch) {
+
+					CoTaskMemFree(pWfxClosestMatch);
+					pWfxClosestMatch = NULL;
+				}
+			}
+		}
+		if (hr == S_OK)
+			break;
+	}
+
+	// TODO(andrew): what happens in the event of failure in the above loop?
+	//   Is _ptrClientOut->Initialize expected to fail?
+	//   Same in InitRecording().
+	if (hr == S_OK) {
+		// Block size is the number of samples per channel in 10 ms.
+		m_blockSize = Wfx.nSamplesPerSec / 100;
+	}
+	yang_trace("\nrender: sample==%d,channel==%d,nBlockAlign==%d,blockSize==%d", Wfx.nSamplesPerSec, Wfx.nChannels, Wfx.nBlockAlign, m_blockSize);
+	m_audioData.initRender(Wfx.nSamplesPerSec, Wfx.nChannels);
+	REFERENCE_TIME hnsBufferDuration = 0; //20 * 10000; // ask for minimum buffer size (default)
+	if (Wfx.nSamplesPerSec == 44100) {
+		hnsBufferDuration = 30 * 10000;
+	}
+	hr = m_clientOut->Initialize(AUDCLNT_SHAREMODE_SHARED,	// share Audio Engine with other applications
+			AUDCLNT_STREAMFLAGS_EVENTCALLBACK,	// processing of the audio buffer by
+												// the client will be event driven
+			hnsBufferDuration,	// requested buffer capacity as a time value (in
+								// 100-nanosecond units)
+			0,		// periodicity
+			&Wfx,	// selected wave format
+			NULL);	// session GUID
+
+	if (FAILED(hr))
+		return yang_error_wrap(ERROR_SYS_AudioRender, "AudioClient Initialize param fail ...");
+
+	// Get the actual size of the shared (endpoint) buffer.
+	// Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
+
+	hr = m_clientOut->GetBufferSize(&m_bufferLength);
+	yang_trace("\nbuffersize==================%d", m_bufferLength);
+	if (SUCCEEDED(hr)) {
+
+	}
+
+	// Set the event handle that the system signals when an audio buffer is ready
+	// to be processed by the client.
+	hr = m_clientOut->SetEventHandle(m_samplesReadyEvent);
+	if (FAILED(hr))
+		return yang_error_wrap(ERROR_SYS_AudioRender, "SetEventHandle fail ...");
+	// EXIT_ON_ERROR(hr);
+
+	// Get an IAudioRenderClient interface.
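Two unit conversions drive the code above: REFERENCE_TIME counts 100-nanosecond ticks, so 30 ms is 30 * 10000, and one 10 ms block holds sampleRate / 100 frames, which is what m_blockSize stores. A small sketch of that arithmetic (helper names are illustrative):

#include <audioclient.h>   // typedef LONGLONG REFERENCE_TIME

// REFERENCE_TIME is expressed in 100-ns units: 1 ms = 10,000 ticks.
constexpr REFERENCE_TIME msToRefTime(int ms) { return ms * 10000LL; }

// Frames per 10 ms block at a given sample rate (what m_blockSize holds).
constexpr UINT32 framesPer10ms(UINT32 sampleRate) { return sampleRate / 100; }

static_assert(msToRefTime(30) == 300000, "30 ms buffer request");
static_assert(framesPer10ms(48000) == 480, "480 frames per 10 ms at 48 kHz");
static_assert(framesPer10ms(44100) == 441, "441 frames per 10 ms at 44.1 kHz");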
+	Yang_Release(m_renderClient);
+	hr = m_clientOut->GetService(__uuidof(IAudioRenderClient),
+			(void**) &m_renderClient);
+	if (FAILED(hr))
+		return yang_error_wrap(ERROR_SYS_AudioRender, "create AudioRenderClient fail ...");
+
+	CoTaskMemFree(pWfxOut);
+	CoTaskMemFree(pWfxClosestMatch);
+
+	return 0;
+
+}
+
+int YangWinAudioApiRender::startRender() {
+	uint32_t err = Yang_Ok;
+	if (m_clientOut) {
+		err = m_clientOut->Start();
+		if (err != S_OK) {
+			return yang_error_wrap(err, "client start fail..");
+		}
+		return Yang_Ok;
+	}
+	yang_error("client is null");
+	return 1;
+}
+int YangWinAudioApiRender::stopRender() {
+	if (m_clientOut) {
+		m_clientOut->Stop();
+		return 0;
+	}
+
+	return 1;
+}
+int YangWinAudioApiRender::init() {
+
+	if (m_contextt)
+		return 0;
+	//getDefaultDeviceIndex(m_enum, eRender, eConsole, &m_outputDeviceIndex);
+	m_enum->GetDefaultAudioEndpoint(eRender, eConsole, &m_deviceOut);
+	// getListDevice(m_enum,eRender, m_outputDeviceIndex,&m_deviceOut);
+
+	int err = Yang_Ok;
+	err = initPlay(m_outputDeviceIndex);
+	if (err) {
+		return yang_error_wrap(err, "init render fail.......");
+	}
+
+	m_contextt = 1;
+	return 0;
+}
+
+int YangWinAudioApiRender::render_10ms() {
+	HRESULT hr = m_clientOut->GetCurrentPadding(&padding);
+	if (FAILED(hr)) return yang_error_wrap(ERROR_SYS_AudioRender, "outClient GetCurrentPadding fail.......");
+	m_dataBufP = NULL;
+	framesAvailable = m_bufferLength - padding;
+
+	if (framesAvailable > 0) {
+		const uint32_t n10ms = (framesAvailable / m_blockSize);
+
+		for (uint32_t n = 0; n < n10ms; n++) {
+			// render buffer.
+			hr = m_renderClient->GetBuffer(m_blockSize, &m_dataBufP);
+			if (FAILED(hr))
+				return yang_error_wrap(ERROR_SYS_AudioRender, "renderClient getBuffer fail.......");
+
+			if (m_dataBufP) {
+				uint8_t* tmp = m_audioData.getRenderAudioData(m_blockSize << 2);
+				if (tmp) {
+					memcpy(m_dataBufP, tmp, m_blockSize << 2);
+
+				} else {
+
+					memset(m_dataBufP, 0, m_blockSize << 2);
+				}
+			}
+
+			DWORD dwFlags(0);
+			hr = m_renderClient->ReleaseBuffer(m_blockSize, dwFlags);
+			if (FAILED(hr)) return yang_error_wrap(ERROR_SYS_AudioRender, "renderClient releaseBuffer fail.......");
+
+		}
+
+	}
+	return Yang_Ok;
+}
+
+int YangWinAudioApiRender::render_aec_10ms() {
+	HRESULT hr = m_clientOut->GetCurrentPadding(&padding);
+	if (FAILED(hr)) return yang_error_wrap(ERROR_SYS_AudioRender, "outClient GetCurrentPadding fail.......");
+	m_dataBufP = NULL;
+	framesAvailable = m_bufferLength - padding;
+	if (framesAvailable > 0) {
+		const uint32_t n10ms = (framesAvailable / m_blockSize);
+		for (uint32_t n = 0; n < n10ms; n++) {
+			// render buffer.
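The pacing in both render functions works the same way: GetCurrentPadding reports frames still queued, so bufferLength minus padding is the writable space, which is then filled in whole 10 ms blocks; with 16-bit stereo one block is m_blockSize * 2 channels * 2 bytes, which is the m_blockSize << 2 byte count used above. A sketch of the byte math under those assumptions:

// Bytes of PCM that fit in the device buffer right now, in whole 10 ms blocks.
// Assumes 16-bit samples; channels == 2 reproduces the m_blockSize << 2 count.
static inline uint32_t renderableBytes(uint32_t bufferLength, uint32_t padding,
                                       uint32_t blockSize, uint32_t channels) {
    uint32_t framesAvailable = bufferLength - padding; // free space in frames
    uint32_t blocks = framesAvailable / blockSize;     // whole 10 ms blocks
    return blocks * blockSize * channels * 2;          // bytes to write
}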
+			hr = m_renderClient->GetBuffer(m_blockSize, &m_dataBufP);
+			if (FAILED(hr))
+				return yang_error_wrap(ERROR_SYS_AudioRender, "renderClient getBuffer fail.......");
+
+			if (m_dataBufP) {
+				uint8_t* tmp = m_audioData.getRenderAudioData(m_blockSize << 2);
+				if (tmp) {
+					memcpy(m_dataBufP, tmp, m_blockSize << 2);
+					m_audioData.setRenderLen(m_blockSize << 2);
+				} else
+					memset(m_dataBufP, 0, m_blockSize << 2);
+			}
+
+			DWORD dwFlags(0);
+			hr = m_renderClient->ReleaseBuffer(m_blockSize, dwFlags);
+			uint8_t* aectmp = m_audioData.getAecAudioData();
+			if (aectmp) {
+				if (m_aec) m_aec->echoPlayback(m_aec->context, (short*) aectmp);
+				if (!m_hasRenderEcho) m_hasRenderEcho = 1;
+
+			}
+			if (FAILED(hr)) return yang_error_wrap(ERROR_SYS_AudioRender, "renderClient releaseBuffer fail.......");
+
+		}
+
+	}
+	return Yang_Ok;
+
+}
+
+/**
+void YangWinAudioApiRender::run() {
+	m_isStart=1;
+	startLoop();
+	m_isStart=0;
+}
+void YangWinAudioApiRender::stop() {
+	stopLoop();
+
+}**/
+void YangWinAudioApiRender::stopLoop() {
+	stopRender();
+	m_loops = 0;
+}
+
+void YangWinAudioApiRender::startLoop() {
+
+	DWORD taskIndex(0);
+	HANDLE hMmTask = AvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+	if (hMmTask) {
+		if (FALSE == AvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL)) {
+			yang_warn("failed to boost play thread using MMCSS");
+		}
+		yang_trace("render thread is now registered with MMCSS (taskIndex=%d)",
+				taskIndex);
+	} else {
+		yang_error("failed to enable MMCSS on render thread (err=%lu)", GetLastError());
+
+	}
+
+	m_loops = 1;
+	UINT32 bufferLength = 0;
+	HRESULT hr = m_clientOut->GetBufferSize(&bufferLength);
+	if (FAILED(hr))
+		yang_error("audioClient GetBufferSize fail.......");
+	BYTE* pData = NULL;
+	hr = m_renderClient->GetBuffer(bufferLength, &pData);
+	if (FAILED(hr))
+		yang_error("renderClient getBuffer fail.......");
+
+	hr = m_renderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT);
+	if (startRender()) {
+		yang_error("start render fail");
+		return;
+	}
+
+	HANDLE waitArray[1] = { m_samplesReadyEvent };
+	while (m_loops == 1) {
+		DWORD waitResult = WaitForMultipleObjects(1, waitArray, FALSE, 500);
+		if (waitResult == (WAIT_OBJECT_0 + 0)) {
+			if (m_aec)
+				render_aec_10ms();
+			else
+				render_10ms();
+
+		}
+
+	}
+
+}
+
+#endif
diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioApiRender.h b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiRender.h
new file mode 100755
index 00000000..2913db6d
--- /dev/null
+++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioApiRender.h
@@ -0,0 +1,101 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YangWinAudioApiRender_H
+#define YangWinAudioApiRender_H
+#ifdef _WIN32
+#include
+#include
+//#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+class YangWinAudioApiRender:public YangWinAudioApi,public YangAudioPlay
+{
+public:
+	YangWinAudioApiRender(YangContext *pcontext);
+	virtual ~YangWinAudioApiRender();
+public:
+	int initRender();
+	void setAec(YangRtcAec *paec);
+	//void setAudioList(YangAudioPlayBuffer *pal);
+	int playThread();
+	int startRender();
+	int stopRender();
+	int render_10ms();
+	int render_aec_10ms();
+
+	int m_contextt;
+	// int m_isStart;
+	//YangRenderAudioData m_audioData;
+
+	int init();
+	int m_hasRenderEcho;
+	int32_t m_loops;
+	// void stop();
+protected:
+
+	YangFrame m_audioFrame;
+private:
+
+	int32_t m_audioPlayCacheNum;
+	// int32_t m_sample;
+	int32_t m_size;
+	YangResample m_res;
+
+	uint8_t* m_resBuf;
+	uint8_t* m_resTmp;
+
+	int32_t m_bufLen;
+	uint32_t m_blockSize;
+
+	// uint8_t* m_aecBuf;
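render_aec_10ms above feeds every 10 ms block it hands to the device back into the echo canceller as the far-end reference, and only flips m_hasRenderEcho once a reference exists, so the capture side starts echo-filtering exactly when playback is flowing. A schematic of that ordering (assumes the YangRtcAec type from this patch, with its echoPlayback function pointer and context member; error handling trimmed):

#include <cstring>
#include <audioclient.h>
#include "yangavutil/audio/YangRtcAec.h"

// Sketch: render one 10 ms block, then register it as the AEC far end.
void renderBlockWithAec(IAudioRenderClient* rc, YangRtcAec* aec,
                        uint8_t* pcm, uint32_t blockSize, int& hasRenderEcho) {
    BYTE* dst = NULL;
    if (SUCCEEDED(rc->GetBuffer(blockSize, &dst))) {
        memcpy(dst, pcm, blockSize << 2);             // one 16-bit stereo block
        rc->ReleaseBuffer(blockSize, 0);
        if (aec) aec->echoPlayback(aec->context, (short*)pcm); // far-end reference
        if (!hasRenderEcho) hasRenderEcho = 1;        // capture side may start filtering
    }
}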
// int32_t m_aecBufLen; + + +protected: + virtual void startLoop(); + void stopLoop(); + // void run(); + + YangRtcAec *m_aec; + HANDLE m_samplesReadyEvent; +private: + + IMMDeviceCollection* m_renderCollection; + IMMDevice* m_deviceOut; + IAudioClient* m_clientOut; + IAudioRenderClient* m_renderClient; + ISimpleAudioVolume* m_renderSimpleVolume; + + + UINT32 m_bufferLength; + + int getSpeakerVolume(int& volume); + int setSpeakerVolume(int volume); + int getSpeakerMute(bool& enabled); + int setSpeakerMute(bool enable); + + int initSpeaker(int pind); + int initPlay(int pind); + int setFormat(WAVEFORMATEX *pwfx); + + + int m_outputDeviceIndex; + + // YangPlayData m_playData; + + BYTE* m_dataBufP ; + DWORD flags ; + UINT32 padding; + uint32_t framesAvailable; + bool keepPlaying; + +}; +#endif +#endif // YangWinAudioApiRender_H diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioCapture.cpp b/libmetartc3/src/yangaudiodev/win/YangWinAudioCapture.cpp new file mode 100755 index 00000000..bd811851 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioCapture.cpp @@ -0,0 +1,263 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#ifdef _WIN32 +#include +#include +#include +#include + +using namespace std; +#define SAFE_RELEASE(x) { if (x) x->Release(); x = NULL; } +YangWinAudioCapture::YangWinAudioCapture(YangContext *pcontext){ // @suppress("Class members should be properly initialized") + m_context = pcontext; + m_ahandle = new YangWinAudioCaptureHandle(pcontext); + + //m_writeInit = 0, m_writeRet = 0; + //m_readInit = 0; + aIndex = 0; + m_ret = 0; + m_size = 0; + m_loops = 0; + + m_isInit = 0; + + m_frames = 1024; + m_channel = 2; + m_sample = 44100; + m_pg = NULL; + m_pb = NULL; + m_pm = NULL; + m_grabber = NULL; + m_grabberF = NULL; + m_event = NULL; + m_nullRender = NULL; + m_audioSrc = NULL; + m_moniker = NULL; + m_mt = NULL; + aIndex = 1; + if (m_context->avinfo.audio.usingMono) { + m_frames = 320; + m_channel = 1; + m_sample = 16000; + } else { + m_frames = 1024; + } + +} +YangWinAudioCapture::~YangWinAudioCapture() { + + if (m_loops) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + //a delete pMoniker1,pGrabber,pGrabberF,pEvent; + if(m_ahandle){ + SAFE_RELEASE(m_grabber); + SAFE_RELEASE(m_grabberF); + + SAFE_RELEASE(m_event); + SAFE_RELEASE(m_pm); + SAFE_RELEASE(m_pg); + SAFE_RELEASE(m_pb); + SAFE_RELEASE(m_audioSrc); + // SAFE_RELEASE(pMoniker1); + yang_delete(m_ahandle); + } +} +void YangWinAudioCapture::setCatureStart() { + m_ahandle->isBuf = 1; +} +void YangWinAudioCapture::setCatureStop() { + m_ahandle->isBuf = 0; +} +void YangWinAudioCapture::setOutAudioBuffer(YangAudioBuffer *pbuffer) { + m_ahandle->setOutAudioBuffer(pbuffer); +} +void YangWinAudioCapture::setPlayAudoBuffer(YangAudioBuffer *pbuffer) { + m_ahandle->m_aecPlayBuffer = pbuffer; +} +void YangWinAudioCapture::setAec(YangRtcAec *paec) { + m_ahandle->setAec(paec); +} +void YangWinAudioCapture::setPreProcess(YangPreProcess *pp) { + m_ahandle->m_preProcess = pp; + if (pp) { + pp->init(pp->context,m_frames, m_sample, m_channel); + } +} + +void YangWinAudioCapture::setInAudioBuffer(vector *pal) { + m_ahandle->setInAudioBuffer(pal); +} +void YangWinAudioCapture::stopLoop() { + m_loops = 0; + if(m_pm!=NULL) m_pm->Stop(); +} + +HRESULT YangWinAudioCapture::GetUnconnectPin(IBaseFilter *pFilter, + PIN_DIRECTION dir, IPin **ppPin) { + *ppPin = 0; + IEnumPins *pEnum = 0; + IPin *pPin = 0; + HRESULT hr = pFilter->EnumPins(&pEnum); + if (FAILED(hr)) { + return hr; + } + while (pEnum->Next(1, 
&pPin, NULL) == S_OK) {
+		PIN_DIRECTION thisPinDir;
+		pPin->QueryDirection(&thisPinDir);
+		if (thisPinDir == dir) {
+			IPin *pTemp = 0;
+			hr = pPin->ConnectedTo(&pTemp);
+			if (SUCCEEDED(hr)) {
+				pTemp->Release();
+			} else {
+				pEnum->Release();
+				*ppPin = pPin;
+				return S_OK;
+			}
+		}
+		pPin->Release();
+	}
+	pEnum->Release();
+	return E_FAIL;
+}
+
+IPin* YangWinAudioCapture::FindPin(IBaseFilter *pFilter, PIN_DIRECTION dir) {
+	IEnumPins *pEnumPins;
+	IPin *pOutpin;
+	PIN_DIRECTION pDir;
+	pFilter->EnumPins(&pEnumPins);
+	while (pEnumPins->Next(1, &pOutpin, NULL) == S_OK) {
+		pOutpin->QueryDirection(&pDir);
+
+		if (pDir == dir) {
+			return pOutpin;
+		}
+	}
+	return 0;
+}
+void YangWinAudioCapture::setPara() {
+//	printf("set param is starting...\n");
+	int32_t nBytesPerSample = 2;
+	IAMStreamConfig *config = NULL;
+	HRESULT hr = m_pb->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Audio,
+			m_audioSrc, IID_IAMStreamConfig, (void**) &config);
+	AM_MEDIA_TYPE *pmt = { 0 };
+	hr = config->GetFormat(&pmt);
+	if (SUCCEEDED(hr)) {
+
+		WAVEFORMATEX *pWF = (WAVEFORMATEX*) pmt->pbFormat;
+		pmt->subtype = MEDIASUBTYPE_PCM;
+
+		pWF->nChannels = (WORD) m_channel; //m_sample;//2;
+		pWF->nSamplesPerSec = m_sample; //44100;
+		pWF->nAvgBytesPerSec = nBytesPerSample * m_channel * m_sample; //4 * 44100;
+		pWF->wBitsPerSample = nBytesPerSample * 8;
+		pWF->nBlockAlign = nBytesPerSample * m_channel; //4;
+		hr = config->SetFormat(pmt);
+		SAFE_RELEASE(config);
+
+		IAMBufferNegotiation *pNeg = NULL;
+		hr = m_pb->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Audio,
+				m_audioSrc, IID_IAMBufferNegotiation, (void**) &pNeg);
+		ALLOCATOR_PROPERTIES prop = { 0 };
+		prop.cbBuffer = m_frames * nBytesPerSample * m_channel;
+		prop.cBuffers = 6;
+		prop.cbAlign = nBytesPerSample * m_channel;
+		hr = pNeg->SuggestAllocatorProperties(&prop);
+		SAFE_RELEASE(pNeg);
+//DeleteMediaType(pmt);
+	} else {
+		printf("set param failed!..........\n");
+	}
+
+}
+int32_t YangWinAudioCapture::init() {
+	if (m_isInit)
+		return Yang_Ok;
+
+	CoInitialize(NULL);
+
+	ULONG cFetched;
+	ICreateDevEnum *devEnum = NULL;
+	CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC,
+			IID_PPV_ARGS(&devEnum));
+	IEnumMoniker *classEnum = NULL;
+	devEnum->CreateClassEnumerator(CLSID_AudioInputDeviceCategory,
+			&classEnum, 0);
+	int32_t aco = 0;
+	while (classEnum->Next(1, &m_moniker, &cFetched) == S_OK) {
+		aco++;
+		if (aco != aIndex)
+			continue;
+		m_moniker->BindToObject(0, 0, IID_IBaseFilter, (void**) &m_audioSrc);
+		m_moniker->Release();
+		break;
+	}
+	classEnum->Release();
+	devEnum->Release();
+	if (m_audioSrc == NULL)
+		return 1;
+	CoCreateInstance(CLSID_CaptureGraphBuilder2, 0, CLSCTX_INPROC_SERVER,
+			IID_ICaptureGraphBuilder2, (void**) &m_pb);
+	CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
+			IID_IGraphBuilder, (void**) &m_pg);
+
+	m_pb->SetFiltergraph(m_pg);
+	m_pg->QueryInterface(IID_IMediaControl, (void**) &m_pm);
+	m_pg->AddFilter(m_audioSrc, L"Audio");
+
+	CLSID CLSID_SampleGrabber = { 0xC1F400A0, 0x3F08, 0x11d3, { 0x9F, 0x0B,
+			0x00, 0x60, 0x08, 0x03, 0x9E, 0x37 } };
+	//sample***********************************
+	HRESULT hr = CoCreateInstance(CLSID_SampleGrabber, NULL,
+			CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&m_grabberF));
+	hr = m_pg->AddFilter(m_grabberF, L"Sample Grabber");
+	hr = m_grabberF->QueryInterface(IID_ISampleGrabber, (void**) &m_grabber);
+	//printf("pgrabber is %s",NUM_FRAMES_TO_GRAB);
+	hr = m_pg->QueryInterface(IID_IMediaEventEx, (void**) &m_event);
+
+	m_nullRender = NULL;
+	CLSID CLSID_NullRenderer = {
0xC1F400A4, 0x3F08, 0x11d3, { 0x9F, 0x0B, 0x00, + 0x60, 0x08, 0x03, 0x9E, 0x37 } }; + hr = CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC, + IID_IBaseFilter, (void**) &m_nullRender); + setPara(); + + IPin *pAudioOut = FindPin(m_audioSrc, PINDIR_OUTPUT); + IPin *pGrabin = FindPin(m_grabberF, PINDIR_INPUT); + IPin *pGrabout = FindPin(m_grabberF, PINDIR_OUTPUT); + IPin *pNullIn = FindPin(m_nullRender, PINDIR_INPUT); + hr = m_pg->Connect(pAudioOut, pGrabin); + hr = m_pg->Connect(pGrabout, pNullIn); + + //cs->list=list; + if (m_mt == NULL) m_mt = new AM_MEDIA_TYPE(); + hr = m_grabber->GetConnectedMediaType(m_mt); + //WAVEFORMATEX *wfx = (WAVEFORMATEX*) m_mt->pbFormat; + //printf("ahz=%d,channel=%d,bit=%d\n", wfx->nSamplesPerSec, wfx->nChannels,wfx->wBitsPerSample); + hr = m_grabber->SetMediaType(m_mt); + hr = m_grabber->SetBufferSamples(TRUE); + hr = m_grabber->SetOneShot(FALSE); + m_grabber->SetCallback(m_ahandle, 1); + m_isInit = 1; + return Yang_Ok; +} + +void YangWinAudioCapture::startLoop() { + m_loops = 1; + if (m_pm != NULL) { + m_pm->Run(); + long eventCode; + if(m_ahandle) m_ahandle->startRender(); + m_event->WaitForCompletion(INFINITE, &eventCode); + if(m_ahandle) m_ahandle->stopRender(); + } + //yang_deleteA(pcm_read); +} +#endif diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioCapture.h b/libmetartc3/src/yangaudiodev/win/YangWinAudioCapture.h new file mode 100755 index 00000000..5036e91d --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioCapture.h @@ -0,0 +1,76 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef Yang_YangWinAudioCapture_H1 +#define Yang_YangWinAudioCapture_H1 + +#ifdef _WIN32 +#include "amstream.h" +#include +#include "qedit.h" +#include "yangavutil/audio/YangAudioMix.h" +#include "yangavutil/audio/YangPreProcess.h" +#include +#include + +#include "yangavutil/audio/YangRtcAec.h" +class YangWinAudioCapture: public YangAudioCapture { +public: + YangWinAudioCapture(YangContext *pcontext); + ~YangWinAudioCapture(); + +public: + YangWinAudioCaptureHandle *m_ahandle; + + int32_t init(); + void setInAudioBuffer(vector *pal); + void setPreProcess(YangPreProcess *pp); + void setCatureStart(); + void setCatureStop(); + void setOutAudioBuffer(YangAudioBuffer *pbuffer); + void setPlayAudoBuffer(YangAudioBuffer *pbuffer); + void setAec(YangRtcAec *paec); + +protected: + //void run(); + + void startLoop(); + + void stopLoop(); +private: + + int32_t m_isInit; + int32_t m_ret; + void setPara(); + void getBaseFilter(const GUID gi,IBaseFilter *ib,int32_t ind); + HRESULT GetUnconnectPin(IBaseFilter * pFilter,PIN_DIRECTION dir,IPin **ppPin); + IPin * FindPin(IBaseFilter * pFilter,PIN_DIRECTION dir); + IPin * FindPin1(IBaseFilter * pFilter,PIN_DIRECTION dir); + HRESULT EnumerateDevices(REFGUID category, IEnumMoniker **ppEnum); + +private: + //YangAlsaDevice *m_dev; + int32_t m_size; + int32_t m_loops; + int32_t m_channel; + uint32_t m_sample; + int32_t m_frames; + + AM_MEDIA_TYPE *m_mt; + IMediaControl *m_pm; + IGraphBuilder *m_pg; + ICaptureGraphBuilder2 *m_pb; + + IBaseFilter *m_audioSrc; + IBaseFilter *m_nullRender; + + IMoniker *m_moniker; + ISampleGrabber *m_grabber; + IBaseFilter *m_grabberF; + IMediaEventEx *m_event; +}; +//#ifdef __cplusplus +//} +//#endif +#endif +#endif diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioCaptureHandle.cpp b/libmetartc3/src/yangaudiodev/win/YangWinAudioCaptureHandle.cpp new file mode 100755 index 00000000..96c6dbe4 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioCaptureHandle.cpp 
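The DirectShow path above builds the classic grab-and-discard graph: enumerate CLSID_AudioInputDeviceCategory, insert a Sample Grabber whose callback receives each PCM buffer, and terminate with a Null Renderer. A condensed sketch of the same topology follows; it assumes the legacy qedit.h definitions that the patch otherwise declares by hand, trims COM error handling, and uses RenderStream in place of the manual pin wiring above.

#include <dshow.h>
#include <qedit.h>   // ISampleGrabber, CLSID_SampleGrabber, CLSID_NullRenderer (legacy SDK)

// Build capture -> sample grabber -> null renderer for the first audio device.
HRESULT buildAudioGrabGraph(ISampleGrabberCB* cb) {
    IGraphBuilder* graph = NULL;
    ICaptureGraphBuilder2* builder = NULL;
    CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
                     IID_IGraphBuilder, (void**)&graph);
    CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC_SERVER,
                     IID_ICaptureGraphBuilder2, (void**)&builder);
    builder->SetFiltergraph(graph);

    // First audio capture device in the system enumeration.
    ICreateDevEnum* devEnum = NULL;
    CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC,
                     IID_PPV_ARGS(&devEnum));
    IEnumMoniker* monikers = NULL;
    devEnum->CreateClassEnumerator(CLSID_AudioInputDeviceCategory, &monikers, 0);
    IMoniker* mon = NULL; ULONG fetched = 0;
    IBaseFilter* src = NULL;
    if (monikers && monikers->Next(1, &mon, &fetched) == S_OK) {
        mon->BindToObject(0, 0, IID_IBaseFilter, (void**)&src);
        mon->Release();
    }
    if (!src) return E_FAIL;
    graph->AddFilter(src, L"Audio");

    // Sample grabber delivers every buffer to the callback (BufferCB above).
    IBaseFilter* grabF = NULL; ISampleGrabber* grab = NULL;
    CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER,
                     IID_PPV_ARGS(&grabF));
    graph->AddFilter(grabF, L"Sample Grabber");
    grabF->QueryInterface(IID_ISampleGrabber, (void**)&grab);
    grab->SetBufferSamples(TRUE);
    grab->SetOneShot(FALSE);
    grab->SetCallback(cb, 1);          // 1 = BufferCB, 0 = SampleCB

    // Null renderer swallows the stream after grabbing.
    IBaseFilter* nullR = NULL;
    CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC,
                     IID_IBaseFilter, (void**)&nullR);
    graph->AddFilter(nullR, L"Null Renderer");

    // One call wires capture pin -> grabber -> null renderer.
    return builder->RenderStream(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Audio,
                                 src, grabF, nullR);
}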
@@ -0,0 +1,117 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "yangutil/yang_unistd.h" +#ifdef _WIN32 +#include +#include "YangWinAudioCaptureHandle.h" +#include +YangWinAudioCaptureHandle::YangWinAudioCaptureHandle(YangContext *pcontext):YangAudioCaptureHandle(pcontext) +{ + readStart=0; +#if UsingRenderApi + m_player = new YangWinAudioApiRender(pcontext); + if (m_player) m_player->init(); + m_isStartRender=0; +#else + m_player=new YangAudioPlaySdl(pcontext); + if(m_player) m_player->init(); +#endif + if (pcontext->avinfo.audio.usingMono) { + m_len=320*2; + } else { + m_len = 1024*4; + } + m_buf=new uint8_t[m_len]; + m_preProcess=NULL; + m_aec=NULL; + + m_size=m_len; + + m_asyn=1;//pcontext->audioPlayType; + // m_asynStart=0; + +} +YangWinAudioCaptureHandle::~YangWinAudioCaptureHandle(void) +{ +#if UsingRenderApi + if(m_asyn){ + if(m_player&&m_player->m_isStart){ + yang_stop(m_player); + yang_stop_thread(m_player); + } + }else{ + if(m_isStartRender&&m_player) m_player->stopRender(); + } +#endif + m_preProcess = NULL; + if(m_player) delete m_player; + m_player=NULL; + if(m_buf) delete[] m_buf; + m_buf=NULL; + +} + + +STDMETHODIMP_(ULONG) YangWinAudioCaptureHandle::AddRef() { return 1; } +STDMETHODIMP_(ULONG) YangWinAudioCaptureHandle::Release() { return 2; } + +STDMETHODIMP YangWinAudioCaptureHandle::QueryInterface(REFIID riid, void **ppvObject) +{ + + if (NULL == ppvObject) return E_POINTER; + if (riid == __uuidof(IUnknown)) + { + *ppvObject = static_cast(this); + return S_OK; + } + if (riid == IID_ISampleGrabber) + { + *ppvObject = static_cast(this); + return S_OK; + } + return E_NOTIMPL; +} + +HRESULT STDMETHODCALLTYPE YangWinAudioCaptureHandle::SampleCB(double Time, IMediaSample *pSample) +{ + + return E_NOTIMPL; +} +void YangWinAudioCaptureHandle::setInAudioBuffer(vector *pal) { + //if(m_player&&m_player->m_audioData.m_syn) m_player->m_audioData.m_syn->setInAudioBuffer(pal); +} + +void YangWinAudioCaptureHandle::startRender(){ +#if UsingRenderApi + if(m_asyn&&m_player) m_player->start(); +#endif +} +void YangWinAudioCaptureHandle::stopRender(){ +#if UsingRenderApi + if(m_asyn){ + yang_stop(m_player); + yang_stop_thread(m_player); + } +#endif +} +void YangWinAudioCaptureHandle::setAec(YangRtcAec *aec){ + this->m_aec=aec; + if(m_player) m_player->setAec(aec); +} +HRESULT STDMETHODCALLTYPE YangWinAudioCaptureHandle::BufferCB(double Time, BYTE *pBuffer, LONG BufferLen) +{ + if (!readStart) { + readStart=m_player->m_hasRenderEcho; + } + if (readStart) + putEchoBuffer(pBuffer,BufferLen); + else + putBuffer1(pBuffer,BufferLen); + + + return E_NOTIMPL; +} + + +#endif diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioCaptureHandle.h b/libmetartc3/src/yangaudiodev/win/YangWinAudioCaptureHandle.h new file mode 100755 index 00000000..874b548d --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioCaptureHandle.h @@ -0,0 +1,62 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef ___YangWinAudioCaptureHandlee__ +#define ___YangWinAudioCaptureHandlee__ +#ifdef _WIN32 +#include "qedit.h" +#include "yangavutil/audio/YangMakeWave.h" +#include "yangutil/buffer/YangAudioBuffer.h" +#include +#include "../YangAudioCaptureHandle.h" +#define UsingRenderApi 1 + +#if UsingRenderApi +#include "YangWinAudioApiRender.h" +#else +#include "YangAudioPlaySdl.h" +#endif + + +class YangWinAudioCaptureHandle: public ISampleGrabberCB,public YangAudioCaptureHandle +{ +public: + YangWinAudioCaptureHandle(YangContext *pcontext); + virtual ~YangWinAudioCaptureHandle(void); + int32_t 
readStart; + + STDMETHODIMP_(ULONG) AddRef(); + STDMETHODIMP_(ULONG) Release(); + STDMETHODIMP QueryInterface(REFIID riid, void **ppvObject); + HRESULT STDMETHODCALLTYPE SampleCB(double Time, IMediaSample *pSample); + HRESULT STDMETHODCALLTYPE BufferCB(double Time, BYTE *pBuffer, LONG BufferLen); + +public: + YangPreProcess *m_preProcess; + void setInAudioBuffer(vector *pal); + void setAec(YangRtcAec *aec); + void startRender(); + void stopRender(); + + +private: + uint8_t *m_buf; + int32_t m_len; + + int32_t m_size; + int m_asyn; + // int m_asynStart; + + + +#if UsingRenderApi + YangWinAudioApiRender* m_player; + int m_isStartRender; +#else + YangAudioPlaySdl *m_player; +#endif + + +}; +#endif +#endif diff --git a/libmetartc3/src/yangaudiodev/win/YangWinAudioDevice.h b/libmetartc3/src/yangaudiodev/win/YangWinAudioDevice.h new file mode 100755 index 00000000..bfd9cf51 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinAudioDevice.h @@ -0,0 +1,113 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGWINAUDIODEVICE_H +#define YANGWINAUDIODEVICE_H +#ifdef _WIN32 +#include +#include +#include +#include +#include +const float MAX_SPEAKER_VOLUME = 255.0f; +const float MIN_SPEAKER_VOLUME = 0.0f; +const float MAX_MICROPHONE_VOLUME = 255.0f; +const float MIN_MICROPHONE_VOLUME = 0.0f; +class YangWinAudioDevice +{ +public: + YangWinAudioDevice(); + virtual ~YangWinAudioDevice(); + void init(); + void startBuiltInAec(); + int32_t getDeviceListCount(EDataFlow dir); + int32_t playoutDeviceCount(); + int32_t enumEndpointDevicesAll(EDataFlow dataFlow); + int32_t getDeviceID(IMMDevice* pDevice,LPWSTR pszBuffer, int32_t bufferLen); + int32_t getDefaultDeviceID(EDataFlow dir, ERole role,LPWSTR szBuffer,int32_t bufferLen); + int32_t getDefaultDevice(EDataFlow dir,ERole role,IMMDevice** ppDevice); + int32_t getDefaultDeviceIndex(EDataFlow dir,ERole role,int* index); + + int32_t initCapture(); + int32_t initMicrophone(int32_t pind); + int32_t initSpeaker(int32_t pind); + int32_t initPlay(); + int32_t getListDevice(EDataFlow dir,int32_t index,IMMDevice** ppDevice); + + int32_t getMicrophoneVolume(int& volume); + int32_t setMicrophoneVolume(int32_t volume); + int32_t setMicrophoneMute(bool enable); + + int32_t getSpeakerVolume(int& volume); + int32_t setSpeakerVolume(int32_t volume); + int32_t getSpeakerMute(bool& enabled); + int32_t setSpeakerMute(bool enable); + + void captureThread(); + int32_t playThread(); +private: + IMMDeviceEnumerator* m_enum; + IMMDeviceCollection* m_renderCollection; + IMMDeviceCollection* m_captureCollection; + IMMDevice* m_deviceOut; + IMMDevice* m_deviceIn; + + IMediaObject* m_dmo; + + IAudioClient* m_clientOut; + IAudioClient* m_clientIn; + IAudioRenderClient* m_renderClient; + IAudioCaptureClient* m_captureClient; + IAudioEndpointVolume* m_captureVolume; + ISimpleAudioVolume* m_renderSimpleVolume; + + boolean _builtInAecEnabled; +double _perfCounterFactor; +uint32_t _sndCardPlayDelay; +int32_t _inputDeviceIndex=0,_outputDeviceIndex=0; +UINT64 _writtenSamples; +UINT64 _readSamples; + + UINT _playAudioFrameSize; +uint32_t _playSampleRate; +uint32_t _devicePlaySampleRate; +uint32_t _playBlockSize; +uint32_t _devicePlayBlockSize; +uint32_t _playChannels; + +UINT _recAudioFrameSize; +uint32_t _recSampleRate; +uint32_t _recBlockSize; +uint32_t _recChannels; + std::mutex m_lock; +private: + int32_t initRecordingDMO(); + int32_t setDMOProperties(); + int32_t setBoolProperty(IPropertyStore* ptrPS, REFPROPERTYKEY key,VARIANT_BOOL value); + int32_t 
setVtI4Property(IPropertyStore* ptrPS, REFPROPERTYKEY key,LONG value); + + HANDLE _hRenderSamplesReadyEvent; + HANDLE _hPlayThread; + HANDLE _hRenderStartedEvent; + HANDLE _hShutdownRenderEvent; + + HANDLE _hCaptureSamplesReadyEvent; + HANDLE _hRecThread; + HANDLE _hCaptureStartedEvent; + HANDLE _hShutdownCaptureEvent; + +LARGE_INTEGER _perfCounterFreq; + uint16_t _recChannelsPrioList[3]; + uint16_t _playChannelsPrioList[2]; + + bool _initialized; + bool _recording; + bool _playing; + bool _recIsInitialized; + bool _playIsInitialized; + bool _speakerIsInitialized; + bool _microphoneIsInitialized; + +}; +#endif +#endif // YANGWINAUDIODEVICE_H diff --git a/libmetartc3/src/yangaudiodev/win/YangWinRecordAudioCapture.cpp b/libmetartc3/src/yangaudiodev/win/YangWinRecordAudioCapture.cpp new file mode 100755 index 00000000..cedd4399 --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinRecordAudioCapture.cpp @@ -0,0 +1,256 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#ifdef _WIN32 +#include +#include +#include +#include +//#pragma comment(lib, "strmiids") +using namespace std; + +#define SAFE_RELEASE(x) { if (x) x->Release(); x = NULL; } +YangWinRecordAudioCapture::YangWinRecordAudioCapture(YangContext *pcontext) { + m_context = pcontext; + m_ahandle = new YangRecAudioCaptureHandle(pcontext); + + aIndex = 0; + m_ret = 0; + m_size = 0; + m_loops = 0; + + m_isInit = 0; + + m_frames=960; + m_channel = pcontext->avinfo.audio.channel; + m_sample = pcontext->avinfo.audio.sample; + + m_pg = NULL; + m_pb = NULL; + m_pm = NULL; + m_grabber = NULL; + m_grabberF = NULL; + m_event = NULL; + m_nullRender = NULL; + m_audioSrc = NULL; + m_moniker = NULL; + m_mt = NULL; + aIndex = 1; + + if(m_sample==44100) m_frames = 1024; + + if(m_sample==48000) m_frames=960; + +} +YangWinRecordAudioCapture::~YangWinRecordAudioCapture() { + + if (m_loops) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + + if(m_ahandle){ + SAFE_RELEASE(m_grabber); + SAFE_RELEASE(m_grabberF); + + SAFE_RELEASE(m_event); + SAFE_RELEASE(m_pm); + SAFE_RELEASE(m_pg); + SAFE_RELEASE(m_pb); + SAFE_RELEASE(m_audioSrc); + + yang_delete(m_ahandle); + } +} +void YangWinRecordAudioCapture::setCatureStart() { + m_ahandle->isBuf = 1; +} +void YangWinRecordAudioCapture::setCatureStop() { + m_ahandle->isBuf = 0; +} +void YangWinRecordAudioCapture::setOutAudioBuffer(YangAudioBuffer *pbuffer) { + m_ahandle->setOutAudioBuffer(pbuffer); +} +void YangWinRecordAudioCapture::setPlayAudoBuffer(YangAudioBuffer *pbuffer) { + m_ahandle->m_aecPlayBuffer = pbuffer; +} +void YangWinRecordAudioCapture::setAec(YangRtcAec *paec) { + m_ahandle->m_aec = paec; +} +void YangWinRecordAudioCapture::setPreProcess(YangPreProcess *pp) { + +} + +void YangWinRecordAudioCapture::setInAudioBuffer(vector *pal) { + +} +void YangWinRecordAudioCapture::stopLoop() { + m_loops = 0; + if(m_pm!=NULL) m_pm->Stop(); +} + + + +HRESULT YangWinRecordAudioCapture::GetUnconnectPin(IBaseFilter *pFilter, + PIN_DIRECTION dir, IPin **ppPin) { + *ppPin = 0; + IEnumPins *pEnum = 0; + IPin *pPin = 0; + HRESULT hr = pFilter->EnumPins(&pEnum); + if (FAILED(hr)) { + return hr; + } + while (pEnum->Next(1, &pPin, NULL) == S_OK) { + PIN_DIRECTION thisPinDir; + pPin->QueryDirection(&thisPinDir); + if (thisPinDir == dir) { + IPin *pTemp = 0; + hr = pPin->ConnectedTo(&pTemp); + if (SUCCEEDED(hr)) { + pTemp->Release(); + } else { + pEnum->Release(); + *ppPin = pPin; + return S_OK; + } + } + pPin->Release(); + } + pEnum->Release(); + return E_FAIL; +} + +IPin* 
YangWinRecordAudioCapture::FindPin(IBaseFilter *pFilter, PIN_DIRECTION dir) {
+	IEnumPins *pEnumPins;
+	IPin *pOutpin;
+	PIN_DIRECTION pDir;
+	pFilter->EnumPins(&pEnumPins);
+	while (pEnumPins->Next(1, &pOutpin, NULL) == S_OK) {
+		pOutpin->QueryDirection(&pDir);
+
+		if (pDir == dir) {
+			return pOutpin;
+		}
+	}
+	return 0;
+}
+void YangWinRecordAudioCapture::setPara() {
+//	printf("set param is starting...\n");
+	int32_t nBytesPerSample = 2;
+	IAMStreamConfig *config = NULL;
+	HRESULT hr = m_pb->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Audio,
+			m_audioSrc, IID_IAMStreamConfig, (void**) &config);
+	AM_MEDIA_TYPE *pmt = { 0 };
+	hr = config->GetFormat(&pmt);
+	if (SUCCEEDED(hr)) {
+
+		WAVEFORMATEX *pWF = (WAVEFORMATEX*) pmt->pbFormat;
+		pmt->subtype = MEDIASUBTYPE_PCM;
+
+		pWF->nChannels = (WORD) m_channel; //m_sample;//2;
+		pWF->nSamplesPerSec = m_sample; //44100;
+		pWF->nAvgBytesPerSec = nBytesPerSample * m_channel * m_sample; //4 * 44100;
+		pWF->wBitsPerSample = nBytesPerSample * 8;
+		pWF->nBlockAlign = nBytesPerSample * m_channel; //4;
+		hr = config->SetFormat(pmt);
+		SAFE_RELEASE(config);
+
+		IAMBufferNegotiation *pNeg = NULL;
+		hr = m_pb->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Audio,
+				m_audioSrc, IID_IAMBufferNegotiation, (void**) &pNeg);
+		ALLOCATOR_PROPERTIES prop = { 0 };
+		prop.cbBuffer = m_frames * nBytesPerSample * m_channel;
+		prop.cBuffers = 6;
+		prop.cbAlign = nBytesPerSample * m_channel;
+		hr = pNeg->SuggestAllocatorProperties(&prop);
+		SAFE_RELEASE(pNeg);
+//DeleteMediaType(pmt);
+	} else {
+		printf("set param failed!..........\n");
+	}
+
+}
+int32_t YangWinRecordAudioCapture::init() {
+	if (m_isInit)
+		return Yang_Ok;
+
+	CoInitialize(NULL);
+
+	ULONG cFetched;
+	ICreateDevEnum *devEnum = NULL;
+	CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC,
+			IID_PPV_ARGS(&devEnum));
+	IEnumMoniker *classEnum = NULL;
+	devEnum->CreateClassEnumerator(CLSID_AudioInputDeviceCategory,
+			&classEnum, 0);
+	int32_t aco = 0;
+	while (classEnum->Next(1, &m_moniker, &cFetched) == S_OK) {
+		aco++;
+		if (aco != aIndex)
+			continue;
+		m_moniker->BindToObject(0, 0, IID_IBaseFilter, (void**) &m_audioSrc);
+		m_moniker->Release();
+		break;
+	}
+	classEnum->Release();
+	devEnum->Release();
+	if (m_audioSrc == NULL)
+		return 1;
+	CoCreateInstance(CLSID_CaptureGraphBuilder2, 0, CLSCTX_INPROC_SERVER,
+			IID_ICaptureGraphBuilder2, (void**) &m_pb);
+	CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
+			IID_IGraphBuilder, (void**) &m_pg);
+
+	m_pb->SetFiltergraph(m_pg);
+	m_pg->QueryInterface(IID_IMediaControl, (void**) &m_pm);
+	m_pg->AddFilter(m_audioSrc, L"Audio");
+
+	CLSID CLSID_SampleGrabber = { 0xC1F400A0, 0x3F08, 0x11d3, { 0x9F, 0x0B,
+			0x00, 0x60, 0x08, 0x03, 0x9E, 0x37 } };
+
+	HRESULT hr = CoCreateInstance(CLSID_SampleGrabber, NULL,
+			CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&m_grabberF));
+	hr = m_pg->AddFilter(m_grabberF, L"Sample Grabber");
+	hr = m_grabberF->QueryInterface(IID_ISampleGrabber, (void**) &m_grabber);
+
+	hr = m_pg->QueryInterface(IID_IMediaEventEx, (void**) &m_event);
+
+	m_nullRender = NULL;
+	CLSID CLSID_NullRenderer = { 0xC1F400A4, 0x3F08, 0x11d3, { 0x9F, 0x0B, 0x00,
+			0x60, 0x08, 0x03, 0x9E, 0x37 } };
+	hr = CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC,
+			IID_IBaseFilter, (void**) &m_nullRender);
+	setPara();
+
+	IPin *pAudioOut = FindPin(m_audioSrc, PINDIR_OUTPUT);
+	IPin *pGrabin = FindPin(m_grabberF, PINDIR_INPUT);
+	IPin *pGrabout = FindPin(m_grabberF, PINDIR_OUTPUT);
+	IPin *pNullIn = FindPin(m_nullRender, PINDIR_INPUT);
+	hr
= m_pg->Connect(pAudioOut, pGrabin); + hr = m_pg->Connect(pGrabout, pNullIn); + + + if (m_mt == NULL) m_mt = new AM_MEDIA_TYPE(); + hr = m_grabber->GetConnectedMediaType(m_mt); + + hr = m_grabber->SetMediaType(m_mt); + hr = m_grabber->SetBufferSamples(TRUE); + hr = m_grabber->SetOneShot(FALSE); + m_grabber->SetCallback(m_ahandle, 1); + m_isInit = 1; + return Yang_Ok; +} + +void YangWinRecordAudioCapture::startLoop() { + m_loops = 1; + if (m_pm != NULL) { + m_pm->Run(); + long eventCode; + m_event->WaitForCompletion(INFINITE, &eventCode); + } + +} +#endif diff --git a/libmetartc3/src/yangaudiodev/win/YangWinRecordAudioCapture.h b/libmetartc3/src/yangaudiodev/win/YangWinRecordAudioCapture.h new file mode 100755 index 00000000..c9f9b0dd --- /dev/null +++ b/libmetartc3/src/yangaudiodev/win/YangWinRecordAudioCapture.h @@ -0,0 +1,76 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef Yang_YangWinRecordAudioCapture_H1 +#define Yang_YangWinRecordAudioCapture_H1 + +#ifdef _WIN32 +#include "amstream.h" +#include +#include "qedit.h" +#include "yangavutil/audio/YangAudioMix.h" +#include "yangavutil/audio/YangPreProcess.h" +#include +#include + +#include "yangavutil/audio/YangRtcAec.h" +class YangWinRecordAudioCapture: public YangAudioCapture { +public: + YangWinRecordAudioCapture(YangContext *pcontext); + ~YangWinRecordAudioCapture(); + +public: + YangRecAudioCaptureHandle *m_ahandle; + + int32_t init(); + void setInAudioBuffer(vector *pal); + void setPreProcess(YangPreProcess *pp); + void setCatureStart(); + void setCatureStop(); + void setOutAudioBuffer(YangAudioBuffer *pbuffer); + void setPlayAudoBuffer(YangAudioBuffer *pbuffer); + void setAec(YangRtcAec *paec); + +protected: + //void run(); + + void startLoop(); + + void stopLoop(); +private: + + int32_t m_isInit; + int32_t m_ret; + void setPara(); + void getBaseFilter(const GUID gi,IBaseFilter *ib,int32_t ind); + HRESULT GetUnconnectPin(IBaseFilter * pFilter,PIN_DIRECTION dir,IPin **ppPin); + IPin * FindPin(IBaseFilter * pFilter,PIN_DIRECTION dir); + IPin * FindPin1(IBaseFilter * pFilter,PIN_DIRECTION dir); + HRESULT EnumerateDevices(REFGUID category, IEnumMoniker **ppEnum); + +private: + //YangAlsaDevice *m_dev; + int32_t m_size; + int32_t m_loops; + int32_t m_channel; + uint32_t m_sample; + int32_t m_frames; + + AM_MEDIA_TYPE *m_mt; + IMediaControl *m_pm; + IGraphBuilder *m_pg; + ICaptureGraphBuilder2 *m_pb; + + IBaseFilter *m_audioSrc; + IBaseFilter *m_nullRender; + + IMoniker *m_moniker; + ISampleGrabber *m_grabber; + IBaseFilter *m_grabberF; + IMediaEventEx *m_event; +}; +//#ifdef __cplusplus +//} +//#endif +#endif +#endif diff --git a/libmetartc3/src/yangavutil/YangImageConvert.cpp b/libmetartc3/src/yangavutil/YangImageConvert.cpp new file mode 100755 index 00000000..6e9f0dd1 --- /dev/null +++ b/libmetartc3/src/yangavutil/YangImageConvert.cpp @@ -0,0 +1,427 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include + + +#include +#ifndef _WIN32 +#include + +#include +#include +#define MY(a,b,c) (( a* 0.2989 + b* 0.5866 + c* 0.1145)) +#define MU(a,b,c) (( a*(-0.1688) + b*(-0.3312) + c* 0.5000 + 128)) +#define MV(a,b,c) (( a* 0.5000 + b*(-0.4184) + c*(-0.0816) + 128)) + +#define DY(a,b,c) (MY(a,b,c) > 255 ? 255 : (MY(a,b,c) < 0 ? 0 : MY(a,b,c))) +#define DU(a,b,c) (MU(a,b,c) > 255 ? 255 : (MU(a,b,c) < 0 ? 0 : MU(a,b,c))) +#define DV(a,b,c) (MV(a,b,c) > 255 ? 255 : (MV(a,b,c) < 0 ? 
0 : MV(a,b,c)))
+#define uint8_t uint8_t
+YangImageConvert::YangImageConvert() {
+	// TODO Auto-generated constructor stub
+
+}
+
+YangImageConvert::~YangImageConvert() {
+	// TODO Auto-generated destructor stub
+}
+
+void YangImageConvert::RGB24_TO_YV12(uint8_t* yv12, uint8_t* rgb24, int32_t w, int32_t h)
+{
+	int32_t iBufLen = w * h;
+	int32_t i, j, vay, vau, vav;
+	uint8_t* cv;	// v at the current position (current v)
+	uint8_t* nv;	// v at the same column, one row below cv
+	uint8_t* cu;	// u at the current position (current u)
+	uint8_t* nu;	// u at the same column, one row below cu
+	uint8_t v01, v02, v11, v12, u01, u02, u11, u12;	// the four neighboring samples to be merged, see below
+
+	uint8_t* vv = new uint8_t[iBufLen];	// one V value per RGB pixel
+	uint8_t* uu = new uint8_t[iBufLen];	// one U value per RGB pixel
+
+	// compute all YUV values from RGB24 with the standard formulas
+
+	RGB24 * pRGB = (RGB24*) rgb24;
+	uint8_t* y = yv12;	// write Y straight into the yv12 buffer, which saves a copy
+	uint8_t* v = vv;
+	uint8_t* u = uu;
+
+	for (i = 0; i < h; i++)
+	{
+		for (j = 0; j < w; j++)
+		{
+
+			//Y = 0.299R + 0.587G + 0.114B
+			//U = -0.147R - 0.289G + 0.436B
+			//V = 0.615R - 0.515G - 0.100B
+
+			// this variant gives colors that are slightly off:
+			//vay = pRGB->r * 0.299 + pRGB->g * 0.587 + pRGB->b * 0.114;	// Y per the formula above
+			//vav = pRGB->r * 0.615 + pRGB->g * -0.515 + pRGB->b * -0.100;	// V per the formula above
+			//vau = pRGB->r * -0.147 + pRGB->g * -0.289 + pRGB->b * 0.436;	// U per the formula above
+
+			//Y = round( 0.256788 * R + 0.504129 * G + 0.097906 * B) + 16
+			//U = round(-0.148223 * R - 0.290993 * G + 0.439216 * B) + 128
+			//V = round( 0.439216 * R - 0.367788 * G - 0.071427 * B) + 128
+
+			// this version gives accurate colors, and matches the formulas listed on MSDN
+			vay = 0.256788 * pRGB->r + 0.504129 * pRGB->g + 0.097906 * pRGB->b + 16;
+			vau = -0.148223 * pRGB->r - 0.290993 * pRGB->g + 0.439216 * pRGB->b + 128;
+			vav = 0.439216 * pRGB->r - 0.367788 * pRGB->g - 0.071427 * pRGB->b + 128;
+
+			*y = vay < 0 ? 0 : (vay > 255 ? 255 : vay);	// clamp Y to [0,255]
+			*v = vav < 0 ? 0 : (vav > 255 ? 255 : vav);	// clamp V to [0,255]
+			*u = vau < 0 ? 0 : (vau > 255 ? 255 : vau);	// clamp U to [0,255]
+
+			y++;	// move to the next sample
+			v++;
+			u++;
+			pRGB++;
+		}
+	}
+
+	u = yv12 + iBufLen;		// position of the first chroma plane in the output
+							// (note: writing U first and V after it is I420 plane order; true YV12 stores V first)
+
+	v = u + (iBufLen >> 2);	// position of the second chroma plane in the output
+
+	for (i = 0; i < h; i += 2)	// U and V keep every other row only, hence += 2
+	{
+		cv = vv + i * w;		// v of row i
+		nv = vv + (i + 1) * w;	// v of row i + 1
+
+		cu = uu + i * w;		// u of row i
+		nu = uu + (i + 1) * w;	// u of row i + 1
+
+		for (j = 0; j < w; j += 2)	// each pass consumes two columns of uu/vv, hence += 2
+		{
+			v01 = *(cv + j);		// v at row i, column j
+			v02 = *(cv + j + 1);	// v at row i, column j + 1
+			v11 = *(nv + j);		// v at row i + 1, column j
+			v12 = *(nv + j + 1);	// v at row i + 1, column j + 1
+
+			*v = (v01 + v02 + v11 + v12) / 4;	// average the four v samples
+
+			u01 = *(cu + j);		// u at row i, column j
+			u02 = *(cu + j + 1);	// u at row i, column j + 1
+			u11 = *(nu + j);		// u at row i + 1, column j
+			u12 = *(nu + j + 1);	// u at row i + 1, column j + 1
+
+			*u = (u01 + u02 + u11 + u12) / 4;	// average the four u samples
+
+			v++;	// move to the next output sample
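The per-pixel math above is the BT.601 full-to-studio-range transform (Y = 0.2568R + 0.5041G + 0.0979B + 16, with U and V offset by 128), followed by 2x2 averaging of the full-resolution chroma planes to reach 4:2:0. A compact sketch of the same two steps, illustrative only and using the float coefficients from the source:

#include <cstdint>
#include <algorithm>

// BT.601 RGB -> YUV for one pixel (the MSDN coefficients used above).
static inline void rgbToYuv601(uint8_t r, uint8_t g, uint8_t b,
                               uint8_t& y, uint8_t& u, uint8_t& v) {
    int Y = (int)( 0.256788 * r + 0.504129 * g + 0.097906 * b) + 16;
    int U = (int)(-0.148223 * r - 0.290993 * g + 0.439216 * b) + 128;
    int V = (int)( 0.439216 * r - 0.367788 * g - 0.071427 * b) + 128;
    y = (uint8_t)std::clamp(Y, 0, 255);   // clamp exactly as the code above does
    u = (uint8_t)std::clamp(U, 0, 255);
    v = (uint8_t)std::clamp(V, 0, 255);
}

// 4:2:0 chroma: average each 2x2 block of the full-resolution plane.
static inline uint8_t avg2x2(const uint8_t* plane, int w, int x, int y) {
    return (uint8_t)((plane[y * w + x]       + plane[y * w + x + 1] +
                      plane[(y + 1) * w + x] + plane[(y + 1) * w + x + 1]) / 4);
}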
+			u++;
+
+		}
+	}
+
+	delete [] vv;
+	delete [] uu;
+
+}
+
+void YangImageConvert::RGB24_To_I420(uint8_t *RGBbuf, uint8_t *YUV, int32_t width, int32_t height)
+{
+
+	int32_t i, x, y, j;
+	uint8_t *Y = NULL;
+	uint8_t *U = NULL;
+	uint8_t *V = NULL;
+
+	uint8_t *RGB = NULL;
+
+	int32_t imgSize = width * height;
+
+	RGB = (uint8_t*) malloc(imgSize * 3);
+
+	memcpy(RGB, RGBbuf, width * height * 3);
+//	for(i=HEIGHT-1,j=0; i>=0; i--,j++) // adjust the row order
+//	{
+//		memcpy(RGB+j*WIDTH*3,RGB+WIDTH*HEIGHT*3+i*WIDTH*3,WIDTH*3);
+//	}
+
+	/************************************************************************/
+	// if the input RGBbuf is in BGR order, uncomment the channel swap below
+	// to reorder it
+//	uint8_t temp;
+//	for(i=0; (unsigned int)i < WIDTH*HEIGHT*3; i+=3)
+//	{
+//		temp = RGB[i];
+//		RGB[i] = RGB[i+2];
+//		RGB[i+2] = temp;
+//	}
+	/************************************************************************/
+
+	Y = YUV;
+	U = YUV + width * height;
+	V = U + ((width * height) >> 2);
+
+	for (y = 0; y < height; y++)
+		for (x = 0; x < width; x++)
+		{
+			j = y * width + x;
+			i = j * 3;
+			Y[j] = (uint8_t) (DY(RGB[i], RGB[i+1], RGB[i+2]));
+
+			if (x % 2 == 1 && y % 2 == 1)
+			{
+				j = (width >> 1) * (y >> 1) + (x >> 1);
+				// i from the Y step above is still valid here
+				U[j] = (uint8_t)
+					((DU(RGB[i  ], RGB[i+1], RGB[i+2]) +
+					  DU(RGB[i-3], RGB[i-2], RGB[i-1]) +
+					  DU(RGB[i  -width*3], RGB[i+1-width*3], RGB[i+2-width*3]) +
+					  DU(RGB[i-3-width*3], RGB[i-2-width*3], RGB[i-1-width*3]))/4);
+
+				V[j] = (uint8_t)
+					((DV(RGB[i  ], RGB[i+1], RGB[i+2]) +
+					  DV(RGB[i-3], RGB[i-2], RGB[i-1]) +
+					  DV(RGB[i  -width*3], RGB[i+1-width*3], RGB[i+2-width*3]) +
+					  DV(RGB[i-3-width*3], RGB[i-2-width*3], RGB[i-1-width*3]))/4);
+			}
+
+		}
+
+	if (RGB) free(RGB);
+}
+uint8_t YangImageConvert::clip255(long v)
+{
+	if (v < 0) v = 0;
+	else if (v > 255) v = 255;
+	return (uint8_t) v;
+}
+void YangImageConvert::YUY2_To_RGB24(uint8_t *YUY2buff, uint8_t *RGBbuff, unsigned long dwSize)
+{
+	//
+	//lC = lY - 16
+	//lD = btU - 128
+	//lE = btV - 128
+	//btR = clip(( 298 * lC + 409 * lE + 128) >> 8)
+	//btG = clip(( 298 * lC - 100 * lD - 208 * lE + 128) >> 8)
+	//btB = clip(( 298 * lC + 516 * lD + 128) >> 8)
+	uint8_t *orgRGBbuff = RGBbuff;
+	for (unsigned long count = 0; count < dwSize; count += 4)
+	{
+		//Y0 U0 Y1 V0
+		uint8_t btY0 = *YUY2buff;
+		uint8_t btU = *(++YUY2buff);
+		uint8_t btY1 = *(++YUY2buff);
+		uint8_t btV = *(++YUY2buff);
+		++YUY2buff;
+
+		long lY, lC, lD, lE;
+		uint8_t btR, btG, btB;
+
+		lY = btY0;
+		lC = lY - 16;
+		lD = btU - 128;
+		lE = btV - 128;
+		btR = clip255(( 298 * lC + 409 * lE + 128) >> 8);
+		btG = clip255(( 298 * lC - 100 * lD - 208 * lE + 128) >> 8);
+		btB = clip255(( 298 * lC + 516 * lD + 128) >> 8);
+
+		*(RGBbuff) = btB;
+		*(++RGBbuff) = btG;
+		*(++RGBbuff) = btR;
+
+		lY = btY1;
+		lC = lY - 16;
+		lD = btU - 128;
+		lE = btV - 128;
+		btR = clip255(( 298 * lC + 409 * lE + 128) >> 8);
+		btG = clip255(( 298 * lC - 100 * lD - 208 * lE + 128) >> 8);
+		btB = clip255(( 298 * lC + 516 * lD + 128) >> 8);
+		*(++RGBbuff) = btB;
+		*(++RGBbuff) = btG;
+		*(++RGBbuff) = btR;
+		++RGBbuff;
+	}
+}
+
+void YangImageConvert::plusYuy2(uint8_t* src, uint8_t * dest, int32_t srcWidth, int32_t srcHeight, int32_t destWidth, int32_t destHeight, int32_t model) {
+
+	int32_t i = 0;
+	int32_t j = 0;
+	int32_t sh = srcHeight - destHeight;
+	int32_t sw = srcWidth - destWidth;
+	int32_t start = 0;
+	uint8_t* temp;
+	if (model == 4) {
+		start = (srcHeight - destHeight + 1) * srcWidth * 2 - destWidth * 2;
+		temp = src + start;
+		for(i=0;i
+
+ struct RGB24{
+	uint8_t b;
+	uint8_t g;
+	uint8_t r;
+} ;
+class YangImageConvert {
+public:
+	YangImageConvert();
+	virtual ~YangImageConvert();
+	void RGB24_TO_YV12(uint8_t*
yv12,uint8_t* rgb24,int32_t w,int32_t h); + void RGB24_To_I420( uint8_t *RGBbuf, uint8_t *YUV, int32_t width, int32_t height ); + void YUY2_To_RGB24(uint8_t *YUY2buff,uint8_t *RGBbuff,unsigned long dwSize); + int32_t YUVBlending(void* pBGYUV, void* pFGYUV, int32_t width, int32_t height, bool alphaBG, bool alphaFG) ; + void plusAuthor(uint8_t* src,uint8_t * dest,int32_t srcWidth,int32_t srcHeight,int32_t destWidth,int32_t destHeight,int32_t model); + void plusYuy2(uint8_t* src,uint8_t * dest,int32_t srcWidth,int32_t srcHeight,int32_t destWidth,int32_t destHeight,int32_t model); +private: + uint8_t clip255(long v); +}; + +#endif /* YANGAVUTIL_SRC_YANGIMAGECONVERT_H_ */ diff --git a/libmetartc3/src/yangavutil/YangMakeWave.cpp b/libmetartc3/src/yangavutil/YangMakeWave.cpp new file mode 100755 index 00000000..b5184ffd --- /dev/null +++ b/libmetartc3/src/yangavutil/YangMakeWave.cpp @@ -0,0 +1,87 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +YangMakeWave::YangMakeWave(){ + waveFile=NULL; + PCMSize=0; + m_isMono=0; +} +/** +int32_t YangMakeWave::isMp3(char *p){ + //char p[20]; + //printf("a==%c,%c,%c",p[0],p[2],p[22]); + if(p[0]=='M'&&p[2]=='P'&&p[22]=='3') { + printf("****************mp3"); + return 1 + ;} + return 0; +}**/ +void YangMakeWave::write(uint8_t * data,int32_t len){ +fwrite(data,1,len,waveFile); +fflush(waveFile); +PCMSize+=len; + +} +void YangMakeWave::start(int32_t pisMono,char * filename){ + m_isMono=pisMono; + waveFile=fopen(filename,"wb"); + writeHeader(m_isMono,waveFile,0); + PCMSize=0; +} +void YangMakeWave::stop(){ + writeHeader(m_isMono,waveFile,PCMSize); + fclose(waveFile); + waveFile=NULL; +} +void YangMakeWave::writeHeader(int32_t isMono,FILE *WavFile,unsigned long len){ + + WaveHdr WaveHeader; + + WaveHeader.fileID[0] = 'R'; + WaveHeader.fileID[1] = 'I'; + WaveHeader.fileID[2] = 'F'; + WaveHeader.fileID[3] = 'F'; + WaveHeader.fileleth = 0; + WaveHeader.wavTag[0] = 'W'; + WaveHeader.wavTag[1] = 'A'; + WaveHeader.wavTag[2] = 'V'; + WaveHeader.wavTag[3] = 'E'; + + WaveHeader.FmtHdrID[0] = 'f'; + WaveHeader.FmtHdrID[1] = 'm'; + WaveHeader.FmtHdrID[2] = 't'; + WaveHeader.FmtHdrID[3] = ' '; + + WaveHeader.FmtHdrLeth = 0; + +// ChunkHdr FmtHdr = {"fmt ",}; + WaveHeader.DataHdrID[0] = 'd'; + WaveHeader.DataHdrID[1] = 'a'; + WaveHeader.DataHdrID[2] = 't'; + WaveHeader.DataHdrID[3] = 'a'; + WaveHeader.DataHdrLeth = 0; + rewind(WavFile); + + WaveHeader.fileleth = len + 32; + WaveHeader.FmtHdrLeth = 16; + WaveHeader.BitsPerSample = 16; + WaveHeader.Channels = 2; + WaveHeader.FormatTag = 0x0001; + if(isMono){ + WaveHeader.SamplesPerSec = 16000; + WaveHeader.AvgBytesPerSec = 2*16000; + }else{ + WaveHeader.SamplesPerSec = 44100; + WaveHeader.AvgBytesPerSec = 4*44100; + } + + WaveHeader.BlockAlign = 4; + + WaveHeader.DataHdrLeth = len; + + fwrite(&WaveHeader,sizeof(WaveHdr),1,WavFile); + fflush( WavFile ); + +} + diff --git a/libmetartc3/src/yangavutil/YangPicUtilFfmpeg.cpp b/libmetartc3/src/yangavutil/YangPicUtilFfmpeg.cpp new file mode 100755 index 00000000..09b3cdda --- /dev/null +++ b/libmetartc3/src/yangavutil/YangPicUtilFfmpeg.cpp @@ -0,0 +1,111 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "yangavutil/video/YangPicUtilFfmpeg.h" +#if HavePicUtilFfmpeg + +void YangPicUtilFfmpeg::loadLib(){ + //yang_opus_encoder_create=(OpusEncoder *(*)(opus_int32_t Fs, int32_t channels, int32_t application, int32_t *error))m_lib.loadFunction(""); +yang_sws_getContext =(struct SwsContext *(*)(int32_t srcW, int32_t srcH, enum AVPixelFormat srcFormat, + int32_t dstW, 
int32_t dstH, enum AVPixelFormat dstFormat, + int32_t flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param))m_lib.loadFunction("sws_getContext"); + +yang_sws_scale =(int32_t (*)(struct SwsContext *c, const uint8_t *const srcSlice[], + const int32_t srcStride[], int32_t srcSliceY, int32_t srcSliceH, + uint8_t *const dst[], const int32_t dstStride[]))m_lib.loadFunction("sws_scale"); +yang_sws_freeContext =(void (*)(struct SwsContext *swsContext))m_lib.loadFunction("sws_freeContext"); + + + + +yang_avpicture_alloc =(int32_t (*)(AVPicture *picture, enum AVPixelFormat pix_fmt, int32_t width, int32_t height))m_lib1.loadFunction("avpicture_alloc"); +yang_avpicture_free =(void (*)(AVPicture *picture))m_lib1.loadFunction("avpicture_free"); + + +} + +void YangPicUtilFfmpeg::unloadLib(){ + yang_sws_getContext=NULL; + yang_sws_scale=NULL; + yang_sws_freeContext=NULL; + yang_avpicture_alloc=NULL; + yang_avpicture_free=NULL; + +} +YangPicUtilFfmpeg::YangPicUtilFfmpeg(void) +{ + inWidth=0,inHeight=0,outWidth=0,outHeight=0; + img_convert_ctx=NULL; + unloadLib(); + + //in=in_pic.data[0]; + //out=out_pic.data[0]; +} + + +YangPicUtilFfmpeg::~YangPicUtilFfmpeg(void) +{ + closeAll(); + unloadLib(); + m_lib.unloadObject(); + m_lib1.unloadObject(); + // m_lib2.unloadObject(); +} +void YangPicUtilFfmpeg::closeAll() +{ + if(img_convert_ctx!=NULL) yang_sws_freeContext(img_convert_ctx); + yang_avpicture_free(&out_pic); + yang_avpicture_free(&in_pic); +} + + + +void YangPicUtilFfmpeg::getAddr(uint8_t **p_in,uint8_t **p_out){ + *p_in=in_pic.data[0]; + *p_out=out_pic.data[0]; +} +void YangPicUtilFfmpeg::init420P(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight) +{ + init(pinWidth,pinHeight,poutWidth,poutHeight,AV_PIX_FMT_YUV420P,AV_PIX_FMT_YUV420P); +} +void YangPicUtilFfmpeg::initYuy2(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight) +{ + init(pinWidth,pinHeight,poutWidth,poutHeight,AV_PIX_FMT_YUYV422,AV_PIX_FMT_YUYV422); +} +void YangPicUtilFfmpeg::initNv12(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight) +{ + init(pinWidth,pinHeight,poutWidth,poutHeight,AV_PIX_FMT_NV12,AV_PIX_FMT_NV12); +} +void YangPicUtilFfmpeg::initYuy2_Bgr24(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight) +{ + init(pinWidth,pinHeight,poutWidth,poutHeight,AV_PIX_FMT_YUYV422,AV_PIX_FMT_BGR24); +} + +void YangPicUtilFfmpeg::init(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight,AVPixelFormat src,AVPixelFormat dst){ + m_lib.loadObject("libswscale"); + m_lib1.loadObject("libavcodec"); + //m_lib2.loadObject("libopencv_imgcodecs"); + loadLib(); + inWidth=pinWidth; + inHeight=pinHeight; + outWidth=poutWidth; + outHeight=poutHeight; + img_convert_ctx = yang_sws_getContext(inWidth, inHeight, + src, outWidth, outHeight, dst,SWS_FAST_BILINEAR,//SWS_POINT,//SWS_GAUSS, + NULL, NULL, NULL); + + yang_avpicture_alloc(&in_pic, src, inWidth, inHeight); + yang_avpicture_alloc(&out_pic, dst, outWidth, outHeight); +} +void YangPicUtilFfmpeg::initBgr24_Yuy2(int32_t pinWidth,int32_t pinHeight,int32_t poutWidth,int32_t poutHeight){ + init(pinWidth,pinHeight,poutWidth,poutHeight,AV_PIX_FMT_BGR24,AV_PIX_FMT_YUYV422); + //sws_scale +} + +void YangPicUtilFfmpeg::resize() +{ + yang_sws_scale(img_convert_ctx, in_pic.data, in_pic.linesize, 0, inHeight, out_pic.data, out_pic.linesize); + +} +#endif diff --git a/libmetartc3/src/yangavutil/YangSwResample.cpp b/libmetartc3/src/yangavutil/YangSwResample.cpp new file mode 100755 index 
00000000..f174669d --- /dev/null +++ b/libmetartc3/src/yangavutil/YangSwResample.cpp @@ -0,0 +1,67 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +YangSwResample::YangSwResample() +{ + swr_ctx = NULL; + m_swrData=NULL; + m_channel=2; + m_inSample=48000; + m_outSample=44100; + m_frameSize=441; + m_contextt=0; + +} +YangSwResample::~YangSwResample() +{ + if (swr_ctx) { + swr_free(&swr_ctx); + } + if (m_swrData) { + av_freep(&m_swrData[0]); + free(m_swrData); + m_swrData = NULL; + } + + +} +int YangSwResample::init(int32_t pchannel,int32_t pinsample,int32_t poutsample,int32_t pframeSize){ + if(m_contextt) return Yang_Ok; + m_inSample=pinsample; + m_outSample=poutsample; + m_channel=pchannel; + m_frameSize=pframeSize; + + swr_ctx = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, m_inSample, + AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, m_outSample, 0, NULL); + if (!swr_ctx) { + return yang_error_wrap(1, "YangSwResample fail to swr_alloc\n"); + + } + int error; + char err_buf[AV_ERROR_MAX_STRING_SIZE] = {0}; + if ((error = swr_init(swr_ctx)) < 0) { + return yang_error_wrap(1,"open swr(%d:%s)", error, av_make_error_string(err_buf, AV_ERROR_MAX_STRING_SIZE, error)); + } + + if (!(m_swrData = (uint8_t **)calloc(m_channel, sizeof(*m_swrData)))) { + return yang_error_wrap(1, "alloc swr buffer"); + } + + if ((error = av_samples_alloc(m_swrData, NULL, m_channel, m_frameSize, AV_SAMPLE_FMT_S16, 0)) < 0) { + return yang_error_wrap(1, "alloc swr buffer(%d:%s)", error, + av_make_error_string(err_buf, AV_ERROR_MAX_STRING_SIZE, error)); + } + m_contextt=1; + return Yang_Ok; + +} + +void YangSwResample::resample(const uint8_t *pin,uint32_t pinLen,uint8_t* pout,uint32_t *poutLen){ + int frame_size = swr_convert(swr_ctx, m_swrData, m_frameSize, &pin, pinLen); + *poutLen=frame_size; + memcpy(pout,*m_swrData,frame_size<<2); + +} diff --git a/libmetartc3/src/yangavutil/YangVideoEncoderMeta.cpp b/libmetartc3/src/yangavutil/YangVideoEncoderMeta.cpp new file mode 100755 index 00000000..d71ec1ac --- /dev/null +++ b/libmetartc3/src/yangavutil/YangVideoEncoderMeta.cpp @@ -0,0 +1,19 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#include +#include + + +YangVideoEncoderMeta::YangVideoEncoderMeta() { + + +} + +YangVideoEncoderMeta::~YangVideoEncoderMeta() { + +} + diff --git a/libmetartc3/src/yangavutil/YangYuvConvert.cpp b/libmetartc3/src/yangavutil/YangYuvConvert.cpp new file mode 100755 index 00000000..bd5803ae --- /dev/null +++ b/libmetartc3/src/yangavutil/YangYuvConvert.cpp @@ -0,0 +1,289 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include +#include "yangavutil/video/YangYuvConvert.h" + + + + +void YangYuvConvert::loadLib() { + + yang_YUY2ToNV12 = (int32_t (*)(const uint8_t *src_yuy2, int32_t src_stride_yuy2, + uint8_t *dst_y, int32_t dst_stride_y, uint8_t *dst_uv, + int32_t dst_stride_uv, int32_t width, int32_t height)) m_lib.loadFunction( + "YUY2ToNV12"); + yang_YUY2ToI420 =(int32_t (*)(const uint8_t *src_yuy2, int32_t src_stride_yuy2, + uint8_t *dst_y, int32_t dst_stride_y, uint8_t *dst_u, + int32_t dst_stride_u, uint8_t *dst_v, int32_t dst_stride_v, + int32_t width, int32_t height)) m_lib.loadFunction("YUY2ToI420"); + + yang_YUY2ToARGB=(int (*)(const uint8_t* src_yuy2, + int src_stride_yuy2, + uint8_t* dst_argb, + int dst_stride_argb, + int width, + int height)) m_lib.loadFunction("YUY2ToARGB"); + yang_I420ToNV12=(int32_t (*)(const uint8_t* src_y, int32_t src_stride_y, const uint8_t* src_u, + int32_t src_stride_u,const uint8_t* 
src_v,int32_t src_stride_v, uint8_t* dst_y, + int32_t dst_stride_y, uint8_t* dst_uv, int32_t dst_stride_uv, int32_t width, int32_t height)) m_lib.loadFunction("I420ToNV12"); + + yang_I420ToNV21=(int32_t (*)(const uint8_t* src_y, + int32_t src_stride_y, + const uint8_t* src_u, + int32_t src_stride_u, + const uint8_t* src_v, + int32_t src_stride_v, + uint8_t* dst_y, + int32_t dst_stride_y, + uint8_t* dst_vu, + int32_t dst_stride_vu, + int32_t width, + int32_t height)) m_lib.loadFunction("I420ToNV21"); + + yang_I420ToRGB24 = (int32_t (*)(const uint8_t *src_y, int32_t src_stride_y, + const uint8_t *src_u, int32_t src_stride_u, const uint8_t *src_v, + int32_t src_stride_v, uint8_t *dst_rgb24, int32_t dst_stride_rgb24, + int32_t width, int32_t height)) m_lib.loadFunction("I420ToRGB24"); + yang_NV12ToRGB24 = (int32_t (*)(const uint8_t *src_y, int32_t src_stride_y, + const uint8_t *src_uv, int32_t src_stride_uv, uint8_t *dst_rgb24, + int32_t dst_stride_rgb24, int32_t width, int32_t height)) m_lib.loadFunction( + "NV12ToRGB24"); + yang_NV21ToRGB24 = (int32_t (*)(const uint8_t *src_y, int32_t src_stride_y, + const uint8_t *src_vu, int32_t src_stride_vu, uint8_t *dst_rgb24, + int32_t dst_stride_rgb24, int32_t width, int32_t height)) m_lib.loadFunction( + "NV21ToRGB24"); + yang_ARGBToRGB24 = + (int32_t (*)(const uint8_t *src_argb, int32_t src_stride_argb, + uint8_t *dst_rgb24, int32_t dst_stride_rgb24, int32_t width, + int32_t height)) m_lib.loadFunction("ARGBToRGB24"); + yang_RGB24ToI420 = + (int32_t (*)(const uint8_t *src_rgb24, int32_t src_stride_rgb24, + uint8_t *dst_y, int32_t dst_stride_y, uint8_t *dst_u, + int32_t dst_stride_u, uint8_t *dst_v, int32_t dst_stride_v, + int32_t width, int32_t height)) m_lib.loadFunction("RGB24ToI420"); + + yang_RGBAToI420=(int (*)(const uint8_t* src_rgba, + int src_stride_rgba, uint8_t* dst_y, int dst_stride_y, + uint8_t* dst_u, int dst_stride_u, uint8_t* dst_v, + int dst_stride_v, int width, int height)) m_lib.loadFunction("RGBAToI420"); + yang_BGRAToI420=(int (*)(const uint8_t* src_bgra, int src_stride_bgra, + uint8_t* dst_y, int dst_stride_y, uint8_t* dst_u, int dst_stride_u, + uint8_t* dst_v, int dst_stride_v,int width, int height)) m_lib.loadFunction("BGRAToI420"); + yang_ARGBToI420=(int (*)(const uint8_t* src_argb, int src_stride_argb, + uint8_t* dst_y, int dst_stride_y, uint8_t* dst_u,int dst_stride_u, + uint8_t* dst_v, int dst_stride_v, int width,int height)) m_lib.loadFunction("ARGBToI420"); + yang_RGB24ToARGB=(int32_t (*)(const uint8_t* src_rgb24, + int32_t src_stride_rgb24, + uint8_t* dst_argb, + int32_t dst_stride_argb, + int32_t width, + int32_t height)) m_lib.loadFunction("RGB24ToARGB"); + yang_RAWToARGB=(int32_t (*)(const uint8_t* src_raw, + int32_t src_stride_raw, + uint8_t* dst_argb, + int32_t dst_stride_argb, + int32_t width, + int32_t height)) m_lib.loadFunction("RAWToARGB"); + yang_NV12Scale = (int32_t (*)(const uint8_t *src_y, int32_t src_stride_y, + const uint8_t *src_uv, int32_t src_stride_uv, int32_t src_width, + int32_t src_height, uint8_t *dst_y, int32_t dst_stride_y, uint8_t *dst_uv, + int32_t dst_stride_uv, int32_t dst_width, int32_t dst_height, + enum FilterMode filtering)) m_lib.loadFunction("NV12Scale"); + yang_I420Scale = (int32_t (*)(const uint8_t *src_y, int32_t src_stride_y, + const uint8_t *src_u, int32_t src_stride_u, const uint8_t *src_v, + int32_t src_stride_v, int32_t src_width, int32_t src_height, uint8_t *dst_y, + int32_t dst_stride_y, uint8_t *dst_u, int32_t dst_stride_u, uint8_t *dst_v, + int32_t dst_stride_v, int32_t 
dst_width, int32_t dst_height, + enum FilterMode filtering)) m_lib.loadFunction("I420Scale"); + yang_ScalePlane = + (void (*)(const uint8_t *src, int32_t src_stride, int32_t src_width, + int32_t src_height, uint8_t *dst, int32_t dst_stride, int32_t dst_width, + int32_t dst_height, enum FilterMode filtering)) m_lib.loadFunction( + "ScalePlane"); + yang_ARGBScale=(int32_t (*)(const uint8_t* src_argb, + int32_t src_stride_argb, + int32_t src_width, + int32_t src_height, + uint8_t* dst_argb, + int32_t dst_stride_argb, + int32_t dst_width, + int32_t dst_height, + enum FilterMode filtering)) m_lib.loadFunction( + "ARGBScale"); +} + +void YangYuvConvert::unloadLib() { //srt_cleanup + yang_YUY2ToNV12 = NULL; + yang_YUY2ToI420 = NULL; + yang_YUY2ToARGB = NULL; + yang_I420ToNV12 = NULL; + yang_I420ToNV21 = NULL; + yang_I420ToRGB24 = NULL; + yang_NV12ToRGB24 = NULL; + yang_NV21ToRGB24 = NULL; + yang_ARGBToRGB24 = NULL; + yang_RGB24ToARGB=NULL; + yang_RAWToARGB=NULL; + yang_RGB24ToI420 = NULL; + yang_BGRAToI420=NULL; + yang_ARGBToI420=NULL; + yang_NV12Scale = NULL; + yang_I420Scale = NULL; + yang_ScalePlane = NULL; + yang_RGBAToI420=NULL; + yang_ARGBScale=NULL; + +} +YangYuvConvert::YangYuvConvert() { + + m_lib.loadObject("libyuv"); + loadLib(); +} + +YangYuvConvert::~YangYuvConvert() { + unloadLib(); + m_lib.unloadObject(); +} + +int32_t YangYuvConvert::yuy2tonv12(uint8_t *src, uint8_t *dst, int32_t width, int32_t height) { + return yang_YUY2ToNV12((const uint8_t*) src, width << 1, dst, width, + dst + (width * height), width, width, height); + //return ret; +} +int32_t YangYuvConvert::yuy2toi420(uint8_t *src, uint8_t *dst, int32_t width, int32_t height) { + + return yang_YUY2ToI420((const uint8_t*) src, width << 1, dst, width, + dst + (width * height), (width >> 1), + dst + (width * height) + (int) (width * height / 4), (width >> 1), + width, height); + +} +int32_t YangYuvConvert::yuy2toargb(uint8_t *src, uint8_t *dst, int32_t width, int32_t height) { + return yang_YUY2ToARGB((const uint8_t*) src, width << 1, + dst,width * 4, + width, height); + + //return ret; +} +int32_t YangYuvConvert::i420tonv12(uint8_t* src,uint8_t *dst,int32_t width,int32_t height){ + return yang_I420ToNV12((const uint8_t*) src, width, + (const uint8_t*) (src + (width * height)), (width >> 1), + (const uint8_t*) (src + (width * height)+ (int) (width * height / 4)), (width >> 1), + dst, width ,dst+width*height,width, + width, height); +} +int32_t YangYuvConvert::i420tonv21(uint8_t* src,uint8_t *dst,int32_t width,int32_t height){ + return yang_I420ToNV21((const uint8_t*) src, width, + (const uint8_t*) (src + (width * height)), (width >> 1), + (const uint8_t*) (src + (width * height)+ (int) (width * height / 4)), (width >> 1), + dst, width ,dst+width*height,width, + width, height); +} +//int32_t YangYuvUtil::bgr2yuy2(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeiht){ +// return 0; +//} + +int32_t YangYuvConvert::I420torgb24(uint8_t *src, uint8_t *dst, int32_t width, + int32_t height) { + return yang_I420ToRGB24((const uint8_t*) src, width, + (const uint8_t*) (src + (width * height)), (width >> 1), + (const uint8_t*) (src + (width * height) + + (int) (width * height / 4)), (width >> 1), dst, width * 3, + width, height); +} +int32_t YangYuvConvert::nv12torgb24(uint8_t *src, uint8_t *dst, int32_t width, + int32_t height) { + return yang_NV12ToRGB24((const uint8_t*) src, width, + (const uint8_t*) (src + (width * height)), width, dst, + width * 3, width, height); +} +int32_t YangYuvConvert::nv21torgb24(uint8_t *src, uint8_t 
*dst, int32_t width, + int32_t height) { + return yang_NV21ToRGB24((const uint8_t*) src, width, + (const uint8_t*) (src + (width * height)), width, dst, + width * 3, width, height); +} +int32_t YangYuvConvert::argbtorgb24(uint8_t *src, uint8_t *dst, int32_t width, + int32_t height) { + return yang_ARGBToRGB24((const uint8_t*) src, width * 4, dst, + width * 3, width, height); +} +int32_t YangYuvConvert::rgb24toargb(uint8_t *src, uint8_t *dst, int32_t width, + int32_t height) { + + return yang_RGB24ToARGB((const uint8_t*) src, width * 3, dst, + width * 4, width, height); +} +int32_t YangYuvConvert::rgb24toI420(uint8_t *src_rgb24, uint8_t *dst, int32_t width, + int32_t height) { + return yang_RGB24ToI420((const uint8_t*) src_rgb24, width * 3, dst, + width, dst + (width * height), (width >> 1), + dst + (width * height) + (int) (width * height / 4), (width >> 1), + width, height); +} + +int32_t YangYuvConvert::rgbatoI420(uint8_t* src_rgba,uint8_t *dst,int32_t width,int32_t height) { + return yang_RGBAToI420((const uint8_t*) src_rgba, width * 4, dst, + width, dst + (width * height), (width >> 1), + dst + (width * height) + (int) (width * height / 4), (width >> 1), + width, height); +} +int32_t YangYuvConvert::bgratoI420(uint8_t* src_bgra,uint8_t *dst,int32_t width,int32_t height) { + //return yang_BGRAToI420((const uint8_t*) src_bgra, width * 4, dst, + return yang_ARGBToI420((const uint8_t*) src_bgra, width * 4, dst, + width, dst + (width * height), (width >> 1), + dst + (width * height) + (int) (width * height / 4), (width >> 1), + width, height); +} +int32_t YangYuvConvert::scaleNv12(uint8_t *src, uint8_t *dst, int32_t srcWidth, + int32_t srcHeight, int32_t dstWidth, int32_t dstHeight, int32_t mode) { + return yang_NV12Scale((const uint8_t*) src, srcWidth, + (const uint8_t*) src + srcWidth * srcHeight, srcWidth, srcWidth, + srcHeight, dst, dstWidth, dst + dstWidth * dstHeight, dstWidth, + dstWidth, dstHeight, (libyuv::FilterMode) mode); +} +int32_t YangYuvConvert::scaleI420(uint8_t *src, uint8_t *dst, int32_t srcWidth, + int32_t srcHeight, int32_t dstWidth, int32_t dstHeight, int32_t mode) { + int32_t srcLen=srcWidth*srcHeight; + int32_t dstLen=dstWidth*dstHeight; + return yang_I420Scale((const uint8_t*) src, srcWidth, + (const uint8_t*) (src + srcLen), srcWidth/2, + (const uint8_t*) (src + srcLen*5/4),srcWidth/2, + srcWidth, srcHeight, + dst, dstWidth, + dst + dstLen, dstWidth/2, + dst + dstLen*5/4, dstWidth/2, + dstWidth, dstHeight, (libyuv::FilterMode) mode); +} + +int32_t YangYuvConvert::scaleYuy2(uint8_t *src, uint8_t *dst, int32_t srcWidth, + int32_t srcHeight, int32_t dstWidth, int32_t dstHeight, int32_t mode) { + yang_ScalePlane((const uint8_t*) src, srcWidth << 1, srcWidth, srcHeight, + dst, dstWidth << 1, dstWidth, dstHeight, (libyuv::FilterMode) mode); + return 0; +} + +int32_t YangYuvConvert::scaleArgb(uint8_t* src,uint8_t *dst,int32_t srcWidth,int32_t srcHeight,int32_t dstWidth,int32_t dstHeight,int32_t mode){ + +return yang_ARGBScale((const uint8_t*)src,srcWidth*4, +srcWidth,srcHeight, +dst,dstWidth*4, +dstWidth,dstHeight, +(libyuv::FilterMode)mode +); +} + +/** + int32_t scaleRgb(uint8_t* src,int32_t srcWidth,int32_t srcHeiht,uint8_t *dst,int32_t dstWidth,int32_t dstHeight,int32_t mode=2){ + libyuv::Scale() + return yang_ScaleR((const uint8_t*)src,srcWidth<<1, + srcWidth,srcHeight, + dst,dstWidth<<1, + dstWidth,dstHeight, + (libyuv::FilterMode)mode + ); + } +**/ + diff --git a/libmetartc3/src/yangcapture/YangAudioDeviceQuery.cpp 
b/libmetartc3/src/yangcapture/YangAudioDeviceQuery.cpp
new file mode 100755
index 00000000..ee3adca9
--- /dev/null
+++ b/libmetartc3/src/yangcapture/YangAudioDeviceQuery.cpp
@@ -0,0 +1,111 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef _WIN32
+#include <alsa/asoundlib.h>
+#endif
+#include <yangaudiodev/YangAudioDeviceQuery.h>
+#include "yangutil/sys/YangLog.h"
+YangAudioDeviceQuery::YangAudioDeviceQuery() {
+
+
+}
+
+YangAudioDeviceQuery::~YangAudioDeviceQuery() {
+
+}
+
+
+vector<YangAudioDeivce>* YangAudioDeviceQuery::getCaptureDeviceList(){
+	#ifndef _WIN32
+	getDeviceList((int)SND_PCM_STREAM_CAPTURE,&m_captureDeviceList);
+	#endif
+	return &m_captureDeviceList;
+}
+vector<YangAudioDeivce>* YangAudioDeviceQuery::getPlayDeviceList(){
+	#ifndef _WIN32
+	getDeviceList((int)SND_PCM_STREAM_PLAYBACK,&m_playDeviceList);
+	#endif
+	return &m_playDeviceList;
+}
+#ifndef _WIN32
+void YangAudioDeviceQuery::getDeviceList(int32_t pstream,vector<YangAudioDeivce>* plist) {
+	snd_pcm_stream_t stream=(snd_pcm_stream_t)pstream;
+	snd_ctl_t *handle;
+	int32_t card, err, dev, idx;
+	snd_ctl_card_info_t *info;
+	snd_pcm_info_t *pcminfo;
+	snd_ctl_card_info_alloca(&info);
+	snd_pcm_info_alloca(&pcminfo);
+	card = -1;
+	int32_t aindex=0;
+	while (1) {
+		if (snd_card_next(&card) < 0 || card < 0) {
+			// only an error when no card was enumerated at all; otherwise this is the normal end of iteration
+			if (aindex == 0) yang_error("no soundcards found...");
+			return;
+		}
+
+		//printf("**** List of %s Hardware Devices ****\n",snd_pcm_stream_name(stream));
+		char name[32];
+		sprintf(name, "hw:%d", card);
+		if ((err = snd_ctl_open(&handle, name, 0)) < 0) {
+			yang_error("control open (%i): %s", card, snd_strerror(err));
+			return;
+		}
+		if ((err = snd_ctl_card_info(handle, info)) < 0) {
+			yang_error("control hardware info (%i): %s", card,
+					snd_strerror(err));
+			snd_ctl_close(handle);
+			return;
+		}
+
+		dev = -1;
+		while (1) {
+			uint32_t count;
+			if (snd_ctl_pcm_next_device(handle, &dev) < 0)
+				yang_error("\nsnd_ctl_pcm_next_device");
+			if (dev < 0)
+				break;
+			snd_pcm_info_set_device(pcminfo, dev);
+			snd_pcm_info_set_subdevice(pcminfo, 0);
+			snd_pcm_info_set_stream(pcminfo, stream);
+			if ((err = snd_ctl_pcm_info(handle, pcminfo)) < 0) {
+				if (err != -ENOENT)
+					yang_error("\ncontrol digital audio info (%i): %s", card,
+							snd_strerror(err));
+				continue;
+			}
+			//printf("card %i: %s [%s], device %i: %s [%s]\n", card,
+			//		snd_ctl_card_info_get_id(info),
+			//		snd_ctl_card_info_get_name(info), dev,
+			//		snd_pcm_info_get_id(pcminfo),
+			//		snd_pcm_info_get_name(pcminfo));
+			count = snd_pcm_info_get_subdevices_count(pcminfo);
+			//printf("  Subdevices: %i/%i\n",snd_pcm_info_get_subdevices_avail(pcminfo), count);
+			for (idx = 0; idx < (int) count; idx++) {
+				snd_pcm_info_set_subdevice(pcminfo, idx);
+				if ((err = snd_ctl_pcm_info(handle, pcminfo)) < 0) {
+					yang_error("control digital audio playback info (%i): %s",
+							card, snd_strerror(err));
+				} else {
+					plist->push_back(YangAudioDeivce());
+					char sn[64]={0};
+					sprintf(sn,"%s [%s]",snd_ctl_card_info_get_id(info),snd_ctl_card_info_get_name(info));
+					plist->back().name=string(sn);
+					char sn1[64]={0};
+					sprintf(sn1,"%s [%s]",snd_pcm_info_get_id(pcminfo),snd_pcm_info_get_name(pcminfo));
+					plist->back().deviceName=string(sn1);
+					plist->back().subName=string(snd_pcm_info_get_subdevice_name(pcminfo));
+					plist->back().aIndex=aindex;
+					plist->back().aSubIndex=dev;
+					plist->back().aIdx=idx;
+					//printf("  Subdevice #%i: %s\n", idx,snd_pcm_info_get_subdevice_name(pcminfo));
+				}
+			}
+		}
+		snd_ctl_close(handle);
+		aindex++;
+	}
+
+}
+#endif
diff --git a/libmetartc3/src/yangcapture/YangCaptureFactory.cpp b/libmetartc3/src/yangcapture/YangCaptureFactory.cpp
new file mode 100755
index 00000000..b01bdf86
---
/dev/null +++ b/libmetartc3/src/yangcapture/YangCaptureFactory.cpp @@ -0,0 +1,65 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "yangcapture/YangCaptureFactory.h" +#ifndef _WIN32 +#include +#include +#include +#include "YangVideoCaptureImpl.h" + +#else +#include +#include +#include +#include "win/YangWinVideoCapture.h" +#endif +#include "YangScreenCaptureImpl.h" + +YangCaptureFactory::YangCaptureFactory() { + + +} + +YangCaptureFactory::~YangCaptureFactory() { + +} + +YangAudioCapture *YangCaptureFactory::createAudioCapture(YangContext *pcontext){ +#ifndef _WIN32 + return new YangAlsaDeviceHandle(pcontext);//new YangAlsaHandle(pcontext);//YangAudioCaptureImpl(pcontext); +#else + // return new YangWinAudioCapture(pcontext); + return new YangWinAudioApiDevice(pcontext,0,true); +#endif + +} +YangAudioCapture *YangCaptureFactory::createRecordAudioCapture(YangContext *pcontext){ +#ifndef _WIN32 + return new YangAudioCaptureImpl(pcontext);//new YangAlsaHandle(pcontext);//YangAudioCaptureImpl(pcontext); +#else + return new YangWinRecordAudioCapture(pcontext); + //return new YangWinAudioApiDevice(pcontext,1); +#endif +} + + +YangMultiVideoCapture *YangCaptureFactory::createVideoCapture(YangVideoInfo *pcontext){ +#ifndef _WIN32 + return new YangVideoCaptureImpl(pcontext); +#else + return new YangWinVideoCapture(pcontext); +#endif +} +YangMultiVideoCapture *YangCaptureFactory::createRecordVideoCapture(YangVideoInfo *pcontext){ +#ifndef _WIN32 + return new YangVideoCaptureImpl(pcontext); +#else + return new YangWinVideoCapture(pcontext); +#endif +} + +YangScreenCapture *YangCaptureFactory::createScreenCapture(YangContext *pcontext){ + return new YangScreenCaptureImpl(pcontext); +} diff --git a/libmetartc3/src/yangcapture/YangDXGIManager.cpp b/libmetartc3/src/yangcapture/YangDXGIManager.cpp new file mode 100755 index 00000000..7d2d82c9 --- /dev/null +++ b/libmetartc3/src/yangcapture/YangDXGIManager.cpp @@ -0,0 +1,732 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifdef _MSC_VER + +#include "YangDXGIManager.h" +#include +#include +#ifdef _MSC_VER +#pragma comment (lib,"D3D11.lib") +#pragma comment (lib,"dxgi.lib") +#pragma comment (lib,"gdiplus.lib") +#endif +using namespace Gdiplus; + +YangDXGIPointerInfo::YangDXGIPointerInfo(BYTE* pPointerShape, UINT uiPointerShapeBufSize, DXGI_OUTDUPL_FRAME_INFO fi, DXGI_OUTDUPL_POINTER_SHAPE_INFO psi) + : m_pPointerShape(pPointerShape), + m_uiPointerShapeBufSize(uiPointerShapeBufSize), + m_FI(fi), + m_PSI(psi) +{ + +} + +YangDXGIPointerInfo::~YangDXGIPointerInfo() +{ + if(m_pPointerShape) + { + delete [] m_pPointerShape; + } +} + +BYTE* YangDXGIPointerInfo::GetBuffer() +{ + return m_pPointerShape; +} + +UINT YangDXGIPointerInfo::GetBufferSize() +{ + return m_uiPointerShapeBufSize; +} + +DXGI_OUTDUPL_FRAME_INFO& YangDXGIPointerInfo::GetFrameInfo() +{ + return m_FI; +} + +DXGI_OUTDUPL_POINTER_SHAPE_INFO& YangDXGIPointerInfo::GetShapeInfo() +{ + return m_PSI; +} + +YangDXGIOutputDuplication::YangDXGIOutputDuplication(IDXGIAdapter1* pAdapter, + ID3D11Device* pD3DDevice, + ID3D11DeviceContext* pD3DDeviceContext, + IDXGIOutput1* pDXGIOutput1, + IDXGIOutputDuplication* pDXGIOutputDuplication) + : m_Adapter(pAdapter), + m_D3DDevice(pD3DDevice), + m_D3DDeviceContext(pD3DDeviceContext), + m_DXGIOutput1(pDXGIOutput1), + m_DXGIOutputDuplication(pDXGIOutputDuplication) +{ +} + +HRESULT YangDXGIOutputDuplication::GetDesc(DXGI_OUTPUT_DESC& desc) +{ + m_DXGIOutput1->GetDesc(&desc); + return S_OK; +} + +HRESULT 
YangDXGIOutputDuplication::AcquireNextFrame(IDXGISurface1** pDXGISurface, YangDXGIPointerInfo*& pDXGIPointer) +{ + DXGI_OUTDUPL_FRAME_INFO fi; + CComPtr spDXGIResource; + HRESULT hr = m_DXGIOutputDuplication->AcquireNextFrame(33, &fi, &spDXGIResource); + if(FAILED(hr)) + { + // yang_error("m_DXGIOutputDuplication->AcquireNextFrame failed with hr=0x%08x", hr); + return hr; + } + + CComQIPtr spTextureResource = spDXGIResource; + + D3D11_TEXTURE2D_DESC desc; + spTextureResource->GetDesc(&desc); + + D3D11_TEXTURE2D_DESC texDesc; + ZeroMemory( &texDesc, sizeof(texDesc) ); + texDesc.Width = desc.Width; + texDesc.Height = desc.Height; + texDesc.MipLevels = 1; + texDesc.ArraySize = 1; + texDesc.SampleDesc.Count = 1; + texDesc.SampleDesc.Quality = 0; + texDesc.Usage = D3D11_USAGE_STAGING; + texDesc.Format = desc.Format; + texDesc.BindFlags = 0; + texDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ; + texDesc.MiscFlags = 0; + + CComPtr spD3D11Texture2D = NULL; + hr = m_D3DDevice->CreateTexture2D(&texDesc, NULL, &spD3D11Texture2D); + if(FAILED(hr)) + return hr; + + m_D3DDeviceContext->CopyResource(spD3D11Texture2D, spTextureResource); + + CComQIPtr spDXGISurface = spD3D11Texture2D; + + *pDXGISurface = spDXGISurface.Detach(); + + // Updating mouse pointer, if visible + if(fi.PointerPosition.Visible) + { + BYTE* pPointerShape = new BYTE[fi.PointerShapeBufferSize]; + + DXGI_OUTDUPL_POINTER_SHAPE_INFO psi = {}; + UINT uiPointerShapeBufSize = fi.PointerShapeBufferSize; + hr = m_DXGIOutputDuplication->GetFramePointerShape(uiPointerShapeBufSize, pPointerShape, &uiPointerShapeBufSize, &psi); + if(hr == DXGI_ERROR_MORE_DATA) + { + pPointerShape = new BYTE[uiPointerShapeBufSize]; + + hr = m_DXGIOutputDuplication->GetFramePointerShape(uiPointerShapeBufSize, pPointerShape, &uiPointerShapeBufSize, &psi); + } + + if(hr == S_OK) + { + // yang_trace("\nPointerPosition Visible=%d x=%d y=%d w=%d h=%d type=%d\n", fi.PointerPosition.Visible, fi.PointerPosition.Position.x, fi.PointerPosition.Position.y, psi.Width, psi.Height, psi.Type); + + if((psi.Type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME || + psi.Type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR || + psi.Type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR) && + psi.Width <= 128 && psi.Height <= 128) + { + // Here we can obtain pointer shape + if(pDXGIPointer) + { + delete pDXGIPointer; + } + + pDXGIPointer = new YangDXGIPointerInfo(pPointerShape, uiPointerShapeBufSize, fi, psi); + + pPointerShape = NULL; + } + + DXGI_OUTPUT_DESC outDesc; + GetDesc(outDesc); + + if(pDXGIPointer) + { + pDXGIPointer->GetFrameInfo().PointerPosition.Position.x = outDesc.DesktopCoordinates.left + fi.PointerPosition.Position.x; + pDXGIPointer->GetFrameInfo().PointerPosition.Position.y = outDesc.DesktopCoordinates.top + fi.PointerPosition.Position.y; + } + } + + if(pPointerShape) + { + yang_deleteA(pPointerShape); + } + } + + return hr; +} + +HRESULT YangDXGIOutputDuplication::ReleaseFrame() +{ + m_DXGIOutputDuplication->ReleaseFrame(); + return S_OK; +} + +bool YangDXGIOutputDuplication::IsPrimary() +{ + DXGI_OUTPUT_DESC outdesc; + m_DXGIOutput1->GetDesc(&outdesc); + + MONITORINFO mi; + mi.cbSize = sizeof(MONITORINFO); + GetMonitorInfo(outdesc.Monitor, &mi); + if(mi.dwFlags & MONITORINFOF_PRIMARY) + { + return true; + } + return false; +} + +YangDXGIManager::YangDXGIManager() +{ + m_CaptureSource = CSUndefined; + SetRect(&m_rcCurrentOutput, 0, 0, 0, 0); + m_pBuf = NULL; + m_pDXGIPointer = NULL; + m_bInitialized = false; + m_isDrawmouse=false; +} + +YangDXGIManager::~YangDXGIManager() +{ + 
GdiplusShutdown(m_gdiplusToken); + + if(m_pBuf) + { + delete [] m_pBuf; + m_pBuf = NULL; + } + + if(m_pDXGIPointer) + { + delete m_pDXGIPointer; + m_pDXGIPointer = NULL; + } +} + +HRESULT YangDXGIManager::SetCaptureSource(YangCaptureSource cs) +{ + m_CaptureSource = cs; + return S_OK; +} + +YangCaptureSource YangDXGIManager::GetCaptureSource() +{ + return m_CaptureSource; +} + +HRESULT YangDXGIManager::Init() +{ + if(m_bInitialized) + return S_OK; + + GdiplusStartupInput gdiplusStartupInput; + GdiplusStartup(&m_gdiplusToken, &gdiplusStartupInput, NULL); + + HRESULT hr = CreateDXGIFactory1(__uuidof(IDXGIFactory1), (void**)(&m_spDXGIFactory1) ); + if( FAILED(hr) ) + { + yang_error("Failed to CreateDXGIFactory1 hr=%08x", hr); + return hr; + } + + // Getting all adapters + vector> vAdapters; + + CComPtr spAdapter; + for(int i=0; m_spDXGIFactory1->EnumAdapters1(i, &spAdapter) != DXGI_ERROR_NOT_FOUND; i++) + { + vAdapters.push_back(spAdapter); + spAdapter.Release(); + } + + // Iterating over all adapters to get all outputs + for(vector>::iterator AdapterIter = vAdapters.begin(); + AdapterIter != vAdapters.end(); + AdapterIter++) + { + vector> vOutputs; + + CComPtr spDXGIOutput; + for(int i=0; (*AdapterIter)->EnumOutputs(i, &spDXGIOutput) != DXGI_ERROR_NOT_FOUND; i++) + { + DXGI_OUTPUT_DESC outputDesc; + spDXGIOutput->GetDesc(&outputDesc); + + /** yang_trace("\nDisplay output found. DeviceName=%ls AttachedToDesktop=%d Rotation=%d DesktopCoordinates={(%d,%d),(%d,%d)}", + outputDesc.DeviceName, + outputDesc.AttachedToDesktop, + outputDesc.Rotation, + outputDesc.DesktopCoordinates.left, + outputDesc.DesktopCoordinates.top, + outputDesc.DesktopCoordinates.right, + outputDesc.DesktopCoordinates.bottom); + **/ + if(outputDesc.AttachedToDesktop) + { + vOutputs.push_back(spDXGIOutput); + } + + spDXGIOutput.Release(); + } + + if(vOutputs.size() == 0) + continue; + + // Creating device for each adapter that has the output + CComPtr spD3D11Device; + CComPtr spD3D11DeviceContext; + D3D_FEATURE_LEVEL fl = D3D_FEATURE_LEVEL_9_1; + hr = D3D11CreateDevice((*AdapterIter), D3D_DRIVER_TYPE_UNKNOWN, NULL, 0, NULL, 0, D3D11_SDK_VERSION, &spD3D11Device, &fl, &spD3D11DeviceContext); + if( FAILED(hr) ) + { + yang_error("Failed to create D3D11CreateDevice hr=%08x", hr); + return hr; + } + + for(std::vector>::iterator OutputIter = vOutputs.begin(); + OutputIter != vOutputs.end(); + OutputIter++) + { + CComQIPtr spDXGIOutput1 = *OutputIter; + if (!spDXGIOutput1) + { + yang_error("spDXGIOutput1 is NULL"); + continue; + } + + CComQIPtr spDXGIDevice = spD3D11Device; + if (!spDXGIDevice) + { + yang_error("spDXGIDevice is NULL"); + continue; + } + + CComPtr spDXGIOutputDuplication; + hr = spDXGIOutput1->DuplicateOutput(spDXGIDevice, &spDXGIOutputDuplication); + if (FAILED(hr)) + { + yang_error("Failed to duplicate output hr=%08x", hr); + continue; + } + + m_vOutputs.push_back( + YangDXGIOutputDuplication((*AdapterIter), + spD3D11Device, + spD3D11DeviceContext, + spDXGIOutput1, + spDXGIOutputDuplication)); + } + } + + hr = m_spWICFactory.CoCreateInstance(CLSID_WICImagingFactory); + if( FAILED(hr) ) + { + yang_error("Failed to create WICImagingFactory hr=%08x", hr); + return hr; + } + + m_bInitialized = true; + + return S_OK; +} + +HRESULT YangDXGIManager::GetOutputRect(RECT& rc) +{ + // Nulling rc just in case... 
+ SetRect(&rc, 0, 0, 0, 0); + + HRESULT hr = Init(); + if(hr != S_OK) + return hr; + + vector vOutputs = GetOutputDuplication(); + + RECT rcShare; + SetRect(&rcShare, 0, 0, 0, 0); + + for(vector::iterator iter = vOutputs.begin(); + iter != vOutputs.end(); + iter++) + { + YangDXGIOutputDuplication& out = *iter; + + DXGI_OUTPUT_DESC outDesc; + out.GetDesc(outDesc); + RECT rcOutCoords = outDesc.DesktopCoordinates; + + UnionRect(&rcShare, &rcShare, &rcOutCoords); + } + + CopyRect(&rc, &rcShare); + + return S_OK; +} + +HRESULT YangDXGIManager::GetOutputBits(BYTE* pBits, RECT& rcDest) +{ + HRESULT hr = S_OK; + + DWORD dwDestWidth = rcDest.right - rcDest.left; + DWORD dwDestHeight = rcDest.bottom - rcDest.top; + + RECT rcOutput; + hr = GetOutputRect(rcOutput); + if( FAILED(hr) ) + return hr; + + DWORD dwOutputWidth = rcOutput.right - rcOutput.left; + DWORD dwOutputHeight = rcOutput.bottom - rcOutput.top; + + BYTE* pBuf = NULL; + if(rcOutput.right > (LONG)dwDestWidth || rcOutput.bottom > (LONG)dwDestHeight) + { + // Output is larger than pBits dimensions + if(!m_pBuf || !EqualRect(&m_rcCurrentOutput, &rcOutput)) + { + DWORD dwBufSize = dwOutputWidth*dwOutputHeight*4; + + if(m_pBuf) + { + delete [] m_pBuf; + m_pBuf = NULL; + } + + m_pBuf = new BYTE[dwBufSize]; + + CopyRect(&m_rcCurrentOutput, &rcOutput); + } + + pBuf = m_pBuf; + } + else + { + // Output is smaller than pBits dimensions + pBuf = pBits; + dwOutputWidth = dwDestWidth; + dwOutputHeight = dwDestHeight; + } + + vector vOutputs = GetOutputDuplication(); + + for(vector::iterator iter = vOutputs.begin(); + iter != vOutputs.end(); + iter++) + { + YangDXGIOutputDuplication& out = *iter; + + DXGI_OUTPUT_DESC outDesc; + out.GetDesc(outDesc); + RECT rcOutCoords = outDesc.DesktopCoordinates; + + CComPtr spDXGISurface1; + hr = out.AcquireNextFrame(&spDXGISurface1, m_pDXGIPointer); + if( FAILED(hr) ) + break; + + DXGI_MAPPED_RECT map; + spDXGISurface1->Map(&map, DXGI_MAP_READ); + + RECT rcDesktop = outDesc.DesktopCoordinates; + DWORD dwWidth = rcDesktop.right - rcDesktop.left; + DWORD dwHeight = rcDesktop.bottom - rcDesktop.top; + + OffsetRect(&rcDesktop, -rcOutput.left, -rcOutput.top); + + DWORD dwMapPitchPixels = map.Pitch/4; + + switch(outDesc.Rotation) + { + case DXGI_MODE_ROTATION_IDENTITY: + { + // Just copying + DWORD dwStripe = dwWidth*4; + for(unsigned int i=0; iUnmap(); + + out.ReleaseFrame(); + } + + if(FAILED(hr)) + return hr; + + // Now pBits have the desktop. Let's paint mouse pointer! + if(m_isDrawmouse){ + if(pBuf != pBits) + { + DrawMousePointer(pBuf, rcOutput, rcOutput); + } + else + { + DrawMousePointer(pBuf, rcOutput, rcDest); + } + } + // We have the pBuf filled with current desktop/monitor image. 
+ if(pBuf != pBits) + { + // pBuf contains the image that should be resized + CComPtr spBitmap = NULL; + hr = m_spWICFactory->CreateBitmapFromMemory(dwOutputWidth, dwOutputHeight, GUID_WICPixelFormat32bppBGRA, dwOutputWidth*4, dwOutputWidth*dwOutputHeight*4, (BYTE*)pBuf, &spBitmap); + if( FAILED(hr) ) + return hr; + + CComPtr spBitmapScaler = NULL; + hr = m_spWICFactory->CreateBitmapScaler(&spBitmapScaler); + if( FAILED(hr) ) + return hr; + + dwOutputWidth = rcOutput.right - rcOutput.left; + dwOutputHeight = rcOutput.bottom - rcOutput.top; + + double aspect = (double)dwOutputWidth/(double)dwOutputHeight; + + DWORD scaledWidth = dwDestWidth; + DWORD scaledHeight = dwDestHeight; + + if(aspect > 1) + { + scaledWidth = dwDestWidth; + scaledHeight = (DWORD)(dwDestWidth/aspect); + } + else + { + scaledWidth = (DWORD)(aspect*dwDestHeight); + scaledHeight = dwDestHeight; + } + yang_trace("\nscaledWidth==%ld,scaledHeight=%ld",scaledWidth,scaledHeight); + spBitmapScaler->Initialize( + spBitmap, scaledWidth, scaledHeight, WICBitmapInterpolationModeNearestNeighbor); + + spBitmapScaler->CopyPixels(NULL, scaledWidth*4, dwDestWidth*dwDestHeight*4, pBits); + } + return hr; +} + +void YangDXGIManager::DrawMousePointer(BYTE* pDesktopBits, RECT rcDesktop, RECT rcDest) +{ + if(!m_pDXGIPointer) + return; + +// DWORD dwDesktopWidth = rcDesktop.right - rcDesktop.left; +// DWORD dwDesktopHeight = rcDesktop.bottom - rcDesktop.top; + + DWORD dwDestWidth = rcDest.right - rcDest.left; + DWORD dwDestHeight = rcDest.bottom - rcDest.top; + + int PtrX = m_pDXGIPointer->GetFrameInfo().PointerPosition.Position.x - rcDesktop.left; + int PtrY = m_pDXGIPointer->GetFrameInfo().PointerPosition.Position.y - rcDesktop.top; + switch(m_pDXGIPointer->GetShapeInfo().Type) + { + case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR: + { + Bitmap bmpBitmap(dwDestWidth, dwDestHeight, dwDestWidth*4, PixelFormat32bppARGB, pDesktopBits); + Graphics* graphics=Graphics::FromImage(&bmpBitmap); + Bitmap bmpPointer(m_pDXGIPointer->GetShapeInfo().Width, m_pDXGIPointer->GetShapeInfo().Height, m_pDXGIPointer->GetShapeInfo().Width*4, PixelFormat32bppARGB, m_pDXGIPointer->GetBuffer()); + + graphics->DrawImage(&bmpPointer, PtrX, PtrY); + yang_delete(graphics); + } + break; + case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME: + case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR: + { + RECT rcPointer; + + if(m_pDXGIPointer->GetShapeInfo().Type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME) + { + SetRect(&rcPointer, PtrX, PtrY, PtrX + m_pDXGIPointer->GetShapeInfo().Width, PtrY + m_pDXGIPointer->GetShapeInfo().Height/2); + } + else + { + SetRect(&rcPointer, PtrX, PtrY, PtrX + m_pDXGIPointer->GetShapeInfo().Width, PtrY + m_pDXGIPointer->GetShapeInfo().Height); + } + + RECT rcDesktopPointer; + IntersectRect(&rcDesktopPointer, &rcPointer, &rcDesktop); + + CopyRect(&rcPointer, &rcDesktopPointer); + OffsetRect(&rcPointer, -PtrX, -PtrY); + + BYTE* pShapeBuffer = m_pDXGIPointer->GetBuffer(); + UINT* pDesktopBits32 = (UINT*)pDesktopBits; + + if(m_pDXGIPointer->GetShapeInfo().Type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME) + { + for(int j = rcPointer.top, jDP = rcDesktopPointer.top; + j> (i % 8); + BYTE AndMask = pShapeBuffer[i/8 + (m_pDXGIPointer->GetShapeInfo().Pitch)*j] & Mask; + BYTE XorMask = pShapeBuffer[i/8 + (m_pDXGIPointer->GetShapeInfo().Pitch)*(j + m_pDXGIPointer->GetShapeInfo().Height / 2)] & Mask; + + UINT AndMask32 = (AndMask) ? 0xFFFFFFFF : 0xFF000000; + UINT XorMask32 = (XorMask) ? 
0x00FFFFFF : 0x00000000; + + pDesktopBits32[jDP*dwDestWidth + iDP] = (pDesktopBits32[jDP*dwDestWidth + iDP] & AndMask32) ^ XorMask32; + } + } + } + else + { + UINT* pShapeBuffer32 = (UINT*)pShapeBuffer; + for(int j = rcPointer.top, jDP = rcDesktopPointer.top; + jGetShapeInfo().Pitch/4)*j]; + if (MaskVal) + { + // Mask was 0xFF + pDesktopBits32[jDP*dwDestWidth + iDP] = (pDesktopBits32[jDP*dwDestWidth + iDP] ^ pShapeBuffer32[i + (m_pDXGIPointer->GetShapeInfo().Pitch/4)*j]) | 0xFF000000; + } + else + { + // Mask was 0x00 - replacing pixel + pDesktopBits32[jDP*dwDestWidth + iDP] = pShapeBuffer32[i + (m_pDXGIPointer->GetShapeInfo().Pitch/4)*j]; + } + } + } + } + } + break; + } +} + +vector YangDXGIManager::GetOutputDuplication() +{ + vector outputs; + switch(m_CaptureSource) + { + case CSMonitor1: + { + // Return the one with IsPrimary + for(vector::iterator iter = m_vOutputs.begin(); + iter != m_vOutputs.end(); + iter++) + { + YangDXGIOutputDuplication& out = *iter; + if(out.IsPrimary()) + { + outputs.push_back(out); + break; + } + } + } + break; + + case CSMonitor2: + { + // Return the first with !IsPrimary + for(vector::iterator iter = m_vOutputs.begin(); + iter != m_vOutputs.end(); + iter++) + { + YangDXGIOutputDuplication& out = *iter; + if(!out.IsPrimary()) + { + outputs.push_back(out); + break; + } + } + } + break; + + case CSDesktop: + { + // Return all outputs + for(vector::iterator iter = m_vOutputs.begin(); + iter != m_vOutputs.end(); + iter++) + { + YangDXGIOutputDuplication& out = *iter; + outputs.push_back(out); + } + } + break; + } + return outputs; +} + +BOOL CALLBACK MonitorEnumProc(HMONITOR hMonitor, HDC hdcMonitor, LPRECT lprcMonitor, LPARAM dwData) +{ + int *Count = (int*)dwData; + (*Count)++; + return TRUE; +} + +int YangDXGIManager::GetMonitorCount() +{ + int Count = 0; + if (EnumDisplayMonitors(NULL, NULL, MonitorEnumProc, (LPARAM)&Count)) + return Count; + return -1; +} +#endif diff --git a/libmetartc3/src/yangcapture/YangDXGIManager.h b/libmetartc3/src/yangcapture/YangDXGIManager.h new file mode 100755 index 00000000..2f56b6fc --- /dev/null +++ b/libmetartc3/src/yangcapture/YangDXGIManager.h @@ -0,0 +1,95 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifdef _MSC_VER + +#include +#include +#include +#include +#include +#include +#include + +using namespace std; + +class YangDXGIPointerInfo; + +enum YangCaptureSource +{ + CSUndefined, + CSMonitor1, + CSMonitor2, + CSDesktop +}; + +class YangDXGIPointerInfo +{ +public: + YangDXGIPointerInfo(BYTE* pPointerShape, UINT uiPointerShapeBufSize, DXGI_OUTDUPL_FRAME_INFO fi, DXGI_OUTDUPL_POINTER_SHAPE_INFO psi); + ~YangDXGIPointerInfo(); + BYTE* GetBuffer(); + UINT GetBufferSize(); + DXGI_OUTDUPL_FRAME_INFO& GetFrameInfo(); + DXGI_OUTDUPL_POINTER_SHAPE_INFO& GetShapeInfo(); + +private: + BYTE* m_pPointerShape; + UINT m_uiPointerShapeBufSize; + DXGI_OUTDUPL_POINTER_SHAPE_INFO m_PSI; + DXGI_OUTDUPL_FRAME_INFO m_FI; +}; + +class YangDXGIOutputDuplication +{ +public: + YangDXGIOutputDuplication(IDXGIAdapter1* pAdapter, + ID3D11Device* pD3DDevice, + ID3D11DeviceContext* pD3DDeviceContext, + IDXGIOutput1* pDXGIOutput1, + IDXGIOutputDuplication* pDXGIOutputDuplication); + + HRESULT GetDesc(DXGI_OUTPUT_DESC& desc); + HRESULT AcquireNextFrame(IDXGISurface1** pD3D11Texture2D, YangDXGIPointerInfo*& pDXGIPointer); + HRESULT ReleaseFrame(); + + bool IsPrimary(); + + +private: + CComPtr m_Adapter; + CComPtr m_D3DDevice; + CComPtr m_D3DDeviceContext; + CComPtr m_DXGIOutput1; + CComPtr m_DXGIOutputDuplication; +}; + +class YangDXGIManager +{ 
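+	// Captures the Windows desktop via DXGI output duplication: enumerates all
+	// adapters and their attached outputs, duplicates each output, stitches the
+	// acquired frames into one BGRA desktop-sized buffer (scaling through WIC
+	// when the destination is smaller), and optionally composites the mouse
+	// pointer when m_isDrawmouse is set.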
+public:
+	YangDXGIManager();
+	~YangDXGIManager();
+	HRESULT SetCaptureSource(YangCaptureSource type);
+	YangCaptureSource GetCaptureSource();
+
+	HRESULT GetOutputRect(RECT& rc);
+	HRESULT GetOutputBits(BYTE* pBits, RECT& rcDest);
+	bool m_isDrawmouse;
+private:
+	HRESULT Init();
+	int GetMonitorCount();
+	vector<YangDXGIOutputDuplication> GetOutputDuplication();
+	void DrawMousePointer(BYTE* pDesktopBits, RECT rcDesktop, RECT rcDest);
+private:
+	CComPtr<IDXGIFactory1> m_spDXGIFactory1;
+	vector<YangDXGIOutputDuplication> m_vOutputs;
+	bool m_bInitialized;
+	YangCaptureSource m_CaptureSource;
+	RECT m_rcCurrentOutput;
+	BYTE* m_pBuf;
+
+	CComPtr<IWICImagingFactory> m_spWICFactory;
+	ULONG_PTR m_gdiplusToken;
+	YangDXGIPointerInfo* m_pDXGIPointer;
+};
+#endif
diff --git a/libmetartc3/src/yangcapture/YangScreenCaptureImpl.cpp b/libmetartc3/src/yangcapture/YangScreenCaptureImpl.cpp
new file mode 100755
index 00000000..2567d5cd
--- /dev/null
+++ b/libmetartc3/src/yangcapture/YangScreenCaptureImpl.cpp
@@ -0,0 +1,225 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "YangScreenCaptureImpl.h"
+#include
+#include
+#include "yangutil/sys/YangTime.h"
+#include "yangutil/sys/YangLog.h"
+#include
+#include
+
+#include
+#include
+#include
+
+#ifndef _WIN32
+#include
+#include
+int32_t YangScreenCaptureImpl::init(){
+
+	struct fb_var_screeninfo fb_var_info;
+	struct fb_fix_screeninfo fb_fix_info;
+
+
+
+	// open the framebuffer device
+	m_fd = open("/dev/fb0", O_RDONLY);
+	if(m_fd < 0)
+	{
+		yang_error("can not open dev\n");
+		return ERROR_SYS_Linux_ScreenDeviceOpenFailure;
+	}
+
+	// query the variable screen parameters
+	ioctl(m_fd, FBIOGET_VSCREENINFO, &fb_var_info);
+	// bits per pixel
+	printf("bits_per_pixel: %d\n", fb_var_info.bits_per_pixel);
+	// x resolution
+	printf("xres: %d\n", fb_var_info.xres);
+	// y resolution
+	printf("yres: %d\n", fb_var_info.yres);
+	// red component length (bits)
+	printf("red_length: %d\n", fb_var_info.red.length);
+	// green component length (bits)
+	printf("green_length: %d\n", fb_var_info.green.length);
+	// blue component length (bits)
+	printf("blue_length: %d\n", fb_var_info.blue.length);
+	// alpha (transparency) component length (bits)
+	printf("transp_length: %d\n", fb_var_info.transp.length);
+	// red component offset
+	printf("red_offset: %d\n", fb_var_info.red.offset);
+	// green component offset
+	printf("green_offset: %d\n", fb_var_info.green.offset);
+	// blue component offset
+	printf("blue_offset: %d\n", fb_var_info.blue.offset);
+	// alpha component offset
+	printf("transp_offset: %d\n", fb_var_info.transp.offset);
+
+	// query the fixed screen parameters
+	ioctl(m_fd, FBIOGET_FSCREENINFO, &fb_fix_info);
+	// size of one frame
+	printf("smem_len: %d\n", fb_fix_info.smem_len);
+	// size of one line
+	printf("line_length: %d\n", fb_fix_info.line_length);
+	// size of one frame in the framebuffer
+	m_bufLen = (fb_var_info.xres * fb_var_info.yres * fb_var_info.bits_per_pixel / 8);
+	m_width=fb_var_info.xres;
+	m_height=fb_var_info.yres;
+	// RGB24 buffer the captured frame is converted into
+	m_rgb = (uint8_t *)malloc(fb_var_info.xres * fb_var_info.yres * 3);
+	if(m_rgb == NULL)
+	{
+		// no dedicated allocation-failure code here; reuse the device failure code
+		return ERROR_SYS_Linux_ScreenDeviceOpenFailure;
+	}
+
+	return Yang_Ok;
+}
+#else
+#include "YangDXGIManager.h"
+
+int32_t YangScreenCaptureImpl::init() {
+
+	if (m_out_videoBuffer == NULL)
+		m_out_videoBuffer = new YangVideoBuffer(
+				m_context->avinfo.video.bitDepth == 8 ? 1 : 2);
+	if (m_pre_videoBuffer == NULL)
+		m_pre_videoBuffer = new YangVideoBuffer(
+				m_context->avinfo.video.bitDepth == 8 ?
1 : 2); + return Yang_Ok; +} + +#endif +YangScreenCaptureImpl::YangScreenCaptureImpl(YangContext *pcontext) { + m_context = pcontext; + m_width = 0; + m_height = 0; + m_rgb = NULL; + m_bufLen = 0; + m_fd = 0; + m_out_videoBuffer = nullptr; + m_pre_videoBuffer = nullptr; + isCapture = 0; + m_loop = 0; + m_state = 0; + m_interval=40; + m_isDrawmouse=false; +} + +YangScreenCaptureImpl::~YangScreenCaptureImpl() { + if(m_isStart){ + stop(); + yang_stop_thread(this); + } + m_context = NULL; + yang_delete(m_pre_videoBuffer); + yang_delete(m_out_videoBuffer); +} + +void YangScreenCaptureImpl::setVideoCaptureStart() { + m_state = 1; +} +void YangScreenCaptureImpl::setVideoCaptureStop() { + m_state = 0; +} +int32_t YangScreenCaptureImpl::getVideoCaptureState(){ + return m_state; +} +YangVideoBuffer* YangScreenCaptureImpl::getPreVideoBuffer() { + return m_pre_videoBuffer; +} +YangVideoBuffer* YangScreenCaptureImpl::getOutVideoBuffer() { + return m_out_videoBuffer; +} +void YangScreenCaptureImpl::putBuffer(YangFrame *videoFrame) { + m_out_videoBuffer->putVideo(videoFrame); +} + +void YangScreenCaptureImpl::setDrawmouse(bool isDraw) { + m_isDrawmouse=isDraw; + +} + +bool YangScreenCaptureImpl::getisDrawmouse() { + return m_isDrawmouse; +} + +void YangScreenCaptureImpl::stopLoop() { + m_loop = 0; +} +void YangScreenCaptureImpl::startLoop() { +#ifdef _MSC_VER + CoInitialize(NULL); + YangDXGIManager g_DXGIManager; + g_DXGIManager.SetCaptureSource(CSDesktop); + + RECT rcDim; + g_DXGIManager.GetOutputRect(rcDim); + + int dwWidth = rcDim.right - rcDim.left; + int dwHeight = rcDim.bottom - rcDim.top; + if (m_out_videoBuffer == NULL) + m_out_videoBuffer = new YangVideoBuffer( + m_context->avinfo.video.bitDepth == 8 ? 1 : 2); + if (m_pre_videoBuffer == NULL) + m_pre_videoBuffer = new YangVideoBuffer( + m_context->avinfo.video.bitDepth == 8 ? 
1 : 2); + m_out_videoBuffer->init(dwWidth, dwHeight, m_context->avinfo.video.videoEncoderFormat); + m_pre_videoBuffer->init(dwWidth, dwHeight, m_context->avinfo.video.videoEncoderFormat); + yang_trace("dwWidth=%d dwHeight=%d\n", dwWidth, dwHeight); + + DWORD dwBufSize = dwWidth * dwHeight * 4; + int yuvLen=dwWidth * dwHeight * 3 / 2; + YangYuvConvert yuv; + uint8_t *pBuf = new uint8_t[dwBufSize]; + uint8_t *dst = new uint8_t[yuvLen]; + YangFrame frame; + memset(&frame,0,sizeof(YangFrame)); + int64_t startTime = 0; + frame.payload = dst; + frame.nb = dwWidth * dwHeight * 3 / 2; + if(m_context->avinfo.video.videoEncoderFormat==YangArgb) { + frame.nb = dwWidth * dwHeight * 4; + frame.payload=pBuf; + } + HRESULT hr = 0; + m_loop = 1; + while (m_loop) { + yang_usleep(m_interval*1000); + g_DXGIManager.m_isDrawmouse=m_isDrawmouse; + hr = g_DXGIManager.GetOutputBits(pBuf, rcDim); + if (FAILED(hr)) { + if(hr==DXGI_ERROR_WAIT_TIMEOUT){ + //yang_error("GetOutputBits failed because DXGI_ERROR_WAIT_TIMEOUT\n"); + }else + yang_error("GetOutputBits failed with hr=0x%08x\n", hr); + + } else { + if (startTime == 0) + startTime = yang_get_system_time(); + + if(m_context->avinfo.video.videoEncoderFormat==YangI420) yuv.bgratoI420(pBuf, dst, dwWidth, dwHeight); + //yuv.rgbatoI420(pBuf, dst, dwWidth, dwHeight); + + frame.pts = yang_get_system_time() - startTime; + m_pre_videoBuffer->putVideo(&frame); + if (m_state){ + //yang_trace("%d,",frame.nb); + m_out_videoBuffer->putVideo(&frame); + } + + } + + } + yang_deleteA(pBuf); + yang_deleteA(dst); + CoUninitialize(); +#endif +} + +void YangScreenCaptureImpl::setInterval(int32_t pinterval) { + m_interval=pinterval; +} diff --git a/libmetartc3/src/yangcapture/YangScreenCaptureImpl.h b/libmetartc3/src/yangcapture/YangScreenCaptureImpl.h new file mode 100755 index 00000000..211c57e8 --- /dev/null +++ b/libmetartc3/src/yangcapture/YangScreenCaptureImpl.h @@ -0,0 +1,42 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGCAPTURE_SRC_YANGSCREENCAPTUREIMPL_H_ +#define YANGCAPTURE_SRC_YANGSCREENCAPTUREIMPL_H_ +#include +#include "yangutil/buffer/YangVideoBuffer.h" +#include + +class YangScreenCaptureImpl:public YangScreenCapture { +public: + YangScreenCaptureImpl(YangContext* pcontext); + virtual ~YangScreenCaptureImpl(); + int32_t m_width,m_height; + int32_t init(); + void setInterval(int32_t pinterval); + void setDrawmouse(bool isDraw); + bool getisDrawmouse(); + YangVideoBuffer * getOutVideoBuffer(); + YangVideoBuffer * getPreVideoBuffer(); + void setVideoCaptureStart(); + void setVideoCaptureStop(); + int32_t getVideoCaptureState(); + YangVideoBuffer* getScreenBuffer(); + uint8_t *m_rgb; + int32_t m_bufLen,isCapture; + int32_t m_fd; + void putBuffer(YangFrame* videoFrame); + void startLoop(); + void stopLoop(); +private: + YangVideoBuffer *m_out_videoBuffer; + YangVideoBuffer *m_pre_videoBuffer; + YangContext* m_context; + int m_loop; + int m_state; + int32_t m_interval; + bool m_isDrawmouse; +}; + +#endif /* YANGCAPTURE_SRC_YANGSCREENCAPTUREIMPL_H_ */ diff --git a/libmetartc3/src/yangcapture/YangScreenShare.cpp b/libmetartc3/src/yangcapture/YangScreenShare.cpp new file mode 100755 index 00000000..5bed94e1 --- /dev/null +++ b/libmetartc3/src/yangcapture/YangScreenShare.cpp @@ -0,0 +1,90 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "YangScreenShare.h" +#include "yangutil/yang_unistd.h" +#include "yangavutil/video/YangYuvConvert.h" + +YangScreenCapture::YangScreenCapture(){ + m_isStart=0; +} +YangScreenCapture::~YangScreenCapture(){ + + } + 
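+// Thread lifecycle: YangScreenCapture derives from the YangThread-style base
+// used throughout this patch, so run() is the thread entry: it raises
+// m_isStart, blocks in the subclass's startLoop(), and clears m_isStart when
+// the loop exits; stop() only requests termination via stopLoop().
+// Illustrative usage sketch (assumes the base class exposes start() and that
+// yang_stop_thread() joins, as the capture destructors elsewhere in this
+// patch suggest; "handle" is a hypothetical YangScreenCaptureHandleI*):
+//
+//   YangScreenShare share;
+//   share.setScreenHandle(handle);
+//   if (share.init() == Yang_Ok) {
+//       share.start();               // spawns the thread -> run() -> startLoop()
+//       share.setVideoCaptureStart();
+//       // ... frames flow into the YangVideoBuffer set via setOutVideoBuffer()
+//       share.stop();                // ask the loop to exit
+//       yang_stop_thread(&share);    // join
+//   }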
+void YangScreenCapture::run() {
+	m_isStart = 1;
+	startLoop();
+	m_isStart = 0;
+}
+void YangScreenCapture::stop() {
+	stopLoop();
+
+}
+YangScreenShare::YangScreenShare() {
+	//m_capture=new YangScreenShareImpl();
+	//m_vhandle=new YangVideoCaptureHandle();
+	m_isloop=0;
+	m_out_videoBuffer=NULL;
+	m_capture=NULL;
+	m_isCapture=0;
+	m_interval=0;
+}
+
+YangScreenShare::~YangScreenShare() {
+	m_capture=NULL;
+	m_out_videoBuffer=NULL;
+}
+void YangScreenShare::setOutVideoBuffer(YangVideoBuffer *pbuf){
+	m_out_videoBuffer=pbuf;
+}
+void YangScreenShare::setScreenHandle(YangScreenCaptureHandleI *handle){
+	m_capture=handle;
+}
+void YangScreenShare::setInterval(int32_t pinterval){
+	m_interval=1000*pinterval;
+}
+void YangScreenShare::setVideoCaptureStart() {
+	m_isCapture = 1;
+}
+void YangScreenShare::setVideoCaptureStop() {
+	m_isCapture = 0;
+}
+int32_t YangScreenShare::getVideoCaptureState() {
+	return m_isCapture;
+}
+
+void YangScreenShare::initstamp() {
+	//m_vhandle->initstamp();
+}
+void YangScreenShare::stopLoop() {
+	m_isloop = 0;
+}
+int32_t YangScreenShare::init() {
+	return m_capture->init();
+}
+void YangScreenShare::startLoop() {
+	m_isloop = 1;
+	/**
+	//m_vhandle->m_start_time = 0;
+	int32_t width=m_capture->m_width;
+	int32_t height=m_capture->m_height;
+	uint8_t buf[width*height*4];
+	uint8_t yuv[width*height*3/2];
+	int32_t yuvLen=width*height*3/2;
+	YangYuvConvert con;
+	int64_t timestamp=0;
+	YangFrame videoFrame;
+memset(&m_audioFrame,0,sizeof(YangFrame));
+	while (m_isloop) {
+		m_capture->captureFrame(buf);
+		con.rgb24toI420(buf,yuv,width,height);
+		videoFrame.payload=yuv;
+		videoFrame.nb=yuvLen;
+		videoFrame.timestamp=timestamp;
+		m_out_videoBuffer->putVideo(&videoFrame);
+		yang_usleep(3000);
+	}**/
+}
+
diff --git a/libmetartc3/src/yangcapture/YangScreenShare.h b/libmetartc3/src/yangcapture/YangScreenShare.h
new file mode 100755
index 00000000..ca441993
--- /dev/null
+++ b/libmetartc3/src/yangcapture/YangScreenShare.h
@@ -0,0 +1,38 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YANGCAPTURE_SRC_YANGSCREENSHARE_H_
+#define YANGCAPTURE_SRC_YANGSCREENSHARE_H_
+#include "yangcapture/YangScreenCapture.h"
+class YangScreenShare :public YangScreenCapture{
+public:
+	YangScreenShare();
+	virtual ~YangScreenShare();
+
+
+public:
+	void setScreenHandle(YangScreenCaptureHandleI *handle);
+	void setInterval(int32_t pinterval);
+	int32_t init();
+	void setVideoCaptureStart();
+	void setVideoCaptureStop();
+	void setOutVideoBuffer(YangVideoBuffer *pbuf);
+	int32_t getVideoCaptureState();
+	void initstamp();
+	void stopLoop();
+private:
+	int32_t m_isloop;
+	int32_t m_isCapture;
+	int32_t m_interval;
+	YangScreenCaptureHandleI *m_capture;
+	YangVideoBuffer *m_out_videoBuffer;
+
+
+protected:
+	void startLoop();
+};
+
+
+
+
+#endif /* YANGCAPTURE_SRC_YANGSCREENSHARE_H_ */
diff --git a/libmetartc3/src/yangcapture/YangVideoCapture.cpp b/libmetartc3/src/yangcapture/YangVideoCapture.cpp
new file mode 100755
index 00000000..d4159b40
--- /dev/null
+++ b/libmetartc3/src/yangcapture/YangVideoCapture.cpp
@@ -0,0 +1,31 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include <yangcapture/YangVideoCapture.h>
+#include <yangutil/sys/YangString.h>
+YangVideoCapture::YangVideoCapture() {
+	cameraIndex=0;
+	m_isStart=0;
+	m_para=NULL;
+}
+
+YangVideoCapture::~YangVideoCapture() {
+	m_para=NULL;
+}
+
+void YangVideoCapture::run() {
+	m_isStart = 1;
+	startLoop();
+	m_isStart = 0;
+}
+void YangVideoCapture::stop() {
+	stopLoop();
+
+}
+
+void yang_get_camera_indexs(std::vector<int32_t> *pvs,std::string pcamindex){
<string> res=yang_split(pcamindex,',');
+	for(size_t i=0;i<res.size();i++){
+		pvs->push_back(atoi(res[i].c_str()));
+	}
+}
diff --git a/libmetartc3/src/yangcapture/YangVideoCaptureHandle.cpp b/libmetartc3/src/yangcapture/YangVideoCaptureHandle.cpp
new file mode 100755
index 00000000..b16fd139
--- /dev/null
+++ b/libmetartc3/src/yangcapture/YangVideoCaptureHandle.cpp
@@ -0,0 +1,98 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "YangVideoCaptureHandle.h"
+
+#include "time.h"
+#include "stdlib.h"
+#include "stdio.h"
+
+
+YangVideoCaptureHandle::YangVideoCaptureHandle(YangVideoInfo *pcontext) {
+	memset(&m_videoFrame,0,sizeof(YangFrame));
+	curstamp = 0;
+	basesatmp = 0;
+	m_isCapture = 0;
+	m_isLivingCaptrue=0;
+	m_isFilm=0;
+	vtick=0;
+
+	m_start_time = 0;
+	m_out_videoBuffer = NULL;
+	m_pre_videoBuffer=NULL;
+	m_living_out_videoBuffer=NULL;
+	m_film_out_videoBuffer=NULL;
+	m_width=pcontext->width;
+	m_height=pcontext->height;
+	m_buf=NULL;
+	m_bufLen=m_width*m_height*3/2;
+
+	m_encoderVideoFormat=pcontext->videoEncoderFormat;
+
+}
+YangVideoCaptureHandle::~YangVideoCaptureHandle(void) {
+	m_out_videoBuffer=NULL;
+	m_pre_videoBuffer=NULL;
+	m_living_out_videoBuffer=NULL;
+	m_film_out_videoBuffer=NULL;
+	if(m_buf) delete[] m_buf;
+	m_buf=NULL;
+
+}
+void YangVideoCaptureHandle::setCaptureFormat(int32_t pformat){
+	if(pformat==YangYuy2&&!m_buf) m_buf=new uint8_t[m_bufLen];
+}
+void YangVideoCaptureHandle::setVideoBuffer(YangVideoBuffer *pbuf){
+	m_out_videoBuffer=pbuf;
+}
+void YangVideoCaptureHandle::setPreVideoBuffer(YangVideoBuffer *pbuf){
+	m_pre_videoBuffer=pbuf;
+}
+void YangVideoCaptureHandle::setLivingVideoBuffer(YangVideoBuffer *pbuf){
+	m_living_out_videoBuffer=pbuf;
+}
+void YangVideoCaptureHandle::setFilmVideoBuffer(YangVideoBuffer *pbuf){
+	m_film_out_videoBuffer=pbuf;
+}
+void YangVideoCaptureHandle::initstamp() {
+	basesatmp = curstamp; //-m_adjust_time*10000;
+}
+
+void YangVideoCaptureHandle::putBuffer(int64_t pstamtime, uint8_t *pBuffer, int32_t BufferLen) {
+
+	curstamp = pstamtime;
+	if(basesatmp==0) basesatmp=pstamtime;
+	vtick = pstamtime - basesatmp;
+
+	m_videoFrame.pts=vtick;
+	if(m_buf){
+		if(m_encoderVideoFormat==YangI420) m_yuv.yuy2toi420(pBuffer,m_buf,m_width,m_height);
+		if(m_encoderVideoFormat==YangNv12) m_yuv.yuy2tonv12(pBuffer,m_buf,m_width,m_height);
+		if(m_encoderVideoFormat==YangArgb) m_yuv.yuy2toargb(pBuffer,m_buf,m_width,m_height);
+		m_videoFrame.payload=m_buf;
+		m_videoFrame.nb=m_bufLen;
+		putBuffers();
+
+	}else{
+		m_videoFrame.payload=pBuffer;
+		m_videoFrame.nb=BufferLen;
+		putBuffers();
+
+	}
+	//return;
+}
+
+void YangVideoCaptureHandle::putBuffers() {
+
+	if(m_pre_videoBuffer) m_pre_videoBuffer->putVideo(&m_videoFrame);
+	if(m_isFilm&&m_film_out_videoBuffer) m_film_out_videoBuffer->putVideo(&m_videoFrame);
+	if (m_isCapture&&m_out_videoBuffer) {
+		m_out_videoBuffer->putVideo(&m_videoFrame);
+
+	}
+	if (m_isLivingCaptrue&&m_living_out_videoBuffer){
+		m_living_out_videoBuffer->putVideo(&m_videoFrame);
+	}
+
+
+}
diff --git a/libmetartc3/src/yangcapture/YangVideoCaptureHandle.h b/libmetartc3/src/yangcapture/YangVideoCaptureHandle.h
new file mode 100755
index 00000000..cc044ac0
--- /dev/null
+++ b/libmetartc3/src/yangcapture/YangVideoCaptureHandle.h
@@ -0,0 +1,45 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef __YangLivingVideoCaptureHandle__
+#define __YangLivingVideoCaptureHandle__
+#include
+#include "yangutil/buffer/YangVideoBuffer.h"
+#include "yangutil/sys/YangIni.h"
+class YangVideoCaptureHandle
+{
+public:
+	YangVideoCaptureHandle(YangVideoInfo 
*pcontext); + virtual ~YangVideoCaptureHandle(void); + void reset(YangVideoInfo* p_config); + + void putBuffer(int64_t startTime, uint8_t *pBuffer, int32_t BufferLen); + void putBuffers(); + void saveFile(char* fileName,double Time, uint8_t *pBuffer, int32_t BufferLen); + void initstamp(); + void startLoop(); + long m_start_time; + int32_t m_isCapture; + int32_t m_isLivingCaptrue; + int32_t m_isFilm; + void setVideoBuffer(YangVideoBuffer *pbuf); + void setLivingVideoBuffer(YangVideoBuffer *pbuf); + void setFilmVideoBuffer(YangVideoBuffer *pbuf); + void setPreVideoBuffer(YangVideoBuffer *plist); + void setCaptureFormat(int32_t pformat); +private: + + YangVideoBuffer *m_out_videoBuffer,*m_pre_videoBuffer,*m_living_out_videoBuffer,*m_film_out_videoBuffer; + YangFrame m_videoFrame; + YangYuvConvert m_yuv; + uint8_t *m_buf; + int32_t m_width,m_height,m_bufLen; + int64_t vtick; + int64_t curstamp,basesatmp; + int m_encoderVideoFormat; + + + +}; + +#endif diff --git a/libmetartc3/src/yangcapture/YangVideoCaptureImpl.cpp b/libmetartc3/src/yangcapture/YangVideoCaptureImpl.cpp new file mode 100755 index 00000000..380c5b1e --- /dev/null +++ b/libmetartc3/src/yangcapture/YangVideoCaptureImpl.cpp @@ -0,0 +1,455 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "YangVideoCaptureImpl.h" +YangMultiVideoCapture::YangMultiVideoCapture() { + + +} +YangMultiVideoCapture::~YangMultiVideoCapture() { + +} + + +#ifndef _WIN32 +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +YangVideoCaptureImpl::YangVideoCaptureImpl(YangVideoInfo *pcontext) { + + m_para = pcontext; + + m_vhandle = new YangVideoCaptureHandle(pcontext); + cameraIndex = pcontext->vIndex; + m_width = m_para->width; + m_height = m_para->height; + m_vd_id = 0; + + memset(&m_buf, 0, sizeof(m_buf)); + m_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + m_buf.memory = V4L2_MEMORY_MMAP; + m_isloop = 0; + m_isFirstFrame = 0; + m_buffer_count = 0; + m_timestatmp = 0; + m_hasYuy2 = 0, m_hasI420 = 0, m_hasNv12 = 0, m_hasYv12 = 0, m_hasP010 = 0,m_hasP016=0; +} + +YangVideoCaptureImpl::~YangVideoCaptureImpl() { + if (m_isloop) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + //m_log=NULL; + //m_context=NULL; + stop_capturing(); + uninit_camer_device(); + close_camer_device(); + delete m_vhandle; + m_vhandle = NULL; +} +void YangVideoCaptureImpl::setVideoCaptureStart() { + m_vhandle->m_isCapture = 1; +} +void YangVideoCaptureImpl::setVideoCaptureStop() { + m_vhandle->m_isCapture = 0; +} +int32_t YangVideoCaptureImpl::getVideoCaptureState() { + return m_vhandle->m_isCapture; +} +int32_t YangVideoCaptureImpl::getLivingVideoCaptureState() { + return m_vhandle->m_isLivingCaptrue; +} +int32_t YangVideoCaptureImpl::getFilmVideoCaptureState() { + return m_vhandle->m_isFilm; +} + +void YangVideoCaptureImpl::setLivingVideoCaptureStart() { + m_vhandle->m_isLivingCaptrue = 1; +} +void YangVideoCaptureImpl::setLivingVideoCaptureStop() { + m_vhandle->m_isLivingCaptrue = 0; +} + +void YangVideoCaptureImpl::setFilmVideoCaptureStart() { + m_vhandle->m_isFilm = 1; +} +void YangVideoCaptureImpl::setFilmVideoCaptureStop() { + m_vhandle->m_isFilm = 0; +} + +void YangVideoCaptureImpl::setOutVideoBuffer(YangVideoBuffer *pbuf) { + m_vhandle->setVideoBuffer(pbuf); +} +void YangVideoCaptureImpl::setLivingOutVideoBuffer(YangVideoBuffer *pbuf) { + m_vhandle->setLivingVideoBuffer(pbuf); +} +void YangVideoCaptureImpl::setFilmOutVideoBuffer(YangVideoBuffer *pbuf) { + 
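// putBuffer() above rebases the driver's timestamps: the first stamp becomes
// the origin and every later frame carries the delta as its pts. As a sketch
// (variable names are illustrative, not the members used above):
//
//   if (basestamp == 0) basestamp = driverStamp; // first frame fixes the origin
//   frame.pts = driverStamp - basestamp;         // later frames are offsets from it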
m_vhandle->setFilmVideoBuffer(pbuf);
+}
+void YangVideoCaptureImpl::setPreVideoBuffer(YangVideoBuffer *pbuf) {
+	m_vhandle->setPreVideoBuffer(pbuf);
+}
+void YangVideoCaptureImpl::initstamp() {
+	m_vhandle->initstamp();
+}
+int32_t YangVideoCaptureImpl::enum_camera_frmival(int32_t fd, struct v4l2_frmsizeenum *framesize) {
+	struct v4l2_frmivalenum frmival;
+	memset(&frmival, 0, sizeof(frmival));
+	frmival.pixel_format = framesize->pixel_format;
+	frmival.width = framesize->discrete.width;
+	frmival.height = framesize->discrete.height;
+	//frmival.type = V4L2_FRMIVAL_TYPE_DISCRETE;
+	frmival.index = 0;
+	//yang_trace("the frame intervals enum");
+
+	while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frmival) == 0) {
+		//print the fraction (numerator/denominator), i.e. the frame interval
+		if(frmival.width==m_para->width) {
+			yang_trace("%d.frameinterval:%u/%u\n ", frmival.index,
+					frmival.discrete.numerator, frmival.discrete.denominator);
+		}
+		frmival.index++;
+	}
+	return 0;
+}
+
+void YangVideoCaptureImpl::setReselution(__u32 format,int32_t val){
+	if (format == V4L2_PIX_FMT_YUYV)
+		m_hasYuy2 = val;
+	if (format == V4L2_PIX_FMT_YUV420)
+		m_hasI420 = val;
+	if (format == V4L2_PIX_FMT_NV12)
+		m_hasNv12 = val;
+	if (format == V4L2_PIX_FMT_YVU420)
+		m_hasYv12 = val;
+}
+void YangVideoCaptureImpl::setReselutionPara(__u32 pformat){
+	//struct v4l2_capability cap;
+	struct v4l2_fmtdesc fmt;
+	//fmt must be zeroed and typed before VIDIOC_ENUM_FMT, as in setPara() below
+	memset(&fmt, 0, sizeof(struct v4l2_fmtdesc));
+	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	int32_t vet=0;
+	while ((vet = ioctl(m_vd_id, VIDIOC_ENUM_FMT, &fmt)) != -1) {
+		fmt.index++;
+		if(fmt.pixelformat==pformat){
+			struct v4l2_frmsizeenum frmsize;
+			frmsize.pixel_format = fmt.pixelformat;
+			frmsize.index = 0;
+			while (!ioctl(m_vd_id, VIDIOC_ENUM_FRAMESIZES, &frmsize)) {
+				if (frmsize.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+					m_para->width=frmsize.discrete.width;
+					m_para->height=frmsize.discrete.height;
+					m_width=frmsize.discrete.width;
+					m_height=frmsize.discrete.height;
+				}
+				enum_camera_frmival(m_vd_id, &frmsize);
+				frmsize.index++;
+			}
+		}
+	}
+
+}
+int32_t YangVideoCaptureImpl::setPara() {
+	struct v4l2_capability cap;
+	struct v4l2_fmtdesc fmt;
+	memset(&fmt, 0, sizeof(struct v4l2_fmtdesc));
+	fmt.index = 0;
+	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	int32_t vet = 0;
+
+
+	if (ioctl(m_vd_id, VIDIOC_QUERYCAP, &cap) != 0) {
+		yang_error("\n VIDIOC_QUERYCAP error!");
+		return ERROR_SYS_Linux_NoVideoDriver;
+	}
+	yang_trace("\ndriver name %s card = %s cap = %0x\n", cap.driver, cap.card,
+			cap.capabilities);
+
+	while ((vet = ioctl(m_vd_id, VIDIOC_ENUM_FMT, &fmt)) != -1) {
+		fmt.index++;
+		setReselution(fmt.pixelformat,1);
+		//if(fmt.==m_para->width)
+		yang_trace("\n{ pixelformat = ''%c%c%c%c'', description = ''%s'' }\n",
+				fmt.pixelformat & 0xFF, (fmt.pixelformat >> 8) & 0xFF,
+				(fmt.pixelformat >> 16) & 0xFF, (fmt.pixelformat >> 24) & 0xFF,
+				fmt.description);
+		struct v4l2_frmsizeenum frmsize;
+		frmsize.pixel_format = fmt.pixelformat;
+		frmsize.index = 0;
+		while (!ioctl(m_vd_id, VIDIOC_ENUM_FRAMESIZES, &frmsize)) {
+			if (frmsize.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+				if(m_para->width==(int)frmsize.discrete.width&&m_para->height==(int)frmsize.discrete.height){
+					yang_trace("DISCRETE: line-%d %d: {%d*%d}\n", __LINE__,
+							frmsize.index, frmsize.discrete.width,frmsize.discrete.height);
+					setReselution(fmt.pixelformat,2);
+				}
+
+			}
+
+			else if (frmsize.type == V4L2_FRMSIZE_TYPE_STEPWISE) {
+				yang_trace("\nSTEPWISE: line: %d %d: {%d*%d}\n", __LINE__,
+						frmsize.index, frmsize.stepwise.max_width,
+						frmsize.stepwise.max_height);
+			}
+
+			enum_camera_frmival(m_vd_id, &frmsize);
+			frmsize.index++;
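// The probing above follows the standard three-level V4L2 enumeration:
// pixel formats, then frame sizes per format, then frame intervals per size.
// A minimal standalone sketch (assuming an already-open device fd):
//
//   struct v4l2_fmtdesc fmt = {0};
//   fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
//   while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0) {             // each pixel format
//       struct v4l2_frmsizeenum size = {0};
//       size.pixel_format = fmt.pixelformat;
//       while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &size) == 0) { // each resolution
//           struct v4l2_frmivalenum ival = {0};
//           ival.pixel_format = fmt.pixelformat;
//           ival.width = size.discrete.width;
//           ival.height = size.discrete.height;
//           while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &ival) == 0) // each rate
//               ival.index++;
//           size.index++;
//       }
//       fmt.index++;
//   }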
} + } + + return Yang_Ok; +} +std::string getVideoFormat(uint32_t pformat ){ + std::string res=""; + if(pformat==V4L2_PIX_FMT_YUV420) res="I420"; + if(pformat==V4L2_PIX_FMT_NV12) res="Nv12"; + if(pformat==V4L2_PIX_FMT_YVU420) res="Yv12"; + if(pformat==V4L2_PIX_FMT_YUYV) res="Yuy2"; + return res; +} +int32_t YangVideoCaptureImpl::init() { + char devStr[30]; + memset(devStr, 0, 30); + sprintf(devStr, "/dev/video%d", cameraIndex); + //if ((m_vd_id = open(devStr, O_RDWR|O_NONBLOCK)) == -1) { + if ((m_vd_id = open(devStr, O_RDWR)) == -1) { + yang_error("open video device Error!"); + return ERROR_SYS_Linux_VideoDeveceOpenFailure; + } + + int32_t ret = Yang_Ok; + ret = setPara(); + if (ret) return ret; + uint32_t format = V4L2_PIX_FMT_YUYV; + if(m_hasYuy2>1||m_hasI420>1||m_hasNv12>1||m_hasYv12>1){ + if(m_hasI420>1) { + format = V4L2_PIX_FMT_YUV420; + m_para->videoCaptureFormat=YangI420; + }else if(m_hasNv12>1){ + format = V4L2_PIX_FMT_NV12; + m_para->videoCaptureFormat=YangNv12; + }else if(m_hasYv12>1){ + format = V4L2_PIX_FMT_YVU420; + m_para->videoCaptureFormat=YangYv12; + }else if(m_hasYuy2>1){ + format = V4L2_PIX_FMT_YUYV; + m_para->videoCaptureFormat=YangYuy2; + } + #if Yang10bit + if(m_para->videoCaptureFormat==YangP010) format=V4L2_PIX_FMT_P010; + if(m_para->videoCaptureFormat==YangP016) format=V4L2_PIX_FMT_P016; + #endif + + }else if(m_hasYuy2||m_hasI420||m_hasNv12||m_hasYv12){ + if(m_hasI420) { + format = V4L2_PIX_FMT_YUV420; + m_para->videoCaptureFormat=YangI420; + }else if(m_hasNv12){ + format = V4L2_PIX_FMT_NV12; + m_para->videoCaptureFormat=YangNv12; + }else if(m_hasYv12){ + format = V4L2_PIX_FMT_YVU420; + m_para->videoCaptureFormat=YangYv12; + }else if(m_hasYuy2){ + format = V4L2_PIX_FMT_YUYV; + m_para->videoCaptureFormat=YangYuy2; + } + setReselutionPara(format); + }else{ + return ERROR_SYS_Linux_NoVideoCatpureInterface; + } + + struct v4l2_format v4_format; + v4_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + v4_format.fmt.pix.width = m_width; + v4_format.fmt.pix.height = m_height; + v4_format.fmt.pix.pixelformat = format; //V4L2_PIX_FMT_YUYV; //V4L2_PIX_FMT_YUYV; + v4_format.fmt.pix.field = V4L2_FIELD_INTERLACED; + + if ((ioctl(m_vd_id, VIDIOC_S_FMT, &v4_format)) != 0) { + yang_error("\n set fmt error!"); + + } + if(m_vhandle) m_vhandle->setCaptureFormat(m_para->videoCaptureFormat); + + + + + struct v4l2_streamparm Stream_Parm; + memset(&Stream_Parm, 0, sizeof(struct v4l2_streamparm)); + Stream_Parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + Stream_Parm.parm.capture.timeperframe.denominator =m_para->frame;; + Stream_Parm.parm.capture.timeperframe.numerator = 1; + + if( ioctl(m_vd_id, VIDIOC_S_PARM, &Stream_Parm)){ + yang_error("\n..........................set video frame error!"); + } + + printf("\n..................index==%d......set.format============%s\n",cameraIndex,getVideoFormat(format).c_str()); + printf("\n ...................index==%d,set framerate===%d",cameraIndex,m_para->frame); + // setfps.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + // setfps.parm.capture.timeperframe.numerator = 10; + // setfps.parm.capture.timeperframe.denominator = 10; + struct v4l2_requestbuffers tV4L2_reqbuf; + uint32_t i = 0; + memset(&tV4L2_reqbuf, 0, sizeof(struct v4l2_requestbuffers)); + tV4L2_reqbuf.count = REQ_BUF_NUM; + tV4L2_reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + tV4L2_reqbuf.memory = V4L2_MEMORY_MMAP; + + if (ioctl(m_vd_id, VIDIOC_REQBUFS, &tV4L2_reqbuf)) { + + } + m_buffer_count = tV4L2_reqbuf.count; + + if (m_user_buffer == NULL) { + yang_error("calloc Error"); + exit(1); + } + for (i = 0; 
i < tV4L2_reqbuf.count; i++) { + struct v4l2_buffer tV4L2buf; + memset(&tV4L2buf, 0, sizeof(struct v4l2_buffer)); + tV4L2buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + tV4L2buf.memory = V4L2_MEMORY_MMAP; + tV4L2buf.index = i; + if (ioctl(m_vd_id, VIDIOC_QUERYBUF, &tV4L2buf)) + printf("search!"); + //m_buf_length=tV4L2buf.length; + m_user_buffer[i].length = tV4L2buf.length; + m_user_buffer[i].start = (uint8_t*) mmap( NULL, tV4L2buf.length, + PROT_READ | PROT_WRITE, MAP_SHARED, m_vd_id, tV4L2buf.m.offset); + if (MAP_FAILED == m_user_buffer[i].start) { + yang_error(" error! mmap"); + exit(1); + } + } + return Yang_Ok; +} + +long YangVideoCaptureImpl::m_difftime(struct timeval *p_start, + struct timeval *p_end) { + return (p_end->tv_sec - p_start->tv_sec) * 1000000 + + (p_end->tv_usec - p_start->tv_usec); +} + +int32_t YangVideoCaptureImpl::read_buffer() { + + if (ioctl(m_vd_id, VIDIOC_DQBUF, &m_buf) != 0) { + yang_error("VIDIOC_DQBUF"); + exit(1); + } + if (m_isFirstFrame) { + m_timestatmp = m_difftime(&m_startTime, &m_buf.timestamp); + } else { + m_isFirstFrame = 1; + m_startTime.tv_sec = m_buf.timestamp.tv_sec; + m_startTime.tv_usec = m_buf.timestamp.tv_usec; + m_timestatmp = 0; + } + + if (m_vhandle) + m_vhandle->putBuffer(m_timestatmp, m_user_buffer[m_buf.index].start, + m_user_buffer[m_buf.index].length); + + // printf("\n%ld",m_timestatmp); + if (ioctl(m_vd_id, VIDIOC_QBUF, &m_buf) != 0) { + yang_error("VIDIOC_QBUF"); + exit(1); + } + + return Yang_Ok; +} +void YangVideoCaptureImpl::stopLoop() { + m_isloop = 0; +} + +void YangVideoCaptureImpl::stop_capturing() { + enum v4l2_buf_type type; + + type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + if (-1 == ioctl(m_vd_id, VIDIOC_STREAMOFF, &type)) { + yang_error("Fail to ioctl 'VIDIOC_STREAMOFF'"); + exit(EXIT_FAILURE); + } + +} +void YangVideoCaptureImpl::uninit_camer_device() { + int32_t i = 0; + + for (i = 0; i < m_buffer_count; i++) { + if (-1 == munmap(m_user_buffer[i].start, m_user_buffer[i].length)) { + exit(EXIT_FAILURE); + } + } + + //free(user_buffer); + +} +void YangVideoCaptureImpl::close_camer_device() { + if (-1 == close(m_vd_id)) { + yang_error("Fail to close fd"); + exit(EXIT_FAILURE); + } + +} + +void YangVideoCaptureImpl::startLoop() { + for (int32_t i = 0; i < m_buffer_count; i++) { + struct v4l2_buffer tV4L2buf; + memset(&tV4L2buf, 0, sizeof(struct v4l2_buffer)); + tV4L2buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + tV4L2buf.memory = V4L2_MEMORY_MMAP; + tV4L2buf.index = i; + if (ioctl(m_vd_id, VIDIOC_QBUF, &tV4L2buf)) { + yang_error("VIDIOC_QBUF"); + } + } + + enum v4l2_buf_type v4l2type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + if (ioctl(m_vd_id, VIDIOC_STREAMON, &v4l2type)) { + yang_error("VIDIOC_STREAMON"); + } + + fd_set fds; + struct timeval tv; + int32_t r; + FD_ZERO(&fds); + FD_SET(m_vd_id, &fds); + m_isloop = 1; + m_vhandle->m_start_time = 0; + + while (m_isloop) { + tv.tv_sec = 2; + tv.tv_usec = 0; + r = select(m_vd_id + 1, &fds, NULL, NULL, &tv); + if (-1 == r) { + if (EINTR == errno) + continue; + yang_error("video capture Fail to select"); + exit(EXIT_FAILURE); + } + if (0 == r) { + yang_error("video capture select Timeout\n"); + exit(EXIT_FAILURE); + } + read_buffer(); + } + +} +#endif diff --git a/libmetartc3/src/yangcapture/YangVideoCaptureImpl.h b/libmetartc3/src/yangcapture/YangVideoCaptureImpl.h new file mode 100755 index 00000000..388a255f --- /dev/null +++ b/libmetartc3/src/yangcapture/YangVideoCaptureImpl.h @@ -0,0 +1,71 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGCAPTURE_SRC_YANGVIDEOCAPTUREIMPL_H_ +#define 
YANGCAPTURE_SRC_YANGVIDEOCAPTUREIMPL_H_
+#include "yangcapture/YangMultiVideoCapture.h"
+
+#ifndef _WIN32
+#include "linux/videodev2.h"
+
+#include "YangVideoCaptureHandle.h"
+#define REQ_BUF_NUM 4 //number of driver buffers to request, 5 at most; too few buffers may cause gaps in the picture
+typedef struct buffer_type_r buffer_type;
+struct buffer_type_r {
+	uint8_t *start;
+	int32_t length;
+};
+
+class YangVideoCaptureImpl: public YangMultiVideoCapture {
+public:
+	YangVideoCaptureImpl(YangVideoInfo *pcontext);
+	~YangVideoCaptureImpl();
+	YangVideoCaptureHandle *m_vhandle;
+	int32_t init();
+	void setVideoCaptureStart();
+	void setVideoCaptureStop();
+	void setOutVideoBuffer(YangVideoBuffer *pbuf);
+	void setPreVideoBuffer(YangVideoBuffer *pbuf);
+	int32_t getVideoCaptureState();
+	void initstamp();
+	void stopLoop();
+
+	int32_t getLivingVideoCaptureState();
+	int32_t getFilmVideoCaptureState();
+
+	void setLivingOutVideoBuffer(YangVideoBuffer *pbuf);
+	void setLivingVideoCaptureStart();
+	void setLivingVideoCaptureStop();
+
+	void setFilmOutVideoBuffer(YangVideoBuffer *pbuf);
+	void setFilmVideoCaptureStart();
+	void setFilmVideoCaptureStop();
+protected:
+	void startLoop();
+	long m_difftime(struct timeval *p_start, struct timeval *p_end);
+private:
+	int32_t setPara();
+	void setReselution(__u32 format, int32_t val);
+	void setReselutionPara(__u32 pformat);
+	void process_image(char *p_addr, int32_t p_length);
+	int32_t read_buffer();
+	void stop_capturing();
+	void uninit_camer_device();
+	void close_camer_device();
+	int32_t enum_camera_frmival(int32_t fd, struct v4l2_frmsizeenum *framesize);
+
+	int32_t m_hasYuy2, m_hasI420, m_hasNv12, m_hasYv12, m_hasP010, m_hasP016;
+	int32_t m_width, m_height;
+	int32_t m_vd_id;
+	struct v4l2_buffer m_buf;
+	buffer_type m_user_buffer[REQ_BUF_NUM];
+	int32_t m_buffer_count;
+	int32_t m_isloop;
+	int32_t m_isFirstFrame;
+	struct timeval m_startTime;
+	long m_timestatmp;
+
+};
+#endif
+#endif /* YANGCAPTURE_SRC_YANGVIDEOCAPTUREIMPL_H_ */
diff --git a/libmetartc3/src/yangcapture/YangVideoDeviceQuery.cpp b/libmetartc3/src/yangcapture/YangVideoDeviceQuery.cpp
new file mode 100755
index 00000000..d9520f65
--- /dev/null
+++ b/libmetartc3/src/yangcapture/YangVideoDeviceQuery.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include
+#ifndef _WIN32
+#include "linux/videodev2.h"
+#include
+#include
+#include
+
+#endif
+#include "yangutil/sys/YangLog.h"
+
+YangVideoDeviceQuery::YangVideoDeviceQuery() {
+
+}
+
+YangVideoDeviceQuery::~YangVideoDeviceQuery() {
+
+}
+
+struct YangVideoDevice{
+	string name;
+	int32_t vindex;
+};
+#ifndef _WIN32
+int32_t YangVideoDeviceQuery::getVideoDeviceList() {
+	vector<YangVideoDevice> pv;
+	struct v4l2_capability cap;
+	struct v4l2_fmtdesc fmt;
+	memset(&fmt, 0, sizeof(struct v4l2_fmtdesc));
+	fmt.index = 0;
+	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	int32_t vet = 0;
+	int32_t m_vd_id=0;
+	//v4l2_device dd;
+	if (ioctl(m_vd_id, VIDIOC_QUERYCAP, &cap) != 0) {
+		yang_error("\n VIDIOC_QUERYCAP error!");
+		return ERROR_SYS_Linux_NoVideoDriver;
+	}
+	yang_trace("\ndriver name %s card = %s cap = %0x\n", cap.driver, cap.card,
+			cap.capabilities);
+
+	while ((vet = ioctl(m_vd_id, VIDIOC_ENUM_FMT, &fmt)) != -1) {
+		pv.push_back(YangVideoDevice());
+		pv.back().name=string((char*)fmt.description);
+		pv.back().vindex=fmt.index;
+		fmt.index++;
+
+		//if(fmt.==m_para->width)
+		yang_trace("\n{ pixelformat = ''%c%c%c%c'', description = ''%s'' }\n",
+				fmt.pixelformat & 0xFF, (fmt.pixelformat >> 8) & 0xFF,
+				(fmt.pixelformat >> 16) & 0xFF, (fmt.pixelformat >> 24) & 0xFF,
+				fmt.description);
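// For reference, the capture path above (init/startLoop/read_buffer) follows
// the canonical V4L2 memory-mapped streaming cycle; condensed, with error
// handling omitted:
//
//   ioctl(fd, VIDIOC_REQBUFS, &req);           // ask for REQ_BUF_NUM kernel buffers
//   // per buffer: VIDIOC_QUERYBUF, then mmap() its reported offset
//   // per buffer: VIDIOC_QBUF; then VIDIOC_STREAMON
//   while (looping) {
//       select(fd + 1, &fds, NULL, NULL, &tv); // wait for a frame, with timeout
//       ioctl(fd, VIDIOC_DQBUF, &buf);         // take a filled buffer
//       putBuffer(ts, userBuffer[buf.index].start, userBuffer[buf.index].length);
//       ioctl(fd, VIDIOC_QBUF, &buf);          // recycle it to the driver
//   }
//   // shutdown: VIDIOC_STREAMOFF, munmap() each buffer, close(fd)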
+		struct v4l2_frmsizeenum frmsize;
+		frmsize.pixel_format = fmt.pixelformat;
+		frmsize.index = 0;
+		while (!ioctl(m_vd_id, VIDIOC_ENUM_FRAMESIZES, &frmsize)) {
+			if (frmsize.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+				//if (m_para->width == (int) frmsize.discrete.width&& m_para->height == (int) frmsize.discrete.height) {
+				yang_trace("DISCRETE: line-%d %d: {%d*%d}\n", __LINE__,
+						frmsize.index, frmsize.discrete.width,
+						frmsize.discrete.height);
+				//	setReselution(fmt.pixelformat, 2);
+				//}
+
+			}
+
+			else if (frmsize.type == V4L2_FRMSIZE_TYPE_STEPWISE) {
+				yang_trace("\nSTEPWISE: line: %d %d: {%d*%d}\n", __LINE__,
+						frmsize.index, frmsize.stepwise.max_width,
+						frmsize.stepwise.max_height);
+			}
+
+			enum_camera_frmival(m_vd_id, &frmsize);
+			frmsize.index++;
+		}
+	}
+	return 1;
+}
+
+int32_t YangVideoDeviceQuery::enum_camera_frmival(int32_t fd, struct v4l2_frmsizeenum *framesize) {
+	struct v4l2_frmivalenum frmival;
+	memset(&frmival, 0, sizeof(frmival));
+	frmival.pixel_format = framesize->pixel_format;
+	frmival.width = framesize->discrete.width;
+	frmival.height = framesize->discrete.height;
+	//frmival.type = V4L2_FRMIVAL_TYPE_DISCRETE;
+	frmival.index = 0;
+	//yang_trace("the frame intervals enum");
+
+	while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frmival) == 0) {
+		//print the fraction (numerator/denominator), i.e. the frame interval
+		//if(frmival.width==m_para->width) {
+		yang_trace("%d.frameinterval:%u/%u\n ", frmival.index,
+				frmival.discrete.numerator, frmival.discrete.denominator);
+		//}
+		frmival.index++;
+	}
+	return 0;
+}
+
+#endif
diff --git a/libmetartc3/src/yangcapture/YangVideoDeviceQuery.h b/libmetartc3/src/yangcapture/YangVideoDeviceQuery.h
new file mode 100755
index 00000000..783a6308
--- /dev/null
+++ b/libmetartc3/src/yangcapture/YangVideoDeviceQuery.h
@@ -0,0 +1,18 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YANGCAPTURE_SRC_YANGVIDEODEVICEQUERY_H_
+#define YANGCAPTURE_SRC_YANGVIDEODEVICEQUERY_H_
+#include
+#include
+#include
+using namespace std;
+class YangVideoDeviceQuery {
+public:
+	YangVideoDeviceQuery();
+	virtual ~YangVideoDeviceQuery();
+	int32_t getVideoDeviceList();
+	int32_t enum_camera_frmival(int32_t fd, struct v4l2_frmsizeenum *framesize);
+};
+
+#endif /* YANGCAPTURE_SRC_YANGVIDEODEVICEQUERY_H_ */
diff --git a/libmetartc3/src/yangcapture/win/YangVideoSrc.cpp b/libmetartc3/src/yangcapture/win/YangVideoSrc.cpp
new file mode 100755
index 00000000..0b59794d
--- /dev/null
+++ b/libmetartc3/src/yangcapture/win/YangVideoSrc.cpp
@@ -0,0 +1,413 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifdef __WIN321__
+#include "YangVideoSrc.h"
+#include
+#include
+#include
+#include
+#define CheckPointer(p,ret) {if((p)==0) return (ret);}
+#define ValidateReadWritePtr(p,cb) \
+	{ValidateReadPtr(p,cb) ValidateWritePtr(p,cb)}
+//DEFINE_GUID(MEDIASUBTYPE_I420, 0x30323449, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71);
+DEFINE_GUID(CLSID_YangVideoSrcFilter, 0xa5532f97, 0x46a2, 0x48ad, 0xab, 0x23,
+		0xfd, 0x96, 0xb7, 0xbd, 0x86, 0xa3);
+YangVideoSrc::YangVideoSrc()
+{
+	m_clsid=CLSID_YangVideoSrcFilter;
+	m_pName=new char[64];
+	m_iPins=1;
+	m_stream=new CSourceStream();
+}
+YangVideoSrc::~YangVideoSrc(){
+	delete[] m_pName;
+	m_pName=0;
+}
+
+HRESULT STDMETHODCALLTYPE YangVideoSrc::EnumPins(IEnumPins **ppEnum){
+	CheckPointer(ppEnum,E_POINTER);
+	// ValidateReadWritePtr(ppEnum,sizeof(IEnumPins *));
+
+	/* Create a new ref counted enumerator */
+
+	*ppEnum = new CEnumPins(this, NULL);
+
+	return *ppEnum == NULL ? 
E_OUTOFMEMORY : NOERROR; +} +#define VFW_E_NOT_FOUND ((HRESULT)0x80040216L) + HRESULT STDMETHODCALLTYPE YangVideoSrc::FindPin(LPCWSTR Id, IPin **ppPin){ + CheckPointer(ppPin,E_POINTER); + *ppPin = m_stream; + m_stream->AddRef(); + return S_OK; + //return VFW_E_NOT_FOUND; + } + + HRESULT STDMETHODCALLTYPE YangVideoSrc::QueryFilterInfo(FILTER_INFO *pInfo){ + CheckPointer(pInfo,E_POINTER); + // ValidateReadWritePtr(pInfo,sizeof(FILTER_INFO)); + + if (m_pName) { + memcpy(pInfo->achName, m_pName,strlen(m_pName)); + } else { + pInfo->achName[0] = L'\0'; + } + pInfo->pGraph = m_pGraph; + if (m_pGraph) + m_pGraph->AddRef(); + return NOERROR; + } + + HRESULT STDMETHODCALLTYPE YangVideoSrc::JoinFilterGraph(IFilterGraph *pGraph, + LPCWSTR pName){ + + m_pGraph = pGraph; + if (m_pGraph) { + HRESULT hr = m_pGraph->QueryInterface(IID_IMediaEventSink, + (void**) &m_pSink); + m_pSink->Release(); // we do NOT keep a reference on it. + } else { + // if graph pointer is null, then we should + // also release the IMediaEventSink on the same object - we don't + // refcount it, so just set it to null + m_pSink = NULL; + } + + + if (m_pName) { + delete[] m_pName; + m_pName = NULL; + } + + if (pName) { + DWORD nameLen = lstrlenW(pName)+1; + m_pName = new WCHAR[nameLen]; + if (m_pName) { + CopyMemory(m_pName, pName, nameLen*sizeof(WCHAR)); + } + } + + + return NOERROR; + + } + STDMETHODIMP + YangVideoSrc::GetClassID(CLSID *pClsID) + { + CheckPointer(pClsID,E_POINTER); + // ValidateReadWritePtr(pClsID,sizeof(CLSID)); + *pClsID = m_clsid; + return NOERROR; + } + + HRESULT STDMETHODCALLTYPE YangVideoSrc::QueryVendorInfo(LPWSTR *pVendorInfo){ + UNREFERENCED_PARAMETER(pVendorInfo); + return E_NOTIMPL; + } + + /* This is called after a media type has been proposed + + Try to complete the connection by agreeing the allocator + */ + HRESULT + CSourceStream::CompleteConnect(IPin *pReceivePin) + { + UNREFERENCED_PARAMETER(pReceivePin); + return DecideAllocator(m_pInputPin, &m_pAllocator); + } + + HRESULT + CSourceStream::CheckConnect(IPin * pPin) + { + /* Check that pin directions DONT match */ + + PIN_DIRECTION pd; + pPin->QueryDirection(&pd); + + // ASSERT((pd == PINDIR_OUTPUT) || (pd == PINDIR_INPUT)); + // ASSERT((m_dir == PINDIR_OUTPUT) || (m_dir == PINDIR_INPUT)); + + // we should allow for non-input and non-output connections? + if (pd == m_dir) { + return VFW_E_INVALID_DIRECTION; + } + return NOERROR; + } + /* This method is called when the output pin is about to try and connect to + an input pin. It is at this point32_t that you should try and grab any extra + interfaces that you need, in this case IMemInputPin. Because this is + only called if we are not currently connected we do NOT need to call + BreakConnect. 
This also makes it easier to derive classes from us as + BreakConnect is only called when we actually have to break a connection + (or a partly made connection) and not when we are checking a connection */ + + /* Overriden from CBasePin */ + + HRESULT + CSourceStream::CheckConnect(IPin * pPin) + { + HRESULT hr = CBasePin::CheckConnect(pPin); + if (FAILED(hr)) { + return hr; + } + + // get an input pin and an allocator interface + hr = pPin->QueryInterface(IID_IMemInputPin, (void **) &m_pInputPin); + if (FAILED(hr)) { + return hr; + } + return NOERROR; + } + + + /* Overriden from CBasePin */ + + HRESULT + CSourceStream::BreakConnect() + { + /* Release any allocator we hold */ + + if (m_pAllocator) { + // Always decommit the allocator because a downstream filter may or + // may not decommit the connection's allocator. A memory leak could + // occur if the allocator is not decommited when a connection is broken. + HRESULT hr = m_pAllocator->Decommit(); + if( FAILED( hr ) ) { + return hr; + } + + m_pAllocator->Release(); + m_pAllocator = NULL; + } + + /* Release any input pin interface we hold */ + + if (m_pInputPin) { + m_pInputPin->Release(); + m_pInputPin = NULL; + } + return NOERROR; + } + + + /* This is called when the input pin didn't give us a valid allocator */ + + HRESULT + CSourceStream::InitAllocator(IMemAllocator **ppAlloc) + { + return CreateMemoryAllocator(ppAlloc); + } + + + /* Decide on an allocator, override this if you want to use your own allocator + Override DecideBufferSize to call SetProperties. If the input pin fails + the GetAllocator call then this will construct a CMemAllocator and call + DecideBufferSize on that, and if that fails then we are completely hosed. + If the you succeed the DecideBufferSize call, we will notify the input + pin of the selected allocator. NOTE this is called during Connect() which + therefore looks after grabbing and locking the object's critical section */ + + // We query the input pin for its requested properties and pass this to + // DecideBufferSize to allow it to fulfill requests that it is happy + // with (eg most people don't care about alignment and are thus happy to + // use the downstream pin's alignment request). + + HRESULT + CSourceStream::DecideAllocator(IMemInputPin *pPin, IMemAllocator **ppAlloc) + { + HRESULT hr = NOERROR; + *ppAlloc = NULL; + + // get downstream prop request + // the derived class may modify this in DecideBufferSize, but + // we assume that he will consistently modify it the same way, + // so we only get it once + ALLOCATOR_PROPERTIES prop; + ZeroMemory(&prop, sizeof(prop)); + + // whatever he returns, we assume prop is either all zeros + // or he has filled it out. 
+ pPin->GetAllocatorRequirements(&prop); + + // if he doesn't care about alignment, then set it to 1 + if (prop.cbAlign == 0) { + prop.cbAlign = 1; + } + + /* Try the allocator provided by the input pin */ + + hr = pPin->GetAllocator(ppAlloc); + if (SUCCEEDED(hr)) { + + hr = DecideBufferSize(*ppAlloc, &prop); + if (SUCCEEDED(hr)) { + hr = pPin->NotifyAllocator(*ppAlloc, FALSE); + if (SUCCEEDED(hr)) { + return NOERROR; + } + } + } + + /* If the GetAllocator failed we may not have an interface */ + + if (*ppAlloc) { + (*ppAlloc)->Release(); + *ppAlloc = NULL; + } + + /* Try the output pin's allocator by the same method */ + + hr = InitAllocator(ppAlloc); + if (SUCCEEDED(hr)) { + + // note - the properties passed here are in the same + // structure as above and may have been modified by + // the previous call to DecideBufferSize + hr = DecideBufferSize(*ppAlloc, &prop); + if (SUCCEEDED(hr)) { + hr = pPin->NotifyAllocator(*ppAlloc, FALSE); + if (SUCCEEDED(hr)) { + return NOERROR; + } + } + } + + /* Likewise we may not have an interface to release */ + + if (*ppAlloc) { + (*ppAlloc)->Release(); + *ppAlloc = NULL; + } + return hr; + } + + + /* This returns an empty sample buffer from the allocator WARNING the same + dangers and restrictions apply here as described below for Deliver() */ + + HRESULT + CSourceStream::GetDeliveryBuffer(IMediaSample ** ppSample, + REFERENCE_TIME * pStartTime, + REFERENCE_TIME * pEndTime, + DWORD dwFlags) + { + if (m_pAllocator != NULL) { + return m_pAllocator->GetBuffer(ppSample,pStartTime,pEndTime,dwFlags); + } else { + return E_NOINTERFACE; + } + } + + + + + HRESULT + CSourceStream::Deliver(IMediaSample * pSample) + { + if (m_pInputPin == NULL) { + return VFW_E_NOT_CONNECTED; + } + + + return m_pInputPin->Receive(pSample); + } + + + // called from elsewhere in our filter to pass EOS downstream to + // our connected input pin + HRESULT + CSourceStream::DeliverEndOfStream(void) + { + // remember this is on IPin not IMemInputPin + if (m_Connected == NULL) { + return VFW_E_NOT_CONNECTED; + } + return m_Connected->EndOfStream(); + } + + + /* Commit the allocator's memory, this is called through IMediaFilter + which is responsible for locking the object before calling us */ + + HRESULT + CSourceStream::Active(void) + { + if (m_pAllocator == NULL) { + return VFW_E_NO_ALLOCATOR; + } + return m_pAllocator->Commit(); + } + + + /* Free up or unprepare allocator's memory, this is called through + IMediaFilter which is responsible for locking the object first */ + + HRESULT + CSourceStream::Inactive(void) + { + m_bRunTimeError = FALSE; + if (m_pAllocator == NULL) { + return VFW_E_NO_ALLOCATOR; + } + return m_pAllocator->Decommit(); + } + + // we have a default handling of EndOfStream which is to return + // an error, since this should be called on input pins only + STDMETHODIMP + CSourceStream::EndOfStream(void) + { + return E_UNEXPECTED; + } + + + // BeginFlush should be called on input pins only + STDMETHODIMP + CSourceStream::BeginFlush(void) + { + return E_UNEXPECTED; + } + + // EndFlush should be called on input pins only + STDMETHODIMP + CSourceStream::EndFlush(void) + { + return E_UNEXPECTED; + } + + // call BeginFlush on the connected input pin + HRESULT + CSourceStream::DeliverBeginFlush(void) + { + // remember this is on IPin not IMemInputPin + if (m_Connected == NULL) { + return VFW_E_NOT_CONNECTED; + } + return m_Connected->BeginFlush(); + } + + // call EndFlush on the connected input pin + HRESULT + CSourceStream::DeliverEndFlush(void) + { + // remember this is on 
IPin not IMemInputPin + if (m_Connected == NULL) { + return VFW_E_NOT_CONNECTED; + } + return m_Connected->EndFlush(); + } + // deliver NewSegment to connected pin + HRESULT + CSourceStream::DeliverNewSegment( + REFERENCE_TIME tStart, + REFERENCE_TIME tStop, + double dRate) + { + if (m_Connected == NULL) { + return VFW_E_NOT_CONNECTED; + } + return m_Connected->NewSegment(tStart, tStop, dRate); + } + +#endif diff --git a/libmetartc3/src/yangcapture/win/YangVideoSrc.h b/libmetartc3/src/yangcapture/win/YangVideoSrc.h new file mode 100755 index 00000000..5a263c8f --- /dev/null +++ b/libmetartc3/src/yangcapture/win/YangVideoSrc.h @@ -0,0 +1,222 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef Yang_YangVideoSrc_H1 +#define Yang_YangVideoSrc_H1 +#ifdef __WIN321__ +#include +#include "amstream.h" +#include +#include "qedit.h" + +#include +#include +#include + +class CSourceStream; + +class YangVideoSrc: public IBaseFilter { +public: + + YangVideoSrc(); + virtual ~YangVideoSrc(); +//ibasefilter + HRESULT STDMETHODCALLTYPE EnumPins(IEnumPins **ppEnum); + + HRESULT STDMETHODCALLTYPE FindPin(LPCWSTR Id, IPin **ppPin); + + HRESULT STDMETHODCALLTYPE QueryFilterInfo(FILTER_INFO *pInfo); + + HRESULT STDMETHODCALLTYPE JoinFilterGraph(IFilterGraph *pGraph, + LPCWSTR pName); + + HRESULT STDMETHODCALLTYPE QueryVendorInfo(LPWSTR *pVendorInfo); +// + STDMETHODIMP GetClassID(CLSID *pClsID); + + + +protected: + CLSID m_clsid; + IFilterGraph *m_pGraph; + IMediaEventSink *m_pSink; + LONG m_PinVersion; + int32_t m_iPins; // The number of pins on this filter. Updated by CSourceStream + CSourceStream *m_stream; + char *m_pName; +}; + +// +// CSourceStream +// +// Use this class to manage a stream of data that comes from a +// pin. +// Uses a worker thread to put data on the pin. +class CSourceStream: public IPin,public IUnknown { +public: + + CSourceStream(); + virtual ~CSourceStream(void); // virtual destructor ensures derived class destructors are called too. + +protected: + + YangVideoSrc *m_pFilter; // The parent of this stream + + // * + // * Data Source + // * + // * The following three functions: FillBuffer, OnThreadCreate/Destroy, are + // * called from within the ThreadProc. They are used in the creation of + // * the media samples this pin will provide + // * + + // Override this to provide the worker thread a means + // of processing a buffer + virtual HRESULT FillBuffer(IMediaSample *pSamp) PURE; + + // Called as the thread is created/destroyed - use to perform + // jobs such as start/stop streaming mode + // If OnThreadCreate returns an error the thread will exit. + virtual HRESULT OnThreadCreate(void) { + return NOERROR; + } + ; + virtual HRESULT OnThreadDestroy(void) { + return NOERROR; + } + ; + virtual HRESULT OnThreadStartPlay(void) { + return NOERROR; + } + ; + + // * + // * Worker Thread + // * + + HRESULT Active(void); // Starts up the worker thread + HRESULT Inactive(void); // Exits the worker thread. 
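// The allocator negotiation implemented above usually terminates in a
// DecideBufferSize() override that sizes each media sample for one video
// frame. A minimal sketch (not part of the original filter; I420 sizing and
// m_width/m_height members assumed):
//
//   HRESULT DecideBufferSize(IMemAllocator *pAlloc, ALLOCATOR_PROPERTIES *pProp) {
//       pProp->cBuffers = 1;
//       pProp->cbBuffer = m_width * m_height * 3 / 2;  // one I420 frame
//       ALLOCATOR_PROPERTIES actual;
//       HRESULT hr = pAlloc->SetProperties(pProp, &actual);
//       if (FAILED(hr)) return hr;
//       return (actual.cbBuffer < pProp->cbBuffer) ? E_FAIL : S_OK;
//   }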
+ + + IMemAllocator *m_pAllocator; + IMemInputPin *m_pInputPin; + IPin *m_Connected; // Pin we have connected to + PIN_DIRECTION m_dir; + bool m_bRunTimeError; // Run time error generated + bool m_bCanReconnectWhenActive; // OK to reconnect when active + bool m_bTryMyTypesFirst; // When connecting enumerate + +public: + // thread commands + enum Command { + CMD_INIT, CMD_PAUSE, CMD_RUN, CMD_STOP, CMD_EXIT + }; + HRESULT Init(void) { + return CallWorker(CMD_INIT); + } + HRESULT Exit(void) { + return CallWorker(CMD_EXIT); + } + HRESULT Run(void) { + return CallWorker(CMD_RUN); + } + HRESULT Pause(void) { + return CallWorker(CMD_PAUSE); + } + HRESULT Stop(void) { + return CallWorker(CMD_STOP); + } + + + + HRESULT STDMETHODCALLTYPE Connect( + IPin *pReceivePin, + const AM_MEDIA_TYPE *pmt) ; + + HRESULT STDMETHODCALLTYPE ReceiveConnection( + IPin *pConnector, + const AM_MEDIA_TYPE *pmt) ; + + HRESULT STDMETHODCALLTYPE Disconnect( + ) ; + + HRESULT STDMETHODCALLTYPE ConnectedTo( + IPin **pPin) ; + + HRESULT STDMETHODCALLTYPE ConnectionMediaType( + AM_MEDIA_TYPE *pmt) ; + + HRESULT STDMETHODCALLTYPE Querypcontextnfo( + PIN_INFO *pInfo); + + HRESULT STDMETHODCALLTYPE QueryDirection( + PIN_DIRECTION *pPinDir) ; + + HRESULT STDMETHODCALLTYPE QueryId( + LPWSTR *Id) ; + + HRESULT STDMETHODCALLTYPE QueryAccept( + const AM_MEDIA_TYPE *pmt) ; + + HRESULT STDMETHODCALLTYPE EnumMediaTypes( + IEnumMediaTypes **ppEnum) ; + + HRESULT STDMETHODCALLTYPE QueryInternalConnections( + IPin **apPin, + ULONG *nPin) ; + + HRESULT STDMETHODCALLTYPE EndOfStream( + ) ; + + HRESULT STDMETHODCALLTYPE BeginFlush( + ); + + HRESULT STDMETHODCALLTYPE EndFlush( + ) ; + + HRESULT STDMETHODCALLTYPE NewSegment( + REFERENCE_TIME tStart, + REFERENCE_TIME tStop, + double dRate); + + HRESULT InitAllocator(IMemAllocator **ppAlloc); + HRESULT CheckConnect(IPin *pPin); + HRESULT BreakConnect(); + + HRESULT CompleteConnect(IPin *pReceivePin); + HRESULT DeliverEndOfStream(void); + + +protected: + Command GetRequest(void) { + return (Command) CAMThread::GetRequest(); + } + BOOL CheckRequest(Command *pCom) { + return CAMThread::CheckRequest((DWORD*) pCom); + } + + // override these if you want to add thread commands + virtual DWORD ThreadProc(void); // the thread function + + virtual HRESULT DoBufferProcessingLoop(void); // the loop executed whilst running + + // * + // * AM_MEDIA_TYPE support + // * + + // If you support more than one media type then override these 2 functions + virtual HRESULT CheckMediaType(const CMediaType *pMediaType); + virtual HRESULT GetMediaType(int32_t iPosition, CMediaType *pMediaType); // List pos. 0-n + + // If you support only one type then override this fn. + // This will only be called by the default implementations + // of CheckMediaType and GetMediaType(int, CMediaType*) + // You must override this fn. or the above 2! 
+ virtual HRESULT GetMediaType(CMediaType *pMediaType) { + return E_UNEXPECTED; + } + + STDMETHODIMP QueryId(LPWSTR *Id); +}; +#endif +#endif diff --git a/libmetartc3/src/yangcapture/win/YangWinVideoCapture.cpp b/libmetartc3/src/yangcapture/win/YangWinVideoCapture.cpp new file mode 100755 index 00000000..f3d4a504 --- /dev/null +++ b/libmetartc3/src/yangcapture/win/YangWinVideoCapture.cpp @@ -0,0 +1,421 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "YangWinVideoCapture.h" + +#ifdef _WIN32 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include +#include +#include +#include +#include +#include +#include + +#include "yangutil/sys/YangLog.h" +#define yang_release(x) { if (x) x->Release(); x = NULL; } +//HRESULT WINAPI CopyMediaType( AM_MEDIA_TYPE *pmtTarget, + // const AM_MEDIA_TYPE *pmtSource +//); + + +#include + +HRESULT WINAPI CopyMediaType(AM_MEDIA_TYPE *pmtTarget, const AM_MEDIA_TYPE *pmtSource) +{ + + *pmtTarget = *pmtSource; + if (pmtSource->cbFormat != 0) { + // ASSERT(pmtSource->pbFormat != NULL); + pmtTarget->pbFormat = (PBYTE)CoTaskMemAlloc(pmtSource->cbFormat); + if (pmtTarget->pbFormat == NULL) { + pmtTarget->cbFormat = 0; + return E_OUTOFMEMORY; + } else { + CopyMemory((PVOID)pmtTarget->pbFormat, (PVOID)pmtSource->pbFormat, + pmtTarget->cbFormat); + } + } + if (pmtTarget->pUnk != NULL) { + pmtTarget->pUnk->AddRef(); + } + + return S_OK; +} + +YangWinVideoCapture::YangWinVideoCapture(YangVideoInfo *pcontext) { + m_para = pcontext; + + m_vhandle = new YangWinVideoCaptureHandle(pcontext); + cameraIndex = pcontext->vIndex; + m_width = m_para->width; + m_height = m_para->height; + m_vd_id = 0; + + m_isloop = 0; + m_isFirstFrame = 0; + m_buffer_count = 0; + m_timestatmp = 0; + m_pg = NULL; + m_pb = NULL; + m_pm = NULL; + m_videoSrc = NULL; + m_grabberF=NULL; + m_grabber=NULL; + m_event = NULL; + //m_moniker = NULL; + m_nullRender=NULL; + m_t_time=0; + //m_gid=MEDIASUBTYPE_I420; + //videoRender1 = NULL; + + //vcs=new VideoSampleCapture(&config); + + cameraIndex = 1; + hasVideo = 1; + m_preframe = 12; + //m_fx=conf->fx; + m_isOpAddMinus = 0; + memset(&m_yuy2,0,sizeof(m_yuy2)); + memset(&m_i420,0,sizeof(m_i420)); + memset(&m_nv12,0,sizeof(m_nv12)); + memset(&m_yv12,0,sizeof(m_yv12)); + memset(&m_p010,0,sizeof(m_p010)); + memset(&m_p016,0,sizeof(m_p016)); + m_yuy2.yuvType = YangYuy2; + m_i420.yuvType = YangI420; + m_nv12.yuvType = YangNv12; + m_yv12.yuvType = YangYv12; + m_p010.yuvType=YangP010; + m_p016.yuvType=YangP016; + + +} + +YangWinVideoCapture::~YangWinVideoCapture() { + if (m_isloop) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + yang_release(m_grabber); + yang_release(m_grabberF); + + yang_release( m_event); + yang_release( m_pm); + yang_release( m_pg); + yang_release( m_pb); + yang_release( m_videoSrc); + delete m_vhandle; + m_vhandle = NULL; + + +} + +int32_t YangWinVideoCapture::getVideoCaptureState() { + return m_vhandle->m_isCapture; +} +int32_t YangWinVideoCapture::getLivingVideoCaptureState() { + return m_vhandle->m_isLivingCaptrue; +} +int32_t YangWinVideoCapture::getFilmVideoCaptureState() { + return m_vhandle->m_isFilm; +} + +void YangWinVideoCapture::setVideoCaptureStart() { + m_vhandle->m_isCapture = 1; +} +void YangWinVideoCapture::setVideoCaptureStop() { + m_vhandle->m_isCapture = 0; +} +void YangWinVideoCapture::setLivingVideoCaptureStart() { + m_vhandle->m_isLivingCaptrue = 1; +} +void YangWinVideoCapture::setLivingVideoCaptureStop() { + 
m_vhandle->m_isLivingCaptrue = 0; +} + +void YangWinVideoCapture::setFilmVideoCaptureStart() { + m_vhandle->m_isFilm = 1; +} +void YangWinVideoCapture::setFilmVideoCaptureStop() { + m_vhandle->m_isFilm = 0; +} + +void YangWinVideoCapture::setOutVideoBuffer(YangVideoBuffer *pbuf) { + m_vhandle->setVideoBuffer(pbuf); +} +void YangWinVideoCapture::setLivingOutVideoBuffer(YangVideoBuffer *pbuf) { + m_vhandle->setLivingVideoBuffer(pbuf); +} +void YangWinVideoCapture::setFilmOutVideoBuffer(YangVideoBuffer *pbuf) { + m_vhandle->setFilmVideoBuffer(pbuf); +} +void YangWinVideoCapture::setPreVideoBuffer(YangVideoBuffer *pbuf) { + m_vhandle->setPreVideoBuffer(pbuf); +} +void YangWinVideoCapture::initstamp() { + m_vhandle->initstamp(); +} +int32_t YangWinVideoCapture::init() { + CoInitialize(NULL); + + ICreateDevEnum *devEnum = NULL; + HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, + CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&devEnum)); + + //if (SUCCEEDED(hr)) { + // printf("suncess!"); + //} else { + // printf("fail"); + //} + IEnumMoniker *classEnum = NULL; + + ULONG cFetched; + IMoniker *moniker=NULL; + int32_t cco = 0; + devEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &classEnum,0); + while (classEnum->Next(1, &moniker, &cFetched) == S_OK) { + cco++; + if (cco != cameraIndex) + continue; + moniker->BindToObject(0, 0, IID_IBaseFilter, (void**) &m_videoSrc); + yang_release(moniker); + break; + } + yang_release(classEnum); + yang_release(devEnum); + + if (m_videoSrc == NULL) + return ERROR_SYS_Win_VideoDeveceOpenFailure; + + CoCreateInstance(CLSID_CaptureGraphBuilder2, 0, CLSCTX_INPROC_SERVER,IID_ICaptureGraphBuilder2, (void**) &m_pb); + CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,IID_IGraphBuilder, (void**) &m_pg); + m_pb->SetFiltergraph(m_pg); + m_pg->QueryInterface(IID_IMediaControl, (void**) &m_pm); + m_pg->AddFilter(m_videoSrc, L"mp4"); + CLSID CLSID_NullRenderer1 = { 0xC1F400A4, 0x3F08, 0x11d3, { 0x9F, 0x0B, 0x00, 0x60, 0x08, 0x03, 0x9E, 0x37 } }; + CLSID CLSID_SampleGrabber1 = { 0xC1F400A0, 0x3F08, 0x11d3, { 0x9F, 0x0B, 0x00, 0x60, 0x08, 0x03, 0x9E, 0x37 } }; + hr=CoCreateInstance(CLSID_SampleGrabber1, NULL, CLSCTX_INPROC_SERVER,IID_PPV_ARGS(&m_grabberF)); + hr = m_pg->AddFilter(m_grabberF, L"Sample Grabber"); + + hr = m_grabberF->QueryInterface(IID_ISampleGrabber,(void **)&m_grabber); + hr=m_pg->QueryInterface(IID_IMediaEventEx,(void **)&m_event); + + m_nullRender=NULL; + hr=CoCreateInstance(CLSID_NullRenderer1, NULL, CLSCTX_INPROC, IID_IBaseFilter,(void **)&m_nullRender); + + setRevolutionPara(); + + //m_yuy2, m_i420 , m_nv12, m_yv12 + if(m_i420.state>1||m_nv12.state>1||m_yv12.state>1||m_yuy2.state>1){ + if(m_i420.state>1) { + m_para->videoCaptureFormat=YangI420; + }else if(m_nv12.state>1){ + m_para->videoCaptureFormat=YangNv12; + }else if(m_yv12.state>1){ + m_para->videoCaptureFormat=YangYv12; + }else if(m_yuy2.state>1){ + m_para->videoCaptureFormat=YangYuy2; + } + #if Yang10bit + if(m_para->videoCaptureFormat==YangP010) format=V4L2_PIX_FMT_P010; + if(m_para->videoCaptureFormat==YangP016) format=V4L2_PIX_FMT_P016; + #endif + + }else if(m_i420.state||m_nv12.state||m_yv12.state||m_yuy2.state){ + if(m_i420.state) { + setWH(&m_i420); + }else if(m_nv12.state){ + setWH(&m_nv12); + }else if(m_yv12.state){ + setWH(&m_yv12); + }else if(m_yuy2.state){ + setWH(&m_yuy2); + } + //setReselution(format); + }else{ + return ERROR_SYS_Win_NoVideoDriver; + } + + setRevolution(); + // if(1) return 0; + if(m_vhandle) 
m_vhandle->setCaptureFormat(m_para->videoCaptureFormat); + return Yang_Ok; +} + +IPin* YangWinVideoCapture::FindPin(IBaseFilter *pFilter, PIN_DIRECTION dir) { + IEnumPins *pEnumPins; + IPin *pOutpin; + PIN_DIRECTION pDir; + pFilter->EnumPins(&pEnumPins); + while (pEnumPins->Next(1, &pOutpin, NULL) == S_OK) { + pOutpin->QueryDirection(&pDir); + + if (pDir == dir) { + yang_release(pEnumPins); + return pOutpin; + } + } + return 0; +} +REFGUID YangWinVideoCapture::getUID(){ + YangYuvType format=(YangYuvType)m_para->videoCaptureFormat; + if(format==YangYuy2) return MEDIASUBTYPE_YUY2; + if(format==YangI420) return MEDIASUBTYPE_I420; + if(format==YangNv12) return MEDIASUBTYPE_NV12; + if(format==YangYv12) return MEDIASUBTYPE_YV12; + return MEDIASUBTYPE_I420; + +} +void YangWinVideoCapture::setRevolutionPara() { + IAMStreamConfig *config1 = 0; + m_pb->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video, m_videoSrc, + IID_IAMStreamConfig, (void**) &config1); + int32_t iCount = 0, iSize = 0; + HRESULT hr = config1->GetNumberOfCapabilities(&iCount, &iSize); + + + + if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS)) { + for (int32_t iFormat = 0; iFormat < iCount; iFormat++) { + VIDEO_STREAM_CONFIG_CAPS scc; + AM_MEDIA_TYPE *pmtConfig; + + hr = config1->GetStreamCaps(iFormat, &pmtConfig, (BYTE*) &scc); + VIDEOINFOHEADER* pvih1=(VIDEOINFOHEADER*)pmtConfig->pbFormat; + + if(IsEqualGUID(pmtConfig->subtype, MEDIASUBTYPE_I420)){ + if(!m_i420.state) m_i420.state=1; + if(pvih1->bmiHeader.biWidth==m_para->width&&pvih1->bmiHeader.biHeight==m_para->height) m_i420.state=2; + m_i420.width=pvih1->bmiHeader.biWidth; + m_i420.height=pvih1->bmiHeader.biHeight; + }else if(IsEqualGUID(pmtConfig->subtype, MEDIASUBTYPE_NV12)){ + if(!m_nv12.state) m_nv12.state=1; + if(pvih1->bmiHeader.biWidth==m_para->width&&pvih1->bmiHeader.biHeight==m_para->height) m_nv12.state=2; + m_nv12.width=pvih1->bmiHeader.biWidth; + m_nv12.height=pvih1->bmiHeader.biHeight; + }else if(IsEqualGUID(pmtConfig->subtype, MEDIASUBTYPE_YV12)){ + if(!m_yv12.state) m_yv12.state=1; + if(pvih1->bmiHeader.biWidth==m_para->width&&pvih1->bmiHeader.biHeight==m_para->height) m_yv12.state=2; + m_yv12.width=pvih1->bmiHeader.biWidth; + m_yv12.height=pvih1->bmiHeader.biHeight; + }else if(IsEqualGUID(pmtConfig->subtype, MEDIASUBTYPE_YUY2)){ + if(!m_yuy2.state) m_yuy2.state=1; + if(pvih1->bmiHeader.biWidth==m_para->width&&pvih1->bmiHeader.biHeight==m_para->height) m_yuy2.state=2; + m_yuy2.width=pvih1->bmiHeader.biWidth; + m_yuy2.height=pvih1->bmiHeader.biHeight; + } + pvih1=NULL; + } + } +} +void YangWinVideoCapture::setRevolution() { + IAMStreamConfig *config1 = 0; + + m_pb->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video, m_videoSrc,IID_IAMStreamConfig, (void**) &config1); + int32_t iCount = 0, iSize = 0; + HRESULT hr = config1->GetNumberOfCapabilities(&iCount, &iSize); +//printf("***********start setRevolution*******************\n"); +// Check the size to make sure we pass in the correct structure. + YangYuvType format=(YangYuvType)m_para->videoCaptureFormat; + + if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS)) { + // Use the video capabilities structure. 
+ + for (int32_t iFormat = 0; iFormat < iCount; iFormat++) { + VIDEO_STREAM_CONFIG_CAPS scc; + AM_MEDIA_TYPE *pmtConfig; + AM_MEDIA_TYPE amt; + hr = config1->GetStreamCaps(iFormat, &pmtConfig, (BYTE*) &scc); + VIDEOINFOHEADER* pvih1=(VIDEOINFOHEADER*)pmtConfig->pbFormat; + + if (IsEqualGUID(pmtConfig->subtype,getUID())&&pvih1->bmiHeader.biWidth==m_para->width&&pvih1->bmiHeader.biHeight==m_para->height) { + + CopyMediaType(&amt, pmtConfig); + VIDEOINFOHEADER *pvih = (VIDEOINFOHEADER*) amt.pbFormat; + BITMAPINFOHEADER bmi = pvih->bmiHeader; + pvih->bmiHeader.biWidth = m_para->width; + pvih->bmiHeader.biHeight = m_para->height; + pvih->bmiHeader.biBitCount = format==YangYuy2?16:12;//config.Video_Bit_Count; + pvih->bmiHeader.biSizeImage = pvih->bmiHeader.biWidth + * pvih->bmiHeader.biHeight + * pvih->bmiHeader.biBitCount / 8; + yang_info("\nset %d Camera Revolution Sucess!width=%d,height=%d..\n",cameraIndex,m_para->width,m_para->height); + config1->SetFormat(&amt); + pvih = NULL; + //break; + return; + + + } + pvih1=NULL; + } + // + } + //} + //printf("\nCamera %d Error Error!.yuy2 or Nv12 not exists............\n",cameraIndex); + //ExitProcess(0); + +} + + +void YangWinVideoCapture::stopLoop() { + m_isloop = 0; + if(m_pm!=NULL) m_pm->Stop(); +} +void YangWinVideoCapture::setWH(YangVideoCaptureType *pct){ + m_width=pct->width; + m_height=pct->height; + m_para->width=pct->width; + m_para->height=pct->height; + m_para->videoCaptureFormat=pct->yuvType; +} +void YangWinVideoCapture::startLoop() { + if (m_videoSrc == NULL) + return; +/** + DWORD taskIndex(0); + HANDLE hMmTask = AvSetMmThreadCharacteristicsA("Capture", &taskIndex); + if (hMmTask) { + if (FALSE == AvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL)) { + yang_warn( "failed to boost videoCapture-thread using MMCSS"); + } + yang_trace("videoCapture thread is now registered with MMCSS (taskIndex=%d)", + taskIndex ); + } else { + yang_error( "failed to enable MMCSS on videoCapture thread (err=%lu", GetLastError() ); + + } +**/ + IPin *pVideoOut = FindPin(m_videoSrc, PINDIR_OUTPUT); + IPin *pGrabin = FindPin(m_grabberF, PINDIR_INPUT); + IPin *pGrabout = FindPin(m_grabberF, PINDIR_OUTPUT); + IPin *pNullIn = FindPin(m_nullRender, PINDIR_INPUT); + HRESULT hr = m_pg->Connect(pVideoOut, pGrabin); + hr = m_pg->Connect(pGrabout, pNullIn); + yang_release(pVideoOut); + yang_release(pGrabin); + yang_release(pGrabout); + yang_release(pNullIn); + hr = m_grabber->SetBufferSamples(TRUE); + hr = m_grabber->SetOneShot(FALSE); + m_grabber->SetCallback(m_vhandle, 1); + m_pm->Run(); + long eventCode; + m_event->WaitForCompletion(INFINITE, &eventCode); +} +#endif diff --git a/libmetartc3/src/yangcapture/win/YangWinVideoCapture.h b/libmetartc3/src/yangcapture/win/YangWinVideoCapture.h new file mode 100755 index 00000000..659ca71f --- /dev/null +++ b/libmetartc3/src/yangcapture/win/YangWinVideoCapture.h @@ -0,0 +1,90 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGCAPTURE_SRC_YangLivingVideoCaptureWinImpl_H_ +#define YANGCAPTURE_SRC_YangLivingVideoCaptureWinImpl_H_ + +#ifdef _WIN32 + +#include +#include +#include "amstream.h" +#include +#include "qedit.h" + +#include +#include +#include +#include "YangWinVideoCaptureHandle.h" + +struct YangVideoCaptureType{ + YangYuvType yuvType; + int32_t state; + int32_t width; + int32_t height; +}; +class YangWinVideoCapture: public YangMultiVideoCapture { +public: + YangWinVideoCapture(YangVideoInfo *pcontext); + virtual ~YangWinVideoCapture(); + YangWinVideoCaptureHandle *m_vhandle; + int32_t init(); + 
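// init() above builds the classic preview-less DirectShow capture graph;
// reduced to its wiring (COM error checks omitted):
//
//   CoCreateInstance(CLSID_CaptureGraphBuilder2, ...)  -> m_pb
//   CoCreateInstance(CLSID_FilterGraph, ...)           -> m_pg
//   m_pb->SetFiltergraph(m_pg);
//   m_pg->AddFilter(m_videoSrc, ...);     // camera picked from the moniker enum
//   m_pg->AddFilter(m_grabberF, ...);     // SampleGrabber taps the raw frames
//   // startLoop(): source out -> grabber in, grabber out -> NullRenderer in
//   m_grabber->SetCallback(m_vhandle, 1); // 1 selects BufferCB() delivery
//   m_pm->Run();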
void setVideoCaptureStart(); + void setVideoCaptureStop(); + int32_t getVideoCaptureState(); + int32_t getLivingVideoCaptureState(); + int32_t getFilmVideoCaptureState(); + + void setLivingOutVideoBuffer(YangVideoBuffer *pbuf); + void setLivingVideoCaptureStart(); + void setLivingVideoCaptureStop(); + + void setFilmOutVideoBuffer(YangVideoBuffer *pbuf); + void setFilmVideoCaptureStart(); + void setFilmVideoCaptureStop(); + + void setOutVideoBuffer(YangVideoBuffer *pbuf); + void setPreVideoBuffer(YangVideoBuffer *pbuf); + + void initstamp(); + void stopLoop(); + +protected: + void startLoop(); + //long m_difftime(struct timeval *p_start, struct timeval *p_end); + + void setRevolutionPara(); + void setRevolution(); + IPin* FindPin(IBaseFilter *pFilter, PIN_DIRECTION dir); + IPin* GetOutPin(IBaseFilter *pFilter, PIN_DIRECTION pin_dir, int32_t nPin); + IPin* GetInPin(IBaseFilter *pFilter, PIN_DIRECTION pin_dir, int32_t nPin); + + IMediaControl *m_pm; + IGraphBuilder *m_pg; + ICaptureGraphBuilder2 *m_pb; + IBaseFilter *m_videoSrc, *m_grabberF; + ISampleGrabber *m_grabber; + IBaseFilter *m_nullRender; + //IMoniker *m_moniker; + IMediaEventEx *m_event; +private: + void setWH(YangVideoCaptureType *pct); + REFGUID getUID(); + + YangVideoCaptureType m_yuy2, m_i420 , m_nv12, m_yv12, m_p010,m_p016; + int32_t m_width, m_height; + int32_t m_vd_id; + int32_t cameraIndex; + int32_t hasVideo; + int32_t m_preframe, m_isOpAddMinus; + ULONG m_t_time; + int32_t m_buffer_count; + int32_t m_isloop; + int32_t m_isFirstFrame; + //struct timeval m_startTime; + long m_timestatmp; + +}; +#endif +#endif /* YANGCAPTURE_SRC_YangLivingVideoCaptureWinImpl_H_ */ diff --git a/libmetartc3/src/yangcapture/win/YangWinVideoCaptureHandle.cpp b/libmetartc3/src/yangcapture/win/YangWinVideoCaptureHandle.cpp new file mode 100755 index 00000000..144532f9 --- /dev/null +++ b/libmetartc3/src/yangcapture/win/YangWinVideoCaptureHandle.cpp @@ -0,0 +1,56 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangWinVideoCaptureHandle.h" +#ifdef _WIN32 + +#include "time.h" +#include "stdlib.h" +#include "stdio.h" +#include +//using namespace std; + +YangWinVideoCaptureHandle::YangWinVideoCaptureHandle(YangVideoInfo *pcontext):YangVideoCaptureHandle(pcontext) +{ + +} +YangWinVideoCaptureHandle:: ~YangWinVideoCaptureHandle(void){ + +} +STDMETHODIMP_(ULONG) YangWinVideoCaptureHandle::AddRef() { + return 1; +} +STDMETHODIMP_(ULONG) YangWinVideoCaptureHandle::Release() { + return 2; +} + +STDMETHODIMP YangWinVideoCaptureHandle::QueryInterface(REFIID riid, + void **ppvObject) { + // printf("*********************************\n"); + if (NULL == ppvObject) + return E_POINTER; + if (riid == __uuidof(IUnknown)) { + *ppvObject = static_cast(this); + return S_OK; + } + if (riid == IID_ISampleGrabber) { + *ppvObject = static_cast(this); + return S_OK; + } + return E_NOTIMPL; +} + +STDMETHODIMP YangWinVideoCaptureHandle::SampleCB(double Time, + IMediaSample *pSample) { + + return E_NOTIMPL; +} + +STDMETHODIMP YangWinVideoCaptureHandle::BufferCB(double Time, BYTE *pBuffer, + long BufferLen) { + + putBuffer(Time*1000000, pBuffer, BufferLen); + return E_NOTIMPL; +} + +#endif diff --git a/libmetartc3/src/yangcapture/win/YangWinVideoCaptureHandle.h b/libmetartc3/src/yangcapture/win/YangWinVideoCaptureHandle.h new file mode 100755 index 00000000..1d965a5e --- /dev/null +++ b/libmetartc3/src/yangcapture/win/YangWinVideoCaptureHandle.h @@ -0,0 +1,27 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangWinVideoCaptureHandle__ +#define 
__YangWinVideoCaptureHandle__ +#ifdef _WIN32 +#include "qedit.h" +//#include +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/sys/YangIni.h" +#include "../YangVideoCaptureHandle.h" +class YangWinVideoCaptureHandle: public ISampleGrabberCB, + public YangVideoCaptureHandle { +public: + YangWinVideoCaptureHandle(YangVideoInfo *pcontext); + virtual ~YangWinVideoCaptureHandle(void); + + STDMETHODIMP_(ULONG) AddRef(); + STDMETHODIMP_(ULONG) Release(); + STDMETHODIMP QueryInterface(REFIID riid, void **ppvObject); + STDMETHODIMP SampleCB(double Time, IMediaSample *pSample); + STDMETHODIMP BufferCB(double Time, BYTE *pBuffer, long BufferLen); +private: + +}; +#endif +#endif diff --git a/libmetartc3/src/yangdecoder/YangAudioDecoder.cpp b/libmetartc3/src/yangdecoder/YangAudioDecoder.cpp new file mode 100755 index 00000000..0e7166bc --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangAudioDecoder.cpp @@ -0,0 +1,22 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "yangdecoder/YangAudioDecoder.h" +#include +YangAudioDecoder::YangAudioDecoder() { + m_context = NULL; + m_isInit = 0; + m_frameSize = 0; + m_uid=-1; + m_alen=0; + m_dstBuffer=new uint8_t[4096]; + m_dstLen=0; +} + +YangAudioDecoder::~YangAudioDecoder() { + m_context = NULL; + if(m_dstBuffer) delete[] m_dstBuffer; + m_dstBuffer=NULL; +} + diff --git a/libmetartc3/src/yangdecoder/YangAudioDecoderAac.cpp b/libmetartc3/src/yangdecoder/YangAudioDecoderAac.cpp new file mode 100755 index 00000000..dcef7ee5 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangAudioDecoderAac.cpp @@ -0,0 +1,120 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include "YangAudioDecoderAac.h" + +#include "yangutil/yang_unistd.h" +#include "string.h" +#include "memory.h" + +//#define yang_deleteA(a) {if( (a) != NULL ) {delete[] (a); (a) = NULL;}} +void YangAudioDecoderAac::loadLib(){ + //const SpeexMode * (*yang_speex_lib_get_mode) (int32_t mode); + + + yang_NeAACDecOpen=( NeAACDecHandle (*)(void))m_lib.loadFunction("NeAACDecOpen"); + yang_NeAACDecGetCurrentConfiguration=( NeAACDecConfigurationPtr (*)(NeAACDecHandle hDecoder))m_lib.loadFunction("NeAACDecGetCurrentConfiguration"); + yang_NeAACDecSetConfiguration=( uint8_t (*)(NeAACDecHandle hDecoder, + NeAACDecConfigurationPtr config))m_lib.loadFunction("NeAACDecSetConfiguration"); + yang_NeAACDecInit=( long (*)(NeAACDecHandle hDecoder, + uint8_t *buffer, + unsigned long buffer_size, + unsigned long *samplerate, + uint8_t *channels))m_lib.loadFunction("NeAACDecInit"); + yang_NeAACDecDecode=( void* (*)(NeAACDecHandle hDecoder, + NeAACDecFrameInfo *hInfo, + uint8_t *buffer, + unsigned long buffer_size))m_lib.loadFunction("NeAACDecDecode"); + yang_NeAACDecClose=( void (*)(NeAACDecHandle hDecoder))m_lib.loadFunction("NeAACDecClose"); + +} + + +void YangAudioDecoderAac::unloadLib(){ + yang_NeAACDecOpen=NULL; + yang_NeAACDecGetCurrentConfiguration=NULL; + yang_NeAACDecSetConfiguration=NULL; + yang_NeAACDecInit=NULL; + yang_NeAACDecDecode=NULL; + yang_NeAACDecClose=NULL; + + +} +YangAudioDecoderAac::YangAudioDecoderAac(YangAudioParam *pcontext) { + m_context=pcontext; + m_samplerate = 44100; + m_channel = 2; + m_bufLen = 4096; + isFirst = 1; + isConvert = 0; + m_handle = NULL; + m_buffer = new uint8_t[4096]; + temp = NULL; + unloadLib(); +} + +YangAudioDecoderAac::~YangAudioDecoderAac() { + + closeAacdec(); + temp = NULL; + m_handle = NULL; + yang_deleteA(m_buffer); + unloadLib(); + m_lib.unloadObject(); +} + +void YangAudioDecoderAac::init() { + //yap->init(); + if(m_isInit) return; + 
m_lib.loadObject("libfaad"); + loadLib(); + if (m_handle == NULL) + m_handle = yang_NeAACDecOpen(); + if (!m_handle) { + printf("NeAACDecOpen failed\n"); + // goto error; + } + NeAACDecConfigurationPtr conf = yang_NeAACDecGetCurrentConfiguration(m_handle); + if (!conf) { + printf("NeAACDecGetCurrentConfiguration failed\n"); + // goto error; + } + conf->defObjectType = LC; + conf->defSampleRate = 44100; + + conf->outputFormat = FAAD_FMT_16BIT; + conf->dontUpSampleImplicitSBR = 1; + + yang_NeAACDecSetConfiguration(m_handle, conf); + m_alen=4096; + m_isInit=1; +} + + +int32_t YangAudioDecoderAac::decode(YangFrame* pframe,YangDecoderCallback* pcallback){ + if (isFirst&&m_handle) { + long res = yang_NeAACDecInit(m_handle, m_buffer, m_bufLen, &m_samplerate,&m_channel); + isFirst = 0; + } + + if(m_handle) temp = (uint8_t *) yang_NeAACDecDecode(m_handle, &m_info, pframe->payload, pframe->nb); + + if (temp&&pcallback){ + pframe->payload=temp; + pframe->nb=4096; + pcallback->onAudioData(pframe); + } + return Yang_Ok; + +} + +void YangAudioDecoderAac::closeAacdec() { + if (m_handle) { + yang_NeAACDecClose(m_handle); + } + m_handle = NULL; + // yap->close(); + +} + diff --git a/libmetartc3/src/yangdecoder/YangAudioDecoderAac.h b/libmetartc3/src/yangdecoder/YangAudioDecoderAac.h new file mode 100755 index 00000000..31614a82 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangAudioDecoderAac.h @@ -0,0 +1,62 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef AACDECODER_H +#define AACDECODER_H +#include +#include +#include + +#include "faad.h" +#include + +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/sys/YangLoadLib.h" +#include "yangdecoder/YangAudioDecoder.h" +using namespace std; +class YangAudioDecoderAac :public YangAudioDecoder +{ +public: + YangAudioDecoderAac(YangAudioParam *pcontext); + ~YangAudioDecoderAac(); + void init(); + int32_t decode(YangFrame* pframe,YangDecoderCallback* pcallback); + + +protected: + +private: + + void closeAacdec(); + int32_t isConvert; + uint8_t *temp; + NeAACDecHandle m_handle; + + int32_t isFirst; + NeAACDecFrameInfo m_info; + unsigned long m_samplerate; + uint8_t m_channel; + unsigned long m_bufLen; + uint8_t *m_buffer; + YangLoadLib m_lib; + void loadLib(); + void unloadLib(); + NeAACDecHandle (*yang_NeAACDecOpen)(void); + NeAACDecConfigurationPtr (*yang_NeAACDecGetCurrentConfiguration)(NeAACDecHandle hDecoder); + uint8_t (*yang_NeAACDecSetConfiguration)(NeAACDecHandle hDecoder, + NeAACDecConfigurationPtr config); + long (*yang_NeAACDecInit)(NeAACDecHandle hDecoder, + uint8_t *buffer, + unsigned long buffer_size, + unsigned long *samplerate, + uint8_t *channels); + void* (*yang_NeAACDecDecode)(NeAACDecHandle hDecoder, + NeAACDecFrameInfo *hInfo, + uint8_t *buffer, + unsigned long buffer_size); + void (*yang_NeAACDecClose)(NeAACDecHandle hDecoder); + + +}; + +#endif // AACDECODER_H diff --git a/libmetartc3/src/yangdecoder/YangAudioDecoderHandle.cpp b/libmetartc3/src/yangdecoder/YangAudioDecoderHandle.cpp new file mode 100755 index 00000000..468fb701 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangAudioDecoderHandle.cpp @@ -0,0 +1,171 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "malloc.h" +#include +#include +#include +#include +#include "stdlib.h" + +#include "yangdecoder/YangDecoderFactory.h" + +YangAudioDecoderHandle::YangAudioDecoderHandle(YangContext *pcontext) { + m_context=pcontext; + m_context->streams.setDecoderMediaConfigCallback(this); + m_isInit = 0; + m_isStart = 0; + m_isConvert = 1; + 
m_in_audioBuffer = NULL;
+    m_decs = NULL;
+    m_out_audioBuffer = NULL;
+    m_buf=NULL;
+    m_size=0;
+    m_is44100=false;
+    m_param = new YangAudioParam();
+    m_param->encode = (YangAudioCodec) pcontext->avinfo.audio.audioDecoderType;
+    m_param->sample = pcontext->avinfo.audio.sample;
+    m_param->channel = pcontext->avinfo.audio.channel;
+
+    m_is44100=(m_param->sample==44100);
+    memset(&m_audioFrame,0,sizeof(YangFrame));
+}
+
+
+YangAudioDecoderHandle::~YangAudioDecoderHandle(void) {
+    if (m_isConvert) {
+        stop();
+        while (m_isStart) {
+            yang_usleep(1000);
+        }
+    }
+    m_context = NULL;
+    m_in_audioBuffer = NULL;
+    m_out_audioBuffer = NULL;
+
+    if (m_decs) {
+
+        delete m_decs;
+        m_decs = NULL;
+
+    }
+    yang_deleteA(m_buf);
+
+}
+
+void YangAudioDecoderHandle::stop() {
+    stopLoop();
+}
+
+void YangAudioDecoderHandle::run() {
+    m_isStart = 1;
+    startLoop();
+    m_isStart = 0;
+}
+
+YangAudioPlayBuffer* YangAudioDecoderHandle::getOutAudioBuffer() {
+
+    return m_out_audioBuffer;
+}
+
+void YangAudioDecoderHandle::init() {
+    m_isInit = 1;
+}
+void YangAudioDecoderHandle::setRemoteParam(YangAudioParam *para) {
+    m_param->encode = para->encode;
+    m_param->sample = para->sample;
+    m_param->channel = para->channel;
+    m_is44100=(m_param->sample==44100);
+
+}
+
+void YangAudioDecoderHandle::setMediaConfig(int32_t puid, YangAudioParam *para,
+        YangVideoParam *video) {
+    m_param->encode = para->encode;
+    m_param->sample = para->sample;
+    m_param->channel = para->channel;
+    m_is44100=(m_param->sample==44100);
+
+}
+void YangAudioDecoderHandle::setInAudioBuffer(YangAudioEncoderBuffer *pbuf) {
+    m_in_audioBuffer = pbuf;
+}
+void YangAudioDecoderHandle::setOutAudioBuffer(YangAudioPlayBuffer *pbuf) {
+    m_out_audioBuffer = pbuf;
+    if(m_context&&m_context->streams.m_playBuffer) m_context->streams.m_playBuffer->setInAudioBuffer(pbuf);
+}
+
+void YangAudioDecoderHandle::onAudioData(YangFrame *pframe) {
+
+    if (m_out_audioBuffer){
+        if(m_is44100){
+            if(m_buf==NULL) {
+                m_buf=new uint8_t[1024*10];
+                m_size=0;
+
+            }
+            //append after any remainder kept from the previous frame
+            memcpy(m_buf+m_size,pframe->payload,pframe->nb);
+            m_size+=pframe->nb;
+            int indexs=0;
+            int len=882<<2;//20ms of 44100Hz 16bit stereo pcm
+            while(m_size>=len){
+                pframe->payload=m_buf+indexs;
+                pframe->nb=len;
+                m_out_audioBuffer->putAudio(pframe);
+                indexs+=len;
+                m_size-=len;
+            }
+            if(indexs>0&&m_size>0) memmove(m_buf,m_buf+indexs,m_size);
+        }else{
+
+            m_out_audioBuffer->putAudio(pframe);
+        }
+    }
+
+}
+void YangAudioDecoderHandle::onVideoData(YangFrame *pframe) {
+
+}
+void YangAudioDecoderHandle::startLoop() {
+    m_isConvert = 1;
+    uint8_t srcAudioSource[600];
+    int32_t audioCacheSize = m_context->avinfo.audio.audioPlayCacheNum;
+    YangDecoderFactory ydf;
+    YangFrame audioFrame;
+    memset(&audioFrame,0,sizeof(YangFrame));
+    while (m_isConvert == 1) {
+        if (!m_in_audioBuffer) {
+            yang_usleep(1000);
+            continue;
+        }
+        if (m_in_audioBuffer->size() == 0) {
+            yang_usleep(200);
+            continue;
+        }
+        audioFrame.payload = srcAudioSource;
+        m_in_audioBuffer->getPlayAudio(&audioFrame);
+
+        if (!m_decs) {
+            m_decs = ydf.createAudioDecoder(m_param);
+
+            m_decs->m_uid = audioFrame.uid;
+            m_decs->init();
+        }
+
+        if (m_decs){
+            if(m_decs->decode(&audioFrame, this)){
+                yang_error("decode audio fail..");
+            }
+        }
+
+        if (m_in_audioBuffer && m_in_audioBuffer->size() > audioCacheSize)
+            m_in_audioBuffer->resetIndex();
+
+    } //end
+
+}
+
+void YangAudioDecoderHandle::stopLoop() {
+    m_isConvert = 0;
+
+}
diff --git a/libmetartc3/src/yangdecoder/YangAudioDecoderHandles.cpp b/libmetartc3/src/yangdecoder/YangAudioDecoderHandles.cpp
new file mode 100755
index 00000000..99638b2c
--- /dev/null
+++ b/libmetartc3/src/yangdecoder/YangAudioDecoderHandles.cpp
@@ -0,0 +1,251 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "malloc.h"
+#include
+#include
+#include
+#include
+#include "stdlib.h"
+
+#include "yangdecoder/YangDecoderFactory.h"
+
+YangAudioDecoderHandles::YangAudioDecoderHandles(YangContext *pcontext) {
+    m_isInit = 0;
+    m_isStart = 0;
+    m_isConvert = 1;
+    m_in_audioBuffer = NULL;
+    m_decs = new vector<YangAudioDecoder*>();
+    m_out_audioBuffer = NULL;
+    m_context = pcontext;
+    m_frameSize=1024;
+    m_channel=2;
+}
+
+
+YangAudioDecoderHandles::~YangAudioDecoderHandles(void) {
+    if (m_isConvert) {
+        stop();
+        while (m_isStart) {
+            yang_usleep(1000);
+        }
+    }
+    m_context = NULL;
+    m_in_audioBuffer = NULL;
+
+    size_t i = 0;
+    if (m_out_audioBuffer && m_out_audioBuffer->size() > 0) {
+        for (i = 0; i < m_out_audioBuffer->size(); i++) {yang_delete( m_out_audioBuffer->at(i)); }
+
+        m_out_audioBuffer->clear();
+
+    }
+    m_out_audioBuffer = NULL;
+    if (m_decs) {
+        for (i = 0; i < m_decs->size(); i++) {yang_delete( m_decs->at(i)); }
+
+        m_decs->clear();
+        delete m_decs;
+        m_decs = NULL;
+
+    }
+
+    for(std::map<int32_t,YangAudioParam*>::iterator it = m_paramMap.begin(); it != m_paramMap.end(); ++it) {
+        yang_delete(it->second);
+    }
+    m_paramMap.clear();
+
+}
+void YangAudioDecoderHandles::stop() {
+    stopLoop();
+}
+
+void YangAudioDecoderHandles::run() {
+    m_isStart = 1;
+    startLoop();
+    m_isStart = 0;
+}
+
+void YangAudioDecoderHandles::setRemoteParam(int32_t puid,YangAudioParam* para){
+
+    map<int32_t,YangAudioParam*>::iterator iter;
+    iter=m_paramMap.find(puid);
+    YangAudioParam* t_param=NULL;
+    if(iter == m_paramMap.end()){
+        t_param=new YangAudioParam();
+        m_paramMap[puid]=t_param;
+    }else{
+        t_param=iter->second;
+    }
+    t_param->encode=para->encode;
+    t_param->sample=para->sample;
+    t_param->channel=para->channel;
+
+}
+
+int32_t YangAudioDecoderHandles::getDecoderIndex(int32_t puid) {
+    for (int32_t i = 0; i < (int) m_decs->size(); i++) {
+        if (m_decs->at(i)->m_uid == puid)
+            return i;
+    }
+    return -1;
+}
+void YangAudioDecoderHandles::removeAllStream() {
+    int32_t i = 0;
+    if (m_decs && m_decs->size() > 0) {
+        for (i = 0; i < (int) m_decs->size(); i++) {
+            delete m_decs->at(i);
+            m_decs->at(i) = NULL;
+        }
+        m_decs->clear();
+    }
+    if (m_out_audioBuffer && m_out_audioBuffer->size() > 0) {
+        for (i = 0; i < (int) m_out_audioBuffer->size(); i++) {
+            delete m_out_audioBuffer->at(i);
+            m_out_audioBuffer->at(i) = NULL;
+        }
+        m_out_audioBuffer->clear();
+    }
+
+    for (map<int32_t,YangAudioParam*>::iterator it = m_paramMap.begin(); it != m_paramMap.end();it++) {
+        yang_delete(it->second);
+    }
+    m_paramMap.clear();
+}
+void YangAudioDecoderHandles::removeAudioStream(int32_t puid) {
+    int32_t i = 0;
+    if (m_decs && m_decs->size() > 0) {
+        for (i = 0; i < (int) m_decs->size(); i++) {
+            if (m_decs->at(i)->m_uid == puid) {
+                delete m_decs->at(i);
+                m_decs->at(i) = NULL;
+                m_decs->erase(m_decs->begin() + i);
+                break;
+            }
+
+        }
+    }
+    if (m_out_audioBuffer && m_out_audioBuffer->size() > 0) {
+        for (i = 0; i < (int) m_out_audioBuffer->size(); i++) {
+            if (m_out_audioBuffer->at(i)->m_uid == puid) {
+                delete m_out_audioBuffer->at(i);
+                m_out_audioBuffer->at(i) = NULL;
+                m_out_audioBuffer->erase(m_out_audioBuffer->begin() + i);
+                return;
+            }
+        }
+
+    }
+    map<int32_t,YangAudioParam*>::iterator it = m_paramMap.find(puid);
+    if(it!=m_paramMap.end()){
+        yang_delete(it->second);
+    }
+    m_paramMap.erase(puid);
+    // m_paramMap.clear();
+
+}
+
+YangAudioDecoder* YangAudioDecoderHandles::getDecoder(int32_t puid) {
+    for (int32_t i = 0; i < (int) m_decs->size(); i++) {
+        if (m_decs->at(i)->m_uid == puid)
+            return m_decs->at(i);
+    }
+    return NULL;
+}
+
+YangAudioPlayBuffer* YangAudioDecoderHandles::getAudioBuffer(int32_t puid) {
+    for (int32_t i = 0; i < (int) m_out_audioBuffer->size(); i++) {
+        if (m_out_audioBuffer->at(i)->m_uid == puid)
+            return m_out_audioBuffer->at(i);
+    }
+    return NULL;
+}
+
+void YangAudioDecoderHandles::init() {
+    m_isInit = 1;
+}
+
+void YangAudioDecoderHandles::setInAudioBuffer(YangAudioEncoderBuffer *pbuf) {
+    m_in_audioBuffer = pbuf;
+}
+void YangAudioDecoderHandles::setOutAudioBuffer(vector<YangAudioPlayBuffer*> *pbuf) {
+    m_out_audioBuffer = pbuf;
+}
+void YangAudioDecoderHandles::onAudioData(YangFrame* pframe){
+    YangAudioPlayBuffer *t_vb = getAudioBuffer(pframe->uid);
+    if (!t_vb) {
+        m_out_audioBuffer->push_back(new YangAudioPlayBuffer());
+        m_out_audioBuffer->back()->m_uid = pframe->uid;
+        t_vb = m_out_audioBuffer->back();
+        int ind=0;
+        if(m_context&&(ind=m_context->streams.getIndex(pframe->uid))>-1){
+            m_context->streams.m_playBuffers->at(ind)->setInAudioBuffer(t_vb);
+            m_context->streams.m_playBuffers->at(ind)->setAudioClock(m_context->streams.getAudioClock());
+        }
+    }
+    if (t_vb) {
+        t_vb->putAudio(pframe);
+    }
+    t_vb=NULL;
+}
+void YangAudioDecoderHandles::onVideoData(YangFrame* pframe){
+
+}
+
+void YangAudioDecoderHandles::startLoop() {
+    m_isConvert = 1;
+    uint8_t srcAudioSource[600];
+    int32_t audioCacheSize = m_context->avinfo.audio.audioPlayCacheNum;
+    YangAudioDecoder *t_decoder = NULL;
+    YangDecoderFactory ydf;
+    YangFrame audioFrame;
+    memset(&audioFrame,0,sizeof(YangFrame));
+    while (m_isConvert == 1) {
+        if (!m_in_audioBuffer) {
+            yang_usleep(1000);
+            continue;
+        }
+        if (m_in_audioBuffer->size() == 0) {
+            yang_usleep(200);
+            continue;
+        }
+        audioFrame.payload=srcAudioSource;
+        m_in_audioBuffer->getPlayAudio(&audioFrame);
+        t_decoder = getDecoder(audioFrame.uid);
+
+        if (!t_decoder) {
+            map<int32_t,YangAudioParam*>::iterator iter;
+            iter=m_paramMap.find(audioFrame.uid);
+            if(iter != m_paramMap.end()){
+                m_decs->push_back(ydf.createAudioDecoder(iter->second));
+            }else{
+                YangAudioParam* param=new YangAudioParam();
+                m_paramMap[audioFrame.uid]=param;
+                param->encode=(YangAudioCodec)m_context->avinfo.audio.audioDecoderType;
+                param->sample=m_context->avinfo.audio.sample;
+                param->channel=m_context->avinfo.audio.channel;
+                m_decs->push_back(ydf.createAudioDecoder(param));
+            }
+
+            t_decoder = m_decs->back();
+            t_decoder->m_uid = audioFrame.uid;
+            t_decoder->init();
+        }
+
+        if (t_decoder){
+            if(t_decoder->decode(&audioFrame,this)){
+                yang_error("decode audio fail..uid==%d",audioFrame.uid);
+            }
+        }
+
+        if (m_in_audioBuffer && m_in_audioBuffer->size() > audioCacheSize)
+            m_in_audioBuffer->resetIndex();
+        t_decoder = NULL;
+    } //end
+
+}
+
+void YangAudioDecoderHandles::stopLoop() {
+    m_isConvert = 0;
+
+}
diff --git a/libmetartc3/src/yangdecoder/YangAudioDecoderOpus.cpp b/libmetartc3/src/yangdecoder/YangAudioDecoderOpus.cpp
new file mode 100755
index 00000000..65b46b52
--- /dev/null
+++ b/libmetartc3/src/yangdecoder/YangAudioDecoderOpus.cpp
@@ -0,0 +1,97 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "YangAudioDecoderOpus.h"
+
+#include "yangutil/yang_unistd.h"
+#include "string.h"
+#include "memory.h"
+#include
+#include
+
+void YangAudioDecoderOpus::loadLib(){
+    yang_opus_decoder_create=(OpusDecoder *(*)( opus_int32 Fs, int32_t channels, int32_t *error))m_lib.loadFunction("opus_decoder_create");
+    yang_opus_decoder_init=(int32_t (*)(OpusDecoder *st,opus_int32 Fs, int32_t channels ))m_lib.loadFunction("opus_decoder_init");
+    yang_opus_decode=(int32_t (*)(OpusDecoder *st,const uint8_t *data, opus_int32 len,opus_int16 *pcm,
+            int32_t frame_size, int32_t decode_fec))m_lib.loadFunction("opus_decode");
+    yang_opus_decoder_ctl=( int32_t (*)(OpusDecoder *st, int32_t request, ...))m_lib.loadFunction("opus_decoder_ctl");
+    yang_opus_decoder_destroy=( void (*)(OpusDecoder *st))m_lib.loadFunction("opus_decoder_destroy");
+    yang_opus_strerror=(const char *(*)(int32_t error))m_lib.loadFunction("opus_strerror");
+}
+
+void YangAudioDecoderOpus::unloadLib(){
+    yang_opus_decoder_create=NULL;
+    yang_opus_decoder_init=NULL;
+    yang_opus_decode=NULL;
+    yang_opus_decoder_ctl=NULL;
+    yang_opus_decoder_destroy=NULL;
+    yang_opus_strerror=NULL;
+}
+
+YangAudioDecoderOpus::YangAudioDecoderOpus(YangAudioParam *pcontext) {
+    m_context=pcontext;
+    isConvert = 0;
+    m_frameSize=1024;
+    ret=0;
+    m_out=NULL;
+    m_output=NULL;
+    m_decoder=NULL;
+    m_sample=pcontext->sample;
+    m_channel = pcontext->channel;
+    m_isMono= (m_channel==1);
+    m_frameSize=m_sample/50;//one 20ms frame
+    unloadLib();
+}
+
+YangAudioDecoderOpus::~YangAudioDecoderOpus() {
+    yang_deleteA(m_out);
+    yang_delete(m_output);
+    closedec();
+    unloadLib();
+    m_lib.unloadObject();
+}
+
+#define MAX_FRAME_SIZE 8192
+void YangAudioDecoderOpus::init() {
+    if(m_isInit) return;
+    m_lib.loadObject("libopus");
+    loadLib();
+
+    int32_t err=0;
+    m_decoder=yang_opus_decoder_create(m_sample, m_channel, &err);
+    if (err<0)
+    {
+        fprintf(stderr, "failed to create a decoder: %s\n", yang_opus_strerror(err));
+        _exit(0);
+        // return EXIT_FAILURE;
+    }
+    m_out=new short[MAX_FRAME_SIZE*m_channel];
+    m_output=new uint8_t[m_frameSize*m_channel*2];
+    m_alen=m_frameSize*m_channel*2;
+    m_isInit=1;
+
+}
+
+int32_t YangAudioDecoderOpus::decode(YangFrame* pframe,YangDecoderCallback* pcallback){
+    if(!m_decoder) return 1;
+    ret=yang_opus_decode(m_decoder, pframe->payload, pframe->nb, m_out, MAX_FRAME_SIZE, 0);
+
+    if(ret==m_frameSize&&pcallback){
+        //pack the decoded 16bit samples into little-endian bytes
+        for(int32_t i=0;i<ret*m_channel;i++){
+            m_output[2*i]=m_out[i]&0xFF;
+            m_output[2*i+1]=(m_out[i]>>8)&0xFF;
+        }
+        pframe->payload=m_output;
+        pframe->nb=ret*2*m_channel;
+        pcallback->onAudioData(pframe);
+        return Yang_Ok;
+    }
+    return 1;
+}
+
+void YangAudioDecoderOpus::closedec() {
+    if(m_decoder) yang_opus_decoder_destroy(m_decoder);
+    m_decoder=NULL;
+
+}
diff --git a/libmetartc3/src/yangdecoder/YangAudioDecoderOpus.h b/libmetartc3/src/yangdecoder/YangAudioDecoderOpus.h
new file mode 100755
index 00000000..a608ecaa
--- /dev/null
+++ b/libmetartc3/src/yangdecoder/YangAudioDecoderOpus.h
@@ -0,0 +1,42 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#ifndef YANGDECODER_SRC_YANGAUDIODECODEROPUS_H_
+#define YANGDECODER_SRC_YANGAUDIODECODEROPUS_H_
+#include
+#include "opus/opus.h"
+#include "yangdecoder/YangAudioDecoder.h"
+
+class YangAudioDecoderOpus:public YangAudioDecoder {
+public:
+    YangAudioDecoderOpus(YangAudioParam *pcontext);
+    virtual ~YangAudioDecoderOpus();
+    void init();
+    int32_t decode(YangFrame* pframe,YangDecoderCallback* pcallback);
+private:
+
+    void closedec();
+    int32_t isConvert;
+    OpusDecoder *m_decoder;
+
+
+    int32_t m_isMono;
+    int32_t ret;
+    int32_t m_sample;
+    int32_t m_channel;
+    short *m_out;
+    uint8_t *m_output;
+
+    YangLoadLib m_lib;
+    void loadLib();
+    void unloadLib();
+    OpusDecoder *(*yang_opus_decoder_create)( opus_int32 Fs, int32_t channels, int32_t *error);
+    int32_t (*yang_opus_decoder_init)(OpusDecoder *st,opus_int32 Fs, int32_t channels );
+    int32_t (*yang_opus_decode)(OpusDecoder *st,const uint8_t *data, opus_int32 len,opus_int16 *pcm,
+            int32_t frame_size, int32_t decode_fec);
+    int32_t (*yang_opus_decoder_ctl)(OpusDecoder *st, int32_t request, ...);
+    void 
(*yang_opus_decoder_destroy)(OpusDecoder *st); + const char *(*yang_opus_strerror)(int32_t error); +}; + +#endif /* YANGDECODER_SRC_YANGAUDIODECODEROPUS_H_ */ diff --git a/libmetartc3/src/yangdecoder/YangAudioDecoderSpeex.cpp b/libmetartc3/src/yangdecoder/YangAudioDecoderSpeex.cpp new file mode 100755 index 00000000..1e6e6fb2 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangAudioDecoderSpeex.cpp @@ -0,0 +1,173 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "YangAudioDecoderSpeex.h" + +#include "yangutil/yang_unistd.h" +#include "string.h" +#include "memory.h" + +void YangAudioDecoderSpeex::loadLib(){ + yang_speex_lib_get_mode=(const SpeexMode * (*) (int32_t mode))m_lib.loadFunction("speex_lib_get_mode"); + yang_speex_decoder_ctl=(int32_t (*)(void *state, int32_t request, void *ptr))m_lib.loadFunction("speex_decoder_ctl"); + yang_speex_decoder_init=(void* (*)(const SpeexMode *mode))m_lib.loadFunction("speex_decoder_init"); + yang_speex_bits_init=(void (*)(SpeexBits *bits))m_lib.loadFunction("speex_bits_init"); + yang_speex_bits_reset=(void (*)(SpeexBits *bits))m_lib.loadFunction("speex_bits_reset"); + yang_speex_bits_read_from=(int32_t (*)(SpeexBits *bits, const char *bytes, int32_t len))m_lib.loadFunction("speex_bits_read_from"); + yang_speex_decode_int=(int32_t (*)(void *state, SpeexBits *bits, spx_int16_t *out))m_lib.loadFunction("speex_decode_int"); + yang_speex_decoder_destroy=(void (*)(void *state))m_lib.loadFunction("speex_decoder_destroy"); + yang_speex_bits_destroy=(void (*)(SpeexBits *bits))m_lib.loadFunction("speex_bits_destroy"); +} + +void YangAudioDecoderSpeex::unloadLib(){ + yang_speex_lib_get_mode=NULL; + yang_speex_decoder_init=NULL; + yang_speex_bits_init=NULL; + yang_speex_bits_reset=NULL; + yang_speex_bits_read_from=NULL; + yang_speex_decode_int=NULL; + yang_speex_decoder_destroy=NULL; + yang_speex_bits_destroy=NULL; +} + +YangAudioDecoderSpeex::YangAudioDecoderSpeex(YangAudioParam *pcontext) { + m_context=pcontext; + //m_samplerate = 44100; + m_channel = 1; + //m_bufLen = 4096; + //m_tlen = 0; + //isFirst = 1; + //m_index = 0; + isConvert = 0; + m_frameSize=320; + //m_handle = NULL; + //m_buffer = new uint8_t[4096]; + //audioList = NULL; + // conf=NULL; + //temp = NULL; + //m_mode=speex_wb_mode; + ///memset(srcAudioSource, 0, sizeof(srcAudioSource)); + + //printf("\n*************************YangAudioDecoderSpeex Speex is starting.....\n"); + + m_quality=8; + m_state=NULL; + ret=0; + m_out=0; + m_bits=NULL; + unloadLib(); + +} + +YangAudioDecoderSpeex::~YangAudioDecoderSpeex() { + yang_deleteA(m_out); + //yang_delete(m_bits); + closedec(); + unloadLib(); + m_lib.unloadObject(); + //temp = NULL; + //m_handle = NULL; + //yang_deleteA(m_buffer); +} +void YangAudioDecoderSpeex::initSpeexPara(){ + //if(m_mode==speex_nb_mode){ + // m_quality=6; + // m_frameSize=160; + //} + //if(m_mode==speex_wb_mode){ + //m_quality=8; + //m_frameSize=320; + // } +// if(m_mode==speex_uwb_mode){ + // m_quality=10; + //m_frameSize=640; + // } +} +void YangAudioDecoderSpeex::init() { + //yap->init(); + if(m_isInit) return; + m_lib.loadObject("libspeex"); + loadLib(); + m_bits=new SpeexBits(); + m_state = yang_speex_decoder_init(yang_speex_lib_get_mode(SPEEX_MODEID_WB));//speex_wb_mode &speex_nb_mode); + initSpeexPara(); + m_quality=10; + yang_speex_decoder_ctl(m_state, SPEEX_SET_QUALITY, &m_quality); + // int32_t tmp=1; + m_frameSize=320; + // speex_decoder_ctl(m_state, SPEEX_SET_ENH, &tmp); + yang_speex_bits_init(m_bits); + // callback.callback_id = SPEEX_INBAND_CHAR; + // 
callback.func = speex_std_char_handler; + // callback.data = stderr; + // speex_decoder_ctl(m_state, SPEEX_SET_HANDLER, &callback); + // tmp=1; + // speex_decoder_ctl(m_state, SPEEX_GET_LOOKAHEAD, &tmp); + + + // speex_decoder_ctl(m_state, SPEEX_GET_FRAME_SIZE, &m_frameSize); + // m_output=new float[m_frameSize]; + m_out=new short[m_frameSize]; + m_alen=640; + m_isInit=1; + +} + +//void YangAudioDecoderSpeex::decode(uint8_t *pout,uint8_t *pData, int32_t *outLen, unsigned long nLen, int32_t puid) { +int32_t YangAudioDecoderSpeex::decode(YangFrame* pframe,YangDecoderCallback* pcallback){ + //printf("m%d,",m_state); +if(!m_state) return 1; + yang_speex_bits_read_from(m_bits, (const char*)pframe->payload, pframe->nb); + ret=yang_speex_decode_int(m_state, m_bits, m_out); + + if(!ret&&pcallback){ + pframe->payload=(uint8_t*)m_out; + pframe->nb=640; + pcallback->onAudioData(pframe); + return Yang_Ok; + } + return 1; +} + +void YangAudioDecoderSpeex::closedec() { + if(m_state) yang_speex_decoder_destroy(m_state); + m_state=NULL; + if(m_bits) yang_speex_bits_destroy(m_bits); + m_bits=NULL; +} +/** +int32_t YangAudioDecoderSpeex::getIndex(int32_t puid){ + for(int32_t i=0;isize();i++){ + if(m_out_audioBuffer->at(i)->m_uid==puid){ + return i; + } + } + + return -1; +} +void YangAudioDecoderSpeex::startLoop() { + int32_t audioBufLen = 0; + isConvert = 1; + int32_t t_uid = 0; + //int32_t t_size=0; + //m_out_audioBuffer->push_back(new YangAudioPlayBuffer(4096)); + //printf("\n*************************decode Speex is starting.....\n"); + int32_t audioCacheSize=m_context->audioPlayCacheNum; + while (isConvert == 1) { + + if (m_in_audioBuffer->size() == 0) { + yang_usleep(200); + continue; + } + + m_in_audioBuffer->getPlayAudio(srcAudioSource, &audioBufLen, &t_uid); + decode(srcAudioSource, audioBufLen, t_uid); + if(m_in_audioBuffer->size()>audioCacheSize) m_in_audioBuffer->resetIndex(); + } //end + +} +void YangAudioDecoderSpeex::stopLoop(){ + isConvert=0; +} +**/ diff --git a/libmetartc3/src/yangdecoder/YangAudioDecoderSpeex.h b/libmetartc3/src/yangdecoder/YangAudioDecoderSpeex.h new file mode 100755 index 00000000..8d9ea0fd --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangAudioDecoderSpeex.h @@ -0,0 +1,48 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGDECODER_SRC_YANGAUDIODECODERSPEEX_H_ +#define YANGDECODER_SRC_YANGAUDIODECODERSPEEX_H_ +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include + +#include "yangutil/sys/YangLoadLib.h" +#include "yangdecoder/YangAudioDecoder.h" + + +class YangAudioDecoderSpeex :public YangAudioDecoder{ +public: + YangAudioDecoderSpeex(YangAudioParam *pcontext); + virtual ~YangAudioDecoderSpeex(); + void init(); + int32_t decode(YangFrame* pframe,YangDecoderCallback* pcallback); +private: + void closedec(); + int32_t isConvert; + SpeexBits *m_bits; + //SpeexMode m_mode; + //SpeexCallback callback; + YangLoadLib m_lib; + void initSpeexPara(); + int32_t m_quality; + int32_t ret; + void* m_state; + + int32_t m_channel; + short *m_out;//[m_frameSize]; + + void loadLib(); + void unloadLib(); + const SpeexMode * (*yang_speex_lib_get_mode) (int32_t mode); + void *(*yang_speex_decoder_init)(const SpeexMode *mode); + void (*yang_speex_bits_init)(SpeexBits *bits); + void (*yang_speex_bits_reset)(SpeexBits *bits); + int32_t (*yang_speex_bits_read_from)(SpeexBits *bits, const char *bytes, int32_t len); + int32_t (*yang_speex_decode_int)(void *state, SpeexBits *bits, spx_int16_t *out); + void (*yang_speex_decoder_destroy)(void *state); + void 
(*yang_speex_bits_destroy)(SpeexBits *bits); + int32_t (*yang_speex_decoder_ctl)(void *state, int32_t request, void *ptr); +}; + +#endif /* YANGDECODER_SRC_YANGAUDIODECODERSPEEX_H_ */ diff --git a/libmetartc3/src/yangdecoder/YangDecoderFactory.cpp b/libmetartc3/src/yangdecoder/YangDecoderFactory.cpp new file mode 100755 index 00000000..fe88e18b --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangDecoderFactory.cpp @@ -0,0 +1,66 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "yangdecoder/YangDecoderFactory.h" + +#include "YangAudioDecoderAac.h" +//#include "YangAudioDecoderMp3.h" +#include "YangAudioDecoderOpus.h" +#include "YangAudioDecoderSpeex.h" +#include "YangVideoDecoderIntel.h" +#include "YangH2645VideoDecoderFfmpeg.h" +#include "YangH264DecoderSoft.h" +#include "YangH264DecoderSoftFactory.h" +YangDecoderFactory::YangDecoderFactory() { + + +} + +YangDecoderFactory::~YangDecoderFactory() { + +} + +YangAudioDecoder *YangDecoderFactory::createAudioDecoder(YangAudioCodec paet,YangAudioParam *pcontext){ + //if(paet==Yang_AED_MP3) return new YangAudioDecoderMp3(pcontext); + if(paet==Yang_AED_SPEEX) return new YangAudioDecoderSpeex(pcontext); + if(paet==Yang_AED_OPUS) return new YangAudioDecoderOpus(pcontext); + return new YangAudioDecoderAac(pcontext); +} + +YangAudioDecoder *YangDecoderFactory::createAudioDecoder(YangAudioParam *pcontext){ + + + return createAudioDecoder(pcontext->encode,pcontext); +} + +YangVideoDecoder* YangDecoderFactory::createFfmpegVideoDecoder(YangVideoCodec paet,YangVideoInfo *pcontext){ + return new YangH2645VideoDecoderFfmpeg(pcontext,paet); +} +YangVideoDecoder* YangDecoderFactory::createVideoDecoder(YangVideoCodec paet,YangVideoInfo *pcontext){ + if (paet == Yang_VED_264) { + if(pcontext->videoDecHwType==0){ +#if Yang_Using_H264Decoder + return new YangH264DecoderSoft(); +#else + return new YangH2645VideoDecoderFfmpeg(pcontext,paet); +#endif + }else + return new YangH2645VideoDecoderFfmpeg(pcontext,paet); + } + if (paet == Yang_VED_265) return new YangH2645VideoDecoderFfmpeg(pcontext,paet); + //de264=new YangVideoDecoderIntel(); + // if(p_decType==Type_Nvdia) de264= new YangH264DecoderNv(); +#if YangLibva + return new YangVideoDecoderIntel(); +#else + return NULL; +#endif +} + YangVideoDecoder* YangDecoderFactory::createVideoDecoder(YangVideoInfo *pcontext){ + YangVideoCodec maet=Yang_VED_264; + if(pcontext->videoDecoderType==Yang_VED_265) maet=Yang_VED_265; + //if(pcontext->videoDecoderType==1) maet=Type_Dec_Ffmpeg; + //if(pcontext->audioDecoderType==2) maet=Yang_AE_SPEEX; + return createVideoDecoder(maet,pcontext); + } diff --git a/libmetartc3/src/yangdecoder/YangH2645VideoDecoderFfmpeg.cpp b/libmetartc3/src/yangdecoder/YangH2645VideoDecoderFfmpeg.cpp new file mode 100755 index 00000000..67a3d646 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangH2645VideoDecoderFfmpeg.cpp @@ -0,0 +1,491 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "YangH2645VideoDecoderFfmpeg.h" +#include +#include +#include "stdlib.h" +#ifdef _WIN32 +#include "YangH264Header.h" +#endif +#if Yang_Ffmpeg_UsingSo +void YangH2645VideoDecoderFfmpeg::loadLib() { + + yang_av_frame_alloc = (AVFrame* (*)(void)) m_lib1.loadFunction( + "av_frame_alloc"); + yang_av_buffer_unref = (void (*)(AVBufferRef **buf)) m_lib1.loadFunction( + "av_buffer_unref"); + yang_av_hwframe_ctx_init = (int32_t (*)(AVBufferRef *ref)) m_lib1.loadFunction( + "av_hwframe_ctx_init"); + yang_av_image_get_buffer_size = (int32_t (*)(enum AVPixelFormat pix_fmt, + int32_t width, int32_t height, 
int32_t align)) m_lib1.loadFunction( + "av_image_get_buffer_size"); + yang_av_hwdevice_ctx_create = (int32_t (*)(AVBufferRef **device_ctx, + enum AVHWDeviceType type, const char *device, AVDictionary *opts, + int32_t flags)) m_lib1.loadFunction("av_hwdevice_ctx_create"); + yang_av_hwframe_transfer_data = (int32_t (*)(AVFrame *dst, const AVFrame *src, + int32_t flags)) m_lib1.loadFunction("av_hwframe_transfer_data"); + yang_av_free = (void (*)(void *ptr)) m_lib1.loadFunction("av_free"); + yang_av_frame_free = (void (*)(AVFrame **frame)) m_lib1.loadFunction( + "av_frame_free"); + yang_av_buffer_ref = + (AVBufferRef* (*)(AVBufferRef *buf)) m_lib1.loadFunction( + "av_buffer_ref"); + yang_av_image_fill_arrays = (int32_t (*)(uint8_t *dst_data[4], + int32_t dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, + int32_t width, int32_t height, int32_t align)) m_lib1.loadFunction( + "av_image_fill_arrays"); + yang_av_hwframe_ctx_alloc = + (AVBufferRef* (*)(AVBufferRef *device_ctx)) m_lib1.loadFunction( + "av_hwframe_ctx_alloc"); + yang_av_malloc = (void* (*)(size_t size)) m_lib1.loadFunction("av_malloc"); + + yang_avcodec_open2 = (int32_t (*)(AVCodecContext *avctx, const AVCodec *codec, + AVDictionary **options)) m_lib.loadFunction("avcodec_open2"); + yang_av_init_packet = (void (*)(AVPacket *pkt)) m_lib.loadFunction( + "av_init_packet"); + yang_av_hwframe_get_buffer = (int32_t (*)(AVBufferRef *hwframe_ctx, + AVFrame *frame, int32_t flags)) m_lib.loadFunction( + "av_hwframe_get_buffer"); + yang_avcodec_find_decoder = + (AVCodec* (*)(enum AVCodecID id)) m_lib.loadFunction( + "avcodec_find_decoder"); + yang_avcodec_find_decoder_by_name=(AVCodec *(*)(const char *name)) m_lib.loadFunction( + "avcodec_find_decoder_by_name"); + yang_avcodec_alloc_context3 = + (AVCodecContext* (*)(const AVCodec *codec)) m_lib.loadFunction( + "avcodec_alloc_context3"); + yang_avcodec_send_packet = (int32_t (*)(AVCodecContext *avctx, + const AVPacket *avpkt)) m_lib.loadFunction("avcodec_send_packet"); + yang_avcodec_receive_frame = + (int32_t (*)(AVCodecContext *avctx, AVFrame *frame)) m_lib.loadFunction( + "avcodec_receive_frame"); + yang_avcodec_flush_buffers=(void (*)(AVCodecContext *avctx)) m_lib.loadFunction( + "avcodec_flush_buffers"); + yang_avcodec_close = (int32_t (*)(AVCodecContext *avctx)) m_lib.loadFunction( + "avcodec_close"); +} +void YangH2645VideoDecoderFfmpeg::unloadLib() { + yang_avcodec_find_decoder_by_name=NULL; + yang_av_free = NULL; + yang_av_hwframe_ctx_alloc = NULL; + yang_av_hwframe_ctx_init = NULL; + yang_av_buffer_ref = NULL; + yang_av_malloc = NULL; + yang_av_hwdevice_ctx_create = NULL; + yang_avcodec_open2 = NULL; + yang_av_frame_alloc = NULL; + yang_av_image_get_buffer_size = NULL; + yang_av_image_fill_arrays = NULL; + yang_av_init_packet = NULL; + yang_av_hwframe_get_buffer = NULL; + yang_avcodec_find_decoder = NULL; + yang_avcodec_alloc_context3 = NULL; + yang_avcodec_send_packet = NULL; + yang_avcodec_receive_frame = NULL; + yang_av_hwframe_transfer_data = NULL; + yang_avcodec_flush_buffers=NULL; + yang_av_frame_free = NULL; + yang_avcodec_close = NULL; +} +#endif +enum AVPixelFormat get_hw_format1(AVCodecContext *ctx, + const enum AVPixelFormat *pix_fmts) { + if(YangH2645VideoDecoderFfmpeg::g_hwType==YangV_Hw_Intel) return AV_PIX_FMT_VAAPI; + if(YangH2645VideoDecoderFfmpeg::g_hwType==YangV_Hw_Nvdia) return AV_PIX_FMT_CUDA; + if(YangH2645VideoDecoderFfmpeg::g_hwType==YangV_Hw_Android) return AV_PIX_FMT_MEDIACODEC; + //return AV_PIX_FMT_VAAPI; + return AV_PIX_FMT_VAAPI; + +} + 
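+// set_hwframe_ctx() below prepares the codec context for hardware decoding:
+// it allocates an AVHWFramesContext on the supplied device context, declares
+// the GPU-side surface format (frames_ctx->format, e.g. AV_PIX_FMT_VAAPI or
+// AV_PIX_FMT_CUDA) and the software format frames take when downloaded to
+// system memory (frames_ctx->sw_format: NV12 for 8bit, P010/P016 for
+// 10/16bit), then attaches refs to both the frames context and the device
+// context on the AVCodecContext.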
+int32_t YangH2645VideoDecoderFfmpeg::set_hwframe_ctx(AVPixelFormat ctxformat,AVPixelFormat swformat,YangVideoInfo *yvp,AVCodecContext *ctx, + AVBufferRef *hw_device_ctx, int32_t pwid, int32_t phei) { + AVBufferRef *hw_frames_ref; + AVHWFramesContext *frames_ctx = NULL; + int32_t err = 0; + + if (!(hw_frames_ref = yang_av_hwframe_ctx_alloc(hw_device_ctx))) { + printf("Failed to create hw frame context.\n"); + return -1; + } + frames_ctx = (AVHWFramesContext*) (hw_frames_ref->data); + frames_ctx->format = ctxformat; + frames_ctx->sw_format = swformat; + + frames_ctx->width = pwid; + frames_ctx->height = phei; + //frames_ctx->initial_pool_size = 20; + if ((err = yang_av_hwframe_ctx_init(hw_frames_ref)) < 0) { + printf("Failed to initialize VAAPI frame context.Error code: %d\n", + ret); + yang_av_buffer_unref(&hw_frames_ref); + return err; + } + ctx->hw_frames_ctx = yang_av_buffer_ref(hw_frames_ref); + ctx->hw_device_ctx = yang_av_buffer_ref(hw_device_ctx); + // ctx->hwaccel_flags=1; + if (!ctx->hw_frames_ctx) + err = AVERROR(ENOMEM); + + yang_av_buffer_unref(&hw_frames_ref); + return err; +} + +YangH2645VideoDecoderFfmpeg::YangH2645VideoDecoderFfmpeg(YangVideoInfo *pcontext,YangVideoCodec pencdectype) { + m_encDecType= pencdectype; + m_context=pcontext; + usingHw = 0; //pcontext->usingHwDec==2?1:0; + if(pcontext->videoDecHwType>0) usingHw=1; + + m_buffer=NULL; + m_bufLen=0; + + g_hwType=(YangVideoHwType)pcontext->videoDecHwType; + m_bitDepth=pcontext->bitDepth; + + m_width = 0; + m_height = 0; + m_frame = NULL; + yLen = 0; + uLen = 0; + allLen = 0; + buffer = NULL; + ret = 0; + m_codec = NULL; + + hw_device_ctx = NULL; + frame_mem_gpu = NULL; +#if Yang_Ffmpeg_UsingSo + unloadLib(); +#endif + +} +YangH2645VideoDecoderFfmpeg::~YangH2645VideoDecoderFfmpeg() { + m_context=NULL; + if(m_buffer) delete[] m_buffer; + m_buffer=NULL; + decode_close(); +#if Yang_Ffmpeg_UsingSo + unloadLib(); + m_lib.unloadObject(); + m_lib1.unloadObject(); +#endif + +} +YangVideoHwType YangH2645VideoDecoderFfmpeg::g_hwType=YangV_Hw_Intel; +void YangH2645VideoDecoderFfmpeg::parseHeaderH265(uint8_t *p,int32_t pLen,int32_t *pwid,int32_t *phei,int32_t *pfps){ + + AVCodec *t_codec = yang_avcodec_find_decoder(AV_CODEC_ID_H265); + AVCodecContext* t_codecCtx = yang_avcodec_alloc_context3(t_codec); + t_codecCtx->extradata = (uint8_t*) yang_av_malloc(pLen + AV_INPUT_BUFFER_PADDING_SIZE); + t_codecCtx->extradata_size = pLen; + memcpy(t_codecCtx->extradata, p, pLen); + int32_t ret = yang_avcodec_open2(t_codecCtx, t_codec, NULL); + if (ret < 0) + printf("\navcodec_open2 failure................\n"); + + *pwid=t_codecCtx->width; + *phei=t_codecCtx->height; + *pfps=t_codecCtx->time_base.den;///t_codecCtx->time_base.num; + + yang_av_free(t_codecCtx->extradata); + t_codecCtx->extradata=NULL; + t_codecCtx->extradata_size = 0; + yang_avcodec_close(t_codecCtx); + yang_av_free(t_codecCtx); + t_codecCtx=NULL; + t_codec=NULL; +} +void YangH2645VideoDecoderFfmpeg::parseHeaderH264(uint8_t *p,int32_t pLen,int32_t *pwid,int32_t *phei,int32_t *pfps){ + + AVCodec *t_codec = yang_avcodec_find_decoder(AV_CODEC_ID_H264); + AVCodecContext* t_codecCtx = yang_avcodec_alloc_context3(t_codec); + t_codecCtx->extradata = (uint8_t*) yang_av_malloc(pLen + AV_INPUT_BUFFER_PADDING_SIZE); + t_codecCtx->extradata_size = pLen; + memset(t_codecCtx->extradata, 0, pLen + AV_INPUT_BUFFER_PADDING_SIZE); + memcpy(t_codecCtx->extradata, p, pLen); + + int32_t ret = yang_avcodec_open2(t_codecCtx, t_codec, NULL); + if (ret < 0) + yang_error("\navcodec_open2 
failure................\n"); + + *pwid=t_codecCtx->width; + *phei=t_codecCtx->height; + *pfps=t_codecCtx->time_base.den;///t_codecCtx->time_base.num; + + yang_av_free(t_codecCtx->extradata); + t_codecCtx->extradata=NULL; + t_codecCtx->extradata_size = 0; + yang_avcodec_close(t_codecCtx); + yang_av_free(t_codecCtx); + t_codecCtx=NULL; + t_codec=NULL; +} +void YangH2645VideoDecoderFfmpeg::parseRtmpHeader(uint8_t *p, int32_t pLen, int32_t *pwid, + int32_t *phei, int32_t *pfps) { + + uint8_t headers[128]; + memset(headers, 0, 128); + int32_t headerLen = 0; + if(m_encDecType==Yang_VED_264){ + yang_getH264RtmpHeader(p, headers, &headerLen); +#ifdef _WIN32 + YangH264Header h264header; + h264header.parseRtmpHeader(p); + *pwid = h264header.width; + *phei = h264header.height; + *pfps = h264header.fps; +#else + parseHeaderH264(headers,headerLen,pwid,phei,pfps); +#endif + }else if(m_encDecType==Yang_VED_265) { + yang_getH265RtmpHeader(p, headers, &headerLen); + parseHeaderH265(headers,headerLen,pwid,phei,pfps); + + } + m_width = *pwid; + m_height= *phei ; + + if(!m_buffer) m_buffer=new uint8_t[m_width*m_height*3/2]; + + int32_t bitLen=(m_bitDepth==8?1:2); + + yLen = m_width * m_height*bitLen; + uLen = yLen / 4; + allLen = yLen * 3 / 2; + + + m_codecCtx->extradata = (uint8_t*) yang_av_malloc(headerLen + AV_INPUT_BUFFER_PADDING_SIZE); + m_codecCtx->extradata_size = headerLen; + memcpy(m_codecCtx->extradata, headers, headerLen); + + m_codecCtx->width = m_width; + m_codecCtx->height = m_height; + + if (usingHw) { + AVPixelFormat ctxformat,swformat; + if(m_context->videoDecHwType==YangV_Hw_Intel) ctxformat = AV_PIX_FMT_VAAPI; + if(m_context->videoDecHwType==YangV_Hw_Nvdia) ctxformat = AV_PIX_FMT_CUDA; + if(m_context->videoDecHwType==YangV_Hw_Android) ctxformat = AV_PIX_FMT_MEDIACODEC; + if(m_context->bitDepth==8) swformat = AV_PIX_FMT_NV12; + if(m_context->bitDepth==10) swformat = AV_PIX_FMT_P010; + if(m_context->bitDepth==16) swformat = AV_PIX_FMT_P016; + if ((ret = set_hwframe_ctx(ctxformat,swformat,m_context,m_codecCtx, hw_device_ctx, m_width, m_height)) + < 0) { + printf("Failed to set hwframe context.\n"); + //goto close; + } + m_codecCtx->get_format = get_hw_format1; + } + + ret = yang_avcodec_open2(m_codecCtx, m_codec, NULL); + if (ret < 0) + printf("\navcodec_open2 failure................\n"); + AVPixelFormat fmt=AV_PIX_FMT_YUV420P; + if(m_bitDepth==8){ + if(usingHw) fmt=AV_PIX_FMT_NV12; + }else if(m_bitDepth==10){ + fmt=AV_PIX_FMT_P010; + }else if(m_bitDepth==16){ + fmt=AV_PIX_FMT_P016; + } + m_frame = yang_av_frame_alloc(); + + int32_t numBytes = yang_av_image_get_buffer_size(fmt, m_width,m_height, 1); + + buffer = (uint8_t*) yang_av_malloc(numBytes * sizeof(uint8_t)); + + + if (usingHw) { + yang_av_image_fill_arrays(m_frame->data, m_frame->linesize, buffer, + fmt, m_width, m_height, 1); + yang_av_init_packet(&packet); + frame_mem_gpu = yang_av_frame_alloc(); + frame_mem_gpu->format = m_codecCtx->pix_fmt; + frame_mem_gpu->width = m_codecCtx->width; + frame_mem_gpu->height = m_codecCtx->height; + ret = yang_av_hwframe_get_buffer(m_codecCtx->hw_frames_ctx,frame_mem_gpu, 0); + if (ret < 0) + printf("\nERROR:av_hwframe_get_buffer failure!\n"); + } else { + yang_av_image_fill_arrays(m_frame->data, m_frame->linesize, buffer, + fmt, m_width, m_height, 1); + yang_av_init_packet(&packet); + } + + m_isInit = 1; +} + +void YangH2645VideoDecoderFfmpeg::init() { + //AVCodecID codec_id = AV_CODEC_ID_H264; + // av_register_all(); + // avcodec_register_all(); + // avcodec_register(AV_CODEC_ID_H264); +#if 
Yang_Ffmpeg_UsingSo + m_lib.loadObject("libavcodec"); + m_lib1.loadObject("libavutil"); + loadLib(); +#endif + + if(usingHw){ + if(m_encDecType==Yang_VED_264){ + if(m_context->videoDecHwType==YangV_Hw_Intel) m_codec = yang_avcodec_find_decoder_by_name("h264_vaapi");//avcodec_find_encoder(AV_CODEC_ID_H264); + if(m_context->videoDecHwType==YangV_Hw_Nvdia) m_codec = yang_avcodec_find_decoder_by_name("h264_nvdec");//h264_cuvid + if(m_context->videoDecHwType==YangV_Hw_Android) m_codec = yang_avcodec_find_decoder_by_name("h264_mediacodec"); + }else if(m_encDecType==Yang_VED_265){ + if(m_context->videoDecHwType==YangV_Hw_Intel) m_codec = yang_avcodec_find_decoder_by_name("hevc_vaapi"); + if(m_context->videoDecHwType==YangV_Hw_Nvdia) m_codec = yang_avcodec_find_decoder_by_name("hevc_nvdec"); + if(m_context->videoDecHwType==YangV_Hw_Android) m_codec = yang_avcodec_find_decoder_by_name("hevc_mediacodec"); + } + }else{ + if(m_encDecType==Yang_VED_264){ + m_codec = yang_avcodec_find_decoder(AV_CODEC_ID_H264); + }else if(m_encDecType==Yang_VED_265){ + m_codec = yang_avcodec_find_decoder(AV_CODEC_ID_H265); + } + } + m_codecCtx = yang_avcodec_alloc_context3(m_codec); +} + +int32_t YangH2645VideoDecoderFfmpeg::decode(YangFrame* videoFrame,YangYuvType yuvtype,YangDecoderCallback* pcallback){ + if (usingHw) + return decode_2(videoFrame,yuvtype,pcallback); + else + return decode_1(videoFrame,yuvtype,pcallback); + +} +int32_t YangH2645VideoDecoderFfmpeg::decode_1(YangFrame* videoFrame,YangYuvType yuvtype,YangDecoderCallback* pcallback) { + packet.data = videoFrame->payload; + packet.size = videoFrame->nb; + + ret = yang_avcodec_send_packet(m_codecCtx, &packet); + if (ret != 0) { + m_bufLen= 0; + if(ret==AVERROR(EAGAIN)) { + yang_error("avcodec_send_packet EAGAIN error:input is not accepted in the current state"); + return 2; + } + if(ret==AVERROR_EOF){ + yang_error("avcodec_send_packet AVERROR_EOF error: the decoder has been flushed, and no new packets can be sent to it "); + return 2; + } + if(ret==AVERROR_INVALIDDATA){ + yang_error("avcodec_send_packet AVERROR_INVALIDDATA"); + yang_trace("\n%d:",videoFrame->nb); + for(int i=0;i<100;i++) yang_trace("%02x,",videoFrame->payload[i]); + return 2; + } + yang_error("avcodec_send_packet err(%d)",ret); + yang_trace("\n"); + for(int i=0;i<100;i++) yang_trace("%02x,",videoFrame->payload[i]); + return 2; + } + //avcodec_receive_frame + while(ret==0){ + ret = yang_avcodec_receive_frame(m_codecCtx, m_frame); + + if (ret != 0) { + m_bufLen = 0; + if(ret==AVERROR_EOF){ + yang_avcodec_flush_buffers(m_codecCtx); + return ret; + } + if(ret==AVERROR(EAGAIN)) { + // yang_error("avcodec_receive_packet err EAGAIN"); + return ret; + } + + yang_error("avcodec_receive_packet err(%d)",ret); + return 1; + } + + for (int i = 0; i < m_height; i++) { + memcpy(m_buffer + i * m_width, m_frame->data[0] + i * m_frame->linesize[0], m_width); + } + for (int i = 0; i < m_height / 2; i++) { + memcpy(m_buffer + yLen+i * m_width / 2,m_frame->data[1] + i * m_frame->linesize[1], m_width / 2); + } + for (int i = 0; i < m_height / 2; i++) { + memcpy(m_buffer + yLen+ uLen+ i * m_width / 2, m_frame->data[2] + i * m_frame->linesize[2], m_width / 2); + } + m_bufLen= allLen; + videoFrame->payload=m_buffer; + videoFrame->nb=m_bufLen; + if(pcallback) pcallback->onVideoData(videoFrame); + } + return Yang_Ok; +} +int32_t YangH2645VideoDecoderFfmpeg::decode_2(YangFrame* videoFrame,YangYuvType yuvtype,YangDecoderCallback* pcallback) { + packet.data = videoFrame->payload; + packet.size = videoFrame->nb; + ret = 
yang_avcodec_send_packet(m_codecCtx, &packet); + if (ret != 0) { + m_bufLen= 0; + return 2; + } + while(ret==0){ + ret = yang_avcodec_receive_frame(m_codecCtx, frame_mem_gpu); + + if (ret != 0) { + m_bufLen = 0; + if(ret==AVERROR_EOF){ + yang_avcodec_flush_buffers(m_codecCtx); + return ret; + } + if(ret==AVERROR(EAGAIN)) { + // yang_error("avcodec_receive_packet err EAGAIN"); + return ret; + } + + yang_error("avcodec_receive_packet err(%d)",ret); + return 1; + } + yang_av_hwframe_transfer_data(m_frame, frame_mem_gpu, 0); + //memcpy(m_buffer, m_frame->data[0], yLen); + // if(yuvtype==YangYv12){ + // memcpy(m_buffer + yLen, m_frame->data[1], uLen); + // memcpy(m_buffer + yLen + uLen, m_frame->data[1]+uLen, uLen); + // }else if(yuvtype==YangI420){ + // memcpy(m_buffer + yLen, m_frame->data[1]+uLen, uLen); + // memcpy(m_buffer + yLen + uLen, m_frame->data[1], uLen); + // } + //memcpy(m_buffer + yLen, m_frame->data[1], uLen * 2); + for (int i = 0; i < m_height; i++) { + memcpy(m_buffer + i * m_width, m_frame->data[0] + i * m_frame->linesize[0], m_width); + } + for (int i = 0; i < m_height / 2; i++) { + memcpy(m_buffer + yLen+i * m_width / 2,m_frame->data[1] + i * m_frame->linesize[1], m_width / 2); + } + for (int i = 0; i < m_height / 2; i++) { + memcpy(m_buffer + yLen+ uLen+ i * m_width / 2, m_frame->data[2] + i * m_frame->linesize[2], m_width / 2); + } + m_bufLen = allLen; + videoFrame->payload=m_buffer; + videoFrame->nb=m_bufLen; + if(pcallback) pcallback->onVideoData(videoFrame); + } + return Yang_Ok; +} +void YangH2645VideoDecoderFfmpeg::decode_close() { + if (usingHw) { + yang_av_buffer_unref(&hw_device_ctx); + yang_av_frame_free(&frame_mem_gpu); + } + + yang_av_frame_free(&m_frame); + m_frame = NULL; + if (m_codecCtx) { + yang_avcodec_close(m_codecCtx); + yang_av_free(m_codecCtx); + } + + m_codecCtx = NULL; + if (buffer) + yang_av_free(buffer); + buffer = NULL; + +} + + diff --git a/libmetartc3/src/yangdecoder/YangH2645VideoDecoderFfmpeg.h b/libmetartc3/src/yangdecoder/YangH2645VideoDecoderFfmpeg.h new file mode 100755 index 00000000..c74c8b20 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangH2645VideoDecoderFfmpeg.h @@ -0,0 +1,122 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGDECODER_SRC_YANGH2645VIDEODECODERFFMPEG_H_ +#define YANGDECODER_SRC_YANGH2645VIDEODECODERFFMPEG_H_ +#include +#include +#include "yangdecoder/YangVideoDecoder.h" +#include "yangutil/sys/YangLoadLib.h" + +extern "C" { +#include "libavcodec/avcodec.h" +//#include "libavformat/avformat.h" +#include "libavutil/avutil.h" +#include "libavutil/imgutils.h" +} + + +class YangH2645VideoDecoderFfmpeg: public YangVideoDecoder { +public: + YangH2645VideoDecoderFfmpeg(YangVideoInfo *pcontext, + YangVideoCodec pencdectype); + virtual ~YangH2645VideoDecoderFfmpeg(); + void parseRtmpHeader(uint8_t *p, int32_t pLen, int32_t *pwid, int32_t *phei, int32_t *pfps); + void init(); + int32_t decode(YangFrame* videoFrame,YangYuvType yuvtype,YangDecoderCallback* pcallback); + //int32_t decode( int32_t isIframe, uint8_t *pData, int32_t nSize, uint8_t *dest, int32_t *pnFrameReturned); + void decode_close(); + static YangVideoHwType g_hwType; +protected: + uint8_t* m_buffer; + int32_t m_bufLen; +private: + YangVideoCodec m_encDecType; + YangVideoInfo *m_context; + int32_t m_bitDepth; + int32_t ret; + AVCodec *m_codec; + AVCodecContext *m_codecCtx = NULL; + AVPacket packet; + AVFrame *m_frame; + int32_t m_width, m_height; + int32_t yLen; + int32_t uLen; + int32_t allLen; + uint8_t *buffer = NULL; + int32_t usingHw; + 
AVBufferRef *hw_device_ctx; + AVFrame *frame_mem_gpu; + + int32_t decode_1(YangFrame* videoFrame,YangYuvType yuvtype,YangDecoderCallback* pcallback); + int32_t decode_2(YangFrame* videoFrame,YangYuvType yuvtype,YangDecoderCallback* pcallback); + int32_t set_hwframe_ctx(AVPixelFormat ctxformat, AVPixelFormat swformat, + YangVideoInfo *yvp, AVCodecContext *ctx, + AVBufferRef *hw_device_ctx, int32_t pwid, int32_t phei); + + void parseHeaderH265(uint8_t *p, int32_t pLen, int32_t *pwid, int32_t *phei, int32_t *pfps); + void parseHeaderH264(uint8_t *p, int32_t pLen, int32_t *pwid, int32_t *phei, int32_t *pfps); +#if Yang_Ffmpeg_UsingSo + YangLoadLib m_lib, m_lib1; + void loadLib(); + void unloadLib(); + AVBufferRef* (*yang_av_hwframe_ctx_alloc)(AVBufferRef *device_ctx); + int32_t (*yang_av_hwframe_ctx_init)(AVBufferRef *ref); + AVBufferRef* (*yang_av_buffer_ref)(AVBufferRef *buf); + void* (*yang_av_malloc)(size_t size); + int32_t (*yang_av_hwdevice_ctx_create)(AVBufferRef **device_ctx, + enum AVHWDeviceType type, const char *device, AVDictionary *opts, + int32_t flags); + int32_t (*yang_avcodec_open2)(AVCodecContext *avctx, const AVCodec *codec, + AVDictionary **options); + AVFrame* (*yang_av_frame_alloc)(void); + int32_t (*yang_av_image_get_buffer_size)(enum AVPixelFormat pix_fmt, int32_t width, + int32_t height, int32_t align); + int32_t (*yang_av_image_fill_arrays)(uint8_t *dst_data[4], int32_t dst_linesize[4], + const uint8_t *src, enum AVPixelFormat pix_fmt, int32_t width, + int32_t height, int32_t align); + void (*yang_av_buffer_unref)(AVBufferRef **buf); + void (*yang_av_init_packet)(AVPacket *pkt); + int32_t (*yang_av_hwframe_get_buffer)(AVBufferRef *hwframe_ctx, AVFrame *frame, + int32_t flags); + AVCodec* (*yang_avcodec_find_decoder)(enum AVCodecID id); + AVCodec* (*yang_avcodec_find_decoder_by_name)(const char *name); + AVCodecContext* (*yang_avcodec_alloc_context3)(const AVCodec *codec); + int32_t (*yang_avcodec_send_packet)(AVCodecContext *avctx, + const AVPacket *avpkt); + int32_t (*yang_avcodec_receive_frame)(AVCodecContext *avctx, AVFrame *frame); + int32_t (*yang_av_hwframe_transfer_data)(AVFrame *dst, const AVFrame *src, + int32_t flags); + void (*yang_avcodec_flush_buffers)(AVCodecContext *avctx); + void (*yang_av_frame_free)(AVFrame **frame); + void (*yang_av_free)(void *ptr); + int32_t (*yang_avcodec_close)(AVCodecContext *avctx); +#else + #define yang_avcodec_find_decoder_by_name avcodec_find_decoder_by_name + #define yang_av_free av_free + #define yang_av_hwframe_ctx_alloc av_hwframe_ctx_alloc + #define yang_av_hwframe_ctx_init av_hwframe_ctx_init + #define yang_av_buffer_ref av_buffer_ref + #define yang_av_malloc av_malloc + #define yang_av_hwdevice_ctx_create av_hwdevice_ctx_create + #define yang_avcodec_open2 avcodec_open2 + #define yang_av_frame_alloc av_frame_alloc + #define yang_av_image_get_buffer_size av_image_get_buffer_size + #define yang_av_image_fill_arrays av_image_fill_arrays + #define yang_av_init_packet av_init_packet + #define yang_av_hwframe_get_buffer av_hwframe_get_buffer + #define yang_avcodec_find_decoder avcodec_find_decoder + #define yang_avcodec_alloc_context3 avcodec_alloc_context3 + #define yang_avcodec_send_packet avcodec_send_packet + #define yang_avcodec_receive_frame avcodec_receive_frame + #define yang_av_hwframe_transfer_data av_hwframe_transfer_data + #define yang_avcodec_flush_buffers avcodec_flush_buffers + #define yang_av_frame_free av_frame_free + #define yang_avcodec_close avcodec_close + #define yang_av_buffer_unref av_buffer_unref 
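+    // With Yang_Ffmpeg_UsingSo disabled, each yang_-prefixed name above is a
+    // plain macro aliasing the FFmpeg symbol, so calls bind at link time;
+    // with it enabled, the same names are member function pointers resolved
+    // from libavcodec/libavutil at runtime through YangLoadLib. Either way
+    // the decoder code calls the yang_* names and stays build-agnostic.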
+#endif + +}; + +#endif /* YANGDECODER_SRC_YANGH2645VIDEODECODERFFMPEG_H_ */ diff --git a/libmetartc3/src/yangdecoder/YangH264Dec.h b/libmetartc3/src/yangdecoder/YangH264Dec.h new file mode 100755 index 00000000..c15ea1e0 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangH264Dec.h @@ -0,0 +1,25 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef SRC_YANGH264DEC_H_ +#define SRC_YANGH264DEC_H_ +#include +#include +struct YangH264DecContext { + int m_width, m_height, m_fps; + int yLen; + int uLen; + int allLen; + void *context; +}; + +struct YangH264DecContext* getYangH264DecContext(); + +void init_H264DecCont(struct YangH264DecContext *cont, uint8_t *headers, + int headerLen); +int decode(struct YangH264DecContext *cont, int isIframe, unsigned char *pData, + int nSize, enum YangYuvType pyuvType, unsigned char *dest, + int *pnFrameReturned); +void decode_close(struct YangH264DecContext *cont); + +#endif /* SRC_YANGH264DEC_H_ */ diff --git a/libmetartc3/src/yangdecoder/YangH264DecoderSoft.cpp b/libmetartc3/src/yangdecoder/YangH264DecoderSoft.cpp new file mode 100755 index 00000000..865ff6fd --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangH264DecoderSoft.cpp @@ -0,0 +1,96 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "YangH264DecoderSoft.h" +#include +#include +#if !Yang_Using_H264Decoder_So +int32_t yang_decode(struct YangH264DecContext *cont,int32_t isIframe,uint8_t *pData, int32_t nSize,enum YangYuvType yuvType, + uint8_t *dest, int32_t *pnFrameReturned){ + return decode(cont,isIframe,pData, nSize, yuvType,dest, pnFrameReturned); +} +#endif +YangH264DecoderSoft::YangH264DecoderSoft() { + context=NULL; + m_buffer=NULL; + m_bufLen=0; +#if Yang_Using_H264Decoder_So + m_lib.loadObject("libyangh264dec"); + yang_getYangH264DecContext=(struct YangH264DecContext *(*)())m_lib.loadFunction("getYangH264DecContext"); + //yang_parseHeader=(void (*)(uint8_t *p,int32_t pLen,int32_t *pwid,int32_t *phei,int32_t *pfps))m_lib.loadFunction("parseHeader"); + yang_init_H264DecCont=(void (*)(struct YangH264DecContext *cont,uint8_t *headers, int32_t headerLen))m_lib.loadFunction("init_H264DecCont"); + yang_decode=(int32_t (*)(struct YangH264DecContext *cont,int32_t isIframe,uint8_t *pData, int32_t nSize,enum YangYuvType yuvType, + uint8_t *dest, int32_t *pnFrameReturned))m_lib.loadFunction("decode"); + yang_decode_close=(void (*)(struct YangH264DecContext *cont))m_lib.loadFunction("decode_close"); +#endif +} + +YangH264DecoderSoft::~YangH264DecoderSoft() { + + //if(m_dec) delete m_dec; + if(m_buffer) delete[] m_buffer; + m_buffer=NULL; + if(context){ + yang_decode_close(context); + context=NULL; + } +#if Yang_Using_H264Decoder_So + yang_getYangH264DecContext=NULL; + yang_init_H264DecCont=NULL; + yang_decode=NULL; + yang_decode_close=NULL; + m_lib.unloadObject(); +#endif +} + +void YangH264DecoderSoft::getH264RtmpHeader(uint8_t *buf, uint8_t *src,int32_t *hLen) { + + int32_t spsLen = *(buf + 12) + 1; + uint8_t *spsStart = buf + 13; + + int32_t ppsLen = *(spsStart + spsLen + 1) + 1; + uint8_t *ppsStart = buf + 13 + spsLen + 2; + *(src + 3) = 0x01; + memcpy(src + 4, spsStart, spsLen); + *(src + 4 + spsLen + 3) = 0x01; + memcpy(src + 4 + spsLen + 4, ppsStart, ppsLen); + *hLen = 8 + spsLen + ppsLen; + /**printf("\n************************************sps*******len=%d\n",spsLen); + for(int32_t i=0;im_width; + *phei=context->m_height; + *pfps=context->m_fps; + if(!m_buffer) m_buffer=new uint8_t[context->m_width*context->m_height*3/2]; + //printf("\ndecode 
wid=%d,hei=%d,fps=%d....................\n",*pwid,*phei,*pfps); +} + void YangH264DecoderSoft::init(){ + if(context==NULL) context=yang_getYangH264DecContext(); + } + int32_t YangH264DecoderSoft::decode(YangFrame* videoFrame,YangYuvType yuvtype,YangDecoderCallback* pcallback){ + int32_t ret=0; + + if(context) ret=yang_decode(context,1,videoFrame->payload,videoFrame->nb,yuvtype,m_buffer,&m_bufLen); + videoFrame->payload=m_buffer; + videoFrame->nb=m_bufLen; + if(pcallback&&m_bufLen) pcallback->onVideoData(videoFrame); + return ret; + } + + //int32_t YangH264DecoderSoft::decode(int32_t isIframe, uint8_t *pData, int32_t nSize, uint8_t *dest, int32_t *pnFrameReturned){ + //if(context) return yang_decode(context,isIframe,pData,nSize,dest,pnFrameReturned); + //printf("\n*****decodesize===%d....",*pnFrameReturned); + //return 0; + //} diff --git a/libmetartc3/src/yangdecoder/YangH264DecoderSoft.h b/libmetartc3/src/yangdecoder/YangH264DecoderSoft.h new file mode 100755 index 00000000..8d83b5a1 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangH264DecoderSoft.h @@ -0,0 +1,48 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGDECODER_SRC_YANGH264DECODERSOFT_H_ +#define YANGDECODER_SRC_YANGH264DECODERSOFT_H_ +#include "stdint.h" +#include +#include +#include +extern "C"{ +#include "YangH264Dec.h" +} +class YangH264DecoderSoft: public YangVideoDecoder { +public: + YangH264DecoderSoft(); + virtual ~YangH264DecoderSoft(); + void parseRtmpHeader(uint8_t *p, int32_t pLen, int32_t *pwid, int32_t *phei, int32_t *pfps); + void init(); + int32_t decode(YangFrame* videoFrame,YangYuvType yuvtype,YangDecoderCallback* pcallback); + //int32_t decode(int32_t isIframe, uint8_t *pData, int32_t nSize, uint8_t *dest,int32_t *pnFrameReturned); +private: + uint8_t* m_buffer; + int32_t m_bufLen; +private: + struct YangH264DecContext *context; + void getH264RtmpHeader(uint8_t *buf, uint8_t *src, int32_t *hLen); +#if Yang_Using_H264Decoder_So + YangLoadLib m_lib; + + struct YangH264DecContext* (*yang_getYangH264DecContext)(); + //void (*yang_parseHeader)(uint8_t *p,int32_t pLen,int32_t *pwid,int32_t *phei,int32_t *pfps); + void (*yang_init_H264DecCont)(struct YangH264DecContext *cont, + uint8_t *headers, int32_t headerLen); + int32_t (*yang_decode)(struct YangH264DecContext *cont, int32_t isIframe, + uint8_t *pData, int32_t nSize, enum YangYuvType pyuvType,uint8_t *dest, + int32_t *pnFrameReturned); + void (*yang_decode_close)(struct YangH264DecContext *cont); +#else + #define yang_getYangH264DecContext getYangH264DecContext + //#define yang_parseHeader parseHeader + #define yang_init_H264DecCont init_H264DecCont + //#define yang_decode decode + #define yang_decode_close decode_close +#endif +}; + +#endif /* YANGDECODER_SRC_YANGH264DECODERSOFT_H_ */ diff --git a/libmetartc3/src/yangdecoder/YangH264DecoderSoftFactory.h b/libmetartc3/src/yangdecoder/YangH264DecoderSoftFactory.h new file mode 100755 index 00000000..1bf6bec1 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangH264DecoderSoftFactory.h @@ -0,0 +1,16 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef SRC_YANGH264DECODERSOFTFACTORY_H_ +#define SRC_YANGH264DECODERSOFTFACTORY_H_ +#include "yangdecoder/YangVideoDecoder.h" +YangVideoDecoder* yang_createH264Decoder(); +class YangH264DecoderSoftFactory { +public: + YangH264DecoderSoftFactory(); + virtual ~YangH264DecoderSoftFactory(); + YangVideoDecoder* createH264Decoder(); +}; + +#endif /* SRC_YANGH264DECODERSOFTFACTORY_H_ */ diff --git a/libmetartc3/src/yangdecoder/YangH264Header.cpp 
b/libmetartc3/src/yangdecoder/YangH264Header.cpp
new file mode 100755
index 00000000..6c1b5cb7
--- /dev/null
+++ b/libmetartc3/src/yangdecoder/YangH264Header.cpp
@@ -0,0 +1,654 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+
+#include "YangH264Header1.h"
+
+YangH264Header::YangH264Header() {
+ memset(&m_sps, 0, sizeof(m_sps));
+ memset(&m_pps, 0, sizeof(m_pps));
+ memset(&packet, 0, sizeof(packet));
+ nal = (YangNAL*) malloc(sizeof(YangNAL));
+ rbsp = (YangRBSP*) malloc(sizeof(YangRBSP));
+ rbsp->rbsp_buffer = (uint8_t*) malloc(MAX_MBPAIR_SIZE);
+ nal->rbsp_buffer = rbsp->rbsp_buffer;
+ packet.nals = nal;
+ width = 0;
+ height = 0;
+ sl = NULL;
+ fps=10;
+}
+
+YangH264Header::~YangH264Header() {
+ // release what the constructor and h264_slice_header_parse allocated;
+ // nal->rbsp_buffer aliases rbsp->rbsp_buffer, so it is freed only once
+ if (sl) free(sl);
+ if (rbsp) {
+ free(rbsp->rbsp_buffer);
+ free(rbsp);
+ }
+ if (nal) free(nal);
+}
+
+int32_t YangH264Header::h264_decode_seq_parameter_set(uint8_t *buf, int32_t len) {
+ yang_packet_split(buf, len, 1, 1);
+ YangGetBitContext *gb = &packet.nals->gb;
+ int32_t profile_idc, level_idc, constraint_set_flags = 0;
+ uint32_t sps_id;
+ int32_t i, log2_max_frame_num_minus4;
+ Yang_SPS *sps = &m_sps;
+ int32_t ret;
+ /** sps->data_size = gb->buffer_end - gb->buffer;
+ if (sps->data_size > sizeof(sps->data)) {
+ // av_log(avctx, AV_LOG_DEBUG, "Truncating likely oversized SPS\n");
+ sps->data_size = sizeof(sps->data);
+ }
+ memcpy(sps->data, gb->buffer, sps->data_size);**/
+
+ profile_idc = yang_get_bits(gb, 8);
+ constraint_set_flags |= yang_get_bits1(gb) << 0; // constraint_set0_flag
+ constraint_set_flags |= yang_get_bits1(gb) << 1; // constraint_set1_flag
+ constraint_set_flags |= yang_get_bits1(gb) << 2; // constraint_set2_flag
+ constraint_set_flags |= yang_get_bits1(gb) << 3; // constraint_set3_flag
+ constraint_set_flags |= yang_get_bits1(gb) << 4; // constraint_set4_flag
+ constraint_set_flags |= yang_get_bits1(gb) << 5; // constraint_set5_flag
+ yang_skip_bits(gb, 2); // reserved_zero_2bits
+ level_idc = yang_get_bits(gb, 8);
+ sps_id = yang_get_ue_golomb_31(gb);
+ sps->sps_id = sps_id;
+ sps->time_offset_length = 24;
+ sps->profile_idc = profile_idc;
+ sps->constraint_set_flags = constraint_set_flags;
+ sps->level_idc = level_idc;
+ sps->full_range = -1;
+
+ memset(sps->scaling_matrix4, 16, sizeof(sps->scaling_matrix4));
+ memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8));
+ sps->scaling_matrix_present = 0;
+ sps->colorspace = Yang_SPC_UNSPECIFIED; //2; //AVCOL_SPC_UNSPECIFIED
+
+ if (sps->profile_idc == 100 || // High profile
+ sps->profile_idc == 110 || // High10 profile
+ sps->profile_idc == 122 || // High422 profile
+ sps->profile_idc == 244 || // High444 Predictive profile
+ sps->profile_idc == 44 || // Cavlc444 profile
+ sps->profile_idc == 83 || // Scalable Constrained High profile (SVC)
+ sps->profile_idc == 86 || // Scalable High Intra profile (SVC)
+ sps->profile_idc == 118 || // Stereo High profile (MVC)
+ sps->profile_idc == 128 || // Multiview High profile (MVC)
+ sps->profile_idc == 138 || // Multiview Depth High profile (MVCD)
+ sps->profile_idc == 144) { // old High444 profile
+ sps->chroma_format_idc = yang_get_ue_golomb_31(gb);
+ if (sps->chroma_format_idc == 3) {
+ sps->residual_color_transform_flag = yang_get_bits1(gb);
+ }
+ sps->bit_depth_luma = yang_get_ue_golomb(gb) + 8;
+ sps->bit_depth_chroma = yang_get_ue_golomb(gb) + 8;
+ sps->transform_bypass = yang_get_bits1(gb);
+ ret = 
yang_decode_scaling_matrices(gb, sps, NULL, 1, + sps->scaling_matrix4, sps->scaling_matrix8); + + sps->scaling_matrix_present |= ret; + } else { + sps->chroma_format_idc = 1; + sps->bit_depth_luma = 8; + sps->bit_depth_chroma = 8; + } + + log2_max_frame_num_minus4 = yang_get_ue_golomb(gb); + + sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4; + + sps->poc_type = yang_get_ue_golomb_31(gb); + + if (sps->poc_type == 0) { // FIXME #define + unsigned t = yang_get_ue_golomb(gb); + sps->log2_max_poc_lsb = t + 4; + } else if (sps->poc_type == 1) { // FIXME #define + sps->delta_pic_order_always_zero_flag = yang_get_bits1(gb); + sps->offset_for_non_ref_pic = yang_get_se_golomb(gb); + sps->offset_for_top_to_bottom_field = yang_get_se_golomb(gb); + sps->poc_cycle_length = yang_get_ue_golomb(gb); + for (i = 0; i < sps->poc_cycle_length; i++) + sps->offset_for_ref_frame[i] = yang_get_se_golomb(gb); + } + + sps->ref_frame_count = yang_get_ue_golomb_31(gb); + sps->gaps_in_frame_num_allowed_flag = yang_get_bits1(gb); + sps->mb_width = yang_get_ue_golomb(gb) + 1; + sps->mb_height = yang_get_ue_golomb(gb) + 1; + width = sps->mb_width * 16; + height = sps->mb_height * 16; + //if (width == 1920) height = 1080; + //if (width == 320) height = 240; + if (width > 3840) width = 3840; + if (width <= 0) width = 1280; + if (height > 2160) height = 2160; + if (height <= 0) height = 720; + + sps->frame_mbs_only_flag = yang_get_bits1(gb); + + sps->mb_height *= 2 - sps->frame_mbs_only_flag; + + if (!sps->frame_mbs_only_flag) + sps->mb_aff = yang_get_bits1(gb); + else + sps->mb_aff = 0; + + sps->direct_8x8_inference_flag = yang_get_bits1(gb); + sps->crop = yang_get_bits1(gb); + uint32_t crop_left = 0; + uint32_t crop_right = 0; + uint32_t crop_top = 0; + uint32_t crop_bottom = 0; + if (sps->crop) { + crop_left = yang_get_ue_golomb(gb); + crop_right = yang_get_ue_golomb(gb); + crop_top = yang_get_ue_golomb(gb); + crop_bottom = yang_get_ue_golomb(gb); + // int32_t width = 16 * sps->mb_width; + //int32_t height = 16 * sps->mb_height; + sps->crop_left = 0; + sps->crop_right = 0; + sps->crop_top = 0; + sps->crop_bottom = 0; + + + int32_t vsub = (sps->chroma_format_idc == 1) ? 
1 : 0; + int32_t hsub = (sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2) ?1 : 0; + int32_t step_x = 1 << hsub; + int32_t step_y = (2 - sps->frame_mbs_only_flag) << vsub; + sps->crop_left = crop_left * step_x; + sps->crop_right = crop_right * step_x; + sps->crop_top = crop_top * step_y; + sps->crop_bottom = crop_bottom * step_y; + } else { + sps->crop_left = 0; + sps->crop_right = 0; + sps->crop_top = 0; + sps->crop_bottom = 0; + } + //printf("\n**************frame_mbs_only_flag=%d,sps->crop=%d,c_left=%d,c_right=%d,c_top=%d,c_bottom=%d\n", + // sps->frame_mbs_only_flag, sps->crop, sps->crop_left, sps->crop_right,sps->crop_top, sps->crop_bottom); + width = width - (sps->crop_right + sps->crop_left); + height = height - (sps->crop_top + sps->crop_bottom); + sps->vui_parameters_present_flag = yang_get_bits1(gb); + if (sps->vui_parameters_present_flag) { + //int32_t ret = + yang_decode_vui_parameters(gb, sps); + } + if(sps->timing_info_present_flag){ + fps=sps->time_scale/(sps->num_units_in_tick); + if(sps->fixed_frame_rate_flag) fps=fps/2; + // printf("\n***********fps=%d\n",fps); + } + /* if the maximum delay is not stored in the SPS, derive it based on the + * level */ + /** if (!sps->bitstream_restriction_flag && + (sps->ref_frame_count || avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT)) { + sps->num_reorder_frames = MAX_DELAYED_PIC_COUNT - 1; + for (i = 0; i < FF_ARRAY_ELEMS(level_max_dpb_mbs); i++) { + if (level_max_dpb_mbs[i][0] == sps->level_idc) { + sps->num_reorder_frames = FFMIN(level_max_dpb_mbs[i][1] / (sps->mb_width * sps->mb_height), + sps->num_reorder_frames); + break; + } + } + }**/ + + if (!sps->sar.den) + sps->sar.den = 1; + return 0; +} + +int32_t YangH264Header::h264_decode_picture_parameter_set(uint8_t *buf, int32_t len) { + //printf("\n%02x,%02x**ind=%d,size_in_bits=%d*plus8=%d*picture_parameter_set\n",*gb->buffer,*(gb->buffer+1), + //gb->index,gb->size_in_bits,gb->size_in_bits_plus8); + yang_packet_split(buf, len, 1, 1); + YangGetBitContext *gb = &packet.nals->gb; + const Yang_SPS *sps=&m_sps; + uint32_t pps_id = yang_get_ue_golomb(gb); + Yang_PPS *pps = &m_pps; + int32_t qp_bd_offset=0; + int32_t bits_left=0; + int32_t ret=0; + + /** pps->data_size = gb->buffer_end - gb->buffer; + if (pps->data_size > sizeof(pps->data)) { + // av_log(avctx, AV_LOG_DEBUG, "Truncating likely oversized PPS (%"SIZE_SPECIFIER" > %"SIZE_SPECIFIER")\n", pps->data_size, sizeof(pps->data)); + pps->data_size = sizeof(pps->data); + } + memcpy(pps->data, gb->buffer, pps->data_size);**/ + + pps->sps_id = yang_get_ue_golomb_31(gb); + pps->cabac = yang_get_bits1(gb); + pps->pic_order_present = yang_get_bits1(gb); + pps->slice_group_count = yang_get_ue_golomb(gb) + 1; + if (pps->slice_group_count > 1) { + pps->mb_slice_group_map_type = yang_get_ue_golomb(gb); + } + pps->ref_count[0] = yang_get_ue_golomb(gb) + 1; + pps->ref_count[1] = yang_get_ue_golomb(gb) + 1; + qp_bd_offset = 6 * (sps->bit_depth_luma - 8); + + pps->weighted_pred = yang_get_bits1(gb); + pps->weighted_bipred_idc = yang_get_bits(gb, 2); + pps->init_qp = yang_get_se_golomb(gb) + 26U + qp_bd_offset; + pps->init_qs = yang_get_se_golomb(gb) + 26U + qp_bd_offset; + pps->chroma_qp_index_offset[0] = yang_get_se_golomb(gb); + if (pps->chroma_qp_index_offset[0] < -12 + || pps->chroma_qp_index_offset[0] > 12) { + ret = YangERROR_INVALIDDATA; + } + + pps->deblocking_filter_parameters_present = yang_get_bits1(gb); + pps->constrained_intra_pred = yang_get_bits1(gb); + pps->redundant_pic_cnt_present = yang_get_bits1(gb); + + 
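/* trailing PPS fields are optional: the more_rbsp_data() probe used by
+ FFmpeg is stubbed out below, so transform_8x8_mode, the PPS-level scaling
+ lists and second_chroma_qp_index_offset all keep their defaults. A sketch
+ of the omitted guard (more_rbsp_data is not implemented in this file):
+ if (bits_left > 0 && more_rbsp_data(gb))
+ pps->transform_8x8_mode = yang_get_bits1(gb);
+ */
+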
pps->transform_8x8_mode = 0; + memcpy(pps->scaling_matrix4, sps->scaling_matrix4, + sizeof(pps->scaling_matrix4)); + memcpy(pps->scaling_matrix8, sps->scaling_matrix8, + sizeof(pps->scaling_matrix8)); + + bits_left = len - yang_get_bits_count(gb); + /** if (bits_left > 0 && more_rbsp_data_in_pps(sps, avctx)) { + pps->transform_8x8_mode = yang_get_bits1(gb); + ret = yang_decode_scaling_matrices(gb, sps, pps, 0, + pps->scaling_matrix4, pps->scaling_matrix8); + + // second_chroma_qp_index_offset + pps->chroma_qp_index_offset[1] = yang_get_se_golomb(gb); + if (pps->chroma_qp_index_offset[1] < -12 || pps->chroma_qp_index_offset[1] > 12) { + ret = AVERROR_INVALIDDATA; + + } + } else { + pps->chroma_qp_index_offset[1] = pps->chroma_qp_index_offset[0]; + }**/ + pps->chroma_qp_index_offset[1] = pps->chroma_qp_index_offset[0]; + yang_build_qp_table(pps, 0, pps->chroma_qp_index_offset[0], + sps->bit_depth_luma); + yang_build_qp_table(pps, 1, pps->chroma_qp_index_offset[1], + sps->bit_depth_luma); + + yang_init_dequant_tables(pps, sps); + + if (pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1]) + pps->chroma_qp_diff = 1; + + return 0; + +} + +int32_t YangH264Header::yang_packet_split(uint8_t *buf, int32_t length, int32_t isH264, + int32_t small_padding) { + //GetByteContext bc; + int32_t consumed, ret = 0; + + // nal->data=buf; + // nal->size=length; + YangPacket *pkt = &packet; + + // if (!pkt->rbsp.rbsp_buffer) + // return AVERROR(ENOMEM); + + pkt->rbsp.rbsp_buffer_size = 0; + pkt->nb_nals = 0; + + consumed = yang_extract_rbsp(buf, length, small_padding); + ret = yang_init_get_bits(&nal->gb, nal->data, nal->size << 3); + if (ret < 0) + return ret; + + // if (isH264) + ret = yang_parse_nal_header(nal); + // else + // ret = h264_parse_nal_header(nal, logctx); + if (ret <= 0 || nal->size <= 0 || nal->size_bits <= 0) { + + packet.nb_nals--; + } + + return 0; +} + +int32_t YangH264Header::yang_packet_split1(uint8_t *buf, int32_t length, int32_t isH264, + int32_t small_padding) { + //GetByteContext bc; + int32_t ret = 0; + YangPacket *pkt = &packet; + pkt->rbsp.rbsp_buffer_size = 0; + pkt->nb_nals = 0; + nal->data = buf; + nal->raw_data = buf; + nal->size = length; + nal->raw_size = length; + ret = yang_init_get_bits(&nal->gb, nal->data, nal->size << 3); + if (ret < 0) + return ret; + + // if (isH264) + ret = yang_parse_nal_header(nal); + // else + // ret = h264_parse_nal_header(nal, logctx); + if (ret <= 0 || nal->size <= 0 || nal->size_bits <= 0) { + + packet.nb_nals--; + } + + return 0; +} +int32_t YangH264Header::yang_extract_rbsp(uint8_t *src, int32_t length, + int32_t small_padding) { + int32_t i, si, di; + uint8_t *dst; + + packet.nals->skipped_bytes = 0; +#define STARTCODE_TEST \ + if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \ + if (src[i + 2] != 3 && src[i + 2] != 0) { \ + /* startcode, so we must be past the end */ \ + length = i; \ + } \ + break; \ + } +#if HAVE_FAST_UNALIGNED +#define FIND_FIRST_ZERO \ + if (i > 0 && !src[i]) \ + i--; \ + while (src[i]) \ + i++ +#if HAVE_FAST_64BIT + for (i = 0; i + 1 < length; i += 9) { + if (!((~AV_RN64(src + i) & + (AV_RN64(src + i) - 0x0100010001000101ULL)) & + 0x8000800080008080ULL)) + continue; + FIND_FIRST_ZERO; + STARTCODE_TEST; + i -= 7; + } +#else + for (i = 0; i + 1 < length; i += 5) { + if (!((~AV_RN32(src + i) & + (AV_RN32(src + i) - 0x01000101U)) & + 0x80008080U)) + continue; + FIND_FIRST_ZERO; + STARTCODE_TEST; + i -= 3; + } +#endif /* HAVE_FAST_64BIT */ +#else + for (i = 0; i + 1 < length; i += 2) { + if (src[i]) 
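+ /* every second byte is sampled: a 00 00 01 / 00 00 03 run must put a
+ zero on a sampled index, so a non-zero byte here can be skipped */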
+ continue; + if (i > 0 && src[i - 1] == 0) + i--; + STARTCODE_TEST; + } +#endif /* HAVE_FAST_UNALIGNED */ + + if (i >= length - 1 && small_padding) { // no escaped 0 + nal->data = src; + nal->raw_data = src; + nal->size = length; + nal->raw_size = length; + return length; + } else if (i > length) + i = length; + + // nal->rbsp_buffer = &rbsp->rbsp_buffer[rbsp->rbsp_buffer_size]; + dst = nal->rbsp_buffer; + + memcpy(dst, src, i); + si = di = i; + while (si + 2 < length) { + // remove escapes (very rare 1:2^22) + if (src[si + 2] > 3) { + dst[di++] = src[si++]; + dst[di++] = src[si++]; + } else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) { + if (src[si + 2] == 3) { // escape + dst[di++] = 0; + dst[di++] = 0; + si += 3; + /** + if (nal->skipped_bytes_pos) { + nal->skipped_bytes++; + if (nal->skipped_bytes_pos_size < nal->skipped_bytes) { + nal->skipped_bytes_pos_size *= 2; + // av_assert0(nal->skipped_bytes_pos_size >= nal->skipped_bytes); + // av_reallocp_array(&nal->skipped_bytes_pos, nal->skipped_bytes_pos_size, sizeof(*nal->skipped_bytes_pos)); + if (!nal->skipped_bytes_pos) { + nal->skipped_bytes_pos_size = 0; + return -12; + } + } + if (nal->skipped_bytes_pos) + nal->skipped_bytes_pos[nal->skipped_bytes-1] = di - 1; + }**/ + continue; + } else + // next start code + goto nsc; + } + + dst[di++] = src[si++]; + } + while (si < length) + dst[di++] = src[si++]; + + nsc: memset(dst + di, 0, Yang_INPUT_BUFFER_PADDING_SIZE); + + nal->data = dst; + nal->size = di; + nal->raw_data = src; + nal->raw_size = si; + packet.rbsp.rbsp_buffer_size += si; + + return si; +} + +void YangH264Header::parseRtmpHeader(uint8_t *buf) { + int32_t spsLen = *(buf + 12) + 1; + uint8_t * spsStart = buf + 13; + int32_t ppsLen = *(spsStart + spsLen + 1) + 1; + uint8_t *ppsStart = buf + 13 + spsLen + 2; + h264_decode_seq_parameter_set(spsStart, spsLen); + h264_decode_picture_parameter_set(ppsStart, ppsLen); + +} +int32_t YangH264Header::h264_slice_header_parse(uint8_t *buf, int32_t length) { + yang_packet_split1(buf, length, 1, 1); + Yang_SPS *sps = &m_sps; + Yang_PPS *pps = &m_pps; + if (sl == NULL) { + sl = (YangSliceContext*) malloc(sizeof(YangSliceContext)); + // sl->gb + } + yang_init_get_bits(&sl->gb, nal->data, nal->size << 3); + // yang_init_get_bits( &sl->gb, buf, length<<3); + sl->gb.index = 8; + + int32_t ret; + uint32_t slice_type, tmp, i; + int32_t field_pic_flag, bottom_field_flag; + //int32_t first_slice = 1; //sl == h->slice_ctx && !h->current_slice; + int32_t picture_structure; + + sl->first_mb_addr = yang_get_ue_golomb_long(&sl->gb); + + slice_type = yang_get_ue_golomb_31(&sl->gb); + + if (slice_type > 4) { + slice_type -= 5; + sl->slice_type_fixed = 1; + } else + sl->slice_type_fixed = 0; + + slice_type = yang_h264_golomb_to_pict_type[slice_type]; + sl->slice_type = slice_type; + sl->slice_type_nos = slice_type & 3; + + if (nal->type == Yang_NAL_IDR_SLICE + && sl->slice_type_nos != Yang_PICTURE_TYPE_I) { + printf("A non-intra slice in an IDR NAL unit.\n"); + return YangERROR_INVALIDDATA; + } + + sl->pps_id = yang_get_ue_golomb(&sl->gb); + if (sl->pps_id >= Yang_MAX_PPS_COUNT) { + printf("pps_id %u out of range\n", sl->pps_id); + return YangERROR_INVALIDDATA; + } + sl->frame_num = yang_get_bits(&sl->gb, sps->log2_max_frame_num); + sl->mb_mbaff = 0; + + if (sps->frame_mbs_only_flag) { + picture_structure = Yang_PICT_FRAME; + } else { + if (!sps->direct_8x8_inference_flag + && slice_type == Yang_PICTURE_TYPE_B) { + return -1; + } + field_pic_flag = yang_get_bits1(&sl->gb); + if 
(field_pic_flag) { + bottom_field_flag = yang_get_bits1(&sl->gb); + picture_structure = Yang_PICT_TOP_FIELD + bottom_field_flag; + } else { + picture_structure = Yang_PICT_FRAME; + } + } + sl->picture_structure = picture_structure; + sl->mb_field_decoding_flag = picture_structure != Yang_PICT_FRAME; + + if (picture_structure == Yang_PICT_FRAME) { + sl->curr_pic_num = sl->frame_num; + sl->max_pic_num = 1 << sps->log2_max_frame_num; + } else { + sl->curr_pic_num = 2 * sl->frame_num + 1; + sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1); + } + + if (nal->type == Yang_NAL_IDR_SLICE) + yang_get_ue_golomb_long(&sl->gb); /* idr_pic_id */ + + if (sps->poc_type == 0) { + sl->poc_lsb = yang_get_bits(&sl->gb, sps->log2_max_poc_lsb); + + if (pps->pic_order_present == 1 && picture_structure == Yang_PICT_FRAME) + sl->delta_poc_bottom = yang_get_se_golomb(&sl->gb); + } + + if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) { + sl->delta_poc[0] = yang_get_se_golomb(&sl->gb); + + if (pps->pic_order_present == 1 && picture_structure == Yang_PICT_FRAME) + sl->delta_poc[1] = yang_get_se_golomb(&sl->gb); + } + + sl->redundant_pic_count = 0; + if (pps->redundant_pic_cnt_present) + sl->redundant_pic_count = yang_get_ue_golomb(&sl->gb); + + if (sl->slice_type_nos == Yang_PICTURE_TYPE_B) + sl->direct_spatial_mv_pred = yang_get_bits1(&sl->gb); + + ret = yang_h264_parse_ref_count(&sl->list_count, sl->ref_count, &sl->gb, + pps, sl->slice_type_nos, picture_structure); + if (ret < 0) + return ret; + + if (sl->slice_type_nos != Yang_PICTURE_TYPE_I) { + ret = yang_h264_decode_ref_pic_list_reordering(sl); + if (ret < 0) { + sl->ref_count[1] = sl->ref_count[0] = 0; + return ret; + } + } + + sl->pwt.use_weight = 0; + for (i = 0; i < 2; i++) { + sl->pwt.luma_weight_flag[i] = 0; + sl->pwt.chroma_weight_flag[i] = 0; + } + if ((pps->weighted_pred && sl->slice_type_nos == Yang_PICTURE_TYPE_P) + || (pps->weighted_bipred_idc == 1 + && sl->slice_type_nos == Yang_PICTURE_TYPE_B)) { + ret = yang_h264_pred_weight_table(&sl->gb, sps, sl->ref_count, + sl->slice_type_nos, &sl->pwt, picture_structure); + if (ret < 0) + return ret; + } + + sl->explicit_ref_marking = 0; + if (nal->ref_idc) { + ret = yang_h264_decode_ref_pic_marking(sl, &sl->gb, nal); + if (ret < 0) + return YangERROR_INVALIDDATA; + } + + if (sl->slice_type_nos != Yang_PICTURE_TYPE_I && pps->cabac) { + tmp = yang_get_ue_golomb_31(&sl->gb); + if (tmp > 2) { + return YangERROR_INVALIDDATA; + } + sl->cabac_init_idc = tmp; + } + + sl->last_qscale_diff = 0; + tmp = pps->init_qp + (unsigned) yang_get_se_golomb(&sl->gb); + if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) { + //av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); + return YangERROR_INVALIDDATA; + } + sl->qscale = tmp; + sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale); + sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale); + // FIXME qscale / qp ... 
stuff + if (sl->slice_type == Yang_PICTURE_TYPE_SP) + yang_get_bits1(&sl->gb); /* sp_for_switch_flag */ + if (sl->slice_type == Yang_PICTURE_TYPE_SP + || sl->slice_type == Yang_PICTURE_TYPE_SI) + yang_get_se_golomb(&sl->gb); /* slice_qs_delta */ + + sl->deblocking_filter = 1; + sl->slice_alpha_c0_offset = 0; + sl->slice_beta_offset = 0; + if (pps->deblocking_filter_parameters_present) { + tmp = yang_get_ue_golomb_31(&sl->gb); + if (tmp > 2) { + printf("deblocking_filter_idc %u out of range\n", tmp); + return YangERROR_INVALIDDATA; + } + sl->deblocking_filter = tmp; + if (sl->deblocking_filter < 2) + sl->deblocking_filter ^= 1; // 1<->0 + + if (sl->deblocking_filter) { + int32_t slice_alpha_c0_offset_div2 = yang_get_se_golomb(&sl->gb); + int32_t slice_beta_offset_div2 = yang_get_se_golomb(&sl->gb); + if (slice_alpha_c0_offset_div2 > 6 + || slice_alpha_c0_offset_div2 < -6 + || slice_beta_offset_div2 > 6 + || slice_beta_offset_div2 < -6) { + printf("deblocking filter parameters %d %d out of range\n", + slice_alpha_c0_offset_div2, slice_beta_offset_div2); + return YangERROR_INVALIDDATA; + } + sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2; + sl->slice_beta_offset = slice_beta_offset_div2 * 2; + } + } + + return 0; +} + +int32_t YangH264Header::get_bits_count(YangGetBitContext *s) { + return yang_get_bits_count(s); +} diff --git a/libmetartc3/src/yangdecoder/YangH264Header.h b/libmetartc3/src/yangdecoder/YangH264Header.h new file mode 100755 index 00000000..9e7a2466 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangH264Header.h @@ -0,0 +1,213 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGDECODER_SRC_YANGH264HEADER_H_ +#define YANGDECODER_SRC_YANGH264HEADER_H_ +#include +#include "stdint.h" +#include "stddef.h" + + + +#define LEFT_MBS 1 +#define Yang_MAX_SPS_COUNT 32 +#define Yang_MAX_PPS_COUNT 256 +#define Yang_MAX_LOG2_MAX_FRAME_NUM (12 + 4) + +#define Yang_MAX_MMCO_COUNT 66 +#define Yang_PICT_FRAME 3 +#define Yang_EXTENDED_SAR 255 +#define Yang_QP(qP, depth) ((qP) + 6 * ((depth) - 8)) +#define Yang_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0])) + +#define Yang_NAL_IDR_SLICE 5 +#define Yang_PICT_TOP_FIELD 1 +#define Yang_FIELD_PICTURE(h) ((h)->picture_structure != Yang_PICT_FRAME) +//#define Yang_FRAME_MBAFF(h) sps->mb_aff && (h->picture_structure == PICT_FRAME) + +//#define Yang_FIELD_OR_MBAFF_PICTURE(h) (Yang_FRAME_MBAFF(h) || Yang_FIELD_PICTURE(h)) + + +typedef struct YangSliceContext { + // struct H264Context *h264; + YangGetBitContext gb; + // ERContext er; + + int32_t slice_num; + int32_t slice_type; + int32_t slice_type_nos; ///< S free slice type (SI/SP are remapped to I/P) + int32_t slice_type_fixed; + + int32_t qscale; + int32_t chroma_qp[2]; // QPc + int32_t qp_thresh; ///< QP threshold to skip loopfilter + int32_t last_qscale_diff; + + // deblock + int32_t deblocking_filter; ///< disable_deblocking_filter_idc with 1 <-> 0 + int32_t slice_alpha_c0_offset; + int32_t slice_beta_offset; + + YangPredWeightTable pwt; + + int32_t prev_mb_skipped; + int32_t next_mb_skipped; + + int32_t chroma_pred_mode; + int32_t intra16x16_pred_mode; + + char intra4x4_pred_mode_cache[5 * 8]; + char (*intra4x4_pred_mode); + + int32_t topleft_mb_xy; + int32_t top_mb_xy; + int32_t topright_mb_xy; + int32_t left_mb_xy[LEFT_MBS]; + + int32_t topleft_type; + int32_t top_type; + int32_t topright_type; + int32_t left_type[LEFT_MBS]; + + const uint8_t *left_block; + int32_t topleft_partition; + + uint32_t topleft_samples_available; + uint32_t top_samples_available; + uint32_t 
topright_samples_available; + uint32_t left_samples_available; + + ptrdiff_t linesize, uvlinesize; + ptrdiff_t mb_linesize; ///< may be equal to s->linesize or s->linesize * 2, for mbaff + ptrdiff_t mb_uvlinesize; + + int32_t mb_x, mb_y; + int32_t mb_xy; + int32_t resync_mb_x; + int32_t resync_mb_y; + uint32_t first_mb_addr; + // index of the first MB of the next slice + int32_t next_slice_idx; + int32_t mb_skip_run; + int32_t is_complex; + + int32_t picture_structure; + int32_t mb_field_decoding_flag; + int32_t mb_mbaff; ///< mb_aff_frame && mb_field_decoding_flag + + int32_t redundant_pic_count; + + /** + * number of neighbors (top and/or left) that used 8x8 dct + */ + int32_t neighbor_transform_size; + + int32_t direct_spatial_mv_pred; + int32_t col_parity; + int32_t col_fieldoff; + + int32_t cbp; + int32_t top_cbp; + int32_t left_cbp; + + int32_t dist_scale_factor[32]; + int32_t dist_scale_factor_field[2][32]; + int32_t map_col_to_list0[2][16 + 32]; + int32_t map_col_to_list0_field[2][2][16 + 32]; + + /** + * num_ref_idx_l0/1_active_minus1 + 1 + */ + uint32_t ref_count[2]; ///< counts frames or fields, depending on current mb mode + uint32_t list_count; + // H264Ref ref_list[2][48]; /**< 0..15: frame refs, 16..47: mbaff field refs. + // * Reordered version of default_ref_list + // * according to picture reordering in slice header */ + struct { + uint8_t op; + uint32_t val; + } ref_modifications[2][32]; + int32_t nb_ref_modifications[2]; + + uint32_t pps_id; + + const uint8_t *intra_pcm_ptr; + int16_t *dc_val_base; + + uint8_t *bipred_scratchpad; + uint8_t *edge_emu_buffer; + uint8_t (*top_borders[2])[(16 * 3) * 2]; + int32_t bipred_scratchpad_allocated; + int32_t edge_emu_buffer_allocated; + int32_t top_borders_allocated[2]; + + /** + * non zero coeff count cache. + * is 64 if not available. + */ + /** + DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8]; + DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2]; + DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8]; + DECLARE_ALIGNED(16, uint8_t, mvd_cache)[2][5 * 8][2]; + uint8_t direct_cache[5 * 8]; + + DECLARE_ALIGNED(8, uint16_t, sub_mb_type)[4]; + + ///< as a DCT coefficient is int32_t in high depth, we need to reserve twice the space. 
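+ (the aligned caches above and below belong to FFmpeg's macroblock
+ reconstruction loop; they stay commented out because this YangSliceContext
+ is only filled by h264_slice_header_parse and never drives reconstruction)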
+ DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2]; + DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2]; + **/ + ///< as mb is addressed by scantable[i] and scantable is uint8_t we can either + ///< check that i is not too large or ensure that there is some unused stuff after mb + int16_t mb_padding[256 * 2]; + + uint8_t (*mvd_table[2])[2]; + + /** + * Cabac + */ + //CABACContext cabac; + //uint8_t cabac_state[1024]; + int32_t cabac_init_idc; + + YangMMCO mmco[Yang_MAX_MMCO_COUNT]; + int32_t nb_mmco; + int32_t explicit_ref_marking; + + int32_t frame_num; + int32_t poc_lsb; + int32_t delta_poc_bottom; + int32_t delta_poc[2]; + int32_t curr_pic_num; + int32_t max_pic_num; +} YangSliceContext; +#define MAX_MBPAIR_SIZE (256*1024) + + +class YangH264Header { +public: + YangH264Header(); + virtual ~YangH264Header(); + Yang_SPS m_sps; + Yang_PPS m_pps; + YangPacket packet; + YangNAL *nal; + YangRBSP *rbsp; + YangSliceContext *sl; + int32_t width,height,fps; + //void initGb(YangGetBitContext *gb,uint8_t *buf,int32_t bitsize); + int32_t h264_decode_seq_parameter_set(uint8_t *buf,int32_t len);//, int32_t ignore_truncation + int32_t h264_decode_picture_parameter_set(uint8_t *buf,int32_t len); + int32_t yang_packet_split(uint8_t *buf, int32_t length,int32_t isH264, int32_t small_padding); + int32_t h264_slice_header_parse(uint8_t *buf, int32_t length); + int32_t get_bits_count(YangGetBitContext *s); + void parseRtmpHeader(uint8_t *buf); + // void getRtmpHeader(uint8_t *buf,uint8_t *src,int32_t *hLen); +private: + int32_t yang_extract_rbsp( uint8_t *src, int32_t length, int32_t small_padding); + int32_t yang_packet_split1(uint8_t *buf, int32_t length,int32_t isH264, int32_t small_padding); +}; + +#endif /* YANGDECODER_SRC_YANGH264HEADER_H_ */ diff --git a/libmetartc3/src/yangdecoder/YangH264Header1.h b/libmetartc3/src/yangdecoder/YangH264Header1.h new file mode 100755 index 00000000..a4385e87 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangH264Header1.h @@ -0,0 +1,712 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include "memory.h" +#include "stdio.h" + +#include "YangH264Header.h" + + +static int32_t yang_clip(int32_t a, int32_t amin, int32_t amax) +{ + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} +#define Yang_CHROMA_QP_TABLE_END(d) \ + Yang_QP(0, d), Yang_QP(1, d), Yang_QP(2, d), Yang_QP(3, d), Yang_QP(4, d), Yang_QP(5, d), \ + Yang_QP(6, d), Yang_QP(7, d), Yang_QP(8, d), Yang_QP(9, d), Yang_QP(10, d), Yang_QP(11, d), \ + Yang_QP(12, d), Yang_QP(13, d), Yang_QP(14, d), Yang_QP(15, d), Yang_QP(16, d), Yang_QP(17, d), \ + Yang_QP(18, d), Yang_QP(19, d), Yang_QP(20, d), Yang_QP(21, d), Yang_QP(22, d), Yang_QP(23, d), \ + Yang_QP(24, d), Yang_QP(25, d), Yang_QP(26, d), Yang_QP(27, d), Yang_QP(28, d), Yang_QP(29, d), \ + Yang_QP(29, d), Yang_QP(30, d), Yang_QP(31, d), Yang_QP(32, d), Yang_QP(32, d), Yang_QP(33, d), \ + Yang_QP(34, d), Yang_QP(34, d), Yang_QP(35, d), Yang_QP(35, d), Yang_QP(36, d), Yang_QP(36, d), \ + Yang_QP(37, d), Yang_QP(37, d), Yang_QP(37, d), Yang_QP(38, d), Yang_QP(38, d), Yang_QP(38, d), \ + Yang_QP(39, d), Yang_QP(39, d), Yang_QP(39, d), Yang_QP(39, d) + +/* maximum number of MBs in the DPB for a given level */ +static const int32_t yang_level_max_dpb_mbs[][2] = { + { 10, 396 }, + { 11, 900 }, + { 12, 2376 }, + { 13, 2376 }, + { 20, 2376 }, + { 21, 4752 }, + { 22, 8100 }, + { 30, 8100 }, + { 31, 18000 }, + { 32, 20480 }, + { 40, 32768 }, + { 41, 32768 }, + { 42, 34816 }, + { 50, 110400 }, + { 51, 184320 }, + { 52, 184320 }, 
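+ // { level_idc, max DPB size in macroblocks } per H.264 Annex A, Table A-1;
+ // referenced only by the commented-out num_reorder_frames fallback in
+ // YangH264Header.cpp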
+}; + +static const uint8_t yang_default_scaling4[2][16] = { + { 6, 13, 20, 28, 13, 20, 28, 32, + 20, 28, 32, 37, 28, 32, 37, 42 }, + { 10, 14, 20, 24, 14, 20, 24, 27, + 20, 24, 27, 30, 24, 27, 30, 34 } +}; + +static const uint8_t yang_default_scaling8[2][64] = { + { 6, 10, 13, 16, 18, 23, 25, 27, + 10, 11, 16, 18, 23, 25, 27, 29, + 13, 16, 18, 23, 25, 27, 29, 31, + 16, 18, 23, 25, 27, 29, 31, 33, + 18, 23, 25, 27, 29, 31, 33, 36, + 23, 25, 27, 29, 31, 33, 36, 38, + 25, 27, 29, 31, 33, 36, 38, 40, + 27, 29, 31, 33, 36, 38, 40, 42 }, + { 9, 13, 15, 17, 19, 21, 22, 24, + 13, 13, 17, 19, 21, 22, 24, 25, + 15, 17, 19, 21, 22, 24, 25, 27, + 17, 19, 21, 22, 24, 25, 27, 28, + 19, 21, 22, 24, 25, 27, 28, 30, + 21, 22, 24, 25, 27, 28, 30, 32, + 22, 24, 25, 27, 28, 30, 32, 33, + 24, 25, 27, 28, 30, 32, 33, 35 } +}; + +const uint8_t yang_zigzag_direct[64] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63 +}; + +const uint8_t yang_zigzag_scan[16+1] = { + 0 + 0 * 4, 1 + 0 * 4, 0 + 1 * 4, 0 + 2 * 4, + 1 + 1 * 4, 2 + 0 * 4, 3 + 0 * 4, 2 + 1 * 4, + 1 + 2 * 4, 0 + 3 * 4, 1 + 3 * 4, 2 + 2 * 4, + 3 + 1 * 4, 3 + 2 * 4, 2 + 3 * 4, 3 + 3 * 4, +}; + +const uint8_t yang_h264_chroma_qp[7][Yang_QP_MAX_NUM + 1] = { + { Yang_CHROMA_QP_TABLE_END(8) }, + { 0, 1, 2, 3, 4, 5, + Yang_CHROMA_QP_TABLE_END(9) }, + { 0, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, + Yang_CHROMA_QP_TABLE_END(10) }, + { 0, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, + 12,13,14,15, 16, 17, + Yang_CHROMA_QP_TABLE_END(11) }, + { 0, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, + 12,13,14,15, 16, 17, + 18,19,20,21, 22, 23, + Yang_CHROMA_QP_TABLE_END(12) }, + { 0, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, + 12,13,14,15, 16, 17, + 18,19,20,21, 22, 23, + 24,25,26,27, 28, 29, + Yang_CHROMA_QP_TABLE_END(13) }, + { 0, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, + 12,13,14,15, 16, 17, + 18,19,20,21, 22, 23, + 24,25,26,27, 28, 29, + 30,31,32,33, 34, 35, + Yang_CHROMA_QP_TABLE_END(14) }, +}; +const uint8_t yang_h264_quant_div6[Yang_QP_MAX_NUM + 1] = { + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, + 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, + 10,10,10,11,11,11,11,11,11,12,12,12,12,12,12,13,13,13, 13, 13, 13, + 14,14,14,14, +}; +const uint8_t yang_h264_quant_rem6[Yang_QP_MAX_NUM + 1] = { + 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, + 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, + 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, + 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, + 0, 1, 2, 3, +}; + +const uint8_t yang_h264_dequant8_coeff_init_scan[16] = { + 0, 3, 4, 3, 3, 1, 5, 1, 4, 5, 2, 5, 3, 1, 5, 1 +}; + +const uint8_t yang_h264_dequant8_coeff_init[6][6] = { + { 20, 18, 32, 19, 25, 24 }, + { 22, 19, 35, 21, 28, 26 }, + { 26, 23, 42, 24, 33, 31 }, + { 28, 25, 45, 26, 35, 33 }, + { 32, 28, 51, 30, 40, 38 }, + { 36, 32, 58, 34, 46, 43 }, +}; +const uint8_t yang_h264_dequant4_coeff_init[6][3] = { + { 10, 13, 16 }, + { 11, 14, 18 }, + { 13, 16, 20 }, + { 14, 18, 23 }, + { 16, 20, 25 }, + { 18, 23, 29 }, +}; + +static const YangRational yang_h264_pixel_aspect[17] = { + { 0, 1 }, + { 1, 1 }, + { 12, 11 }, + { 10, 11 }, + { 16, 11 }, + { 40, 33 }, + { 24, 11 }, + { 20, 11 }, + { 32, 11 }, + { 80, 33 }, + { 18, 11 }, 
+ { 15, 11 }, + { 64, 33 }, + { 160, 99 }, + { 4, 3 }, + { 3, 2 }, + { 2, 1 }, +}; + +const uint8_t yang_h264_golomb_to_pict_type[5] = { + Yang_PICTURE_TYPE_P, Yang_PICTURE_TYPE_B, Yang_PICTURE_TYPE_I, + Yang_PICTURE_TYPE_SP, Yang_PICTURE_TYPE_SI +}; +static inline int32_t get_chroma_qp(Yang_PPS *pps, int32_t t, int32_t qscale) +{ + return pps->chroma_qp_table[t][qscale]; +} +static void yang_build_qp_table(Yang_PPS *pps, int32_t t, int32_t index, const int32_t depth) +{ + int32_t i; + const int32_t max_qp = 51 + 6 * (depth - 8); + for (i = 0; i < max_qp + 1; i++) + pps->chroma_qp_table[t][i] = + yang_h264_chroma_qp[depth - 8][yang_clip(i + index, 0, max_qp)]; +} +static void init_dequant8_coeff_table(Yang_PPS *pps, const Yang_SPS *sps) +{ + int32_t i, j, q, x; + const int32_t max_qp = 51 + 6 * (sps->bit_depth_luma - 8); + + for (i = 0; i < 6; i++) { + pps->dequant8_coeff[i] = pps->dequant8_buffer[i]; + for (j = 0; j < i; j++) + if (!memcmp(pps->scaling_matrix8[j], pps->scaling_matrix8[i], + 64 * sizeof(uint8_t))) { + pps->dequant8_coeff[i] = pps->dequant8_buffer[j]; + break; + } + if (j < i) + continue; + + for (q = 0; q < max_qp + 1; q++) { + int32_t shift = yang_h264_quant_div6[q]; + int32_t idx = yang_h264_quant_rem6[q]; + for (x = 0; x < 64; x++) + pps->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] = + ((uint32_t)yang_h264_dequant8_coeff_init[idx][yang_h264_dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] * + pps->scaling_matrix8[i][x]) << shift; + } + } +} + +static void init_dequant4_coeff_table(Yang_PPS *pps, const Yang_SPS *sps) +{ + int32_t i, j, q, x; + const int32_t max_qp = 51 + 6 * (sps->bit_depth_luma - 8); + for (i = 0; i < 6; i++) { + pps->dequant4_coeff[i] = pps->dequant4_buffer[i]; + for (j = 0; j < i; j++) + if (!memcmp(pps->scaling_matrix4[j], pps->scaling_matrix4[i], + 16 * sizeof(uint8_t))) { + pps->dequant4_coeff[i] = pps->dequant4_buffer[j]; + break; + } + if (j < i) + continue; + + for (q = 0; q < max_qp + 1; q++) { + int32_t shift = yang_h264_quant_div6[q] + 2; + int32_t idx = yang_h264_quant_rem6[q]; + for (x = 0; x < 16; x++) + pps->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] = + ((uint32_t)yang_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * + pps->scaling_matrix4[i][x]) << shift; + } + } +} +static void yang_init_dequant_tables(Yang_PPS *pps, const Yang_SPS *sps) +{ + int32_t i, x; + init_dequant4_coeff_table(pps, sps); + memset(pps->dequant8_coeff, 0, sizeof(pps->dequant8_coeff)); + + if (pps->transform_8x8_mode) + init_dequant8_coeff_table(pps, sps); + if (sps->transform_bypass) { + for (i = 0; i < 6; i++) + for (x = 0; x < 16; x++) + pps->dequant4_coeff[i][0][x] = 1 << 6; + if (pps->transform_8x8_mode) + for (i = 0; i < 6; i++) + for (x = 0; x < 64; x++) + pps->dequant8_coeff[i][0][x] = 1 << 6; + } +} +static int32_t yang_decode_scaling_list(YangGetBitContext *gb, uint8_t *factors, int32_t size, + const uint8_t *jvt_list, + const uint8_t *fallback_list) +{ + int32_t i, last = 8, next = 8; + const uint8_t *scan = size == 16 ? yang_zigzag_scan : yang_zigzag_direct; + if (!yang_get_bits1(gb)) /* matrix not written, we use the predicted one */ + memcpy(factors, fallback_list, size * sizeof(uint8_t)); + else + for (i = 0; i < size; i++) { + if (next) { + int32_t v = yang_get_se_golomb(gb); + next = (last + v) & 0xff; + } + if (!i && !next) { /* matrix not written, we use the preset one */ + memcpy(factors, jvt_list, size * sizeof(uint8_t)); + break; + } + last = factors[scan[i]] = next ? 
next : last; + } + return 0; +} + +static int32_t yang_decode_scaling_matrices(YangGetBitContext *gb, const Yang_SPS *sps, + const Yang_PPS *pps, int32_t is_sps, + uint8_t(*scaling_matrix4)[16], + uint8_t(*scaling_matrix8)[64]) +{ + int32_t fallback_sps = !is_sps && sps->scaling_matrix_present; + const uint8_t *fallback[4] = { + fallback_sps ? sps->scaling_matrix4[0] : yang_default_scaling4[0], + fallback_sps ? sps->scaling_matrix4[3] : yang_default_scaling4[1], + fallback_sps ? sps->scaling_matrix8[0] : yang_default_scaling8[0], + fallback_sps ? sps->scaling_matrix8[3] : yang_default_scaling8[1] + }; + int32_t ret = 0; + if (yang_get_bits1(gb)) { + ret |= yang_decode_scaling_list(gb, scaling_matrix4[0], 16, yang_default_scaling4[0], fallback[0]); // Intra, Y + ret |= yang_decode_scaling_list(gb, scaling_matrix4[1], 16, yang_default_scaling4[0], scaling_matrix4[0]); // Intra, Cr + ret |= yang_decode_scaling_list(gb, scaling_matrix4[2], 16, yang_default_scaling4[0], scaling_matrix4[1]); // Intra, Cb + ret |= yang_decode_scaling_list(gb, scaling_matrix4[3], 16, yang_default_scaling4[1], fallback[1]); // Inter, Y + ret |= yang_decode_scaling_list(gb, scaling_matrix4[4], 16, yang_default_scaling4[1], scaling_matrix4[3]); // Inter, Cr + ret |= yang_decode_scaling_list(gb, scaling_matrix4[5], 16, yang_default_scaling4[1], scaling_matrix4[4]); // Inter, Cb + if (is_sps || pps->transform_8x8_mode) { + ret |= yang_decode_scaling_list(gb, scaling_matrix8[0], 64, yang_default_scaling8[0], fallback[2]); // Intra, Y + ret |= yang_decode_scaling_list(gb, scaling_matrix8[3], 64, yang_default_scaling8[1], fallback[3]); // Inter, Y + if (sps->chroma_format_idc == 3) { + ret |= yang_decode_scaling_list(gb, scaling_matrix8[1], 64, yang_default_scaling8[0], scaling_matrix8[0]); // Intra, Cr + ret |= yang_decode_scaling_list(gb, scaling_matrix8[4], 64, yang_default_scaling8[1], scaling_matrix8[3]); // Inter, Cr + ret |= yang_decode_scaling_list(gb, scaling_matrix8[2], 64, yang_default_scaling8[0], scaling_matrix8[1]); // Intra, Cb + ret |= yang_decode_scaling_list(gb, scaling_matrix8[5], 64, yang_default_scaling8[1], scaling_matrix8[4]); // Inter, Cb + } + } + if (!ret) + ret = is_sps; + } + + return ret; +} +static int32_t yang_decode_hrd_parameters(YangGetBitContext *gb, Yang_SPS *sps) +{ + int32_t cpb_count=0, i=0; + cpb_count = yang_get_ue_golomb_31(gb) + 1; + + if (cpb_count > 32) { + // av_log(avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count); + return YangERROR_INVALIDDATA; + } + + yang_get_bits(gb, 4); /* bit_rate_scale */ + yang_get_bits(gb, 4); /* cpb_size_scale */ + for (i = 0; i < cpb_count; i++) { + yang_get_ue_golomb_long(gb); /* bit_rate_value_minus1 */ + yang_get_ue_golomb_long(gb); /* cpb_size_value_minus1 */ + yang_get_bits1(gb); /* cbr_flag */ + } + sps->initial_cpb_removal_delay_length = yang_get_bits(gb, 5) + 1; + sps->cpb_removal_delay_length = yang_get_bits(gb, 5) + 1; + sps->dpb_output_delay_length = yang_get_bits(gb, 5) + 1; + sps->time_offset_length = yang_get_bits(gb, 5); + sps->cpb_cnt = cpb_count; + return 0; +} +static int32_t yang_decode_vui_parameters(YangGetBitContext *gb,Yang_SPS *sps) +{ + int32_t aspect_ratio_info_present_flag; + uint32_t aspect_ratio_idc; + + aspect_ratio_info_present_flag = yang_get_bits1(gb); + + if (aspect_ratio_info_present_flag) { + aspect_ratio_idc = yang_get_bits(gb, 8); + if (aspect_ratio_idc == Yang_EXTENDED_SAR) { + sps->sar.num = yang_get_bits(gb, 16); + sps->sar.den = yang_get_bits(gb, 16); + } else if (aspect_ratio_idc < 
Yang_ARRAY_ELEMS(yang_h264_pixel_aspect)) { + sps->sar = yang_h264_pixel_aspect[aspect_ratio_idc]; + } else { + // av_log(avctx, AV_LOG_ERROR, "illegal aspect ratio\n"); + return YangERROR_INVALIDDATA; + } + } else { + sps->sar.num = + sps->sar.den = 0; + } + + if (yang_get_bits1(gb)) /* overscan_info_present_flag */ + yang_get_bits1(gb); /* overscan_appropriate_flag */ + + sps->video_signal_type_present_flag = yang_get_bits1(gb); + if (sps->video_signal_type_present_flag) { + yang_get_bits(gb, 3); /* video_format */ + sps->full_range = yang_get_bits1(gb); /* video_full_range_flag */ + + sps->colour_description_present_flag = yang_get_bits1(gb); + if (sps->colour_description_present_flag) { + sps->color_primaries = yang_get_bits(gb, 8); /* colour_primaries */ + sps->color_trc = yang_get_bits(gb, 8); /* transfer_characteristics */ + sps->colorspace = yang_get_bits(gb, 8); /* matrix_coefficients */ + + // Set invalid values to "unspecified" + // if (!av_color_primaries_name(sps->color_primaries)) + sps->color_primaries = Yang_PRI_UNSPECIFIED; + // if (!av_color_transfer_name(sps->color_trc)) + sps->color_trc = Yang_TRC_UNSPECIFIED; + // if (!av_color_space_name(sps->colorspace)) + sps->colorspace = Yang_SPC_UNSPECIFIED; + } + } + + /* chroma_location_info_present_flag */ + if (yang_get_bits1(gb)) { + /* chroma_sample_location_type_top_field */ + // avctx->chroma_sample_location = yang_get_ue_golomb(gb) + 1; + int32_t chroma_sample_location = yang_get_ue_golomb(gb) + 1; + yang_get_ue_golomb(gb); /* chroma_sample_location_type_bottom_field */ + } + + if (yang_show_bits1(gb) && yang_get_bits_left(gb) < 10) { + // av_log(avctx, AV_LOG_WARNING, "Truncated VUI\n"); + return 0; + } + + sps->timing_info_present_flag = yang_get_bits1(gb); + if (sps->timing_info_present_flag) { + unsigned num_units_in_tick = yang_get_bits_long(gb, 32); + unsigned time_scale = yang_get_bits_long(gb, 32); + if (!num_units_in_tick || !time_scale) { + // av_log(avctx, AV_LOG_ERROR,"time_scale/num_units_in_tick invalid or unsupported (%u/%u)\n", time_scale, num_units_in_tick); + sps->timing_info_present_flag = 0; + } else { + sps->num_units_in_tick = num_units_in_tick; + sps->time_scale = time_scale; + } + sps->fixed_frame_rate_flag = yang_get_bits1(gb); + } + + sps->nal_hrd_parameters_present_flag = yang_get_bits1(gb); + if (sps->nal_hrd_parameters_present_flag) + if (yang_decode_hrd_parameters(gb, sps) < 0) + return YangERROR_INVALIDDATA; + sps->vcl_hrd_parameters_present_flag = yang_get_bits1(gb); + if (sps->vcl_hrd_parameters_present_flag) + if (yang_decode_hrd_parameters(gb, sps) < 0) + return YangERROR_INVALIDDATA; + if (sps->nal_hrd_parameters_present_flag || + sps->vcl_hrd_parameters_present_flag) + yang_get_bits1(gb); /* low_delay_hrd_flag */ + sps->pic_struct_present_flag = yang_get_bits1(gb); + if (!yang_get_bits_left(gb)) + return 0; + sps->bitstream_restriction_flag = yang_get_bits1(gb); + if (sps->bitstream_restriction_flag) { + yang_get_bits1(gb); /* motion_vectors_over_pic_boundaries_flag */ + yang_get_ue_golomb(gb); /* max_bytes_per_pic_denom */ + yang_get_ue_golomb(gb); /* max_bits_per_mb_denom */ + yang_get_ue_golomb(gb); /* log2_max_mv_length_horizontal */ + yang_get_ue_golomb(gb); /* log2_max_mv_length_vertical */ + sps->num_reorder_frames = yang_get_ue_golomb(gb); + yang_get_ue_golomb(gb); /*max_dec_frame_buffering*/ + + if (yang_get_bits_left(gb) < 0) { + sps->num_reorder_frames = 0; + sps->bitstream_restriction_flag = 0; + } + + if (sps->num_reorder_frames > 16U + /* max_dec_frame_buffering || 
max_dec_frame_buffering > 16 */) { + // av_log(avctx, AV_LOG_ERROR, "Clipping illegal num_reorder_frames %d\n", sps->num_reorder_frames); + sps->num_reorder_frames = 16; + return YangERROR_INVALIDDATA; + } + } + + return 0; +} + +int32_t yang_parse_nal_header(YangNAL *nal) +{ + YangGetBitContext *gb = &nal->gb; + + if (yang_get_bits1(gb) != 0) + return YangERROR_INVALIDDATA; + + nal->ref_idc = yang_get_bits(gb, 2); + nal->type = yang_get_bits(gb, 5); + return 1; +} + + +int32_t yang_h264_parse_ref_count(uint32_t *plist_count, uint32_t ref_count[2], + YangGetBitContext *gb, Yang_PPS *pps, + int32_t slice_type_nos, int32_t picture_structure) +{ + int32_t list_count; + int32_t num_ref_idx_active_override_flag; + + // set defaults, might be overridden a few lines later + ref_count[0] = pps->ref_count[0]; + ref_count[1] = pps->ref_count[1]; + + if (slice_type_nos != Yang_PICTURE_TYPE_I) { + unsigned max[2]; + max[0] = max[1] = picture_structure == Yang_PICT_FRAME ? 15 : 31; + + num_ref_idx_active_override_flag = yang_get_bits1(gb); + + if (num_ref_idx_active_override_flag) { + ref_count[0] = yang_get_ue_golomb(gb) + 1; + if (slice_type_nos == Yang_PICTURE_TYPE_B) { + ref_count[1] = yang_get_ue_golomb(gb) + 1; + } else + // full range is spec-ok in this case, even for frames + ref_count[1] = 1; + } + + if (ref_count[0] - 1 > max[0] || ref_count[1] - 1 > max[1]) { + printf("reference overflow %u > %u or %u > %u\n", + ref_count[0] - 1, max[0], ref_count[1] - 1, max[1]); + ref_count[0] = ref_count[1] = 0; + *plist_count = 0; + goto fail; + } + + if (slice_type_nos == Yang_PICTURE_TYPE_B) + list_count = 2; + else + list_count = 1; + } else { + list_count = 0; + ref_count[0] = ref_count[1] = 0; + } + + *plist_count = list_count; + + return 0; +fail: + *plist_count = 0; + ref_count[0] = 0; + ref_count[1] = 0; + return YangERROR_INVALIDDATA; +} + +int32_t yang_h264_decode_ref_pic_list_reordering(YangSliceContext *sl) +{ + int32_t list=0, index=0; + + sl->nb_ref_modifications[0] = 0; + sl->nb_ref_modifications[1] = 0; + + for (list = 0; list < sl->list_count; list++) { + if (!yang_get_bits1(&sl->gb)) // ref_pic_list_modification_flag_l[01] + continue; + + for (index = 0; ; index++) { + uint32_t op = yang_get_ue_golomb_31(&sl->gb); + + if (op == 3) + break; + + if (index >= sl->ref_count[list]) { + printf("reference count overflow\n"); + return YangERROR_INVALIDDATA; + } else if (op > 2) { + printf("illegal modification_of_pic_nums_idc %u\n", op); + return YangERROR_INVALIDDATA; + } + sl->ref_modifications[list][index].val = yang_get_ue_golomb_long(&sl->gb); + sl->ref_modifications[list][index].op = op; + sl->nb_ref_modifications[list]++; + } + } + + return 0; +} +int32_t yang_h264_pred_weight_table(YangGetBitContext *gb, Yang_SPS *sps, + uint32_t *ref_count, int32_t slice_type_nos, + YangPredWeightTable *pwt, + int32_t picture_structure) +{ + int32_t list, i, j; + int32_t luma_def, chroma_def; + + pwt->use_weight = 0; + pwt->use_weight_chroma = 0; + + pwt->luma_log2_weight_denom = yang_get_ue_golomb(gb); + if (pwt->luma_log2_weight_denom > 7U) { + printf( "luma_log2_weight_denom %d is out of range\n", pwt->luma_log2_weight_denom); + pwt->luma_log2_weight_denom = 0; + } + luma_def = 1 << pwt->luma_log2_weight_denom; + + if (sps->chroma_format_idc) { + pwt->chroma_log2_weight_denom = yang_get_ue_golomb(gb); + if (pwt->chroma_log2_weight_denom > 7U) { + printf("chroma_log2_weight_denom %d is out of range\n", pwt->chroma_log2_weight_denom); + pwt->chroma_log2_weight_denom = 0; + } + chroma_def = 1 << 
pwt->chroma_log2_weight_denom; + } + + for (list = 0; list < 2; list++) { + pwt->luma_weight_flag[list] = 0; + pwt->chroma_weight_flag[list] = 0; + for (i = 0; i < ref_count[list]; i++) { + int32_t luma_weight_flag, chroma_weight_flag; + + luma_weight_flag = yang_get_bits1(gb); + if (luma_weight_flag) { + pwt->luma_weight[i][list][0] = yang_get_se_golomb(gb); + pwt->luma_weight[i][list][1] = yang_get_se_golomb(gb); + if ((char)pwt->luma_weight[i][list][0] != pwt->luma_weight[i][list][0] || + (char)pwt->luma_weight[i][list][1] != pwt->luma_weight[i][list][1]) + goto out_range_weight; + if (pwt->luma_weight[i][list][0] != luma_def || + pwt->luma_weight[i][list][1] != 0) { + pwt->use_weight = 1; + pwt->luma_weight_flag[list] = 1; + } + } else { + pwt->luma_weight[i][list][0] = luma_def; + pwt->luma_weight[i][list][1] = 0; + } + + if (sps->chroma_format_idc) { + chroma_weight_flag = yang_get_bits1(gb); + if (chroma_weight_flag) { + int32_t j; + for (j = 0; j < 2; j++) { + pwt->chroma_weight[i][list][j][0] = yang_get_se_golomb(gb); + pwt->chroma_weight[i][list][j][1] = yang_get_se_golomb(gb); + if ((char)pwt->chroma_weight[i][list][j][0] != pwt->chroma_weight[i][list][j][0] || + (char)pwt->chroma_weight[i][list][j][1] != pwt->chroma_weight[i][list][j][1]) { + pwt->chroma_weight[i][list][j][0] = chroma_def; + pwt->chroma_weight[i][list][j][1] = 0; + goto out_range_weight; + } + if (pwt->chroma_weight[i][list][j][0] != chroma_def || + pwt->chroma_weight[i][list][j][1] != 0) { + pwt->use_weight_chroma = 1; + pwt->chroma_weight_flag[list] = 1; + } + } + } else { + int32_t j; + for (j = 0; j < 2; j++) { + pwt->chroma_weight[i][list][j][0] = chroma_def; + pwt->chroma_weight[i][list][j][1] = 0; + } + } + } + + // for MBAFF + if (picture_structure == Yang_PICT_FRAME) { + pwt->luma_weight[16 + 2 * i][list][0] = pwt->luma_weight[16 + 2 * i + 1][list][0] = pwt->luma_weight[i][list][0]; + pwt->luma_weight[16 + 2 * i][list][1] = pwt->luma_weight[16 + 2 * i + 1][list][1] = pwt->luma_weight[i][list][1]; + if (sps->chroma_format_idc) { + for (j = 0; j < 2; j++) { + pwt->chroma_weight[16 + 2 * i][list][j][0] = pwt->chroma_weight[16 + 2 * i + 1][list][j][0] = pwt->chroma_weight[i][list][j][0]; + pwt->chroma_weight[16 + 2 * i][list][j][1] = pwt->chroma_weight[16 + 2 * i + 1][list][j][1] = pwt->chroma_weight[i][list][j][1]; + } + } + } + } + if (slice_type_nos != Yang_PICTURE_TYPE_B) + break; + } + pwt->use_weight = pwt->use_weight || pwt->use_weight_chroma; + return 0; +out_range_weight: + // avpriv_request_sample(logctx, "Out of range weight"); + return YangERROR_INVALIDDATA; +} +int32_t yang_h264_decode_ref_pic_marking(YangSliceContext *sl, YangGetBitContext *gb, + YangNAL *nal) +{ + int32_t i; + YangMMCO *mmco = sl->mmco; + int32_t nb_mmco = 0; + + if (nal->type == Yang_NAL_IDR_SLICE) { // FIXME fields + yang_skip_bits1(gb); // broken_link + if (yang_get_bits1(gb)) { + mmco[0].opcode = Yang_MMCO_LONG; + mmco[0].long_arg = 0; + nb_mmco = 1; + } + sl->explicit_ref_marking = 1; + } else { + sl->explicit_ref_marking = yang_get_bits1(gb); + if (sl->explicit_ref_marking) { + for (i = 0; i < Yang_MAX_MMCO_COUNT; i++) { + Yang_MMCOOpcode opcode = Yang_MMCOOpcode(yang_get_ue_golomb_31(gb)); + + mmco[i].opcode = opcode; + if (opcode == Yang_MMCO_SHORT2UNUSED || opcode == Yang_MMCO_SHORT2LONG) { + mmco[i].short_pic_num = + (sl->curr_pic_num - yang_get_ue_golomb_long(gb) - 1) & + (sl->max_pic_num - 1); + } + if (opcode == Yang_MMCO_SHORT2LONG || opcode == Yang_MMCO_LONG2UNUSED || + opcode == Yang_MMCO_LONG || opcode == 
Yang_MMCO_SET_MAX_LONG) { + uint32_t long_arg = yang_get_ue_golomb_31(gb); + if (long_arg >= 32 || + (long_arg >= 16 && !(opcode == Yang_MMCO_SET_MAX_LONG && + long_arg == 16) && + !(opcode == Yang_MMCO_LONG2UNUSED && Yang_FIELD_PICTURE(sl)))) { + printf( "illegal long ref in memory management control operation %d\n", opcode); + return -1; + } + mmco[i].long_arg = long_arg; + } + + if (opcode > (unsigned) Yang_MMCO_LONG) { + printf("illegal memory management control operation %d\n",opcode); + return -1; + } + if (opcode == Yang_MMCO_END) + break; + } + nb_mmco = i; + } + } + + sl->nb_mmco = nb_mmco; + + return 0; +} diff --git a/libmetartc3/src/yangdecoder/YangHeaderParseFfmpeg.cpp b/libmetartc3/src/yangdecoder/YangHeaderParseFfmpeg.cpp new file mode 100755 index 00000000..c6ff2566 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangHeaderParseFfmpeg.cpp @@ -0,0 +1,111 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "YangHeaderParseFfmpeg.h" + +void YangHeaderParseFfmpeg::loadLib() { +//yang_speex_lib_get_mode=(const SpeexMode * (*) (int32_t mode))m_lib.loadFunction(""); + yang_av_frame_alloc = (AVFrame* (*)(void)) m_lib1.loadFunction( + "av_frame_alloc"); + yang_av_buffer_unref = (void (*)(AVBufferRef **buf)) m_lib1.loadFunction( + "av_buffer_unref"); + yang_av_hwframe_ctx_init = (int32_t (*)(AVBufferRef *ref)) m_lib1.loadFunction( + "av_hwframe_ctx_init"); + yang_av_image_get_buffer_size = (int32_t (*)(enum AVPixelFormat pix_fmt, + int32_t width, int32_t height, int32_t align)) m_lib1.loadFunction( + "av_image_get_buffer_size"); + yang_av_hwdevice_ctx_create = (int32_t (*)(AVBufferRef **device_ctx, + enum AVHWDeviceType type, const char *device, AVDictionary *opts, + int32_t flags)) m_lib1.loadFunction("av_hwdevice_ctx_create"); + yang_av_hwframe_transfer_data = (int32_t (*)(AVFrame *dst, const AVFrame *src, + int32_t flags)) m_lib1.loadFunction("av_hwframe_transfer_data"); + yang_av_free = (void (*)(void *ptr)) m_lib1.loadFunction("av_free"); + yang_av_frame_free = (void (*)(AVFrame **frame)) m_lib1.loadFunction( + "av_frame_free"); + yang_av_buffer_ref = + (AVBufferRef* (*)(AVBufferRef *buf)) m_lib1.loadFunction( + "av_buffer_ref"); + yang_av_image_fill_arrays = (int32_t (*)(uint8_t *dst_data[4], + int32_t dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, + int32_t width, int32_t height, int32_t align)) m_lib1.loadFunction( + "av_image_fill_arrays"); + yang_av_hwframe_ctx_alloc = + (AVBufferRef* (*)(AVBufferRef *device_ctx)) m_lib1.loadFunction( + "av_hwframe_ctx_alloc"); + yang_av_malloc = (void* (*)(size_t size)) m_lib1.loadFunction("av_malloc"); + + yang_avcodec_open2 = (int32_t (*)(AVCodecContext *avctx, const AVCodec *codec, + AVDictionary **options)) m_lib.loadFunction("avcodec_open2"); + yang_av_init_packet = (void (*)(AVPacket *pkt)) m_lib.loadFunction( + "av_init_packet"); + yang_av_hwframe_get_buffer = (int32_t (*)(AVBufferRef *hwframe_ctx, + AVFrame *frame, int32_t flags)) m_lib.loadFunction( + "av_hwframe_get_buffer"); + yang_avcodec_find_decoder = + (AVCodec* (*)(enum AVCodecID id)) m_lib.loadFunction( + "avcodec_find_decoder"); + yang_avcodec_alloc_context3 = + (AVCodecContext* (*)(const AVCodec *codec)) m_lib.loadFunction( + "avcodec_alloc_context3"); + yang_avcodec_send_packet = (int32_t (*)(AVCodecContext *avctx, + const AVPacket *avpkt)) m_lib.loadFunction("avcodec_send_packet"); + yang_avcodec_receive_frame = + (int32_t (*)(AVCodecContext *avctx, AVFrame *frame)) m_lib.loadFunction( + "avcodec_receive_frame"); + yang_avcodec_close = 
(int32_t (*)(AVCodecContext *avctx)) m_lib.loadFunction( + "avcodec_close"); +} + +void YangHeaderParseFfmpeg::unloadLib() { + yang_av_free = NULL; + yang_av_hwframe_ctx_alloc = NULL; + yang_av_hwframe_ctx_init = NULL; + yang_av_buffer_ref = NULL; + yang_av_malloc = NULL; + yang_av_hwdevice_ctx_create = NULL; + yang_avcodec_open2 = NULL; + yang_av_frame_alloc = NULL; + yang_av_image_get_buffer_size = NULL; + yang_av_image_fill_arrays = NULL; + yang_av_init_packet = NULL; + yang_av_hwframe_get_buffer = NULL; + yang_avcodec_find_decoder = NULL; + yang_avcodec_alloc_context3 = NULL; + yang_avcodec_send_packet = NULL; + yang_avcodec_receive_frame = NULL; + yang_av_hwframe_transfer_data = NULL; + yang_av_frame_free = NULL; + yang_avcodec_close = NULL; +} +YangHeaderParseFfmpeg::YangHeaderParseFfmpeg() { + unloadLib(); + +} + +YangHeaderParseFfmpeg::~YangHeaderParseFfmpeg() { + unloadLib(); + m_lib.unloadObject(); + m_lib1.unloadObject(); +} +void YangHeaderParseFfmpeg::init(){ + m_lib.loadObject("libavcodec"); + m_lib1.loadObject("libavutil"); + loadLib(); +} +void YangHeaderParseFfmpeg::parse(uint8_t* headers,int32_t headerLen){ + AVCodec *t_codec = yang_avcodec_find_decoder(AV_CODEC_ID_H265); + AVCodecContext* t_codecCtx = yang_avcodec_alloc_context3(t_codec); + t_codecCtx->extradata = (uint8_t*) yang_av_malloc(headerLen + AV_INPUT_BUFFER_PADDING_SIZE); + t_codecCtx->extradata_size = headerLen; + memcpy(t_codecCtx->extradata, headers, headerLen); + int32_t ret = yang_avcodec_open2(t_codecCtx, t_codec, NULL); + if (ret < 0) + printf("\navcodec_open2 failure................\n"); + + + yang_avcodec_close(t_codecCtx); + yang_av_free(t_codecCtx); + t_codecCtx=NULL; + +} diff --git a/libmetartc3/src/yangdecoder/YangHeaderParseFfmpeg.h b/libmetartc3/src/yangdecoder/YangHeaderParseFfmpeg.h new file mode 100755 index 00000000..0f9ff8ee --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangHeaderParseFfmpeg.h @@ -0,0 +1,52 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGDECODER_SRC_YANGHEADERPARSEFFMPEG_H_ +#define YANGDECODER_SRC_YANGHEADERPARSEFFMPEG_H_ +#include +#include "yangutil/sys/YangLoadLib.h" +extern "C"{ +#include "libavcodec/avcodec.h" +//#include "libavformat/avformat.h" +#include "libavutil/avutil.h" +#include "libavutil/imgutils.h" +} + +class YangHeaderParseFfmpeg { +public: + YangHeaderParseFfmpeg(); + virtual ~YangHeaderParseFfmpeg(); + void parse(uint8_t* pheader,int32_t pheaderLen); + void init(); +private: + YangLoadLib m_lib,m_lib1; + void loadLib(); + void unloadLib(); + + AVBufferRef *(*yang_av_hwframe_ctx_alloc)(AVBufferRef *device_ctx); + int32_t (*yang_av_hwframe_ctx_init)(AVBufferRef *ref); + AVBufferRef *(*yang_av_buffer_ref)(AVBufferRef *buf); + void *(*yang_av_malloc)(size_t size); + int32_t (*yang_av_hwdevice_ctx_create)(AVBufferRef **device_ctx, enum AVHWDeviceType type, + const char *device, AVDictionary *opts, int32_t flags); + int32_t (*yang_avcodec_open2)(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); + AVFrame *(*yang_av_frame_alloc)(void); + int32_t (*yang_av_image_get_buffer_size)(enum AVPixelFormat pix_fmt, int32_t width, int32_t height, int32_t align); + int32_t (*yang_av_image_fill_arrays)(uint8_t *dst_data[4], int32_t dst_linesize[4], + const uint8_t *src, + enum AVPixelFormat pix_fmt, int32_t width, int32_t height, int32_t align); + void (*yang_av_buffer_unref)(AVBufferRef **buf); + void (*yang_av_init_packet)(AVPacket *pkt); + int32_t (*yang_av_hwframe_get_buffer)(AVBufferRef *hwframe_ctx, AVFrame *frame, int32_t 
flags); + AVCodec *(*yang_avcodec_find_decoder)(enum AVCodecID id); + AVCodecContext *(*yang_avcodec_alloc_context3)(const AVCodec *codec); + int32_t (*yang_avcodec_send_packet)(AVCodecContext *avctx, const AVPacket *avpkt); + int32_t (*yang_avcodec_receive_frame)(AVCodecContext *avctx, AVFrame *frame); + int32_t (*yang_av_hwframe_transfer_data)(AVFrame *dst, const AVFrame *src, int32_t flags); + void (*yang_av_frame_free)(AVFrame **frame); + void (*yang_av_free)(void *ptr); + int32_t (*yang_avcodec_close)(AVCodecContext *avctx); +}; + +#endif /* YANGDECODER_SRC_YANGHEADERPARSEFFMPEG_H_ */ diff --git a/libmetartc3/src/yangdecoder/YangVideoDecoderHandle.cpp b/libmetartc3/src/yangdecoder/YangVideoDecoderHandle.cpp new file mode 100755 index 00000000..6c9328cc --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangVideoDecoderHandle.cpp @@ -0,0 +1,201 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "malloc.h" +#include +#include +#include +#include +#include "stdlib.h" + +#include "yangdecoder/YangDecoderFactory.h" + +YangVideoDecoderHandle::YangVideoDecoderHandle(YangContext *pcontext) { + isInit = 0; + m_isStart = 0; + m_isConvert = 1; + m_in_videoBuffer = NULL; + m_decs = NULL; + m_out_videoBuffer = NULL; + m_context = pcontext; +} + +YangVideoDecoderHandle::~YangVideoDecoderHandle(void) { + if (m_isConvert) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + m_context = NULL; + m_in_videoBuffer = NULL; + m_out_videoBuffer = NULL; + //size_t i = 0; + if (m_decs) { + delete m_decs; + m_decs = NULL; + + } + +} + +void YangVideoDecoderHandle::stop() { + stopLoop(); +} + +void YangVideoDecoderHandle::run() { + m_isStart = 1; + startLoop(); + m_isStart = 0; +} + + + +YangVideoBuffer* YangVideoDecoderHandle::getOutVideoBuffer() { + + return m_out_videoBuffer; +} + + + +void YangVideoDecoderHandle::addVideoStream(uint8_t *ps, int32_t puid, + int32_t pisAdd) { + if (pisAdd) { + YangVideoCodec aet = ps[0] == 0x17 ? Yang_VED_264 : Yang_VED_265; + + YangDecoderFactory df; + m_decs=df.createVideoDecoder(aet, &m_context->avinfo.video); + m_decs->m_uid = puid; + m_decs->init(); + int32_t wid = 0, hei = 0, fps = 10; + + m_decs->parseRtmpHeader(ps, 128, &wid, &hei, &fps); + if(m_out_videoBuffer){ + m_out_videoBuffer->m_uid = puid; + m_out_videoBuffer->init(wid, hei, m_context->avinfo.video.videoDecoderFormat); + m_out_videoBuffer->m_frames = fps; + yang_trace("\ndecode:width==%d,height==%d,fps==%d\n",wid,hei,fps); + yang_trace("\nvideoBuffer:width==%d,height==%d,fps==%d\n",m_out_videoBuffer->m_width,m_out_videoBuffer->m_height,fps); + } + + + } +} + +void YangVideoDecoderHandle::init() { + isInit = 1; +} + +void YangVideoDecoderHandle::setInVideoBuffer(YangVideoDecoderBuffer *pbuf) { + m_in_videoBuffer = pbuf; +} +void YangVideoDecoderHandle::setOutVideoBuffer(YangVideoBuffer *pbuf) { + m_out_videoBuffer = pbuf; + if(m_context&&m_context->streams.m_playBuffer) m_context->streams.m_playBuffer->setInVideoBuffer(pbuf); +} + +void YangVideoDecoderHandle::onAudioData(YangFrame* pframe){ + +} + +void YangVideoDecoderHandle::onVideoData(YangFrame* pframe){ + if(m_out_videoBuffer) { + m_out_videoBuffer->putVideo(pframe); + } +} + + +void YangVideoDecoderHandle::startLoop() { + m_isConvert = 1; + uint8_t *srcVideo = new uint8_t[YANG_VIDEO_ENCODE_BUFFER_LEN]; + uint8_t *temp; + temp = srcVideo + 4; + //int32_t isFFmpeg = 1; //(m_context->avinfo.videoDecoderType == 1 ? 
1 : 0); + int32_t headLen = 0; + //int32_t nalLen = 0; + YangYuvType yuvType=YangI420;//YangYv12; + YangFrame videoFrame; + memset(&videoFrame,0,sizeof(YangFrame)); + int err=0; + while (m_isConvert == 1) { + if (!m_in_videoBuffer) { + yang_usleep(2000); + continue; + } + + + if (m_in_videoBuffer && m_in_videoBuffer->size() == 0) { + yang_usleep(2000); + continue; + } + + + videoFrame.payload=temp; + if (m_in_videoBuffer) + m_in_videoBuffer->getEVideo(&videoFrame); + else + continue; + + + + if (videoFrame.frametype == YANG_Frametype_Spspps) { + if (m_decs == NULL) { + addVideoStream(temp, videoFrame.uid, 1); + } + continue; + } + + //compatible flash/flex h264 encode +// if (temp[0] == 0 && temp[1] == 0 && temp[2] == 0 && temp[3] == 0x02) { +// nalLen = temp[9]; +// headLen = nalLen + 10 + 4; +// } else { +// headLen = 0; +// } + + if (m_decs && m_decs->m_state == 0) { + if (!videoFrame.frametype) + continue; + m_decs->m_state = 1; + } + + //if (isFFmpeg) { + // headLen -= 4; + *(temp + headLen) = 0x00; + *(temp + headLen + 1) = 0x00; + *(temp + headLen + 2) = 0x00; + *(temp + headLen + 3) = 0x01; + //} + + if (m_decs) { + videoFrame.payload=temp+headLen ; + videoFrame.nb=videoFrame.nb+headLen; + if((err=m_decs->decode(&videoFrame,yuvType,this))!=Yang_Ok){ + if(err>0){ + if(err==2&&m_context) m_context->streams.sendRequest(0,YangRTC_Decoder_Input ); + yang_error("decode video fail.."); + + + }//else{ + // /headLen=4; + //videoFrame.payload=temp + headLen; + //videoFrame.nb=videoFrame.nb - headLen; + //m_decs->decode(&videoFrame,yuvType,this); + // yang_error("ffmpeg decoder receive buffer fail.."); + // yang_trace("\n"); + //for(int i=0;i<100;i++) yang_trace("%02x,",temp[i]); + // } + } + + } + + } + + temp = NULL; + yang_deleteA(srcVideo); +} + +void YangVideoDecoderHandle::stopLoop() { + m_isConvert = 0; + +} diff --git a/libmetartc3/src/yangdecoder/YangVideoDecoderHandles.cpp b/libmetartc3/src/yangdecoder/YangVideoDecoderHandles.cpp new file mode 100755 index 00000000..a8f72ab9 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangVideoDecoderHandles.cpp @@ -0,0 +1,260 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "malloc.h" +#include +#include +#include +#include +#include "stdlib.h" + +#include "yangdecoder/YangDecoderFactory.h" + +YangVideoDecoderHandles::YangVideoDecoderHandles(YangContext *pcontext) { + isInit = 0; + m_isStart = 0; + m_isConvert = 1; + m_in_videoBuffer = NULL; + m_decs = new vector(); + m_out_videoBuffer = NULL; + m_context = pcontext; +} + +YangVideoDecoderHandles::~YangVideoDecoderHandles(void) { + if (m_isConvert) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + m_context = NULL; + m_in_videoBuffer = NULL; + m_out_videoBuffer = NULL; + size_t i = 0; + if (m_decs) { + for (i = 0; i < m_decs->size(); i++) { + delete m_decs->at(i); + } + m_decs->clear(); + delete m_decs; + m_decs = NULL; + + } + +} + +void YangVideoDecoderHandles::stop() { + stopLoop(); +} + +void YangVideoDecoderHandles::run() { + m_isStart = 1; + startLoop(); + m_isStart = 0; +} + +int32_t YangVideoDecoderHandles::getDecoderIndex(int32_t puid) { + for (int32_t i = 0; i < (int) m_decs->size(); i++) { + if (m_decs->at(i)->m_uid == puid) + return i; + } + return -1; +} +YangVideoDecoder* YangVideoDecoderHandles::getDecoder(int32_t puid) { + for (int32_t i = 0; i < (int) m_decs->size(); i++) { + if (m_decs->at(i)->m_uid == puid) + return m_decs->at(i); + } + return NULL; +} + +YangVideoBuffer* YangVideoDecoderHandles::getVideoBuffer(int32_t puid) { + for (int32_t i = 0; 
i < (int) m_out_videoBuffer->size(); i++) { + if (m_out_videoBuffer->at(i)->m_uid == puid) + return m_out_videoBuffer->at(i); + } + return NULL; +} + +void YangVideoDecoderHandles::removeAllStream() { + if (m_out_videoBuffer->size() == 0) + return; + int32_t i = 0; + for (i = 0; i < (int) m_out_videoBuffer->size(); i++) { + delete m_out_videoBuffer->at(i); + m_out_videoBuffer->at(i) = NULL; + + } + m_out_videoBuffer->clear(); + if (m_decs->size() == 0) + return; + for (i = 0; i < (int) m_decs->size(); i++) { + delete m_decs->at(i); + m_decs->at(i) = NULL; + } + m_decs->clear(); +} + + +void YangVideoDecoderHandles::removeStream(){ + int32_t i = 0; + for(;m_removeList.size()>0;){ + int32_t puid=m_removeList.front(); + m_removeList.erase(m_removeList.begin()); + + for (i = 0; i < (int) m_out_videoBuffer->size(); i++) { + if (m_out_videoBuffer->at(i)->m_uid == puid) { + delete m_out_videoBuffer->at(i); + m_out_videoBuffer->at(i) = NULL; + m_out_videoBuffer->erase(m_out_videoBuffer->begin() + i); + break; + } + } + if (m_decs->size() == 0) + return; + for (i = 0; i < (int) m_decs->size(); i++) { + if (m_decs->at(i)->m_uid == puid) { + delete m_decs->at(i); + m_decs->at(i) = NULL; + m_decs->erase(m_decs->begin() + i); + return; + } + } + } +} + +void YangVideoDecoderHandles::addVideoStream(uint8_t *ps, int32_t puid, + int32_t pisAdd) { + if (pisAdd) { + YangVideoCodec aet = ps[0] == 0x17 ? Yang_VED_264 : Yang_VED_265; + YangDecoderFactory df; + m_decs->push_back(df.createVideoDecoder(aet, &m_context->avinfo.video)); + m_decs->back()->m_uid = puid; + m_decs->back()->init(); + int32_t wid = 0, hei = 0, fps = 10; + + m_decs->back()->parseRtmpHeader(ps, 128, &wid, &hei, &fps); + m_out_videoBuffer->push_back( + new YangVideoBuffer(m_context->avinfo.video.bitDepth == 8 ? 1 : 2)); + m_out_videoBuffer->back()->m_uid = puid; + m_out_videoBuffer->back()->init(wid, hei, m_context->avinfo.video.videoDecoderFormat); + m_out_videoBuffer->back()->m_frames = fps; + yang_trace("\nwidth==%d,height==%d,fps==%d\n",wid,hei,fps); + + int ind=0; + if(m_context&&(ind=m_context->streams.getIndex(puid))>-1){ + m_context->streams.m_playBuffers->at(ind)->setInVideoBuffer(m_out_videoBuffer->back()); + } + + } else { + m_removeList.push_back(puid); + } +} + +void YangVideoDecoderHandles::init() { + isInit = 1; +} + +void YangVideoDecoderHandles::setInVideoBuffer(YangVideoDecoderBuffer *pbuf) { + m_in_videoBuffer = pbuf; +} +void YangVideoDecoderHandles::setOutVideoBuffer(vector *pbuf) { + m_out_videoBuffer = pbuf; +} +void YangVideoDecoderHandles::onAudioData(YangFrame* pframe){ + +} +void YangVideoDecoderHandles::onVideoData(YangFrame* pframe){ + YangVideoBuffer *t_vb = getVideoBuffer(pframe->uid); + if(t_vb) { + t_vb->putVideo(pframe); + } + t_vb=NULL; +} + + +void YangVideoDecoderHandles::startLoop() { + m_isConvert = 1; + uint8_t *srcVideo = new uint8_t[YANG_VIDEO_ENCODE_BUFFER_LEN]; + uint8_t *temp; + temp = srcVideo + 4; + + //int32_t isFFmpeg = 1; //(m_context->avinfo.videoDecoderType == 1 ? 
1 : 0); + int32_t index = 0; + YangVideoDecoder *t_decoder = NULL; + int32_t headLen = 0; + //int32_t nalLen = 0; + YangYuvType yuvType=YangI420;//YangYv12; + YangFrame videoFrame; + memset(&videoFrame,0,sizeof(YangFrame)); + //videoFrame.yuvType=yuvType; + int err=Yang_Ok; + while (m_isConvert == 1) { + if (!m_in_videoBuffer) { + yang_usleep(2000); + continue; + } + + if(m_removeList.size()>0) removeStream(); + + if (m_in_videoBuffer && m_in_videoBuffer->size() == 0) { + yang_usleep(2000); + continue; + } + + videoFrame.payload=temp; + + if (m_in_videoBuffer) m_in_videoBuffer->getEVideo(&videoFrame); + + + if (videoFrame.frametype == YANG_Frametype_Spspps) { + index = getDecoderIndex(videoFrame.uid); + if (index == -1) { + addVideoStream(temp, videoFrame.uid, 1); + } + continue; + } + //compatible flash/flex h264 encode +// if (temp[0] == 0 && temp[1] == 0 && temp[2] == 0 && temp[3] == 0x02) { +// nalLen = temp[9]; +// headLen = nalLen + 10 + 4; +// } else { +// headLen = 4; +// } + + + t_decoder = getDecoder(videoFrame.uid); + + if (t_decoder && t_decoder->m_state == 0) { + if (!videoFrame.frametype) + continue; + t_decoder->m_state = 1; + } + + //if (isFFmpeg) { + // headLen -= 4; + *(temp + headLen) = 0x00; + *(temp + headLen + 1) = 0x00; + *(temp + headLen + 2) = 0x00; + *(temp + headLen + 3) = 0x01; + //} + if (t_decoder) { + videoFrame.payload=temp + headLen; + videoFrame.nb=videoFrame.nb - headLen; + if((err=t_decoder->decode(&videoFrame,yuvType,this))!=Yang_Ok){ + if(err==2&&m_context) m_context->streams.sendRequest(videoFrame.uid,YangRTC_Decoder_Input ); + yang_error("decode video fail..uid===%d",videoFrame.uid); + } + + } + t_decoder = NULL; + } + t_decoder = NULL; + temp = NULL; + yang_deleteA(srcVideo); +} + +void YangVideoDecoderHandles::stopLoop() { + m_isConvert = 0; + +} diff --git a/libmetartc3/src/yangdecoder/YangVideoDecoderIntel.cpp b/libmetartc3/src/yangdecoder/YangVideoDecoderIntel.cpp new file mode 100755 index 00000000..e338132e --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangVideoDecoderIntel.cpp @@ -0,0 +1,495 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangVideoDecoderIntel.h" +#if HaveLibva +#include "memory.h" +#include +#include "string.h" +#include "sys/ioctl.h" +#include "fcntl.h" +#include "memory.h" +#include "yangutil/yang_unistd.h" +//#include +//#include +#include "string.h" +#include "stdio.h" +#include "xf86drm.h" + +using namespace std; +#define CHECK_VASTATUS(X, MSG) {if ((X) != VA_STATUS_SUCCESS) {cout <<"\n"<m_sps; + Yang_PPS *pps = &header->m_pps; + //YangSliceContext *sl=header->sl; + //(*p_pic_para) = (VAPictureParameterBufferH264){ + p_pic_para->picture_width_in_mbs_minus1 = sps->mb_width - 1;//conf->Video_Width_Zb - 1; + p_pic_para->picture_height_in_mbs_minus1 = sps->mb_height - 1;//conf->Video_Height_Zb - 1; + + p_pic_para->bit_depth_luma_minus8 = sps->bit_depth_luma - 8; + p_pic_para->bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8; + p_pic_para->num_ref_frames = sps->ref_frame_count; + //.seq_fields.bits = { + p_pic_para->seq_fields.bits.chroma_format_idc = sps->chroma_format_idc; + p_pic_para->seq_fields.bits.residual_colour_transform_flag = + sps->residual_color_transform_flag; + p_pic_para->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = + sps->gaps_in_frame_num_allowed_flag; + p_pic_para->seq_fields.bits.frame_mbs_only_flag = sps->frame_mbs_only_flag; + p_pic_para->seq_fields.bits.mb_adaptive_frame_field_flag = sps->mb_aff; + p_pic_para->seq_fields.bits.direct_8x8_inference_flag = + 
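+	// The seq_fields block is a field-for-field copy of the parsed SPS;
+	// VA-API keeps the bitstream's "minus1"/"minus8" conventions, which is
+	// why the dimension and bit-depth members above are adjusted by -1/-8.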
sps->direct_8x8_inference_flag; + p_pic_para->seq_fields.bits.MinLumaBiPredSize8x8 = sps->level_idc >= 31; /* Ap_pic_para->seq_fields.bits.3p_pic_para->seq_fields.bits.3p_pic_para->seq_fields.bits.2 */ + p_pic_para->seq_fields.bits.log2_max_frame_num_minus4 = + sps->log2_max_frame_num - 4; + p_pic_para->seq_fields.bits.pic_order_cnt_type = sps->poc_type; + p_pic_para->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = + sps->log2_max_poc_lsb - 4; + p_pic_para->seq_fields.bits.delta_pic_order_always_zero_flag = + sps->delta_pic_order_always_zero_flag; + // }; + p_pic_para->num_slice_groups_minus1 = pps->slice_group_count - 1; + p_pic_para->slice_group_map_type = pps->mb_slice_group_map_type; + p_pic_para->slice_group_change_rate_minus1 = 0; /* FMO is not implemented */ + p_pic_para->pic_init_qp_minus26 = pps->init_qp - 26; //init_qp - 26; + p_pic_para->pic_init_qs_minus26 = pps->init_qs - 26; //init_qs - 26; + p_pic_para->chroma_qp_index_offset = pps->chroma_qp_index_offset[0]; + p_pic_para->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1]; + //.pic_fields.bits = { + p_pic_para->pic_fields.bits.entropy_coding_mode_flag = pps->cabac; + p_pic_para->pic_fields.bits.weighted_pred_flag = pps->weighted_pred; + p_pic_para->pic_fields.bits.weighted_bipred_idc = pps->weighted_bipred_idc; + p_pic_para->pic_fields.bits.transform_8x8_mode_flag = + pps->transform_8x8_mode; + p_pic_para->pic_fields.bits.field_pic_flag = 0; //sl->picture_structure != Yang_PICT_FRAME; + p_pic_para->pic_fields.bits.constrained_intra_pred_flag = + pps->constrained_intra_pred; + p_pic_para->pic_fields.bits.pic_order_present_flag = pps->pic_order_present; + p_pic_para->pic_fields.bits.deblocking_filter_control_present_flag = + pps->deblocking_filter_parameters_present; + p_pic_para->pic_fields.bits.redundant_pic_cnt_present_flag = + pps->redundant_pic_cnt_present; + p_pic_para->pic_fields.bits.reference_pic_flag = 1; //h->nal_ref_idc != 0; + // }, + p_pic_para->frame_num = 0; + // }; + +} +int32_t yang_get_slice_type(YangSliceContext *sl) { + switch (sl->slice_type) { + case Yang_PICTURE_TYPE_P: + return 0; + case Yang_PICTURE_TYPE_B: + return 1; + case Yang_PICTURE_TYPE_I: + return 2; + case Yang_PICTURE_TYPE_SP: + return 3; + case Yang_PICTURE_TYPE_SI: + return 4; + default: + return -1; + } +} +YangVideoDecoderIntel::YangVideoDecoderIntel() { + /**m_va_dpy = NULL; + m_config_id = VA_INVALID_ID; + memset(pics, 0, sizeof(YangDecodePicture)*Para_Bufs); + memset(&m_picPara, 0, sizeof(m_picPara)); + memset(&iq_matrix, 0, sizeof(iq_matrix)); + memset(&image_format, 0, sizeof(image_format)); + memset(&m_slice_param, 0, sizeof(m_slice_param)); + sid=0;va-drm + frameIdx=0; + FieldOrderCnt=0; + m_ref_count=3;**/ + //printf("\n*****YangH264Decoder***********************************Intel\n"); + + m_va_dpy = NULL; + m_config_id = VA_INVALID_ID; + + m_config_id = 0; + m_vaContext = 0; + vas = 0; + memset(pics, 0, sizeof(YangDecodePicture) * Para_Bufs); + memset(&m_picPara, 0, sizeof(m_picPara)); + memset(&iq_matrix, 0, sizeof(iq_matrix)); + memset(&image_format, 0, sizeof(image_format)); + memset(&m_slice_param, 0, sizeof(m_slice_param)); + m_fd = 0; + m_ref_count = 0; + m_ref_count_m1 = 0; + sid = 0; + frameIdx = 0; + FieldOrderCnt = 0; + m_ref_count = 3; + //m_yvp = NULL; + m_width = 0; + m_height = 0; + m_frame = 0; + yLen = 0; + uLen = 0; + uvLen = 0; + allLen = 0; + frameIdx = 0; + FieldOrderCnt = 0; + posNum = 0; + m_isInit = 0; + unloadLib(); +} + +YangVideoDecoderIntel::~YangVideoDecoderIntel() { +// 
printf("\n**************YangVideoDecoderIntel destruct***************\n"); + closeDevice(); + unloadLib(); + m_lib.unloadObject(); + m_lib1.unloadObject(); +} + +void YangVideoDecoderIntel::initH264Pic() { + + for (int32_t i = 0; i < 16; i++) { + m_picPara.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID; + m_picPara.ReferenceFrames[i].picture_id = 0xffffffff; + m_picPara.ReferenceFrames[i].TopFieldOrderCnt = 0; + m_picPara.ReferenceFrames[i].BottomFieldOrderCnt = 0; + } +} + +void YangVideoDecoderIntel::closeDevice() { + //vaDestroySurfaces(m_va_dpy,&m_vaSurface,1); + for (int32_t i = 0; i < Para_Bufs; i++) { + if (m_va_dpy && surface_id[i] != VA_INVALID_ID) { + vas = yang_vaDestroySurfaces(m_va_dpy, &surface_id[i], 1); + CHECK_VASTATUS(vas, "vaDestroySurfaces"); + } + } + if (m_va_dpy && m_vaContext != VA_INVALID_ID) { + vas = yang_vaDestroyContext(m_va_dpy, m_vaContext); + CHECK_VASTATUS(vas, "vaDestroyContext"); + } + if (m_va_dpy && m_config_id != VA_INVALID_ID) { + vas = yang_vaDestroyConfig(m_va_dpy, m_config_id); + CHECK_VASTATUS(vas, "vaDestroyConfig"); + } + if (m_va_dpy) { + vas = yang_vaTerminate(m_va_dpy); + CHECK_VASTATUS(vas, "vaTerminate"); + } + if (m_fd >= 0) + close(m_fd); + m_va_dpy = NULL; + +} + +void YangVideoDecoderIntel::initSlicePara(VASliceParameterBufferH264 *slice264, + uint8_t *p_data, int32_t p_len) { + //initNaluPara(&m_Nalu,p_data); + + YangSliceContext *sl = h264header.sl; + int32_t ret = (h264header.m_sps.mb_aff + && (sl->picture_structure == Yang_PICT_FRAME)) + || sl->picture_structure != Yang_PICT_FRAME; + slice264->slice_data_size = p_len; + slice264->slice_data_offset = 0; + slice264->slice_data_flag = VA_SLICE_DATA_FLAG_ALL; + slice264->slice_data_bit_offset = h264header.get_bits_count(&sl->gb); + slice264->first_mb_in_slice = (sl->mb_y >> ret) * h264header.m_sps.mb_width + + sl->mb_x; + slice264->slice_type = yang_get_slice_type(sl); //sl->slice_type;//m_Nalu.nal_reference_idc;//ff_h264_get_slice_type(sl); + slice264->direct_spatial_mv_pred_flag = + sl->slice_type == Yang_PICTURE_TYPE_B ? + sl->direct_spatial_mv_pred : 0; + slice264->num_ref_idx_l0_active_minus1 = + sl->list_count > 0 ? sl->ref_count[0] - 1 : 0; + slice264->num_ref_idx_l1_active_minus1 = + sl->list_count > 1 ? sl->ref_count[1] - 1 : 0; + slice264->cabac_init_idc = sl->cabac_init_idc; + slice264->slice_qp_delta = sl->qscale - h264header.m_pps.init_qp; + slice264->disable_deblocking_filter_idc = + sl->deblocking_filter < 2 ? 
+ !sl->deblocking_filter : sl->deblocking_filter; + slice264->slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2; + slice264->slice_beta_offset_div2 = sl->slice_beta_offset / 2; + slice264->luma_log2_weight_denom = sl->pwt.luma_log2_weight_denom; + slice264->chroma_log2_weight_denom = sl->pwt.chroma_log2_weight_denom; + sl = NULL; + if (slice264->slice_type == 2) { + slice264->chroma_weight_l0[0][0] = 0; + slice264->chroma_weight_l0[0][1] = 0; + slice264->chroma_weight_l1[0][0] = 0; + slice264->chroma_weight_l1[0][1] = 0; + slice264->RefPicList0[0].picture_id = 0xffffffff; + } else { + slice264->chroma_weight_l0[0][0] = 1; + slice264->chroma_weight_l0[0][1] = 1; + slice264->chroma_weight_l1[0][0] = 1; + slice264->chroma_weight_l1[0][1] = 1; + } + + for (int32_t i = 0; i < 32; i++) { + slice264->RefPicList0[i].flags = VA_PICTURE_H264_INVALID; + slice264->RefPicList1[i].flags = VA_PICTURE_H264_INVALID; + slice264->RefPicList0[i].picture_id = 0xffffffff; + slice264->RefPicList1[i].picture_id = 0xffffffff; + } +} + +void YangVideoDecoderIntel::cachePic(VAPictureH264 *p_vpic, int32_t p_pocind) { + + if (p_pocind > 0) { + if (p_pocind > m_ref_count_m1) { + memcpy(&m_picPara.ReferenceFrames[m_ref_count_m1], &m_pic[m_ref_count_m1], sizeof(VAPictureH264)); + memcpy(&m_slice_param.RefPicList0[m_ref_count_m1], &m_pic[m_ref_count_m1], sizeof(VAPictureH264)); + } + int32_t i = (p_pocind > m_ref_count_m1 - 1 ? m_ref_count_m1 : p_pocind) - 1; + + for (; i >= 0; i--) { + memcpy(&m_picPara.ReferenceFrames[i], &m_pic[i],sizeof(VAPictureH264)); + memcpy(&m_slice_param.RefPicList0[i], &m_pic[i],sizeof(VAPictureH264)); + memcpy(&m_pic[i + 1], &m_pic[i], sizeof(VAPictureH264)); + } + } + memcpy(&m_pic[0], p_vpic, sizeof(VAPictureH264)); + m_pic[0].flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE; + +} +void YangVideoDecoderIntel::init() { + +} +void YangVideoDecoderIntel::parseRtmpHeader(uint8_t *p, int32_t pLen, int32_t *pwid, + int32_t *phei, int32_t *pfps) { + //printf("\n*****************************header.........\n"); + //for(int32_t i=0;i<60;i++){ + // printf("%02x,",*(p+i)); + //} + //printf("\n**********************************************\n"); + m_lib.loadSysObject("libva"); + m_lib1.loadSysObject("libva-drm"); + loadLib(); + m_fd = open("/dev/dri/renderD128", O_RDWR); + m_va_dpy = yang_vaGetDisplayDRM(m_fd); + int32_t major_version = 0, minor_version = 0; + if (m_va_dpy) + vas = yang_vaInitialize(m_va_dpy, &major_version, &minor_version); + CHECK_VASTATUS(vas, "vaInitialize"); + //printf("Initialised VAAPI connection: version %d.%d,..md=%d\n", major_version, minor_version, m_fd); + image_format.fourcc = VA_FOURCC_NV12; + image_format.depth = 8; + frameIdx = 0; + posNum = 0; + h264header.parseRtmpHeader(p); + initWH(); + *pwid = m_width; + *phei = m_height; + *pfps = h264header.fps; + m_isInit = 1; +} +void YangVideoDecoderIntel::initWH() { + memset(iq_matrix.ScalingList4x4, 0x10, sizeof(iq_matrix.ScalingList4x4)); + memset(iq_matrix.ScalingList8x8[0], 0x10, + sizeof(iq_matrix.ScalingList8x8[0])); + memset(iq_matrix.ScalingList8x8[1], 0x10, + sizeof(iq_matrix.ScalingList8x8[0])); + m_width = h264header.width; + m_height = h264header.height; + //m_yvp->initSdlWin(2,&m_yvp->rects[0], m_width, m_height, 0, 0); +// printf("\nmd=%d*********************intel decode init wid=%d,hei=%d,fps=%d,profile=%d..........%d\n", +// m_fd, m_width, m_height, h264header.fps, +// h264header.m_sps.profile_idc, h264header.m_sps.ref_frame_count); + initPicPara_1(&m_picPara, &h264header); + yLen = m_width * m_height; + 
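+	// 4:2:0 plane arithmetic: the luma plane is width*height bytes and each
+	// chroma plane is a quarter of that, so a full NV12/I420 frame occupies
+	// yLen * 3 / 2 bytes (e.g. 1280x720 -> yLen = 921600, allLen = 1382400).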
uLen = yLen / 4; + allLen = yLen * 3 / 2; + uvLen = yLen / 2; + m_ref_count = h264header.m_sps.ref_frame_count; + if (m_ref_count > 16) + m_ref_count = 16; + if (m_ref_count < 3) + m_ref_count = 3; + m_ref_count_m1 = m_ref_count - 1; + VAConfigAttrib attrib; + VAProfile vap = VAProfileH264ConstrainedBaseline; + if (h264header.m_sps.profile_idc == 77) + vap = VAProfileH264Main; + if (h264header.m_sps.profile_idc == 100 + || h264header.m_sps.profile_idc == 110 + || h264header.m_sps.profile_idc == 122) + vap = VAProfileH264High; + vas = yang_vaCreateConfig(m_va_dpy, vap, VAEntrypointVLD, &attrib, 1, + &m_config_id); + CHECK_VASTATUS(vas, "vaCreateConfig"); + vas = yang_vaCreateSurfaces(m_va_dpy, VA_RT_FORMAT_YUV420, m_width, + m_height, surface_id, Para_Bufs, NULL, 0); + CHECK_VASTATUS(vas, "vaCreateSurfaces"); + + vas = yang_vaCreateContext(m_va_dpy, m_config_id, m_width, m_height, + VA_PROGRESSIVE, &surface_id[0], Para_Bufs, &m_vaContext); + CHECK_VASTATUS(vas, "vaCreateContext"); +} +void YangVideoDecoderIntel::init(YangVideoParam *p_config) { + +} + +int32_t YangVideoDecoderIntel::decode(YangVideoBuffer *pvpl, int32_t isIframe, + uint8_t *pData, int32_t nSize, int64_t ptimestamp, + uint8_t *dest, int32_t *pnFrameReturned) { + + h264header.h264_slice_header_parse(pData, nSize); + initSlicePara(&m_slice_param, pData, nSize); + initH264Pic(); + //m_picPara.frame_num = frameIdx; + if (h264header.sl) + m_picPara.pic_fields.bits.field_pic_flag = + h264header.sl->picture_structure != Yang_PICT_FRAME; + + if (m_slice_param.slice_type == 2) { + posNum = 0; + FieldOrderCnt = 0; + } + m_picPara.CurrPic.picture_id = surface_id[sid]; + m_picPara.CurrPic.frame_idx = frameIdx; + m_picPara.CurrPic.flags = 0; + m_picPara.CurrPic.BottomFieldOrderCnt = FieldOrderCnt; + m_picPara.CurrPic.TopFieldOrderCnt = FieldOrderCnt; + m_picPara.frame_num = posNum; + + cachePic(&m_picPara.CurrPic, posNum); + //yang_printme(&m_slice_param); + //yang_printMe2(&m_picPara); + vas = yang_vaCreateBuffer(m_va_dpy, m_vaContext, + VAPictureParameterBufferType, sizeof(VAPictureParameterBufferH264), + 1, &m_picPara, &pics[sid].picBuf); + CHECK_VASTATUS(vas, "vaCreateBuffer VAPictureParameterBufferType"); + + vas = yang_vaCreateBuffer(m_va_dpy, m_vaContext, VAIQMatrixBufferType, + sizeof(VAIQMatrixBufferH264), 1, &iq_matrix, &pics[sid].IQMBuf); + CHECK_VASTATUS(vas, "vaCreateBuffer VAIQMatrixBufferType"); + + vas = yang_vaCreateBuffer(m_va_dpy, m_vaContext, VASliceParameterBufferType, + sizeof(VASliceParameterBufferH264), 1, &m_slice_param, + &pics[sid].sliceParaBuf); + CHECK_VASTATUS(vas, "vaCreateBuffer VASliceParameterBufferType"); + vas = yang_vaCreateBuffer(m_va_dpy, m_vaContext, VASliceDataBufferType, + nSize, 1, pData, &pics[sid].sliceDataBuf); + CHECK_VASTATUS(vas, "vaCreateBuffer VASliceDataBufferType"); + + vas = yang_vaBeginPicture(m_va_dpy, m_vaContext, surface_id[sid]); + CHECK_VASTATUS(vas, "vaBeginPicture"); + + bufids[0] = pics[sid].picBuf; + bufids[1] = pics[sid].IQMBuf; + vas = yang_vaRenderPicture(m_va_dpy, m_vaContext, bufids, 2); + CHECK_VASTATUS(vas, "vaRenderPicture 1,2"); + + bufids[0] = pics[sid].sliceParaBuf; + bufids[1] = pics[sid].sliceDataBuf; + vas = yang_vaRenderPicture(m_va_dpy, m_vaContext, bufids, 2); + CHECK_VASTATUS(vas, "vaRenderPicture 3,4"); + + vas = yang_vaEndPicture(m_va_dpy, m_vaContext); + CHECK_VASTATUS(vas, "vaEndPicture"); + vas = yang_vaSyncSurface(m_va_dpy, surface_id[sid]); + CHECK_VASTATUS(vas, "vaSyncSurface"); + + frameIdx++; + if (frameIdx > 15) + frameIdx = 0; + FieldOrderCnt += 
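+	// Top and bottom field order counts are kept equal here, so the picture
+	// order count advances by 2 per decoded progressive frame.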
2; + posNum++; + //memcpy(&m_old_pic, &m_picPara.CurrPic, sizeof(VAPictureH264)); + // vas = vaPutSurface(m_va_dpy, surface_id[sid], win, 0, 0, m_width, m_height, 0, 0, m_width, m_height, NULL, 0, VA_FRAME_PICTURE); + // CHECK_VASTATUS(vas, "vaPutSurface"); + + uint8_t *picData1; + vas = yang_vaCreateImage(m_va_dpy, &image_format, m_width, m_height, + &m_img); + CHECK_VASTATUS(vas, "vaCreateImage"); + vas = yang_vaGetImage(m_va_dpy, surface_id[sid], 0, 0, m_width, m_height, + m_img.image_id); + CHECK_VASTATUS(vas, "vaGetImage"); + vas = yang_vaMapBuffer(m_va_dpy, m_img.buf, (void**) &picData1); + CHECK_VASTATUS(vas, "vaMapBuffer img"); + + memcpy(dest, picData1, allLen); + *pnFrameReturned = m_img.data_size; + //if (vas == 0) t_yvp.yuv_show(dest, m_width); + if (vas == 0) + pvpl->putVideo(picData1, allLen, ptimestamp); + vas = yang_vaUnmapBuffer(m_va_dpy, m_img.buf); + picData1=NULL; + //mapDrm(sid); + sid++; + if (sid == Para_Bufs) + sid = 0; + return 1; + +} +#endif diff --git a/libmetartc3/src/yangdecoder/YangVideoDecoderIntel.h b/libmetartc3/src/yangdecoder/YangVideoDecoderIntel.h new file mode 100755 index 00000000..f04ad879 --- /dev/null +++ b/libmetartc3/src/yangdecoder/YangVideoDecoderIntel.h @@ -0,0 +1,127 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangVideoDecoderIntel_H +#define YangVideoDecoderIntel_H + +#include "Yang_Config.h" +#if HaveLibva +#include "yangdecoder/YangVideoDecoder.h" +#include "yangutil/yangavtype.h" +//#include "YangInitVaPicPara.h" +#include "va/va.h" +#include "va/va_drm.h" +#include "va/va_drmcommon.h" + +//#include "../yangplayer/YangVideoPlay.h" +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/sys/YangLoadLib.h" +#include "YangH264Header.h" +//#include +//#include "YangVaDevice.h" + +//} + +//#define MAX_PARAM_BUFFERS 16 +#define Para_Bufs 7 +typedef struct YangDecodePicture { + VABufferID picBuf; + VABufferID IQMBuf; + VABufferID sliceParaBuf; + VABufferID sliceDataBuf; + +} YangDecodePicture; + + +class YangVideoDecoderIntel:public YangVideoDecoder +{ +public: + YangVideoDecoderIntel(); + ~YangVideoDecoderIntel(); + void init(); + void init(int32_t iW,int32_t iH); + void init(YangVideoParam *p_config); + int32_t decode(YangVideoBuffer *vpl,int32_t isIframe,uint8_t *pData, int32_t nSize, int64_t ptimestamp,uint8_t *dest, int32_t *pnFrameReturned); + void initWH(); + void parseRtmpHeader(uint8_t *p, int32_t pLen,int32_t *pwid,int32_t *phei,int32_t *pfps); + +protected: + void closeDevice(); +private: + int32_t m_fd; + VADisplay m_va_dpy; + VAConfigID m_config_id; + VAContextID m_vaContext; + + VAStatus vas; + + VAImage m_img; + VAImageFormat image_format; + + YangH264Header h264header; + VAPictureH264 m_pic[16]; + int32_t m_ref_count; + int32_t m_ref_count_m1; + + //VAPictureH264 m_old_pic; + VAPictureParameterBufferH264 m_picPara; + + VABufferID bufids[10]; + VAIQMatrixBufferH264 iq_matrix; + VASliceParameterBufferH264 m_slice_param; + + YangDecodePicture pics[Para_Bufs]; + VASurfaceID surface_id[Para_Bufs]; + int32_t sid; + +private: + void cachePic(VAPictureH264 *p_pic,int32_t p_pocind); + void initH264Pic(); + void initH264Slice(); + int32_t m_width; + int32_t m_height; + int32_t m_frame; + int32_t yLen; + int32_t uLen; + int32_t uvLen; + int32_t allLen; + int32_t frameIdx; + int32_t FieldOrderCnt; + int32_t posNum; + void initSlicePara(VASliceParameterBufferH264 *slice264,uint8_t *p_data,int32_t p_len); + YangLoadLib m_lib,m_lib1; + void loadLib(); + void unloadLib(); + VADisplay (*yang_vaGetDisplayDRM)(int32_t 
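+	// These members mirror the libva/libva-drm C API one-to-one and are
+	// resolved at runtime in parseRtmpHeader() rather than linked. Note that
+	// the vaCreateConfig declaration below says "VAEntrypoint32_t", which
+	// looks like an over-eager int -> int32_t rename of libva's VAEntrypoint
+	// and will not match the real prototype.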
fd); + VAStatus (*yang_vaInitialize) ( VADisplay dpy, int32_t *major_version,int32_t *minor_version); + VAStatus (*yang_vaDestroyContext) (VADisplay dpy,VAContextID context); + VAStatus (*yang_vaDestroySurfaces) (VADisplay dpy,VASurfaceID *surfaces,int32_t num_surfaces); + VAStatus (*yang_vaDestroyConfig) (VADisplay dpy,VAConfigID config_id); + VAStatus (*yang_vaTerminate) ( VADisplay dpy); + + + VAStatus (*yang_vaCreateConfig) (VADisplay dpy, VAProfile profile, VAEntrypoint32_t entrypoint, VAConfigAttrib *attrib_list, int32_t num_attribs, VAConfigID *config_id ); + + VAStatus (*yang_vaCreateSurfaces)(VADisplay dpy,uint32_t format,uint32_t width, + uint32_t height,VASurfaceID *surfaces,uint32_t num_surfaces, VASurfaceAttrib *attrib_list, + uint32_t num_attribs); + + VAStatus (*yang_vaCreateContext) (VADisplay dpy,VAConfigID config_id, int32_t picture_width, + int32_t picture_height, int32_t flag, VASurfaceID *render_targets, + int32_t num_render_targets,VAContextID *context); + VAStatus (*yang_vaCreateBuffer) (VADisplay dpy,VAContextID context, VABufferType type, + uint32_t size, uint32_t num_elements, void *data, VABufferID *buf_id + ); + VAStatus (*yang_vaBeginPicture) (VADisplay dpy,VAContextID context,VASurfaceID render_target); + VAStatus (*yang_vaRenderPicture) (VADisplay dpy,VAContextID context, VABufferID *buffers,int32_t num_buffers); + + VAStatus (*yang_vaCreateImage) (VADisplay dpy,VAImageFormat *format, int32_t width, int32_t height, VAImage *image); + VAStatus (*yang_vaEndPicture) (VADisplay dpy,VAContextID context); + VAStatus (*yang_vaGetImage) (VADisplay dpy,VASurfaceID surface, int32_t x, + int32_t y,uint32_t width, uint32_t height,VAImageID image); + VAStatus (*yang_vaMapBuffer) (VADisplay dpy,VABufferID buf_id, void **pbuf); + VAStatus (*yang_vaSyncSurface) (VADisplay dpy,VASurfaceID render_target); + VAStatus (*yang_vaUnmapBuffer) (VADisplay dpy,VABufferID buf_id); +}; +#endif +#endif // YANGH264DECODER_H diff --git a/libmetartc3/src/yangencoder/YangAudioEncoder.cpp b/libmetartc3/src/yangencoder/YangAudioEncoder.cpp new file mode 100755 index 00000000..6b74f8b7 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoder.cpp @@ -0,0 +1,23 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "yangencoder/YangAudioEncoder.h" + +#include +#include "yangutil/yang_unistd.h" + +YangAudioEncoder::YangAudioEncoder() { + m_isInit=0; + m_uid=-1; + memset(&m_audioInfo,0,sizeof(YangAudioInfo)); + +} +YangAudioEncoder::~YangAudioEncoder(void) { + //m_context = NULL; +} + +void YangAudioEncoder::setAudioPara(YangAudioInfo *pap){ + memcpy(&m_audioInfo,pap,sizeof(YangAudioInfo)); +} + + diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderAac.cpp b/libmetartc3/src/yangencoder/YangAudioEncoderAac.cpp new file mode 100755 index 00000000..a3e6b983 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderAac.cpp @@ -0,0 +1,113 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangAudioEncoderAac.h" + +#include "yangutil/yang_unistd.h" +#include "stdio.h" + +void YangAudioEncoderAac::loadLib(){ + yang_faacEncOpen=(faacEncHandle (*)(unsigned long sampleRate, uint32_t numChannels, + unsigned long *inputSamples, unsigned long *maxOutputBytes))m_lib.loadFunction("faacEncOpen"); + + yang_faacEncSetConfiguration=(int32_t (*)(faacEncHandle hEncoder,faacEncConfigurationPtr config))m_lib.loadFunction("faacEncSetConfiguration"); + yang_faacEncEncode=(int32_t (*)(faacEncHandle hEncoder, int32_t * inputBuffer, uint32_t samplesInput, + uint8_t *outputBuffer, + uint32_t 
bufferSize))m_lib.loadFunction("faacEncEncode"); + yang_faacEncGetCurrentConfiguration=(faacEncConfigurationPtr + (*)(faacEncHandle hEncoder))m_lib.loadFunction("faacEncGetCurrentConfiguration"); + yang_faacEncClose=(int32_t (*)(faacEncHandle hEncoder))m_lib.loadFunction("faacEncClose"); + +} + +void YangAudioEncoderAac::unloadLib(){ + yang_faacEncGetCurrentConfiguration=NULL; + yang_faacEncOpen=NULL; + yang_faacEncSetConfiguration=NULL; + yang_faacEncEncode=NULL; + yang_faacEncClose=NULL; + +} +YangAudioEncoderAac::YangAudioEncoderAac() { + nSampleRate = 44100; + nChannels = 2; + nInputSamples = 0; + nMaxOutputBytes = 0; + + m_aacBuffer = NULL; + ret = 0; + nPCMBitSize = 0; + nInputSamples = 0; + nMaxOutputBytes = 0; + isRec = 0; + hEncoder = NULL; + isamples = 0; + maxsample = 0; + mnInputSamples = 0; + nRet = 0; + + nBytesRead = 0; + nPCMBufferSize = 0; + frames = 0; + unloadLib(); + +} +YangAudioEncoderAac::~YangAudioEncoderAac(void) { + closeAac(); + yang_delete(m_aacBuffer); + unloadLib(); + m_lib.unloadObject(); +} + + + +void YangAudioEncoderAac::init(YangAudioInfo *pap) { + if(m_isInit) return; + m_lib.loadObject("libfaac"); + loadLib(); + setAudioPara(pap); + hEncoder = yang_faacEncOpen(nSampleRate, nChannels, &nInputSamples,&nMaxOutputBytes); + isamples = nInputSamples; + maxsample = nMaxOutputBytes; + mnInputSamples = nInputSamples * 2; + //printf("\n*****is=%d,maxs=%d,mns=%d\n", isamples, maxsample,mnInputSamples); + m_aacBuffer = new uint8_t[nMaxOutputBytes]; + faacEncConfigurationPtr aconfiguration = yang_faacEncGetCurrentConfiguration(hEncoder); + + aconfiguration->version = MPEG4; + + aconfiguration->aacObjectType = LOW; //MAIN;//LOW;//MAIN; + aconfiguration->allowMidside = 1; + aconfiguration->useTns = 0; + aconfiguration->shortctl = SHORTCTL_NORMAL; + //aconfiguration-> + //aconfiguration->nputformat=FAAC_INPUT_16BIT; + aconfiguration->outputFormat = pap->hasAudioHeader; + + aconfiguration->inputFormat = FAAC_INPUT_16BIT; + aconfiguration->bitRate = 128000 / nChannels; + nRet = yang_faacEncSetConfiguration(hEncoder, aconfiguration); + m_isInit=1; + + +} + +int32_t YangAudioEncoderAac::encoder(YangFrame* pframe,YangEncoderCallback* pcallback){ + if(!hEncoder) return 1; + ret = yang_faacEncEncode(hEncoder,(int32_t *) pframe->payload, isamples, m_aacBuffer, maxsample); + + if (ret > 0&&pcallback){ + pframe->payload=m_aacBuffer; + pframe->nb=ret; + pcallback->onAudioData(pframe); + return Yang_Ok; + }else + return 1; +} + + +void YangAudioEncoderAac::closeAac() { + if(hEncoder) yang_faacEncClose(hEncoder); + hEncoder = NULL; +} + diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderAac.h b/libmetartc3/src/yangencoder/YangAudioEncoderAac.h new file mode 100755 index 00000000..77621f4b --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderAac.h @@ -0,0 +1,57 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __AudioAacEncoder__ +#define __AudioAacEncoder__ +#include "faac.h" + +#include "yangutil/buffer/YangAudioBuffer.h" +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/sys/YangLoadLib.h" +#include "yangencoder/YangAudioEncoder.h" + +class YangAudioEncoderAac: public YangAudioEncoder +{ +public: + YangAudioEncoderAac(); + ~YangAudioEncoderAac(void); + void init(YangAudioInfo *pap); + int32_t encoder(YangFrame* pframe,YangEncoderCallback* pcallback); + +private: + uint8_t *m_aacBuffer; + + int32_t frames; + int32_t mnInputSamples; + uint32_t isamples, maxsample; + unsigned long nSampleRate; + uint32_t nChannels; + uint32_t 
nPCMBitSize; + unsigned long nInputSamples; + unsigned long nMaxOutputBytes; + + int32_t nRet; + int32_t nBytesRead; + int32_t nPCMBufferSize; + +private: + YangLoadLib m_lib; + void encoder(int32_t *p_buf); + void closeAac(); + void saveWave(); + int32_t ret;//, isConvert; + int32_t isRec; + faacEncHandle hEncoder; + void loadLib(); + void unloadLib(); + faacEncHandle (*yang_faacEncOpen)(unsigned long sampleRate, uint32_t numChannels, + unsigned long *inputSamples, unsigned long *maxOutputBytes); + int32_t (*yang_faacEncSetConfiguration)(faacEncHandle hEncoder,faacEncConfigurationPtr config); + int32_t (*yang_faacEncEncode)(faacEncHandle hEncoder, int32_t * inputBuffer, uint32_t samplesInput, + uint8_t *outputBuffer, uint32_t bufferSize); + int32_t (*yang_faacEncClose)(faacEncHandle hEncoder); + faacEncConfigurationPtr + (*yang_faacEncGetCurrentConfiguration)(faacEncHandle hEncoder); +}; + +#endif diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderHandle.cpp b/libmetartc3/src/yangencoder/YangAudioEncoderHandle.cpp new file mode 100755 index 00000000..1d82be1d --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderHandle.cpp @@ -0,0 +1,103 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "yangencoder/YangAudioEncoderHandle.h" + +#include "malloc.h" +#include +#include +#include +#include "stdlib.h" +#include "yangencoder/YangEncoderFactory.h" + +YangAudioEncoderHandle::YangAudioEncoderHandle(YangAudioInfo *pcontext) { + m_isInit = 0; + m_isStart = 0; + m_isConvert = 1; + m_in_audioBuffer = NULL; + m_enc = NULL; + m_out_audioBuffer = NULL; + m_context = pcontext; + m_uid=0; + +} + +YangAudioEncoderHandle::~YangAudioEncoderHandle(void) { + if (m_isConvert) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + yang_delete(m_enc); + m_context = NULL; + m_in_audioBuffer = NULL; + m_out_audioBuffer = NULL; +} +void YangAudioEncoderHandle::stop() { + stopLoop(); +} + +void YangAudioEncoderHandle::run() { + m_isStart = 1; + startLoop(); + m_isStart = 0; +} + +void YangAudioEncoderHandle::init() { + if (m_isInit) + return; + YangEncoderFactory ydf; + if (!m_enc) + m_enc = ydf.createAudioEncoder(m_context); + m_enc->init(m_context); + m_isInit = 1; + +} + +void YangAudioEncoderHandle::setInAudioBuffer(YangAudioBuffer *pbuf) { + m_in_audioBuffer = pbuf; +} +void YangAudioEncoderHandle::setOutAudioBuffer(YangAudioEncoderBuffer *pbuf) { + m_out_audioBuffer = pbuf; +} + +void YangAudioEncoderHandle::onVideoData(YangFrame* pframe) { + +} +void YangAudioEncoderHandle::onAudioData(YangFrame* pframe) { + + m_out_audioBuffer->putAudio(pframe); +} +void YangAudioEncoderHandle::startLoop() { + if(m_in_audioBuffer==NULL) return; + m_isConvert = 1; + uint8_t t_buf1[4096]; + + yang_reindex(m_in_audioBuffer); + yang_reindex(m_out_audioBuffer); + YangFrame audioFrame; + memset(&audioFrame,0,sizeof(YangFrame)); + while (m_isConvert == 1) { + if (m_in_audioBuffer->size() == 0) { + yang_usleep(200); + continue; + } + audioFrame.payload=t_buf1; + audioFrame.uid=m_uid; + audioFrame.nb=0; + if (m_in_audioBuffer->getAudio(&audioFrame)) { + yang_usleep(1000); + continue; + } else { + m_enc->encoder(&audioFrame, this); + + } + } //end + +} + +void YangAudioEncoderHandle::stopLoop() { + m_isConvert = 0; + +} diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderHandleCb.cpp b/libmetartc3/src/yangencoder/YangAudioEncoderHandleCb.cpp new file mode 100755 index 00000000..be8ead1a --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderHandleCb.cpp @@ -0,0 +1,98 @@ +// +// 
Copyright (c) 2019-2022 yanggaofeng +// +#include "yangencoder/YangAudioEncoderHandleCb.h" + +#include "malloc.h" +#include +#include +#include +#include "stdlib.h" +#include "yangencoder/YangEncoderFactory.h" + +YangAudioEncoderHandleCb::YangAudioEncoderHandleCb(YangAudioInfo *pcontext) { + m_isInit = 0; + m_isStart = 0; + m_isConvert = 1; + m_in_audioBuffer = NULL; + m_enc = NULL; + //m_out_audioBuffer = NULL; + m_context = pcontext; + m_cb=NULL; + m_uid=0; +} + +YangAudioEncoderHandleCb::~YangAudioEncoderHandleCb(void) { + if (m_isConvert) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + yang_delete(m_enc); + m_context = NULL; + m_in_audioBuffer = NULL; + m_cb=NULL; + +} +void YangAudioEncoderHandleCb::stop() { + stopLoop(); +} + +void YangAudioEncoderHandleCb::run() { + m_isStart = 1; + startLoop(); + m_isStart = 0; +} + +void YangAudioEncoderHandleCb::init() { + if (m_isInit) + return; + YangEncoderFactory ydf; + if (!m_enc) + m_enc = ydf.createAudioEncoder(m_context); + m_enc->init(m_context); + m_isInit = 1; + +} +void YangAudioEncoderHandleCb::setCallback(YangEncoderCallback* pcb){ + m_cb=pcb; +} +void YangAudioEncoderHandleCb::setInAudioBuffer(YangAudioBuffer *pbuf) { + m_in_audioBuffer = pbuf; +} + + + +void YangAudioEncoderHandleCb::startLoop() { + m_isConvert = 1; + + + uint8_t t_buf1[4096]; + yang_reindex(m_in_audioBuffer); + YangFrame audioFrame; + memset(&audioFrame,0,sizeof(YangFrame)); + audioFrame.uid=m_uid; + while (m_isConvert == 1) { + if (m_in_audioBuffer->size() == 0) { + yang_usleep(200); + continue; + } + audioFrame.payload=t_buf1; + if (m_in_audioBuffer->getAudio(&audioFrame)) { + yang_usleep(2000); + continue; + } else { + + m_enc->encoder(&audioFrame, m_cb); + + } + } //end + +} + +void YangAudioEncoderHandleCb::stopLoop() { + m_isConvert = 0; + +} + diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderMeta.cpp b/libmetartc3/src/yangencoder/YangAudioEncoderMeta.cpp new file mode 100755 index 00000000..a30f6292 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderMeta.cpp @@ -0,0 +1,91 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#include +#include +#include + + + + +void YangAudioEncoderMeta::loadLib(){ + yang_faacEncOpen=(faacEncHandle (*)(unsigned long sampleRate, uint32_t numChannels, + unsigned long *inputSamples, unsigned long *maxOutputBytes))m_lib.loadFunction("faacEncOpen"); + + yang_faacEncSetConfiguration=(int32_t (*)(faacEncHandle hEncoder,faacEncConfigurationPtr config))m_lib.loadFunction("faacEncSetConfiguration"); + yang_faacEncEncode=(int32_t (*)(faacEncHandle hEncoder, int32_t * inputBuffer, uint32_t samplesInput, + uint8_t *outputBuffer, + uint32_t bufferSize))m_lib.loadFunction("faacEncEncode"); + yang_faacEncGetCurrentConfiguration=(faacEncConfigurationPtr + (*)(faacEncHandle hEncoder))m_lib.loadFunction("faacEncGetCurrentConfiguration"); + yang_faacEncClose=(int32_t (*)(faacEncHandle hEncoder))m_lib.loadFunction("faacEncClose"); + + yang_faacEncGetDecoderSpecificInfo=(int32_t (*)(faacEncHandle hEncoder, uint8_t **ppBuffer, + unsigned long *pSizeOfDecoderSpecificInfo))m_lib.loadFunction("faacEncGetDecoderSpecificInfo"); + +} + +void YangAudioEncoderMeta::unloadLib(){ + yang_faacEncGetCurrentConfiguration=NULL; + yang_faacEncOpen=NULL; + yang_faacEncSetConfiguration=NULL; + yang_faacEncEncode=NULL; + yang_faacEncClose=NULL; + yang_faacEncGetDecoderSpecificInfo=NULL; + +} +YangAudioEncoderMeta::YangAudioEncoderMeta(){ + unloadLib(); +} + 
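+// This wrapper follows the same idiom as the other dynamically loaded
+// encoders in this patch: unloadLib() only NULLs the function pointers, so
+// the constructor above calls it as a cheap zero-initializer and the
+// destructor below releases the shared object afterwards. createMeta()
+// further down uses the loaded faac symbols to build the AAC
+// AudioSpecificConfig an RTMP/FLV sequence header needs (typically 2 bytes:
+// 5 bits object type, 4 bits sampling-frequency index, 4 bits channel
+// config plus GASpecificConfig flags). The buffer handed back by
+// faacEncGetDecoderSpecificInfo() appears to be malloc'd by faac, and the
+// matching free stays commented out there, so each call leaks a few bytes.
+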
+YangAudioEncoderMeta::~YangAudioEncoderMeta(){ + unloadLib(); + m_lib.unloadObject(); +} + + +void YangAudioEncoderMeta::createMeta(uint8_t *pasc,int32_t *asclen){ + + m_lib.loadObject("libfaac"); + loadLib(); + faacEncHandle hEncoder=NULL; + unsigned long nSampleRate = 44100; + uint32_t nChannels = 2; + //nPCMBitSize = 16; + unsigned long nInputSamples = 0; + unsigned long nMaxOutputBytes = 0; + hEncoder = yang_faacEncOpen(nSampleRate, nChannels, &nInputSamples,&nMaxOutputBytes); + faacEncConfigurationPtr aconfiguration = yang_faacEncGetCurrentConfiguration(hEncoder); + + aconfiguration->version = MPEG4; + + aconfiguration->aacObjectType = LOW; //MAIN;//LOW;//MAIN; + aconfiguration->allowMidside = 1; + aconfiguration->useTns = 0; + aconfiguration->shortctl = SHORTCTL_NORMAL; + //aconfiguration-> + //aconfiguration->nputformat=FAAC_INPUT_16BIT; + aconfiguration->outputFormat = 0; + + aconfiguration->inputFormat = FAAC_INPUT_16BIT; + aconfiguration->bitRate = 128000 / 2; + //aconfiguration-> + int32_t nRet = yang_faacEncSetConfiguration(hEncoder, aconfiguration); + + + uint8_t *asc=NULL; + unsigned long len=0; + yang_faacEncGetDecoderSpecificInfo(hEncoder, &asc, &len); + if(asc){ + memcpy(pasc,asc,len); + *asclen=len; + yang_faacEncClose(hEncoder); + hEncoder=NULL; + //yang_free(asc); + + } + +} diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderMp3.cpp b/libmetartc3/src/yangencoder/YangAudioEncoderMp3.cpp new file mode 100755 index 00000000..68a02290 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderMp3.cpp @@ -0,0 +1,166 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangAudioEncoderMp3.h" + +#include "yangutil/yang_unistd.h" +#include "stdio.h" + +void YangAudioEncoderMp3::loadLib() { + + yang_lame_init = (lame_global_flags* (*)(void))m_lib.loadFunction("lame_init"); + yang_lame_init_params=(int32_t (*)(lame_global_flags *))m_lib.loadFunction("lame_init_params"); + yang_lame_set_preset=(int32_t (*)( lame_global_flags* gfp, int32_t ))m_lib.loadFunction("lame_set_preset"); + yang_lame_set_in_samplerate=(int32_t (*)(lame_global_flags *, int))m_lib.loadFunction("lame_set_in_samplerate"); + yang_lame_set_VBR=(int32_t (*)(lame_global_flags *, vbr_mode))m_lib.loadFunction("lame_set_VBR"); + yang_lame_set_mode=(int32_t (*)(lame_global_flags *, MPEG_mode))m_lib.loadFunction("lame_set_mode"); + yang_lame_set_num_channels=(int32_t (*)(lame_global_flags *, int))m_lib.loadFunction("lame_set_num_channels"); + yang_lame_set_brate=(int32_t (*)(lame_global_flags *, int))m_lib.loadFunction("lame_set_brate"); + yang_lame_set_strict_ISO=(int32_t (*)(lame_global_flags *, int))m_lib.loadFunction("lame_set_strict_ISO"); + yang_lame_set_original=(int32_t (*)(lame_global_flags *, int))m_lib.loadFunction("lame_set_original"); + yang_lame_set_error_protection=(int32_t (*)(lame_global_flags *, int))m_lib.loadFunction("lame_set_error_protection"); + yang_lame_set_extension=(int32_t (*)(lame_global_flags *, int))m_lib.loadFunction("lame_set_extension"); + yang_lame_set_disable_reservoir=(int32_t (*)(lame_global_flags *, int))m_lib.loadFunction("lame_set_disable_reservoir"); + yang_lame_set_bWriteVbrTag=(int32_t (*)(lame_global_flags *, int))m_lib.loadFunction("lame_set_bWriteVbrTag"); + yang_lame_encode_buffer_interleaved=(int32_t (*)(lame_global_flags* gfp, + int16_t pcm[],int32_t num_samples, uint8_t* mp3buf, + int32_t mp3buf_size ))m_lib.loadFunction("lame_encode_buffer_interleaved"); + yang_lame_close=(int32_t (*) (lame_global_flags *))m_lib.loadFunction("lame_close"); +} + 
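+// All of the yang_lame_* pointers above are resolved by name at runtime;
+// YangLoadLib presumably wraps dlopen()/dlsym() (LoadLibrary/GetProcAddress
+// on Windows). A minimal sketch of the same pattern against POSIX dlfcn,
+// illustrative only:
+//
+//   #include <dlfcn.h>
+//   void* h = dlopen("libmp3lame.so", RTLD_LAZY);           // loadObject()
+//   auto init = (lame_global_flags* (*)(void)) dlsym(h, "lame_init");
+//   if (!h || !init) { /* missing lib or symbol: fail init() early */ }
+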
+void YangAudioEncoderMp3::unloadLib() { + yang_lame_init = NULL; + yang_lame_init_params = NULL; + yang_lame_set_preset = NULL; + yang_lame_set_in_samplerate = NULL; + yang_lame_set_VBR = NULL; + yang_lame_set_mode = NULL; + yang_lame_set_num_channels = NULL; + yang_lame_set_brate = NULL; + yang_lame_set_strict_ISO = NULL; + yang_lame_set_original = NULL; + yang_lame_set_error_protection = NULL; + yang_lame_set_extension = NULL; + yang_lame_set_disable_reservoir = NULL; + yang_lame_set_bWriteVbrTag = NULL; + yang_lame_encode_buffer_interleaved = NULL; + yang_lame_close = NULL; +} + +YangAudioEncoderMp3::YangAudioEncoderMp3() { + m_Lame_Sample_Number = 1152; + ret = 0; + preSize = 0; + dwSamples = 0; + dwRead = 0; + dwWrite = 0; + dwDone = 0; + dwFileSize = 0; + m_bufLen = 0; + + dwMP3Buffer = 0; + pMP3Buffer = NULL; + pWAVBuffer = NULL; + gfp = NULL; + temp = NULL; + //isConvert = 0; + unloadLib(); + +} + +YangAudioEncoderMp3::~YangAudioEncoderMp3(void) { + closeMp3(); + yang_deleteA(temp); + yang_deleteA(pWAVBuffer); + yang_deleteA(pMP3Buffer); + unloadLib(); + m_lib.unloadObject(); + +} + +void YangAudioEncoderMp3::init(YangAudioInfo *pap) { + if (m_isInit == 1) + return; + m_lib.loadObject("libmp3lame"); + loadLib(); + setAudioPara(pap); + dwSamples = 0; + dwMP3Buffer = 0; + pMP3Buffer = NULL; + pWAVBuffer = NULL; + + gfp = NULL; + gfp = yang_lame_init(); + yang_lame_set_in_samplerate(gfp, m_audioInfo.sample); + + yang_lame_set_preset(gfp, m_audioInfo.bitrate); + //lame_set_preset( gfp, R3MIX); + yang_lame_set_VBR(gfp, vbr_off); + yang_lame_set_mode(gfp, STEREO); //JOINT_STEREO STEREO + yang_lame_set_num_channels(gfp, 2); + yang_lame_set_brate(gfp, m_audioInfo.bitrate); + //lame_set_quality(gfp,); + yang_lame_set_strict_ISO(gfp, 1); + yang_lame_set_original(gfp, 1); + yang_lame_set_error_protection(gfp, 0); + yang_lame_set_extension(gfp, 0); + yang_lame_set_disable_reservoir(gfp, 1); + yang_lame_set_bWriteVbrTag(gfp, 1); + int32_t ret_code = yang_lame_init_params(gfp); + if (ret_code < 0) { + printf("lame_init_params failure returned %d\n", ret_code); + } else { + //printf("lame_init_params success returned %d\n", ret_code); + } + + dwSamples = 1152 * 2; + dwMP3Buffer = (DWORD) (1.25 * (dwSamples / 2) + 7200); + + pMP3Buffer = new uint8_t[dwMP3Buffer]; + pWAVBuffer = new short[dwSamples * 8]; + + temp = new uint8_t[1152 * 8]; + //dwRead=0; + dwWrite = 0; + dwDone = 0; + dwFileSize = 0; + m_isInit = 1; + dwRead = m_Lame_Sample_Number * 4; + +} + + +int32_t YangAudioEncoderMp3::encoder(YangFrame* pframe,YangEncoderCallback *pcallback) { + memcpy(temp + m_bufLen, pframe->payload, pframe->nb); + + m_bufLen += pframe->nb; + if (m_bufLen < dwRead) + return 1; + + memcpy(pWAVBuffer, temp, dwRead); + dwWrite = yang_lame_encode_buffer_interleaved(gfp, pWAVBuffer, 1152, + pMP3Buffer, 0); + + if (dwWrite > 0) { + pframe->payload=pMP3Buffer; + pframe->nb=dwWrite; + pcallback->onAudioData(pframe); + return Yang_Ok; + + } + m_bufLen -= dwRead; + + if (m_bufLen < dwRead && m_bufLen > 0) { + memcpy(temp, temp + dwRead, m_bufLen); + } + return Yang_Ok; +} + +void YangAudioEncoderMp3::closeMp3() { + if (gfp != NULL) + yang_lame_close(gfp); + gfp = NULL; + ret = 2; + +} + diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderMp3.h b/libmetartc3/src/yangencoder/YangAudioEncoderMp3.h new file mode 100755 index 00000000..4e86489e --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderMp3.h @@ -0,0 +1,68 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangAudioMp3Encoder__ +#define 
__YangAudioMp3Encoder__ +#include +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +extern "C"{ +#include "lame.h" +} +#include "yangutil/buffer/YangAudioBuffer.h" +#include "yangencoder/YangAudioEncoder.h" +#include "yangutil/sys/YangLoadLib.h" +#ifndef _WIN32 +typedef int32_t DWORD ; +#endif +class YangAudioEncoderMp3:public YangAudioEncoder +{ +public: + YangAudioEncoderMp3(); + ~YangAudioEncoderMp3(void); + void init(YangAudioInfo *pap); + int32_t encoder(YangFrame* pframe,YangEncoderCallback* pcallback); + + +private: + uint8_t *temp;//[1153*2+44100*2]; + uint8_t *pMP3Buffer; + short* pWAVBuffer; + lame_global_flags *gfp; + int m_Lame_Sample_Number; + int32_t m_bufLen;//m_Audio_Frame_Size,; + DWORD dwSamples; + DWORD dwRead; + DWORD dwWrite; + DWORD dwDone; + DWORD dwFileSize; + int32_t preSize; + +private: + YangLoadLib m_lib; + void closeMp3(); + int dwMP3Buffer ; + int32_t ret; + void loadLib(); + void unloadLib(); + lame_global_flags * (*yang_lame_init)(void); + int32_t (*yang_lame_init_params)(lame_global_flags *); + int32_t (*yang_lame_set_preset)( lame_global_flags* gfp, int32_t ); + int32_t (*yang_lame_set_in_samplerate)(lame_global_flags *, int); + int32_t (*yang_lame_set_VBR)(lame_global_flags *, vbr_mode); + int32_t (*yang_lame_set_mode)(lame_global_flags *, MPEG_mode); + int32_t (*yang_lame_set_num_channels)(lame_global_flags *, int); + int32_t (*yang_lame_set_brate)(lame_global_flags *, int); + int32_t (*yang_lame_set_strict_ISO)(lame_global_flags *, int); + int32_t (*yang_lame_set_original)(lame_global_flags *, int); + int32_t (*yang_lame_set_error_protection)(lame_global_flags *, int); + int32_t (*yang_lame_set_extension)(lame_global_flags *, int); + int32_t (*yang_lame_set_disable_reservoir)(lame_global_flags *, int); + int32_t (*yang_lame_set_bWriteVbrTag)(lame_global_flags *, int); + int32_t (*yang_lame_encode_buffer_interleaved)(lame_global_flags* gfp, + int16_t pcm[],int32_t num_samples, uint8_t* mp3buf, + int32_t mp3buf_size ); + int32_t (*yang_lame_close) (lame_global_flags *); + +}; +#endif + diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderOpus.cpp b/libmetartc3/src/yangencoder/YangAudioEncoderOpus.cpp new file mode 100755 index 00000000..baea7b41 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderOpus.cpp @@ -0,0 +1,179 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangAudioEncoderOpus.h" + +#include +#include +#include +#include + + + +void YangAudioEncoderOpus::loadLib() { + yang_opus_encoder_create = (OpusEncoder* (*)(opus_int32 Fs, int32_t channels, + int32_t application, int32_t *error)) m_lib.loadFunction( + "opus_encoder_create"); + yang_opus_encoder_init = (int32_t (*)(OpusEncoder *st, opus_int32 Fs, + int32_t channels, int32_t application)) m_lib.loadFunction( + "opus_encoder_init"); + yang_opus_encode = + (opus_int32 (*)(OpusEncoder *st, const opus_int16 *pcm, + int32_t frame_size, uint8_t *data, + opus_int32 max_data_bytes)) m_lib.loadFunction( + "opus_encode"); + yang_opus_encoder_ctl = + (int32_t (*)(OpusEncoder *st, int32_t request, ...)) m_lib.loadFunction( + "opus_encoder_ctl"); + yang_opus_encoder_destroy = (void (*)(OpusEncoder *st)) m_lib.loadFunction( + "opus_encoder_destroy"); + yang_opus_strerror = (const char* (*)(int32_t error)) m_lib.loadFunction( + "opus_strerror"); +} + +void YangAudioEncoderOpus::unloadLib() { + yang_opus_encoder_create = NULL; + yang_opus_encoder_init = NULL; + yang_opus_encode = NULL; + yang_opus_encoder_ctl = NULL; + yang_opus_encoder_destroy = NULL; + yang_opus_strerror = 
NULL; +} +YangAudioEncoderOpus::YangAudioEncoderOpus() { + ret = 0; + m_cbits = NULL; + m_encoder = NULL; + m_input1 = NULL; + m_input = NULL; + m_in = NULL; + m_frameShortSize=0; + m_frameSize=0; + //temp = NULL; + unloadLib(); +} + +YangAudioEncoderOpus::~YangAudioEncoderOpus() { + closeEnc(); + yang_deleteA(m_cbits); + + yang_deleteA(m_in); + yang_deleteA(m_input1); + yang_deleteA(m_input); + + + unloadLib(); + m_lib.unloadObject(); +} + +#define MAX_PACKET_SIZE (3*1276) +void YangAudioEncoderOpus::init(YangAudioInfo *pap) { + if (m_isInit) + return; + + m_lib.loadObject("libopus"); + loadLib(); + setAudioPara(pap); + int32_t err = 0; + + m_encoder = yang_opus_encoder_create(m_audioInfo.sample, m_audioInfo.channel, + OPUS_APPLICATION_VOIP, &err);//OPUS_APPLICATION_AUDIO + if (err < 0) { + fprintf(stderr, "failed to create an Opus encoder: %s\n", + yang_opus_strerror(err)); +#ifdef _MSC_VER + ExitProcess(1); +#else + _exit(0); +#endif + + } + yang_opus_encoder_ctl(m_encoder, OPUS_SET_VBR(0)); + //#define BITRATE 16000 + //#define BITRATE 16000 +//yang_opus_encoder_ctl(m_encoder, OPUS_SET_BITRATE(bitrate_bps)); + if (m_audioInfo.usingMono){ + yang_opus_encoder_ctl(m_encoder,OPUS_SET_BANDWIDTH(OPUS_BANDWIDTH_WIDEBAND)); + } +//yang_opus_encoder_ctl(m_encoder, OPUS_SET_VBR(use_vbr)); +//yang_opus_encoder_ctl(m_encoder, OPUS_SET_VBR_CONSTRAINT(cvbr)); +//yang_opus_encoder_ctl(m_encoder, OPUS_SET_COMPLEXITY(10)); +//yang_opus_encoder_ctl(m_encoder, OPUS_SET_INBAND_FEC(use_inbandfec)); +//yang_opus_encoder_ctl(m_encoder, OPUS_SET_FORCE_CHANNELS(forcechannels)); +//yang_opus_encoder_ctl(m_encoder, OPUS_SET_DTX(use_dtx)); +//yang_opus_encoder_ctl(m_encoder, OPUS_SET_PACKET_LOSS_PERC(packet_loss_perc)); + int32_t skip = 0; + yang_opus_encoder_ctl(m_encoder, OPUS_GET_LOOKAHEAD(&skip)); + yang_opus_encoder_ctl(m_encoder, OPUS_SET_LSB_DEPTH(16)); +//yang_opus_encoder_ctl(m_encoder, OPUS_SET_EXPERT_FRAME_DURATION(variable_duration)); + + m_frameSize=m_audioInfo.sample/50; + + m_frameShortSize=m_frameSize*m_audioInfo.channel; + m_in = new short[m_frameShortSize]; + m_cbits = new uint8_t[MAX_PACKET_SIZE]; + m_input = new uint8_t[m_frameShortSize*2]; + m_input1 = new short[m_frameShortSize]; + m_isInit = 1; + +} + + +int32_t YangAudioEncoderOpus::encoder(YangFrame* pframe,YangEncoderCallback *pcallback) { + if (!m_encoder) return 1; + + memcpy(m_input, pframe->payload, pframe->nb); + for (int32_t i = 0; i < m_frameShortSize; i++) { + m_input1[i] = m_input[2 * i + 1] << 8 | m_input[2 * i]; + } + + ret = yang_opus_encode(m_encoder, m_input1, m_frameSize, m_cbits, MAX_PACKET_SIZE); + if (ret > 0 && pcallback){ + pframe->payload=m_cbits; + pframe->nb=ret; + pcallback->onAudioData(pframe); + + } + + + return Yang_Ok; +} +/** + void YangAudioEncoderOpus::encoder(uint8_t *p_buf,int32_t bufLen){ + if(!m_encoder) return; + if(m_yap.usingMono){ + memcpy(m_input, p_buf, 640); + for (int32_t i=0;i 0 && m_pipe) m_pipe->putPipeData( m_cbits, ret); + }else{ + memcpy(temp + m_bufLen, p_buf, 4096); + m_bufLen = m_bufLen + 4096; + int32_t len = 0; + int32_t frameSize=(m_yap.sample/50)*4; + //int32_t readLen=frameSize; + while (m_bufLen >= frameSize) { + memcpy((char*)m_in, temp + len, frameSize); + ret = yang_opus_encode(m_encoder,m_in, frameSize, m_cbits, MAX_PACKET_SIZE); + if (ret > 0 && m_pipe) m_pipe->putPipeData((uint8_t*) m_cbits, ret); + //printf("%d,",ret); + m_bufLen -= frameSize; + len += frameSize; + } + memcpy(temp, temp + len, m_bufLen); + } + } + + void YangAudioEncoderOpus::encoder(uint8_t *p_buf,int32_t 
bufLen,uint8_t *p_out,int32_t *outLen){ + + } + **/ +void YangAudioEncoderOpus::closeEnc() { + + if (m_encoder) + yang_opus_encoder_destroy(m_encoder); + m_encoder = NULL; + +} diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderOpus.h b/libmetartc3/src/yangencoder/YangAudioEncoderOpus.h new file mode 100755 index 00000000..64a280fe --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderOpus.h @@ -0,0 +1,46 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGENCODER_SRC_YANGAUDIOENCODEROPUS_H_ +#define YANGENCODER_SRC_YANGAUDIOENCODEROPUS_H_ +#include "opus/opus.h" +#include "yangutil/sys/YangLoadLib.h" +#include "yangencoder/YangAudioEncoder.h" + +class YangAudioEncoderOpus: public YangAudioEncoder { +public: + YangAudioEncoderOpus(); + virtual ~YangAudioEncoderOpus(); + void init(YangAudioInfo *pap); + int32_t encoder(YangFrame* pframe,YangEncoderCallback* pcallback); + +private: + int32_t ret; + int32_t m_frameSize; + int32_t m_frameShortSize; + OpusEncoder *m_encoder; + YangLoadLib m_lib; + + uint8_t *m_cbits; //[YangSpeexBufferSize]; + uint8_t *m_input; + short *m_in; + short *m_input1; //[m_frameSize]; + + + void encoder(uint8_t *p_buf); + void encoder_mono(uint8_t *p_buf); + + void closeEnc(); + void loadLib(); + void unloadLib(); + OpusEncoder *(*yang_opus_encoder_create)(opus_int32 Fs, int32_t channels, int32_t application, int32_t *error); + int32_t (*yang_opus_encoder_init)(OpusEncoder *st, opus_int32 Fs, int32_t channels, int32_t application) ; + opus_int32 (*yang_opus_encode)(OpusEncoder *st,const opus_int16 *pcm, int32_t frame_size, uint8_t *data, + opus_int32 max_data_bytes); + int32_t (*yang_opus_encoder_ctl)(OpusEncoder *st, int32_t request, ...); + void (*yang_opus_encoder_destroy)(OpusEncoder *st); + const char *(*yang_opus_strerror)(int32_t error); +}; + +#endif /* YANGENCODER_SRC_YANGAUDIOENCODEROPUS_H_ */ diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderSpeex.cpp b/libmetartc3/src/yangencoder/YangAudioEncoderSpeex.cpp new file mode 100755 index 00000000..ddb9ecce --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderSpeex.cpp @@ -0,0 +1,211 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangAudioEncoderSpeex.h" + +#include "yangutil/yang_unistd.h" +#include "stdio.h" +#include "string.h" + + +void YangAudioEncoderSpeex::loadLib() { + yang_speex_lib_get_mode = + (const SpeexMode* (*)(int32_t mode)) m_lib.loadFunction( + "speex_lib_get_mode"); + yang_speex_encoder_ctl = + (int32_t (*)(void *state, int32_t request, void *ptr)) m_lib.loadFunction( + "speex_encoder_ctl"); + yang_speex_encoder_init = + (void* (*)(const SpeexMode *mode)) m_lib.loadFunction( + "speex_encoder_init"); + yang_speex_bits_init = (void (*)(SpeexBits *bits)) m_lib.loadFunction( + "speex_bits_init"); + yang_speex_bits_reset = (void (*)(SpeexBits *bits)) m_lib.loadFunction( + "speex_bits_reset"); + yang_speex_bits_write = + (int32_t (*)(SpeexBits *bits, char *bytes, int32_t max_len)) m_lib.loadFunction( + "speex_bits_write"); + yang_speex_encode = + (int32_t (*)(void *state, float *in, SpeexBits *bits)) m_lib.loadFunction( + "speex_encode"); + yang_speex_encoder_destroy = (void (*)(void *state)) m_lib.loadFunction( + "speex_encoder_destroy"); + yang_speex_bits_destroy = (void (*)(SpeexBits *bits)) m_lib.loadFunction( + "speex_bits_destroy"); +} + +void YangAudioEncoderSpeex::unloadLib() { + yang_speex_lib_get_mode = NULL; + yang_speex_encoder_init = NULL; + yang_speex_bits_init = NULL; + yang_speex_bits_reset = NULL; + yang_speex_bits_write 
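+ /* speex_encode() consumes float PCM, which is why the live encoder()
+ below widens int16 samples into m_input before encoding; libspeex also
+ exports an int16 entry point that skips that copy. Sketch for one 20 ms
+ wideband frame (320 samples at 16 kHz):
+ spx_int16_t pcm[320];
+ speex_bits_reset(&m_bits);
+ speex_encode_int(m_state, pcm, &m_bits);
+ int32_t n = speex_bits_write(&m_bits, m_cbits, 300);
+ */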
= NULL; + yang_speex_encode = NULL; + yang_speex_encoder_destroy = NULL; + yang_speex_bits_destroy = NULL; +} +YangAudioEncoderSpeex::YangAudioEncoderSpeex() { + //m_context = conf; + isConvert = 0; + m_quality = 8; + m_state = NULL; + ret = 0; + m_cbits = new char[600]; + + m_bufLen = 0; + skip_group_delay = 0; + //m_mode = speex_wb_mode; + temp = NULL; //new char[8096]; + + m_input = NULL; + m_input1 = NULL; + m_in = NULL; + unloadLib(); + +} +YangAudioEncoderSpeex::~YangAudioEncoderSpeex(void) { + closeSpeex(); + yang_deleteA(m_cbits); + yang_deleteA(temp); + //yang_deleteA(m_outData); + yang_deleteA(m_input1); + yang_deleteA(m_input); + yang_deleteA(m_in); + + unloadLib(); + m_lib.unloadObject(); +} + +void YangAudioEncoderSpeex::initSpeexPara() { + //if (m_mode == speex_nb_mode) { + // m_quality = 6; + //m_frameSize = 160; + //} + //if (m_mode == speex_wb_mode) { + m_quality = 10; + // m_frameSize = 320; +// } + //if (m_mode == speex_uwb_mode) { + // m_quality = 10; + // m_frameSize = 640; + //} +} + +void YangAudioEncoderSpeex::init(YangAudioInfo *pap) { + if (m_isInit) + return; + //printf("\n******************************speex is initing.........................\n"); + m_lib.loadObject("libspeex"); + loadLib(); + setAudioPara(pap); + m_state = yang_speex_encoder_init(yang_speex_lib_get_mode(SPEEX_MODEID_WB));//speex_wb_mode &speex_nb_mode); + //speex_lib_get_mode(SPEEX_MODEID_WB); + //const SpeexMode * speex_lib_get_mode (int32_t mode); + //initSpeexPara(); + //m_quality = 8; + m_quality = 10; + //m_frameSize = 320; + //m_channel=1; + //int32_t samplerate = m_context->sample; + yang_speex_encoder_ctl(m_state, SPEEX_SET_QUALITY, &m_quality); + //speex_encoder_ctl(m_state, SPEEX_SET_SAMPLING_RATE, &samplerate); + //int32_t tmp = 0; + //speex_encoder_ctl(m_state, SPEEX_SET_VBR, &tmp); + //tmp = 10; + //speex_encoder_ctl(m_state, SPEEX_SET_QUALITY, &tmp); + //tmp = 1; +// speex_encoder_ctl(m_state, SPEEX_SET_COMPLEXITY, &tmp); + + //speex_encoder_ctl(m_state, SPEEX_GET_BITRATE, &tmp); + //int32_t sam=0; + //speex_encoder_ctl(m_state, SPEEX_GET_SAMPLING_RATE, &sam); + +// m_resample->init(2,m_context->sample,16000); + //speex_encoder_ctl(m_state, SPEEX_GET_LOOKAHEAD, &skip_group_delay); + + // int32_t frameSize=0; +// speex_encoder_ctl(m_state, SPEEX_GET_FRAME_SIZE, &m_frameSize); + //printf("\nframeSize==%d,bitrate==%d,sample=%d\n", m_frameSize,tmp,sam); + if (!m_audioInfo.usingMono) { + m_in = new short[882 * 2]; + temp = new char[1024 * 8]; + m_input = new float[m_audioInfo.frameSize * m_audioInfo.channel]; + m_input1 = new short[m_audioInfo.frameSize * m_audioInfo.channel]; + } else { + m_input1 = new short[320]; + m_input = new float[320]; + } + + //SPEEX_SET_BITRATE + yang_speex_bits_init(&m_bits); + + m_isInit = 1; + //speex_encode_stereo() + +} + + +int32_t YangAudioEncoderSpeex::encoder(YangFrame* pframe,YangEncoderCallback *pcallback) { + + if (!m_state) + return 1; + if (m_audioInfo.usingMono) { + memcpy((char*) m_input1, pframe->payload, 640); + for (int32_t i = 0; i < 320; i++) { + m_input[i] = m_input1[i]; + } + yang_speex_bits_reset(&m_bits); + //speex_encode_int(m_state, m_in, &m_bits); + yang_speex_encode(m_state, m_input, &m_bits); + ret = yang_speex_bits_write(&m_bits, m_cbits, 300); + //printf("m%d,",ret); + if (ret > 0 && pcallback) { + pframe->payload=(uint8_t*) m_cbits; + pframe->nb=ret; + pcallback->onAudioData(pframe); + return Yang_Ok; + } + return 1; + //if (ret > 0 && m_pipe) m_pipe->putPipeData((uint8_t*) m_cbits, ret); + } + return 1; + +} + +/** + void 
YangAudioEncoderSpeex::encoder(uint8_t *p_buf,int32_t bufLen,YangAudioEncoderBuffer* pbuf){ + if(!m_state) return; + if(m_yap.usingMono){ + memcpy((char*)m_input1, p_buf, 640); + for(int32_t i=0;i<320;i++){ + m_input[i]=m_input1[i]; + } + yang_speex_bits_reset(&m_bits); + //speex_encode_int(m_state, m_in, &m_bits); + yang_speex_encode(m_state,m_input, &m_bits); + ret = yang_speex_bits_write(&m_bits, m_cbits, 300); + //printf("m%d,",ret); + if (ret > 0 && pbuf) pbuf->putAudio((uint8_t*) m_cbits, ret); + }else{ + + } + } + void YangAudioEncoderSpeex::encoder(uint8_t *p_buf,int32_t bufLen,uint8_t *p_out,int32_t *outLen){ + + } + void YangAudioEncoderSpeex::encoder_mono(char *p_buf){ + + + //printf("%d,",ret); + + } + + + **/ +void YangAudioEncoderSpeex::closeSpeex() { + if (m_state) + yang_speex_encoder_destroy(m_state); + m_state = NULL; + yang_speex_bits_destroy(&m_bits); + //hEncoder = NULL; +} diff --git a/libmetartc3/src/yangencoder/YangAudioEncoderSpeex.h b/libmetartc3/src/yangencoder/YangAudioEncoderSpeex.h new file mode 100755 index 00000000..9c0b5fe7 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangAudioEncoderSpeex.h @@ -0,0 +1,55 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGENCODER_SRC_YANGAUDIOENCODERSPEEX_H_ +#define YANGENCODER_SRC_YANGAUDIOENCODERSPEEX_H_ + +#include "speex/speex.h" +//#include "../../yangaudioproc/YangResample.h" +#include "yangutil/sys/YangLoadLib.h" +#include "yangencoder/YangAudioEncoder.h" +//#define YangSpeexBufferSize 1024 + +class YangAudioEncoderSpeex: public YangAudioEncoder { +public: + YangAudioEncoderSpeex(); + virtual ~YangAudioEncoderSpeex(); + void init(YangAudioInfo *pap); + int32_t encoder(YangFrame* pframe,YangEncoderCallback* pcallback); + +private: + int32_t isConvert; + SpeexBits m_bits; + + YangLoadLib m_lib; + void initSpeexPara(); + int32_t m_quality; + int32_t ret; + spx_int32_t skip_group_delay; + void *m_state; + char *m_cbits; //[YangSpeexBufferSize]; + int32_t m_bufLen; + char *temp; + float *m_input; + short *m_input1; //[m_frameSize]; + short *m_in; //[m_frameSize]; + + void encoder(char *p_buf); + void encoder_mono(char *p_buf); + void closeSpeex(); + void saveWave(); + void loadLib(); + void unloadLib(); + const SpeexMode * (*yang_speex_lib_get_mode) (int32_t mode); + void *(*yang_speex_encoder_init)(const SpeexMode *mode); + void (*yang_speex_bits_init)(SpeexBits *bits); + void (*yang_speex_bits_reset)(SpeexBits *bits); + int32_t (*yang_speex_bits_write)(SpeexBits *bits, char *bytes, int32_t max_len); + int32_t (*yang_speex_encode)(void *state, float *in, SpeexBits *bits); + void (*yang_speex_encoder_destroy)(void *state); + void (*yang_speex_bits_destroy)(SpeexBits *bits); + int32_t (*yang_speex_encoder_ctl)(void *state, int32_t request, void *ptr); +}; + +#endif /* YANGENCODER_SRC_YANGAUDIOENCODERSPEEX_H_ */ diff --git a/libmetartc3/src/yangencoder/YangEncoderFactory.cpp b/libmetartc3/src/yangencoder/YangEncoderFactory.cpp new file mode 100755 index 00000000..77a68050 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangEncoderFactory.cpp @@ -0,0 +1,73 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "YangH264EncoderIntel.h" +#include "YangH264EncoderSoft.h" +#include "YangH265EncoderSoft.h" +#include "YangVideoEncoderFfmpeg.h" +#include "yangencoder/YangEncoderFactory.h" + +#include "YangAudioEncoderAac.h" +#include "YangAudioEncoderMp3.h" +#include "YangAudioEncoderOpus.h" +#include "YangAudioEncoderSpeex.h" +#include "YangH264EncoderMeta.h" +#include "YangH265EncoderMeta.h" +#include 
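+ /* Codec selection sketch for the factory in this file; audioEncoderType
+ maps 0=AAC, 1=MP3, 2=Speex, 3=Opus exactly as createAudioEncoder() does:
+ YangEncoderFactory fac;
+ YangAudioInfo info; // assume the caller filled sample/channel etc.
+ info.audioEncoderType = 3;
+ YangAudioEncoder *enc = fac.createAudioEncoder(&info);
+ enc->init(&info);
+ */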
"YangFfmpegEncoderMeta.h" +#include + +YangEncoderFactory::YangEncoderFactory() { + + +} + +YangEncoderFactory::~YangEncoderFactory() { + +} +YangVideoEncoderMeta* YangEncoderFactory::createVideoEncoderMeta(YangVideoInfo *pcontext){ + if(pcontext->videoEncHwType>0) return new YangFfmpegEncoderMeta(); + if(pcontext->videoEncoderType==0) return new YangH264EncoderMeta(); + if(pcontext->videoEncoderType==1) return new YangH265EncoderMeta(); + return new YangH264EncoderMeta(); +} + + +YangAudioEncoder *YangEncoderFactory::createAudioEncoder(YangAudioCodec paet,YangAudioInfo *pcontext){ + if(paet==Yang_AED_MP3) return new YangAudioEncoderMp3(); + if(paet==Yang_AED_SPEEX) return new YangAudioEncoderSpeex(); + if(paet==Yang_AED_OPUS) return new YangAudioEncoderOpus(); + return new YangAudioEncoderAac(); +} +YangAudioEncoder *YangEncoderFactory::createAudioEncoder(YangAudioInfo *pcontext){ + YangAudioCodec maet=Yang_AED_AAC; + + if(pcontext->audioEncoderType==1) maet=Yang_AED_MP3; + if(pcontext->audioEncoderType==2) maet=Yang_AED_SPEEX; + if(pcontext->audioEncoderType==3) maet=Yang_AED_OPUS; + return createAudioEncoder(maet,pcontext); +} + +YangVideoEncoder* YangEncoderFactory::createVideoEncoder(YangVideoCodec paet,YangVideoInfo *pcontext){ + if(pcontext->videoEncHwType==0){ + //printf("\n*********************pate===%d\n",paet); + if (paet == Yang_VED_264) return new YangH264EncoderSoft(); + if (paet == Yang_VED_265) return new YangH265EncoderSoft(); + }else{ + #if Yang_GPU_Encoding + YangGpuEncoderFactory gf; + return gf.createGpuEncoder(); +#else + return new YangVideoEncoderFfmpeg(paet,pcontext->videoEncHwType); +#endif + } + + return new YangH264EncoderSoft(); +} + YangVideoEncoder* YangEncoderFactory::createVideoEncoder(YangVideoInfo *pcontext){ + YangVideoCodec maet=Yang_VED_264; + if(pcontext->videoEncoderType==0) maet=Yang_VED_264; + if(pcontext->videoEncoderType==1) maet=Yang_VED_265; + //if(pcontext->audioEncoderType==2) maet=Yang_AE_SPEEX; + return createVideoEncoder(maet,pcontext); + } diff --git a/libmetartc3/src/yangencoder/YangFfmpegEncoderMeta.cpp b/libmetartc3/src/yangencoder/YangFfmpegEncoderMeta.cpp new file mode 100755 index 00000000..147f4b86 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangFfmpegEncoderMeta.cpp @@ -0,0 +1,284 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangFfmpegEncoderMeta.h" +#include "YangVideoEncoderFfmpeg.h" +#include +#include +YangFfmpegEncoderMeta::YangFfmpegEncoderMeta() { +#if Yang_Ffmpeg_UsingSo + unloadLib(); +#endif + +} + +YangFfmpegEncoderMeta::~YangFfmpegEncoderMeta() { +#if Yang_Ffmpeg_UsingSo + unloadLib(); + m_lib.unloadObject(); + m_lib1.unloadObject(); +#endif +} + +#if Yang_Ffmpeg_UsingSo +void YangFfmpegEncoderMeta::loadLib() { + yang_av_buffer_unref = (void (*)(AVBufferRef **buf)) m_lib1.loadFunction( + "av_buffer_unref"); + yang_av_hwframe_ctx_init = (int32_t (*)(AVBufferRef *ref)) m_lib1.loadFunction( + "av_hwframe_ctx_init"); + yang_av_frame_alloc = (AVFrame* (*)(void)) m_lib1.loadFunction( + "av_frame_alloc"); + yang_av_image_get_buffer_size = (int32_t (*)(enum AVPixelFormat pix_fmt, + int32_t width, int32_t height, int32_t align)) m_lib1.loadFunction( + "av_image_get_buffer_size"); + yang_av_hwdevice_ctx_create = (int32_t (*)(AVBufferRef **device_ctx, + enum AVHWDeviceType type, const char *device, AVDictionary *opts, + int32_t flags)) m_lib1.loadFunction("av_hwdevice_ctx_create"); + yang_av_hwframe_transfer_data = (int32_t (*)(AVFrame *dst, const AVFrame *src, + int32_t flags)) 
m_lib1.loadFunction("av_hwframe_transfer_data"); + yang_av_free = (void (*)(void *ptr)) m_lib1.loadFunction("av_free"); + yang_av_frame_free = (void (*)(AVFrame **frame)) m_lib1.loadFunction( + "av_frame_free"); + yang_av_buffer_ref = + (AVBufferRef* (*)(AVBufferRef *buf)) m_lib1.loadFunction( + "av_buffer_ref"); + yang_av_image_fill_arrays = (int32_t (*)(uint8_t *dst_data[4], + int32_t dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, + int32_t width, int32_t height, int32_t align)) m_lib1.loadFunction( + "av_image_fill_arrays"); + yang_av_hwframe_ctx_alloc = + (AVBufferRef* (*)(AVBufferRef *device_ctx)) m_lib1.loadFunction( + "av_hwframe_ctx_alloc"); + yang_av_hwframe_get_buffer = (int32_t (*)(AVBufferRef *hwframe_ctx, + AVFrame *frame, int32_t flags)) m_lib1.loadFunction( + "av_hwframe_get_buffer"); + yang_av_malloc = (void* (*)(size_t size)) m_lib1.loadFunction("av_malloc"); + + yang_avcodec_alloc_context3 = + (AVCodecContext* (*)(const AVCodec *codec)) m_lib.loadFunction( + "avcodec_alloc_context3"); + yang_av_init_packet = (void (*)(AVPacket *pkt)) m_lib.loadFunction( + "av_init_packet"); + yang_avcodec_find_encoder_by_name = + (AVCodec* (*)(const char *name)) m_lib.loadFunction( + "avcodec_find_encoder_by_name"); + yang_avcodec_open2 = (int32_t (*)(AVCodecContext *avctx, const AVCodec *codec, + AVDictionary **options)) m_lib.loadFunction("avcodec_open2"); + yang_avcodec_send_frame = (int32_t (*)(AVCodecContext *avctx, + const AVFrame *frame)) m_lib.loadFunction("avcodec_send_frame"); + yang_avcodec_receive_packet = (int32_t (*)(AVCodecContext *avctx, + AVPacket *avpkt)) m_lib.loadFunction("avcodec_receive_packet"); + yang_avcodec_close = (int32_t (*)(AVCodecContext *avctx)) m_lib.loadFunction( + "avcodec_close"); +} +void YangFfmpegEncoderMeta::unloadLib() { + yang_av_hwframe_ctx_alloc = NULL; + yang_av_hwframe_ctx_init = NULL; + yang_av_buffer_unref = NULL; + yang_avcodec_find_encoder_by_name = NULL; + yang_av_hwdevice_ctx_create = NULL; + yang_av_frame_alloc = NULL; + yang_avcodec_open2 = NULL; + yang_av_image_get_buffer_size = NULL; + yang_av_malloc = NULL; + yang_av_image_fill_arrays = NULL; + yang_av_init_packet = NULL; + yang_av_hwframe_get_buffer = NULL; + yang_av_hwframe_transfer_data = NULL; + yang_avcodec_send_frame = NULL; + yang_avcodec_receive_packet = NULL; + yang_av_frame_free = NULL; + yang_avcodec_close = NULL; + yang_av_free = NULL; +} +#endif +#define HEX2BIN(a) (((a)&0x40)?((a)&0xf)+9:((a)&0xf)) +//void ConvertYCbCr2BGR(uint8_t *pYUV,uint8_t *pBGR,int32_t iWidth,int32_t iHeight); +//void ConvertRGB2YUV(int32_t w,int32_t h,uint8_t *bmp,uint8_t *yuv); +//int32_t g_m_fx2=2; +void YangFfmpegEncoderMeta::yang_find_next_start_code(YangVideoCodec pve,uint8_t *buf,int32_t bufLen,int32_t *vpsPos,int32_t *vpsLen,int32_t *spsPos,int32_t *spsLen,int32_t *ppsPos,int32_t *ppsLen) +{ + int32_t i = 0; + // printf("\n**********************extradate.....=%d\n",bufLen); + // for(int32_t j=0;jdata); + frames_ctx->format = ctxformat; + frames_ctx->sw_format = swformat; + + frames_ctx->width = pwid; + frames_ctx->height = phei; + frames_ctx->initial_pool_size = 0; + if ((err = yang_av_hwframe_ctx_init(hw_frames_ref)) < 0) { + yang_error("Failed to initialize VAAPI frame context.Error code: %d\n", + ret); + yang_av_buffer_unref(&hw_frames_ref); + return err; + } + ctx->hw_frames_ctx = yang_av_buffer_ref(hw_frames_ref); + ctx->hw_device_ctx = yang_av_buffer_ref(hw_device_ctx); + // ctx->hwaccel_flags=1; + if (!ctx->hw_frames_ctx) + err = AVERROR(ENOMEM); + + 
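+ /* Ownership note (FFmpeg hwcontext API): av_hwframe_ctx_alloc() hands back
+ a fresh AVBufferRef, and the av_buffer_ref() calls above take the extra
+ references owned by the codec context, so the local ref is always dropped
+ below. A minimal standalone sketch of the same lifecycle, assuming a
+ VAAPI node at /dev/dri/renderD128:
+ AVBufferRef *dev = NULL, *frames = NULL;
+ if (av_hwdevice_ctx_create(&dev, AV_HWDEVICE_TYPE_VAAPI,
+ "/dev/dri/renderD128", NULL, 0) == 0) {
+ frames = av_hwframe_ctx_alloc(dev);
+ AVHWFramesContext *fc = (AVHWFramesContext*) frames->data;
+ fc->format = AV_PIX_FMT_VAAPI;
+ fc->sw_format = AV_PIX_FMT_NV12;
+ fc->width = 1280; fc->height = 720;
+ fc->initial_pool_size = 20; // VAAPI usually wants a fixed pool
+ if (av_hwframe_ctx_init(frames) == 0) { ... use it ... }
+ av_buffer_unref(&frames);
+ av_buffer_unref(&dev);
+ }
+ */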
yang_av_buffer_unref(&hw_frames_ref); + return err; +} + +enum AVPixelFormat get_hw_format22(AVCodecContext *ctx, + const enum AVPixelFormat *pix_fmts) { + if(YangVideoEncoderFfmpeg::g_hwType==YangV_Hw_Intel) return AV_PIX_FMT_VAAPI; + if(YangVideoEncoderFfmpeg::g_hwType==YangV_Hw_Nvdia) return AV_PIX_FMT_CUDA; + if(YangVideoEncoderFfmpeg::g_hwType==YangV_Hw_Android) return AV_PIX_FMT_MEDIACODEC; + return AV_PIX_FMT_VAAPI; +} +void YangFfmpegEncoderMeta::yang_getSpsPps(YangH2645Conf *pconf, + YangVideoInfo *p_yvp, YangVideoEncInfo *penc) { +#if Yang_Ffmpeg_UsingSo + m_lib.loadObject("libavcodec"); + m_lib1.loadObject("libavutil"); + loadLib(); +#endif + YangVideoCodec m_encoderType=(YangVideoCodec)p_yvp->videoEncoderType; + YangVideoHwType m_hwType=(YangVideoHwType)p_yvp->videoEncHwType; + AVCodec *m_codec=NULL; + AVCodecContext *m_codecCtx = NULL; + AVBufferRef *hw_device_ctx=NULL; + //hevc_vaapi nvenc nvdec vdpau h264_nvenc + if(m_encoderType==Yang_VED_264){ + if(m_hwType==YangV_Hw_Intel) m_codec = yang_avcodec_find_encoder_by_name("h264_vaapi");//avcodec_find_encoder(AV_CODEC_ID_H264); + if(m_hwType==YangV_Hw_Nvdia) m_codec = yang_avcodec_find_encoder_by_name("h264_nvenc"); + if(m_hwType==YangV_Hw_Android) m_codec = yang_avcodec_find_encoder_by_name("h264_mediacodec"); + }else if(m_encoderType==Yang_VED_265){ + if(m_hwType==YangV_Hw_Intel) m_codec = yang_avcodec_find_encoder_by_name("hevc_vaapi"); + if(m_hwType==YangV_Hw_Nvdia) m_codec = yang_avcodec_find_encoder_by_name("hevc_nvenc"); + if(m_hwType==YangV_Hw_Android) m_codec = yang_avcodec_find_encoder_by_name("hevc_mediacodec"); + } + m_codecCtx = yang_avcodec_alloc_context3(m_codec); + YangVideoEncoderFfmpeg::initParam(m_codecCtx,p_yvp,penc); + m_codecCtx->get_format = get_hw_format22; // AV_PIX_FMT_NV12;//get_hw_format; + int32_t ret=0; + //AV_HWDEVICE_TYPE_CUDA + YangVideoEncoderFfmpeg::g_hwType=(YangVideoHwType)p_yvp->videoEncHwType; + if(YangVideoEncoderFfmpeg::g_hwType==YangV_Hw_Intel){ + ret = yang_av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,"/dev/dri/renderD128", NULL, 0); + m_codecCtx->pix_fmt = AV_PIX_FMT_VAAPI; + }else if(YangVideoEncoderFfmpeg::g_hwType==YangV_Hw_Nvdia){ + ret = yang_av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_CUDA,"CUDA", NULL, 0); + m_codecCtx->pix_fmt = AV_PIX_FMT_CUDA; + }else if(YangVideoEncoderFfmpeg::g_hwType==YangV_Hw_Android){ + ret = yang_av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_MEDIACODEC,"MEDIACODEC", NULL, 0); + m_codecCtx->pix_fmt = AV_PIX_FMT_MEDIACODEC; + } + //YangVideoEncoderFfmpeg::g_hwType=m_codecCtx->pix_fmt ; + if(ret<0){ + printf("\nhw create error!..ret=%d\n",ret); + exit(1); + } + //ret = yang_av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_CUDA,"CUDA", NULL, 0); + //AV_PIX_FMT_NV12;//AV_PIX_FMT_VAAPI;AV_PIX_FMT_YUV420P;//AV_PIX_FMT_CUDA + //AV_PIX_FMT_CUDA + AVPixelFormat ctxformat,swformat; + if(p_yvp->videoEncHwType==YangV_Hw_Intel) ctxformat = AV_PIX_FMT_VAAPI; + if(p_yvp->videoEncHwType==YangV_Hw_Nvdia) ctxformat = AV_PIX_FMT_CUDA; + if(p_yvp->videoEncHwType==YangV_Hw_Android) ctxformat = AV_PIX_FMT_MEDIACODEC; + if(p_yvp->bitDepth==8) swformat = AV_PIX_FMT_NV12; + if(p_yvp->bitDepth==10) swformat = AV_PIX_FMT_P010; + if(p_yvp->bitDepth==16) swformat = AV_PIX_FMT_P016; + if ((ret = set_hwframe_ctx(ctxformat,swformat,p_yvp,m_codecCtx, hw_device_ctx, p_yvp->outWidth, + p_yvp->outHeight)) < 0) { + printf("Failed to set hwframe context.\n"); + //goto close; + } + + m_codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; + ret = 
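+ /* AV_CODEC_FLAG_GLOBAL_HEADER (set just above) makes the encoder emit its
+ parameter sets into extradata instead of in-band, so once avcodec_open2()
+ succeeds, the Annex-B start codes can be scanned straight out of
+ m_codecCtx->extradata, which is what yang_find_next_start_code() is for.
+ Illustrative hex dump of that buffer:
+ for (int32_t i = 0; i < m_codecCtx->extradata_size; i++)
+ printf("%02x ", m_codecCtx->extradata[i]);
+ */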
yang_avcodec_open2(m_codecCtx, m_codec, NULL); + if (ret < 0){ + printf("\navcodec_open2 failure................\n"); + exit(1); + } + int32_t vpsPos=0,vpsLen=0; + int32_t spsPos=0,ppsPos=0; + int32_t spsLen=0,ppsLen=0; + yang_find_next_start_code(m_encoderType,m_codecCtx->extradata,m_codecCtx->extradata_size,&vpsPos,&vpsLen,&spsPos,&spsLen,&ppsPos,&ppsLen); + if(m_encoderType==Yang_VED_265) { + pconf->vpsLen=vpsLen; + memcpy(pconf->vps,m_codecCtx->extradata+vpsPos,vpsLen); + //printf("\n**************vpsLen===%d...\n",pconf->vpsLen); + //for(int32_t i=0;ivpsLen;i++) printf("%02x,",pconf->vps[i]); + } + pconf->spsLen=spsLen; + pconf->ppsLen=ppsLen; + memcpy(pconf->sps,m_codecCtx->extradata+spsPos,spsLen); + memcpy(pconf->pps,m_codecCtx->extradata+ppsPos,ppsLen); + + + yang_av_buffer_unref(&hw_device_ctx); + if (m_codecCtx){ + yang_avcodec_close(m_codecCtx); + yang_av_free(m_codecCtx); + } + m_codecCtx = NULL; + +} + +//Conf264 t_conf264; + + + +void YangFfmpegEncoderMeta::yang_initVmd(YangVideoMeta *p_vmd, + YangVideoInfo *p_yvp, YangVideoEncInfo *penc) { + if (!p_vmd->isInit) { + + yang_getSpsPps(&p_vmd->mp4Meta, p_yvp,penc); + if(p_yvp->videoEncoderType==Yang_VED_264) yang_getConfig_Flv_H264(&p_vmd->mp4Meta, p_vmd->livingMeta.buffer,&p_vmd->livingMeta.bufLen); + if(p_yvp->videoEncoderType==Yang_VED_265) yang_getConfig_Flv_H265(&p_vmd->mp4Meta, p_vmd->livingMeta.buffer,&p_vmd->livingMeta.bufLen); + // yang_getH265Config_Flv(&p_vmd->mp4Meta, p_vmd->flvMeta.buffer, &p_vmd->flvMeta.bufLen); + p_vmd->isInit = 1; + } +} diff --git a/libmetartc3/src/yangencoder/YangFfmpegEncoderMeta.h b/libmetartc3/src/yangencoder/YangFfmpegEncoderMeta.h new file mode 100755 index 00000000..034cd3a3 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangFfmpegEncoderMeta.h @@ -0,0 +1,84 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef YANGENCODER_SRC_YANGFFMPEGENCODERMETA_H_ +#define YANGENCODER_SRC_YANGFFMPEGENCODERMETA_H_ +#include "yangutil/sys/YangLoadLib.h" +#include +#include + +extern "C"{ +#include "libavcodec/avcodec.h" +//#include "libavformat/avformat.h" +#include "libavutil/avutil.h" +#include "libavutil/imgutils.h" +} +#include +class YangFfmpegEncoderMeta:public YangVideoEncoderMeta { +public: + YangFfmpegEncoderMeta(); + virtual ~YangFfmpegEncoderMeta(); + void yang_initVmd(YangVideoMeta *p_vmd, YangVideoInfo *p_config, YangVideoEncInfo *penc); + private: + void yang_find_next_start_code(YangVideoCodec pve,uint8_t *buf,int32_t bufLen,int32_t *vpsPos,int32_t *vpsLen,int32_t *spsPos,int32_t *spsLen,int32_t *ppsPos,int32_t *ppsLen); + int32_t set_hwframe_ctx(AVPixelFormat ctxformat,AVPixelFormat swformat,YangVideoInfo *yvp,AVCodecContext *ctx, AVBufferRef *hw_device_ctx,int32_t pwid,int32_t phei); + + void yang_getH2645Config(uint8_t *p_configBuf, int32_t *p_configLen, + YangVideoInfo *p_config); + + //void yang_getH265Config_1(YangVideoParam *p_config, YangH2645Conf *p265); + void yang_getSpsPps(YangH2645Conf *p265, YangVideoInfo *config, YangVideoEncInfo *penc); + //void initParam(x265_param *param,YangVideoParam *yvp, int32_t p_rc_method); +#if Yang_Ffmpeg_UsingSo + YangLoadLib m_lib,m_lib1; + void loadLib(); + void unloadLib(); + AVBufferRef *(*yang_av_hwframe_ctx_alloc)(AVBufferRef *device_ctx); + int32_t (*yang_av_hwframe_ctx_init)(AVBufferRef *ref); + void (*yang_av_buffer_unref)(AVBufferRef **buf); + AVCodec *(*yang_avcodec_find_encoder_by_name)(const char *name); + int32_t (*yang_av_hwdevice_ctx_create)(AVBufferRef **device_ctx, enum AVHWDeviceType type, + const char *device, 
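+ /* Every pointer in this block mirrors the exact libav* prototype; when
+ Yang_Ffmpeg_UsingSo is disabled, the #define block further down aliases
+ each yang_* name straight to the linked symbol, so a call site such as
+ AVFrame *f = yang_av_frame_alloc();
+ yang_av_frame_free(&f);
+ compiles unchanged against either the dlopen path or a direct link. */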
AVDictionary *opts, int32_t flags); + AVFrame *(*yang_av_frame_alloc)(void); + int32_t (*yang_avcodec_open2)(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); + int32_t (*yang_av_image_get_buffer_size)(enum AVPixelFormat pix_fmt, int32_t width, int32_t height, int32_t align); + void *(*yang_av_malloc)(size_t size); + int32_t (*yang_av_image_fill_arrays)(uint8_t *dst_data[4], int32_t dst_linesize[4], + const uint8_t *src, + enum AVPixelFormat pix_fmt, int32_t width, int32_t height, int32_t align); + void (*yang_av_init_packet)(AVPacket *pkt); + int32_t (*yang_av_hwframe_get_buffer)(AVBufferRef *hwframe_ctx, AVFrame *frame, int32_t flags); + int32_t (*yang_av_hwframe_transfer_data)(AVFrame *dst, const AVFrame *src, int32_t flags); + int32_t (*yang_avcodec_send_frame)(AVCodecContext *avctx, const AVFrame *frame); + int32_t (*yang_avcodec_receive_packet)(AVCodecContext *avctx, AVPacket *avpkt); + void (*yang_av_frame_free)(AVFrame **frame); + int32_t (*yang_avcodec_close)(AVCodecContext *avctx); + void (*yang_av_free)(void *ptr); + AVBufferRef *(*yang_av_buffer_ref)(AVBufferRef *buf); + AVCodecContext *(*yang_avcodec_alloc_context3)(const AVCodec *codec); +#else +#define yang_av_hwframe_ctx_alloc av_hwframe_ctx_alloc +#define yang_av_hwframe_ctx_init av_hwframe_ctx_init +#define yang_av_buffer_unref av_buffer_unref +#define yang_avcodec_find_encoder_by_name avcodec_find_encoder_by_name +#define yang_av_hwdevice_ctx_create av_hwdevice_ctx_create +#define yang_av_frame_alloc av_frame_alloc +#define yang_avcodec_open2 avcodec_open2 +#define yang_av_image_get_buffer_size av_image_get_buffer_size +#define yang_av_malloc av_malloc +#define yang_av_image_fill_arrays av_image_fill_arrays +#define yang_av_init_packet av_init_packet +#define yang_av_hwframe_get_buffer av_hwframe_get_buffer +#define yang_av_hwframe_transfer_data av_hwframe_transfer_data +#define yang_avcodec_send_frame avcodec_send_frame +#define yang_avcodec_receive_packet avcodec_receive_packet +#define yang_av_frame_free av_frame_free +#define yang_avcodec_close avcodec_close +#define yang_av_free av_free +#define yang_av_buffer_ref av_buffer_ref +#define yang_avcodec_alloc_context3 avcodec_alloc_context3 +#endif +}; + +#endif /* YANGENCODER_SRC_YANGFFMPEGENCODERMETA_H_ */ diff --git a/libmetartc3/src/yangencoder/YangH264EncHeader.cpp b/libmetartc3/src/yangencoder/YangH264EncHeader.cpp new file mode 100755 index 00000000..d2dcf17b --- /dev/null +++ b/libmetartc3/src/yangencoder/YangH264EncHeader.cpp @@ -0,0 +1,216 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangH264EncHeader.h" + +#include "malloc.h" +#include "yangutil/yang_unistd.h" +#include +//#include + +#if YangLibva +#include "string.h" +#ifdef _WIN32 +#include +#else +#include "va/va.h" +#include +#endif +void initYangMeta(YangMeataData *pmd,YangVideoParam *pcontext){ + pmd->width=pcontext->outWidth ; + pmd->height=pcontext->outHeight; + pmd->width_align=(pcontext->outWidth + 15) & (~15); + pmd->height_align=(pcontext->outHeight + 15) & (~15); + pmd->gop_size=20; + pmd->num_ref_frames=2; + pmd->numShortTerm=0; + pmd->constraint_set_flag=0; + pmd->h264_packedheader=0;/*supportpackheader?*/ + pmd->h264_maxref=(1<<16|1); + pmd->h264_entropy_mode=0;//1;/*cabac*/ + pmd->MaxFrameNum=(2<<16); + pmd->MaxPicOrderCntLsb=(2<<8); + pmd->Log2MaxFrameNum=16; + pmd->Log2MaxPicOrderCntLsb=4;//8; + + pmd->frame_coded=0; + pmd->frame_bitrate=1024000; + pmd->frame_slices=1; + pmd->frame_size=0; + pmd->initial_qp=26; + pmd->minimal_qp=26; + 
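+ /* H.264 derivation reminder (Rec. ITU-T H.264, SPS semantics) tying the
+ two Max* fields above to the syntax initVaSeq() writes:
+ MaxFrameNum = 2^(log2_max_frame_num_minus4 + 4)
+ MaxPicOrderCntLsb = 2^(log2_max_pic_order_cnt_lsb_minus4 + 4)
+ e.g. the hardcoded log2_max_frame_num_minus4 = 4 in initVaSeq() yields
+ MaxFrameNum = 2^8 = 256 distinct frame_num values. */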
pmd->intra_period=pmd->gop_size; + pmd->intra_idr_period=pmd->gop_size; + pmd->ip_period=1;//3; + pmd->rc_mode=0; +} +#ifndef _WIN32 +void initVaSeq(YangMeataData *pmd,VAEncSequenceParameterBufferH264 *sps){ + sps->seq_parameter_set_id=0; + sps->level_idc = 31;//41 /*SH_LEVEL_3*/; + sps->intra_period = pmd->intra_period; + sps->intra_idr_period = pmd->intra_idr_period; + sps->ip_period = pmd->ip_period; + + sps->bits_per_second = pmd->frame_bitrate; + sps->max_num_ref_frames = pmd->num_ref_frames; + sps->picture_width_in_mbs = pmd->width_align / 16; + sps->picture_height_in_mbs = pmd->height_align / 16; + + sps->seq_fields.bits.chroma_format_idc = 1; + sps->seq_fields.bits.frame_mbs_only_flag = 1; + sps->seq_fields.bits.mb_adaptive_frame_field_flag=0; + sps->seq_fields.bits.seq_scaling_matrix_present_flag=0; + sps->seq_fields.bits.direct_8x8_inference_flag = 1; + sps->seq_fields.bits.log2_max_frame_num_minus4 = 4;//pmd->Log2MaxFrameNum - 4; + sps->seq_fields.bits.pic_order_cnt_type=0; + sps->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 =pmd->Log2MaxPicOrderCntLsb - 4; + sps->seq_fields.bits.delta_pic_order_always_zero_flag=0; + + sps->time_scale = 50; + sps->num_units_in_tick = 1; /* Tc = num_units_in_tick / time_sacle */ + sps->bit_depth_chroma_minus8=0; + sps->bit_depth_luma_minus8=0; + if (pmd->width != pmd->width_align || pmd->height != pmd->height_align) { + sps->frame_cropping_flag = 1; + sps->frame_crop_left_offset = 0; + sps->frame_crop_right_offset = (pmd->width_align - pmd->width) + / 2; + sps->frame_crop_top_offset = 0; + sps->frame_crop_bottom_offset =(pmd->height_align - pmd->height) / 2; + } + // sps->vui_parameters_present_flag=1; + // sps->vui_fields.bits.bitstream_restriction_flag=1; + // sps->vui_fields.bits.log2_max_mv_length_horizontal=15,sps->vui_fields.bits.log2_max_mv_length_vertical=15; + // sps->vui_fields.bits.timing_info_present_flag=1; +} +void initVaPic(YangMeataData *pmd,VAEncPictureParameterBufferH264 *pps){ + pps->pic_fields.bits.entropy_coding_mode_flag = pmd->h264_entropy_mode; + pps->pic_fields.bits.deblocking_filter_control_present_flag = 1; + pps->pic_init_qp = pmd->initial_qp; +} +void initVaPara(YangMeataData *pmd,VAEncSequenceParameterBufferH264 *sps,VAEncPictureParameterBufferH264 *pps){ + initVaSeq(pmd,sps); + initVaPic(pmd,pps); +} +#endif +/** +void printSeq(VAEncSequenceParameterBufferH264 *sps){ + printf("\n%02x,%02x,%02x,%02x\n%d,%d,%d,%d\n%02x,%d,%02x,%d\n%d,%d,%d,%d,%02x\nvui_fields:%d,%d,%d,%d,%d,%d,%d,%d\nseq_fields.bits:%d,%d,%d,%d,%d,%d,%d,%d,%d\n", + sps->seq_parameter_set_id,sps->aspect_ratio_idc,sps->bit_depth_chroma_minus8, + sps->bit_depth_luma_minus8,sps->bits_per_second,sps->intra_idr_period,sps->intra_period,sps->ip_period,// + sps->level_idc,sps->max_num_ref_frames,sps->num_ref_frames_in_pic_order_cnt_cycle,sps->num_units_in_tick, + sps->offset_for_non_ref_pic,sps->sar_width,sps->sar_height,sps->time_scale,sps->vui_parameters_present_flag, + sps->vui_fields.bits.aspect_ratio_info_present_flag,sps->vui_fields.bits.bitstream_restriction_flag,sps->vui_fields.bits.fixed_frame_rate_flag, + sps->vui_fields.bits.log2_max_mv_length_horizontal,sps->vui_fields.bits.log2_max_mv_length_vertical,sps->vui_fields.bits.low_delay_hrd_flag, + sps->vui_fields.bits.motion_vectors_over_pic_boundaries_flag,sps->vui_fields.bits.timing_info_present_flag, + sps->seq_fields.bits.chroma_format_idc,sps->seq_fields.bits.delta_pic_order_always_zero_flag,sps->seq_fields.bits.direct_8x8_inference_flag, + 
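+ (cropping math from initVaSeq() above: for 4:2:0, frame_mbs_only content
+ a crop unit is 2 luma samples per axis, hence the /2 in the offsets;
+ e.g. 1920x1080 padded to 1920x1088 needs
+ frame_crop_bottom_offset = (1088 - 1080) / 2 = 4.)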
sps->seq_fields.bits.frame_mbs_only_flag,sps->seq_fields.bits.log2_max_frame_num_minus4,sps->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4, + sps->seq_fields.bits.mb_adaptive_frame_field_flag,sps->seq_fields.bits.pic_order_cnt_type,sps->seq_fields.bits.seq_scaling_matrix_present_flag + ); +} +void printPic(VAEncPictureParameterBufferH264 *pps){ + printf("\n%02x,%hd,%02x,%02x,%02x,%02x,\npic_fields:%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",pps->chroma_qp_index_offset,pps->frame_num,pps->num_ref_idx_l0_active_minus1,pps->num_ref_idx_l1_active_minus1, + pps->pic_init_qp,pps->second_chroma_qp_index_offset,pps->pic_fields.bits.constrained_intra_pred_flag,pps->pic_fields.bits.deblocking_filter_control_present_flag, + pps->pic_fields.bits.entropy_coding_mode_flag,pps->pic_fields.bits.idr_pic_flag,pps->pic_fields.bits.pic_order_present_flag,pps->pic_fields.bits.pic_scaling_matrix_present_flag, + pps->pic_fields.bits.redundant_pic_cnt_present_flag,pps->pic_fields.bits.reference_pic_flag,pps->pic_fields.bits.transform_8x8_mode_flag, + pps->pic_fields.bits.weighted_bipred_idc,pps->pic_fields.bits.weighted_pred_flag + + ); +}**/ + +void setZbVmd(uint8_t *sps,int32_t spsLen,uint8_t *pps,int32_t pps_len,uint8_t *configBuf,int32_t *zbLen){ + configBuf[0] = 0x17; + configBuf[1] = 0x00; + configBuf[2] = 0x00; + configBuf[3] = 0x00; + configBuf[4] = 0x00; + configBuf[5] = 0x01; + configBuf[6] = sps[1]; + configBuf[7] = sps[2]; + configBuf[8] = sps[3]; //0x29; //AVCLevelIndication1f + configBuf[9] = 0xff; //03;//ff;//0x03; AVCLevelIndication + configBuf[10] = 0xe1; //01;//e1;//01;numOfSequenceParameterSets + char * szTmp = (char*) configBuf + 11; + + short slen = spsLen; //spslen(short) + slen = htons(slen); + memcpy(szTmp, &slen, sizeof(short)); + szTmp += sizeof(short); + + memcpy(szTmp, sps,spsLen); + szTmp += spsLen; + *szTmp = 0x01; + szTmp += 1; + slen = pps_len; //spslen(short) + slen = htons(slen); + + memcpy(szTmp, &slen, sizeof(short)); + szTmp += sizeof(short); + memcpy(szTmp, pps, pps_len); + //printf("...%d.....\n",bs.p-bs.p_start); + szTmp += pps_len; + *zbLen = szTmp - (char*) configBuf; + szTmp = NULL; +} +void yang_find_next_start_code(uint8_t *buf,int32_t bufLen,int32_t *spsPos,int32_t *spsLen,int32_t *ppsPos,int32_t *ppsLen) +{ + int32_t i = 0; + + *spsPos=0;*ppsPos=0; + while (i >>>>>>>>>>>>>>>>>>>%d\n",spsLen); + for(i=0;i>>>>>>>>>>>>>>>>>>>.%d\n",ppsLen); + for(i=0;i>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");**/ + +} +#endif +/** +YangH264EncHeader::YangH264EncHeader() { + // TODO Auto-generated constructor stub + +} + +YangH264EncHeader::~YangH264EncHeader() { + // TODO Auto-generated destructor stub +} + +int32_t YangH264EncHeader::setSps(uint8_t *sps_buffer,int32_t *spsLen){ + +} + int32_t YangH264EncHeader::setPPS(uint8_t *pps_buffer,int32_t *ppsLen){ + + }**/ diff --git a/libmetartc3/src/yangencoder/YangH264EncHeader.h b/libmetartc3/src/yangencoder/YangH264EncHeader.h new file mode 100755 index 00000000..52e722ae --- /dev/null +++ b/libmetartc3/src/yangencoder/YangH264EncHeader.h @@ -0,0 +1,62 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGENCODER_SRC_YANGH264ENCHEADER_H_ +#define YANGENCODER_SRC_YANGH264ENCHEADER_H_ +#include "stdint.h" + +#if YangLibva +#ifndef _WIN32 +#include "va/va.h" +#endif +#include "yangutil/yangavtype.h" + +struct YangMeataData { + int32_t width; + int32_t height; + int32_t width_align; + int32_t height_align; + int32_t num_ref_frames; + int32_t numShortTerm; + int32_t constraint_set_flag; + int32_t h264_packedheader; /* support pack header? 
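*/
+ /* setZbVmd() below packs the RTMP/FLV "AVC sequence header" that this
+ struct parameterizes; byte-for-byte (matching its implementation in
+ YangH264EncHeader.cpp earlier in this patch):
+ 0x17 0x00 0x00 0x00 0x00 -- FLV VideoTagHeader: keyframe|AVC, packet
+ type 0 (sequence header), zero composition time
+ 0x01 sps[1] sps[2] sps[3] -- configurationVersion, then profile_idc,
+ profile_compatibility, level_idc copied from the SPS
+ 0xff -- lengthSizeMinusOne: 4-byte NALU lengths
+ 0xe1 [u16 spsLen] [sps] -- one SPS
+ 0x01 [u16 ppsLen] [pps] -- one PPS
+ (the u16 lengths are written big-endian via htons()). */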
+ int32_t h264_maxref;
+ int32_t h264_entropy_mode; /* cabac */
+ int32_t MaxFrameNum;
+ int32_t MaxPicOrderCntLsb;
+ int32_t Log2MaxFrameNum;
+ int32_t Log2MaxPicOrderCntLsb;
+ int32_t gop_size;
+ int32_t frame_coded;
+ int32_t frame_bitrate;
+ int32_t frame_slices;
+ double frame_size;
+ int32_t initial_qp;
+ int32_t minimal_qp;
+ int32_t intra_period;
+ int32_t intra_idr_period;
+ int32_t ip_period;
+ int32_t rc_mode;
+};
+void initYangMeta(YangMeataData *pmd, YangVideoParam *ini);
+#ifndef _WIN32
+void initVaSeq(YangMeataData *pmd, VAEncSequenceParameterBufferH264 *sps);
+void initVaPic(YangMeataData *pmd, VAEncPictureParameterBufferH264 *pps);
+void initVaPara(YangMeataData *pmd, VAEncSequenceParameterBufferH264 *sps,
+ VAEncPictureParameterBufferH264 *pps);
+void printSeq(VAEncSequenceParameterBufferH264 *sps);
+void printPic(VAEncPictureParameterBufferH264 *pps);
+#endif
+void setZbVmd(uint8_t *sps, int32_t spsLen, uint8_t *pps, int32_t ppsLen,
+ uint8_t *zbconf, int32_t *zbLen);
+void setZbVmd_f(uint8_t *buf,int32_t bufLen,uint8_t *configBuf,int32_t *zbLen);
+/**class YangH264EncHeader {
+ public:
+ YangH264EncHeader();
+ virtual ~YangH264EncHeader();
+ int32_t setSps(uint8_t *sps_buffer,int32_t *spsLen);
+ int32_t setPPS(uint8_t *pps_buffer,int32_t *ppsLen);
+ };
+ **/
+#endif /* YANGENCODER_SRC_YANGH264ENCHEADER_H_ */
+#endif
diff --git a/libmetartc3/src/yangencoder/YangH264EncoderIntel.cpp b/libmetartc3/src/yangencoder/YangH264EncoderIntel.cpp
new file mode 100755
index 00000000..66b9a46c
--- /dev/null
+++ b/libmetartc3/src/yangencoder/YangH264EncoderIntel.cpp
@@ -0,0 +1,999 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "YangH264EncoderIntel.h"
+#ifndef _WIN32
+#if YangLibva
+void YangH264EncoderIntel::loadLib(){
+ yang_vaGetConfigAttributes=(VAStatus (*) (VADisplay dpy, VAProfile profile, VAEntrypoint entrypoint,
+ VAConfigAttrib *attrib_list, int32_t num_attribs))m_lib.loadFunction("vaGetConfigAttributes");
+ yang_vaDeriveImage=(VAStatus (*) (VADisplay dpy,VASurfaceID surface,VAImage *image))m_lib.loadFunction("vaDeriveImage");
+ yang_vaDestroyImage=(VAStatus (*) (VADisplay dpy,VAImageID image))m_lib.loadFunction("vaDestroyImage");
+
+ yang_vaInitialize=(VAStatus (*) ( VADisplay dpy, int32_t *major_version,int32_t *minor_version))m_lib.loadFunction("vaInitialize");
+ yang_vaGetDisplayDRM=(VADisplay (*)(int32_t fd))m_lib1.loadFunction("vaGetDisplayDRM");
+
+ yang_vaDestroyContext=(VAStatus (*) (VADisplay dpy,VAContextID context))m_lib.loadFunction("vaDestroyContext");
+ yang_vaDestroySurfaces=(VAStatus (*) (VADisplay dpy,VASurfaceID *surfaces,int32_t num_surfaces))m_lib.loadFunction("vaDestroySurfaces");
+ yang_vaDestroyConfig=(VAStatus (*) (VADisplay dpy,VAConfigID config_id))m_lib.loadFunction("vaDestroyConfig");
+ yang_vaTerminate=(VAStatus (*) ( VADisplay dpy))m_lib.loadFunction("vaTerminate");
+
+ yang_vaCreateConfig=(VAStatus (*) (VADisplay dpy, VAProfile profile, VAEntrypoint entrypoint, VAConfigAttrib *attrib_list, int32_t num_attribs, VAConfigID *config_id ))m_lib.loadFunction("vaCreateConfig");
+
+ yang_vaCreateSurfaces=(VAStatus (*)(VADisplay dpy,uint32_t format,uint32_t width,
+ uint32_t height,VASurfaceID *surfaces,uint32_t num_surfaces, VASurfaceAttrib *attrib_list,
+ uint32_t num_attribs))m_lib.loadFunction("vaCreateSurfaces");
+
+ yang_vaCreateContext=(VAStatus (*) (VADisplay 
dpy,VAConfigID config_id, int32_t picture_width, + int32_t picture_height, int32_t flag, VASurfaceID *render_targets, + int32_t num_render_targets,VAContextID *context))m_lib.loadFunction("vaCreateContext"); + yang_vaCreateBuffer=(VAStatus (*) (VADisplay dpy,VAContextID context, VABufferType type, + uint32_t size, uint32_t num_elements, void *data, VABufferID *buf_id + ))m_lib.loadFunction("vaCreateBuffer"); + yang_vaBeginPicture=(VAStatus (*) (VADisplay dpy,VAContextID context,VASurfaceID render_target))m_lib.loadFunction("vaBeginPicture"); + yang_vaRenderPicture=(VAStatus (*) (VADisplay dpy,VAContextID context, VABufferID *buffers,int32_t num_buffers))m_lib.loadFunction("vaRenderPicture"); + + yang_vaCreateImage=(VAStatus (*) (VADisplay dpy,VAImageFormat *format, int32_t width, int32_t height, VAImage *image))m_lib.loadFunction("vaCreateImage"); + yang_vaEndPicture=(VAStatus (*) (VADisplay dpy,VAContextID context))m_lib.loadFunction("vaEndPicture"); + yang_vaGetImage=(VAStatus (*) (VADisplay dpy,VASurfaceID surface, int32_t x, + int32_t y,uint32_t width, uint32_t height,VAImageID image))m_lib.loadFunction("vaGetImage"); + yang_vaMapBuffer=(VAStatus (*) (VADisplay dpy,VABufferID buf_id, void **pbuf))m_lib.loadFunction("vaMapBuffer"); + yang_vaSyncSurface=(VAStatus (*) (VADisplay dpy,VASurfaceID render_target))m_lib.loadFunction("vaSyncSurface"); + yang_vaUnmapBuffer=(VAStatus (*) (VADisplay dpy,VABufferID buf_id))m_lib.loadFunction("vaUnmapBuffer"); + +} + +void YangH264EncoderIntel::unloadLib(){ + yang_vaGetDisplayDRM=NULL; + yang_vaGetConfigAttributes=NULL; + yang_vaDeriveImage=NULL; + yang_vaDestroyImage=NULL; + yang_vaInitialize=NULL; + yang_vaDestroyContext=NULL; + yang_vaDestroySurfaces=NULL; + yang_vaDestroyConfig=NULL; + yang_vaTerminate=NULL; + + yang_vaCreateConfig=NULL; + yang_vaCreateSurfaces=NULL; + + yang_vaCreateContext=NULL; + yang_vaCreateBuffer=NULL; + yang_vaBeginPicture=NULL; + yang_vaRenderPicture=NULL; + + yang_vaCreateImage=NULL; + yang_vaEndPicture=NULL; + yang_vaGetImage=NULL; + yang_vaMapBuffer=NULL; + yang_vaSyncSurface=NULL; + yang_vaUnmapBuffer=NULL; + + + +} +YangH264EncoderIntel::YangH264EncoderIntel() { + //printf("\n*****YangH264Encoder***********************************Intel\n"); + //YangH264Encoder(); + //m_context = pcontext; + + m_config_id = VA_INVALID_ID; + m_config_id = 0; + m_vaContext = 0; + vas = 0; + //memset(pics, 0, sizeof(YangDecodePicture) * Para_Bufs); + memset(&m_pic_param, 0, sizeof(m_pic_param)); + memset(&image_format, 0, sizeof(image_format)); + memset(&m_slice_param, 0, sizeof(m_slice_param)); + memset(&m_seq_param, 0, sizeof(m_seq_param)); + memset(&m_ymd,0,sizeof(m_ymd)); + + m_fd = 0; + //m_ref_count = 0; + //m_ref_count_m1 = 0; + sid = 0; + frameIdx = 0; + FieldOrderCnt = 0; + //m_inWidth = 0; +// m_inHeight = 0; +// m_outWidth = 0; +// m_outHeight = 0; + m_frame = 0; + yLen = 0; + uLen = 0; + uvLen = 0; + allLen = 0; + frameIdx = 0; + FieldOrderCnt = 0; + posNum = 0; + m_isInit = 0; + m_vap_size = sizeof(VAPictureH264); + enc_packed_header_idx = 0; + m_width_mbaligned = 0; + m_height_mbaligned = 0; + current_frame_type = 0; + initYangMeta(&m_ymd,&m_yvp); + /**num_ref_frames = m_ymd.num_ref_frames; + numShortTerm = m_ymd.numShortTerm; + constraint_set_flag = m_ymd.constraint_set_flag; + h264_packedheader = m_ymd.h264_packedheader; //support pack header? 
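+ (note on the packed value initYangMeta() stores in the live code above:
+ m_ymd.h264_maxref = (1<<16 | 1) keeps the L0 reference count in the low
+ 16 bits and the L1 count in the high 16 bits, so later code unpacks it as
+ int32_t refpiclist0_max = h264_maxref & 0xffff; // == 1
+ int32_t refpiclist1_max = (h264_maxref >> 16) & 0xffff; // == 1
+ exactly as render_slice() does further down.)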
+ h264_maxref = m_ymd.h264_maxref; + h264_entropy_mode = m_ymd.h264_entropy_mode; // cabac + MaxFrameNum = m_ymd.MaxFrameNum; + MaxPicOrderCntLsb = m_ymd.MaxPicOrderCntLsb; + Log2MaxFrameNum = m_ymd.Log2MaxFrameNum; + Log2MaxPicOrderCntLsb = m_ymd.Log2MaxPicOrderCntLsb; + frame_count = m_ymd.frame_count; + frame_coded = m_ymd.frame_coded; + frame_bitrate = m_ymd.frame_bitrate; + frame_slices = m_ymd.frame_slices; + frame_size = m_ymd.frame_size; + initial_qp = m_ymd.initial_qp; + minimal_qp = m_ymd.minimal_qp; + intra_period = m_ymd.intra_period; + intra_idr_period = m_ymd.intra_idr_period; + ip_period = m_ymd.ip_period; + rc_mode = m_ymd.rc_mode;**/ + current_frame_encoding = 0; + current_frame_display = 0; + current_IDR_display = 0; + current_frame_num = 0; + misc_priv_type = 0; + misc_priv_value = 0; + h264_profile = VAProfileH264ConstrainedBaseline; + config_attrib_num = 0, + + m_va_dpy = NULL; + + m_vmd = NULL; + //isConvert = 0; + unloadLib(); +} + +YangH264EncoderIntel::~YangH264EncoderIntel() { + closeDevice(); + m_va_dpy = NULL; + unloadLib(); + m_lib.unloadObject(); + m_lib1.unloadObject(); + +} + +void YangH264EncoderIntel::closeDevice() { + //vaDestroySurfaces(m_va_dpy,&m_vaSurface,1); + for (int32_t i = 0; i < Para_Bufs; i++) { + if (m_va_dpy && src_surface[i] != VA_INVALID_ID) { + vas = yang_vaDestroySurfaces(m_va_dpy, &src_surface[i], 1); + CHECK_VASTATUS(vas, "vaDestroySurfaces"); + } + } + if (m_va_dpy && m_vaContext != VA_INVALID_ID) { + vas = yang_vaDestroyContext(m_va_dpy, m_vaContext); + CHECK_VASTATUS(vas, "vaDestroyContext"); + } + if (m_va_dpy && m_config_id != VA_INVALID_ID) { + vas = yang_vaDestroyConfig(m_va_dpy, m_config_id); + CHECK_VASTATUS(vas, "vaDestroyConfig"); + } + if (m_va_dpy) { + vas = yang_vaTerminate(m_va_dpy); + CHECK_VASTATUS(vas, "vaTerminate"); + } + if (m_fd >= 0) + close(m_fd); + m_va_dpy = NULL; + +} + +void YangH264EncoderIntel::setVideoMetaData(YangVideoMeta *pvmd){ + //if(1) return; + uint32_t length_in_bits=0,length_in_bits1=0; + uint8_t *packedseq_buffer = NULL; + uint8_t *packedpic_buffer = NULL; + length_in_bits = build_packed_seq_buffer(&packedseq_buffer); + length_in_bits1 = build_packed_pic_buffer(&packedpic_buffer); + memset(pvmd->livingMeta.buffer,0,sizeof(pvmd->livingMeta.buffer)); + setZbVmd(packedseq_buffer+4,(length_in_bits+7)/8-4,packedpic_buffer+4,(length_in_bits1+7)/8-4,pvmd->livingMeta.buffer,&pvmd->livingMeta.bufLen); + + free(packedseq_buffer); + free(packedpic_buffer); + packedseq_buffer=NULL; + packedpic_buffer=NULL; +} + +int32_t YangH264EncoderIntel::render_packedsequence(void) { + VAEncPackedHeaderParameterBuffer packedheader_param_buffer; + VABufferID packedseq_para_bufid, packedseq_data_bufid, render_id[2]; + uint32_t length_in_bits=0; + uint8_t *packedseq_buffer = NULL; + VAStatus va_status; + + length_in_bits = build_packed_seq_buffer(&packedseq_buffer); + //printf("\n*************sps********************%d\n",length_in_bits/8); + //for(int32_t j=0;j> 16) & 0xffff); + } + + if (attrib[VAConfigAttribEncMaxSlices].value != VA_ATTRIB_NOT_SUPPORTED) + printf("Support %d slices\n", attrib[VAConfigAttribEncMaxSlices].value); + + if (attrib[VAConfigAttribEncSliceStructure].value != VA_ATTRIB_NOT_SUPPORTED) { + int32_t tmp = attrib[VAConfigAttribEncSliceStructure].value; + + printf("Support VAConfigAttribEncSliceStructure\n"); + + if (tmp & VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS) + printf("Support VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS\n"); + if (tmp & VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS) + printf("Support 
VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS\n"); + if (tmp & VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS) + printf("Support VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS\n"); + } + if (attrib[VAConfigAttribEncMacroblockInfo].value != VA_ATTRIB_NOT_SUPPORTED) { + printf("Support VAConfigAttribEncMacroblockInfo\n"); + } + + vas = yang_vaCreateConfig(m_va_dpy, h264_profile, VAEntrypointEncSlice, + &config_attrib[0], config_attrib_num, &m_config_id); + CHECK_VASTATUS(vas, "vaCreateConfig"); + + + m_ymd.constraint_set_flag |= (1 << 0 | 1 << 1); /* Annex A.2.2 */ + //m_ymd.ip_period = 1; + + vas = yang_vaCreateSurfaces(m_va_dpy, VA_RT_FORMAT_YUV420, m_width_mbaligned, + m_height_mbaligned, src_surface, Para_Bufs, NULL, 0); + CHECK_VASTATUS(vas, "vaCreateSurfaces"); + + //vas = vaCreateSurfaces(m_va_dpy, VA_RT_FORMAT_YUV420, m_width_mbaligned,m_height_mbaligned, &ref_surface[0], Para_Bufs, NULL, 0); + //CHECK_VASTATUS(vas, "vaCreateSurfaces ref"); + + //VASurfaceID *tmp_surfaceid = (VASurfaceID *) calloc(2 * Para_Bufs, sizeof(VASurfaceID)); + //memcpy(tmp_surfaceid, src_surface, Para_Bufs * sizeof(VASurfaceID)); + //memcpy(tmp_surfaceid + Para_Bufs, ref_surface,Para_Bufs * sizeof(VASurfaceID)); +// vas = vaCreateContext(m_va_dpy, m_config_id, m_width_mbaligned, +// m_height_mbaligned, VA_PROGRESSIVE, tmp_surfaceid, 2 * Para_Bufs, +// &m_vaContext); + vas = yang_vaCreateContext(m_va_dpy, m_config_id, m_width_mbaligned, + m_height_mbaligned, VA_PROGRESSIVE, src_surface, Para_Bufs, + &m_vaContext); + CHECK_VASTATUS(vas, "vaCreateContext"); + //free(tmp_surfaceid); + + int32_t codedbuf_size = 0; + codedbuf_size = (m_width_mbaligned * m_height_mbaligned * 400) / (16 * 16); + for (int32_t i = 0; i < Para_Bufs; i++) { + /* create coded buffer once for all + * other VA buffers which won't be used again after vaRenderPicture. 
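+ * (sizing note: the 400/(16*16) factor budgets ~1.56 bytes per pixel of
+ * mb-aligned area as a compressed worst case; e.g. a 1280x720 aligned
+ * stream gets 1280*720*400/256 = 1,440,000 bytes per coded buffer.)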
+ * so APP can always vaCreateBuffer for every frame + * but coded buffer need to be mapped and accessed after vaRenderPicture/vaEndPicture + * so VA won't maintain the coded buffer + */ + vas = yang_vaCreateBuffer(m_va_dpy, m_vaContext, VAEncCodedBufferType, + codedbuf_size, 1, NULL, &coded_buf[i]); + CHECK_VASTATUS(vas, "vaCreateBuffer"); + } + frameIdx = 0; + posNum = 0; + m_isInit=1; +// printSeq(&m_seq_param); + //printPic(&m_pic_param); + +} + +void YangH264EncoderIntel::render_packedslice() { + VAEncPackedHeaderParameterBuffer packedheader_param_buffer; + VABufferID packedslice_para_bufid, packedslice_data_bufid, render_id[2]; + uint32_t length_in_bits; + uint8_t *packedslice_buffer = NULL; + //VAStatus va_status; + + length_in_bits = build_packed_slice_buffer(&packedslice_buffer); + packedheader_param_buffer.type = VAEncPackedHeaderSlice; + packedheader_param_buffer.bit_length = length_in_bits; + packedheader_param_buffer.has_emulation_bytes = 0; + + vas = yang_vaCreateBuffer(m_va_dpy, m_vaContext, + VAEncPackedHeaderParameterBufferType, + sizeof(packedheader_param_buffer), 1, &packedheader_param_buffer, + &packedslice_para_bufid); + CHECK_VASTATUS(vas, "vaCreateBuffer"); + + vas = yang_vaCreateBuffer(m_va_dpy, m_vaContext, + VAEncPackedHeaderDataBufferType, (length_in_bits + 7) / 8, 1, + packedslice_buffer, &packedslice_data_bufid); + CHECK_VASTATUS(vas, "vaCreateBuffer"); + + render_id[0] = packedslice_para_bufid; + render_id[1] = packedslice_data_bufid; + vas = yang_vaRenderPicture(m_va_dpy, m_vaContext, render_id, 2); + CHECK_VASTATUS(vas, "vaRenderPicture"); + + free(packedslice_buffer); +} +int32_t YangH264EncoderIntel::upload_surface_yuv(VASurfaceID surface_id, + int32_t src_width, int32_t src_height, uint8_t *src_Y, + uint8_t *src_U, uint8_t *src_V) { + VAImage surface_image; + uint8_t *surface_p = NULL, *Y_start = NULL, *U_start = NULL; + int32_t Y_pitch = 0, U_pitch = 0, row; + + vas = yang_vaDeriveImage(m_va_dpy, surface_id, &surface_image); + CHECK_VASTATUS(vas, "vaDeriveImage"); + + yang_vaMapBuffer(m_va_dpy, surface_image.buf, (void **) &surface_p); + // assert(VA_STATUS_SUCCESS == va_status); + + Y_start = surface_p; + Y_pitch = surface_image.pitches[0]; + U_start = (uint8_t *) surface_p + surface_image.offsets[1]; + U_pitch = surface_image.pitches[1]; + + /* copy Y plane */ + for (row = 0; row < src_height; row++) { + uint8_t *Y_row = Y_start + row * Y_pitch; + memcpy(Y_row, src_Y + row * src_width, src_width); + } + //int32_t j = 0; + for (row = 0; row < src_height / 2; row++) { + uint8_t *U_row = U_start + row * U_pitch; + // uint8_t *u_ptr = NULL, *v_ptr = NULL; + + memcpy(U_row, src_U + row * src_width, src_width); + + } + yang_vaUnmapBuffer(m_va_dpy, surface_image.buf); + yang_vaDestroyImage(m_va_dpy, surface_image.image_id); + return Yang_Ok; +} +int32_t YangH264EncoderIntel::calc_poc(int32_t pic_order_cnt_lsb) { + static int32_t PicOrderCntMsb_ref = 0, pic_order_cnt_lsb_ref = 0; + int32_t prevPicOrderCntMsb, prevPicOrderCntLsb; + int32_t PicOrderCntMsb, TopFieldOrderCnt; + + if (current_frame_type == FRAME_IDR) + prevPicOrderCntMsb = prevPicOrderCntLsb = 0; + else { + prevPicOrderCntMsb = PicOrderCntMsb_ref; + prevPicOrderCntLsb = pic_order_cnt_lsb_ref; + } + + if ((pic_order_cnt_lsb < prevPicOrderCntLsb) + && ((prevPicOrderCntLsb - pic_order_cnt_lsb) + >= (int) (m_ymd.MaxPicOrderCntLsb / 2))) + PicOrderCntMsb = prevPicOrderCntMsb + m_ymd.MaxPicOrderCntLsb; + else if ((pic_order_cnt_lsb > prevPicOrderCntLsb) + && ((pic_order_cnt_lsb - prevPicOrderCntLsb) + > 
(int) (m_ymd.MaxPicOrderCntLsb / 2))) + PicOrderCntMsb = prevPicOrderCntMsb - m_ymd.MaxPicOrderCntLsb; + else + PicOrderCntMsb = prevPicOrderCntMsb; + + TopFieldOrderCnt = PicOrderCntMsb + pic_order_cnt_lsb; + + if (current_frame_type != FRAME_B) { + PicOrderCntMsb_ref = PicOrderCntMsb; + pic_order_cnt_lsb_ref = pic_order_cnt_lsb; + } + + return TopFieldOrderCnt; +} +int32_t YangH264EncoderIntel::render_slice(void) { + VABufferID slice_param_buf; + VAStatus va_status; + int32_t i; + + update_RefPicList(); + + /* one frame, one slice */ + m_slice_param.macroblock_address = 0; + m_slice_param.num_macroblocks = m_width_mbaligned * m_height_mbaligned + / (16 * 16); /* Measured by MB */ + m_slice_param.slice_type = + (current_frame_type == FRAME_IDR) ? 2 : current_frame_type; + if (current_frame_type == FRAME_IDR) { + if (current_frame_encoding != 0) + ++m_slice_param.idr_pic_id; + } else if (current_frame_type == FRAME_P) { + int32_t refpiclist0_max = m_ymd.h264_maxref & 0xffff; + memcpy(m_slice_param.RefPicList0, RefPicList0_P, + refpiclist0_max * sizeof(VAPictureH264)); + + for (i = refpiclist0_max; i < 32; i++) { + m_slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE; + m_slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID; + } + } else if (current_frame_type == FRAME_B) { + int32_t refpiclist0_max = m_ymd.h264_maxref & 0xffff; + int32_t refpiclist1_max = (m_ymd.h264_maxref >> 16) & 0xffff; + + memcpy(m_slice_param.RefPicList0, RefPicList0_B, + refpiclist0_max * sizeof(VAPictureH264)); + for (i = refpiclist0_max; i < 32; i++) { + m_slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE; + m_slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID; + } + + memcpy(m_slice_param.RefPicList1, RefPicList1_B, + refpiclist1_max * sizeof(VAPictureH264)); + for (i = refpiclist1_max; i < 32; i++) { + m_slice_param.RefPicList1[i].picture_id = VA_INVALID_SURFACE; + m_slice_param.RefPicList1[i].flags = VA_PICTURE_H264_INVALID; + } + } + + m_slice_param.slice_alpha_c0_offset_div2 = 0; + m_slice_param.slice_beta_offset_div2 = 0; + m_slice_param.direct_spatial_mv_pred_flag = 1; + m_slice_param.pic_order_cnt_lsb = (current_frame_display + - current_IDR_display) % m_ymd.MaxPicOrderCntLsb; + + if (m_ymd.h264_packedheader&& (config_attrib[enc_packed_header_idx].value & VA_ENC_PACKED_HEADER_SLICE)) + render_packedslice(); + + va_status = yang_vaCreateBuffer(m_va_dpy, m_vaContext,VAEncSliceParameterBufferType, sizeof(m_slice_param), 1, + &m_slice_param, &slice_param_buf); + CHECK_VASTATUS(va_status, "vaCreateBuffer");; + + va_status = yang_vaRenderPicture(m_va_dpy, m_vaContext, &slice_param_buf, 1); + CHECK_VASTATUS(va_status, "vaRenderPicture"); + + return Yang_Ok; +} + +int32_t YangH264EncoderIntel::update_RefPicList(void) { + uint32_t current_poc = CurrentCurrPic.TopFieldOrderCnt; + + if (current_frame_type == FRAME_P) { + memcpy(RefPicList0_P, ReferenceFrames, + m_ymd.numShortTerm * sizeof(VAPictureH264)); + sort_one(RefPicList0_P, 0, m_ymd.numShortTerm - 1, 0, 1); + } + + if (current_frame_type == FRAME_B) { + memcpy(RefPicList0_B, ReferenceFrames, + m_ymd.numShortTerm * sizeof(VAPictureH264)); + sort_two(RefPicList0_B, 0, m_ymd.numShortTerm - 1, current_poc, 0, 1, 0, 1); + + memcpy(RefPicList1_B, ReferenceFrames, + m_ymd.numShortTerm * sizeof(VAPictureH264)); + sort_two(RefPicList1_B, 0, m_ymd.numShortTerm - 1, current_poc, 0, 0, 1, 0); + } + + return Yang_Ok; +} + +int32_t YangH264EncoderIntel::render_picture(void) { + VABufferID m_pic_param_buf; + VAStatus va_status; + int32_t i = 0; 
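+ /* Ordering sketch for this pipeline: frame_num counts coded reference
+ frames while TopFieldOrderCnt follows display order, so with the
+ ip_period = 1 / no-B-frame setup from initYangMeta() they advance in
+ lockstep after each IDR:
+ display order: IDR P P P ...
+ frame_num: 0 1 2 3 ...
+ pic_order_cnt_lsb: 0 1 2 3 ... (mod MaxPicOrderCntLsb)
+ calc_poc() above only has real work to do once B frames reorder
+ display against coding order. */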
+ + m_pic_param.CurrPic.picture_id = src_surface[sid]; + m_pic_param.CurrPic.frame_idx = frameIdx; + m_pic_param.CurrPic.flags = 0; + m_pic_param.CurrPic.TopFieldOrderCnt = calc_poc( + (current_frame_display - current_IDR_display) % m_ymd.MaxPicOrderCntLsb); + m_pic_param.CurrPic.BottomFieldOrderCnt = + m_pic_param.CurrPic.TopFieldOrderCnt; + CurrentCurrPic = m_pic_param.CurrPic; + + /**if (getenv("TO_DEL")) { // set RefPicList into ReferenceFrames + + update_RefPicList(); // calc RefPicList + memset(m_pic_param.ReferenceFrames, 0xff, 16 * sizeof(VAPictureH264)); // invalid all + if (current_frame_type == FRAME_P) { + m_pic_param.ReferenceFrames[0] = RefPicList0_P[0]; + } else if (current_frame_type == FRAME_B) { + m_pic_param.ReferenceFrames[0] = RefPicList0_B[0]; + m_pic_param.ReferenceFrames[1] = RefPicList1_B[0]; + } + } else { + **/ + memcpy(m_pic_param.ReferenceFrames, ReferenceFrames,m_ymd.numShortTerm * sizeof(VAPictureH264)); + for (i = m_ymd.numShortTerm; i < Para_Bufs; i++) { + m_pic_param.ReferenceFrames[i].picture_id = VA_INVALID_SURFACE; + m_pic_param.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID; + } + //} + + m_pic_param.pic_fields.bits.idr_pic_flag =(current_frame_type == FRAME_IDR); + m_pic_param.pic_fields.bits.reference_pic_flag = (current_frame_type!= FRAME_B); + + m_pic_param.frame_num = frameIdx; + m_pic_param.coded_buf = coded_buf[sid]; + m_pic_param.last_picture = (current_frame_encoding == m_ymd.gop_size); + + //initVaPic(&m_ymd,&m_pic_param); + + va_status = yang_vaCreateBuffer(m_va_dpy, m_vaContext, + VAEncPictureParameterBufferType, sizeof(m_pic_param), 1, + &m_pic_param, &m_pic_param_buf); + CHECK_VASTATUS(va_status, "vaCreateBuffer");; + + va_status = yang_vaRenderPicture(m_va_dpy, m_vaContext, &m_pic_param_buf, 1); + CHECK_VASTATUS(va_status, "vaRenderPicture"); + + return Yang_Ok; +} +int32_t YangH264EncoderIntel::render_sequence(void) { + VABufferID seq_param_buf, rc_param_buf, misc_param_tmpbuf, render_id[2]; + VAStatus va_status; + VAEncMiscParameterBuffer *misc_param, *misc_param_tmp; + VAEncMiscParameterRateControl *misc_rate_ctrl; + //initVaSeq(&m_ymd,&m_seq_param); + va_status = yang_vaCreateBuffer(m_va_dpy, m_vaContext, + VAEncSequenceParameterBufferType, sizeof(m_seq_param), 1, + &m_seq_param, &seq_param_buf); + CHECK_VASTATUS(va_status, "vaCreateBuffer"); + + va_status = yang_vaCreateBuffer(m_va_dpy, m_vaContext, + VAEncMiscParameterBufferType, + sizeof(VAEncMiscParameterBuffer) + + sizeof(VAEncMiscParameterRateControl), 1, NULL, + &rc_param_buf); + CHECK_VASTATUS(va_status, "vaCreateBuffer"); + + yang_vaMapBuffer(m_va_dpy, rc_param_buf, (void **) &misc_param); + misc_param->type = VAEncMiscParameterTypeRateControl; + misc_rate_ctrl = (VAEncMiscParameterRateControl *) misc_param->data; + memset(misc_rate_ctrl, 0, sizeof(*misc_rate_ctrl)); + misc_rate_ctrl->bits_per_second = m_ymd.frame_bitrate; + misc_rate_ctrl->target_percentage = 66; + misc_rate_ctrl->window_size = 1000; + misc_rate_ctrl->initial_qp = m_ymd.initial_qp; + misc_rate_ctrl->min_qp = m_ymd.minimal_qp; + misc_rate_ctrl->basic_unit_size = 0; + yang_vaUnmapBuffer(m_va_dpy, rc_param_buf); + + render_id[0] = seq_param_buf; + render_id[1] = rc_param_buf; + + va_status = yang_vaRenderPicture(m_va_dpy, m_vaContext, &render_id[0], 2); + CHECK_VASTATUS(va_status, "vaRenderPicture");; + + if (misc_priv_type != 0) { + va_status = yang_vaCreateBuffer(m_va_dpy, m_vaContext, + VAEncMiscParameterBufferType, sizeof(VAEncMiscParameterBuffer), + 1, NULL, &misc_param_tmpbuf); + 
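+ /* This is the generic libva "misc parameter" idiom used throughout this
+ function: create an untyped VAEncMiscParameterBufferType buffer, map it,
+ stamp the type tag, fill the payload, unmap, render. A minimal sketch
+ for an HRD update (illustrative values, not taken from this encoder):
+ VAEncMiscParameterBuffer *mp; VABufferID id;
+ yang_vaCreateBuffer(m_va_dpy, m_vaContext, VAEncMiscParameterBufferType,
+ sizeof(VAEncMiscParameterBuffer) + sizeof(VAEncMiscParameterHRD),
+ 1, NULL, &id);
+ yang_vaMapBuffer(m_va_dpy, id, (void**)&mp);
+ mp->type = VAEncMiscParameterTypeHRD;
+ VAEncMiscParameterHRD *hrd = (VAEncMiscParameterHRD*)mp->data;
+ hrd->initial_buffer_fullness = 512000;
+ hrd->buffer_size = 1024000;
+ yang_vaUnmapBuffer(m_va_dpy, id);
+ yang_vaRenderPicture(m_va_dpy, m_vaContext, &id, 1);
+ */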
CHECK_VASTATUS(va_status, "vaCreateBuffer");
+        yang_vaMapBuffer(m_va_dpy, misc_param_tmpbuf, (void **) &misc_param_tmp);
+        misc_param_tmp->type = (VAEncMiscParameterType) misc_priv_type;
+        misc_param_tmp->data[0] = misc_priv_value;
+        yang_vaUnmapBuffer(m_va_dpy, misc_param_tmpbuf);
+
+        va_status = yang_vaRenderPicture(m_va_dpy, m_vaContext, &misc_param_tmpbuf, 1);
+    }
+
+    return Yang_Ok;
+}
+int32_t YangH264EncoderIntel::update_ReferenceFrames(void) {
+    int32_t i;
+    if (current_frame_type == FRAME_B)
+        return Yang_Ok;
+    CurrentCurrPic.flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+    m_ymd.numShortTerm++;
+    if (m_ymd.numShortTerm > m_ymd.num_ref_frames)
+        m_ymd.numShortTerm = m_ymd.num_ref_frames;
+    for (i = m_ymd.numShortTerm - 1; i > 0; i--)
+        ReferenceFrames[i] = ReferenceFrames[i - 1];
+    ReferenceFrames[0] = CurrentCurrPic;
+
+    if (current_frame_type != FRAME_B)
+        current_frame_num++;
+    if (current_frame_num > m_ymd.MaxFrameNum)
+        current_frame_num = 0;
+
+    return Yang_Ok;
+}
+
+int32_t YangH264EncoderIntel::encode(uint8_t *buf, int32_t buflen, uint8_t *dest,
+        int32_t *destLen, int32_t *frametype) {
+    upload_surface_yuv(src_surface[sid], m_yvp.outWidth, m_yvp.outHeight, buf,
+            buf + yLen, NULL);
+    encoding2display_order(current_frame_encoding, m_ymd.intra_period,
+            m_ymd.intra_idr_period, m_ymd.ip_period, &current_frame_display,
+            &current_frame_type);
+    if (current_frame_type == FRAME_IDR) {
+        m_ymd.numShortTerm = 0;
+        current_frame_num = 0;
+        current_IDR_display = current_frame_display;
+    }
+
+    vas = yang_vaBeginPicture(m_va_dpy, m_vaContext, src_surface[sid]);
+    CHECK_VASTATUS(vas, "vaBeginPicture");
+
+    if (current_frame_type == FRAME_IDR) {
+        render_sequence();
+        render_picture();
+        if (m_ymd.h264_packedheader) {
+            render_packedsequence();
+            render_packedpicture();
+        }
+        *frametype = 1;
+        //if (rc_mode == VA_RC_CBR)
+        //    render_packedsei();
+        //render_hrd();
+    } else {
+        //render_sequence();
+        render_picture();
+        *frametype = 0;
+        //if (rc_mode == VA_RC_CBR)
+        //    render_packedsei();
+        //render_hrd();
+    }
+    render_slice();
+    vas = yang_vaEndPicture(m_va_dpy, m_vaContext);
+    CHECK_VASTATUS(vas, "vaEndPicture");
+
+    vas = yang_vaSyncSurface(m_va_dpy, src_surface[sid]);
+    CHECK_VASTATUS(vas, "vaSyncSurface");
+    VACodedBufferSegment *buf_list = NULL;
+
+    //uint32_t coded_size = 0;
+
+    vas = yang_vaMapBuffer(m_va_dpy, coded_buf[sid], (void **) (&buf_list));
+    CHECK_VASTATUS(vas, "vaMapBuffer");
+    *destLen = 0;
+    while (buf_list != NULL) {
+        memcpy(dest + (*destLen), buf_list->buf, buf_list->size);//fwrite(buf_list->buf, 1, buf_list->size, coded_fp);
+        (*destLen) += buf_list->size;
+        buf_list = (VACodedBufferSegment *) buf_list->next;
+
+    }
+    yang_vaUnmapBuffer(m_va_dpy, coded_buf[sid]);
+    update_ReferenceFrames();
+
+    printf("%d-%d,", current_frame_type, *destLen);
+    if (current_frame_encoding % 15 == 0) printf("\n");
+
+    current_frame_encoding++;
+    if (current_frame_encoding == m_ymd.gop_size) current_frame_encoding = 0;
+    sid++;
+    if (sid == Para_Bufs)
+        sid = 0;
+    return 1;
+
+}
+/**
+void YangH264EncoderIntel::startLoop() {
+    isConvert = 1;
+    int32_t isTrans = (m_inWidth != m_outWidth ? 1 : 0);
+    int64_t videoTimestamp = 0;
+    int32_t videoDestLen = 0;
+    int32_t frameType = 0;
+    uint8_t *videoDest = new uint8_t[1024 * 256];
+
+    int32_t m_in_File_Size =
+            m_context->videoCaptureFormat>0 ?
+ m_inWidth * m_inHeight * 3 / 2 : m_inWidth * m_inHeight * 2; + //int32_t m_in_File_Size1 = m_inWidth * m_inHeight * 3 / 2; + int32_t m_out_File_Size = m_outWidth * m_outHeight * 3 / 2; + uint8_t *videoSrc = new uint8_t[m_inWidth * m_inHeight * 3 / 2]; + uint8_t videoSrc1[m_out_File_Size]; //=new uint8_t[m_in_File_Size1]; + int64_t preVideostamp = 0; + printf("\n**************************start Zbhd encoding..........."); + YangConvert con; + //if(isTrans) con.initNv12_resize(inWidth,inHeight,outWidth,outHeight); + //uint8_t *t_in; + //uint8_t *t_out; + + if (m_in_videoBuffer != NULL) m_in_videoBuffer->resetIndex(); + if (m_out_videoBuffer != NULL) m_out_videoBuffer->resetIndex(); + int32_t is16bit=1; + if(m_context->videoCaptureFormat>0) is16bit=0; + while (isConvert == 1) { + + if (m_in_videoBuffer->size() == 0) { + //printf("***listsize=%d\n",vlist->getSize()); + yang_usleep(20000); + //printf("**"); + continue; + } + //if (isTrans) { + // m_in_videoBuffer->getVideo(t_in, m_in_File_Size, &videoTimestamp); + //util1.getVideoZ(videoSrc,m_in_File_Size,&videoTimestamp); + // if (preVideostamp == 0) { + // preVideostamp = videoTimestamp; + // } + // if (videoTimestamp - preVideostamp <= 0) + // continue; + + //con.YUY2toNV12(inWidth,inHeight,videoSrc,videoSrc1); + //con.nv12_nearest_scale(videoSrc1,p264Pic->img.plane[0]); + // con.mpu->pu->resize(); + // memcpy(videoSrc1, t_out, m_out_File_Size); + //con.YUY2toI420(outWidth,outHeight,con.mpu->pu->out,p264Pic->img.plane[0]); + + //con.resize_NV12(videoSrc1,p264Pic->img.plane[0],inWidth,inHeight,outWidth,outHeight); + //con.resize_Yuy2_NV12(videoSrc,p264Pic->img.plane[0],inWidth,inHeight,outWidth,outHeight); + //memcpy(p264Pic->img.plane[0],t_out,m_out_File_Size); + // } else { + // util1.getVideoZ(p264Pic->img.plane[0],m_in_File_Size,&videoTimestamp); + //util1.YUY2toI420(inWidth, inHeight, util1.getVideoIn(&videoTimestamp), p264Pic->img.plane[0]); + + if(is16bit){ + if (isTrans) + con.resize_Yuy2_NV12(m_in_videoBuffer->getVideoIn(&videoTimestamp),videoSrc1,m_inWidth,m_inHeight,m_outWidth,m_outHeight); + else + con.YUY2toNV12(m_inWidth,m_inHeight,m_in_videoBuffer->getVideoIn(&videoTimestamp), videoSrc1); + }else{ + if(isTrans) + con.resize_NV12(m_in_videoBuffer->getVideoIn(&videoTimestamp),videoSrc1,m_inWidth,m_inHeight,m_outWidth,m_outHeight); + else + memcpy(videoSrc1,m_in_videoBuffer->getVideoIn(&videoTimestamp),m_in_File_Size); + } + if (preVideostamp == 0) { + preVideostamp = videoTimestamp; + } + if (videoTimestamp - preVideostamp <= 0) + continue; + //} + + encode(videoSrc1, m_out_File_Size, videoDest, &videoDestLen, + &frameType); + if (videoDestLen > 4) { + //writeFile(videoDest,videoDestLen); + m_out_videoBuffer->putEVideo(videoDest, videoDestLen, videoTimestamp, + frameType); + } + //printf("%d-%d,",frameType, videoDestLen); + + } + //util1.close(); + //util_z.close(); + //t_in = NULL; + //t_out = NULL; + //con.mpu->pu->close(); + //con.mpu->pu = NULL; + //util_z.close(); + // delete[] srcVideoSource; + yang_deleteA(videoSrc); + yang_deleteA(videoDest); + +} + +void YangH264EncoderIntel::stopLoop() { + isConvert = 0; + +} +**/ +#endif +#endif diff --git a/libmetartc3/src/yangencoder/YangH264EncoderIntel.h b/libmetartc3/src/yangencoder/YangH264EncoderIntel.h new file mode 100755 index 00000000..7781a0a4 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangH264EncoderIntel.h @@ -0,0 +1,168 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YangH264EncoderIntel_H +#define YangH264EncoderIntel_H + +#ifndef _WIN32 +#include 
"yangencoder/YangVideoEncoder.h" +#include "yangutil/buffer/YangVideoBuffer.h" +#include +#if YangLibva +#include "va/va.h" +#include "va/va_enc_h264.h" +#include "va/va_drm.h" +#include "va/va_drmcommon.h" + +#include "yangutil/sys/YangLoadLib.h" +#include "YangH264EncHeader.h" +#define Para_Bufs 16 + +struct bitstream { + uint32_t *buffer; + int32_t bit_offset; + int32_t max_size_in_dword; +}; + + +class YangH264EncoderIntel: public YangVideoEncoder { +public: + YangH264EncoderIntel(); + ~YangH264EncoderIntel(); + //void init(YangIni *pcontext); + void init(YangVideoParam *pvp,YangVideoEncParam *penc); + + int32_t encode(uint8_t *buf, int32_t buflen, uint8_t *dest, int32_t *destLen,int32_t *frametype); + void setVideoMetaData(YangVideoMeta *pvmd); + void parseRtmpHeader(uint8_t *p,int32_t pLen,int32_t *pwid,int32_t *phei,int32_t *pfps); + +protected: + //void stopLoop(); + //void startLoop(); +private: + + YangVideoMeta *m_vmd; + YangMeataData m_ymd; + int32_t m_fd; + VADisplay m_va_dpy; + VAConfigID m_config_id; + VAContextID m_vaContext; + + VAStatus vas; + + VAImage m_img; + VAImageFormat image_format; + + // YangH264Header h264header; + VAPictureH264 m_pic[16]; + //int32_t m_ref_count; + //int32_t m_ref_count_m1; + + VAPictureH264 CurrentCurrPic; + VAEncPictureParameterBufferH264 m_pic_param; + VAEncSliceParameterBufferH264 m_slice_param; + VAEncSequenceParameterBufferH264 m_seq_param; + VABufferID bufids[10]; + + //YangDecodePicture pics[Para_Bufs]; + VASurfaceID src_surface[Para_Bufs]; + //VASurfaceID ref_surface[Para_Bufs]; + VABufferID coded_buf[Para_Bufs]; + int32_t sid; + +private: + //int32_t isConvert; + //int32_t m_inWidth; + //int32_t m_inHeight; + int32_t m_width_mbaligned; + int32_t m_height_mbaligned; + + int32_t m_frame; + int32_t yLen; + int32_t uLen; + int32_t uvLen; + int32_t allLen; + int32_t frameIdx; + int32_t FieldOrderCnt; + int32_t posNum; + int32_t m_vap_size; + + int32_t current_frame_type; + VAConfigAttrib attrib[VAConfigAttribTypeMax]; + VAConfigAttrib config_attrib[VAConfigAttribTypeMax]; + unsigned int64_t current_frame_encoding; + unsigned int64_t current_frame_display; + unsigned int64_t current_IDR_display; + uint32_t current_frame_num; + int32_t misc_priv_type; + int32_t misc_priv_value; + VAProfile h264_profile; + // int32_t current_frame_type; + int32_t config_attrib_num, enc_packed_header_idx; + +//#define current_slot (current_frame_display % Para_Bufs) + VAPictureH264 ReferenceFrames[16], RefPicList0_P[32], RefPicList0_B[32],RefPicList1_B[32]; + void initSlicePara(VASliceParameterBufferH264 *slice264,uint8_t *p_data, int32_t p_len); + int32_t render_packedpicture(void); + int32_t render_packedsequence(void); + int32_t render_slice(void); + int32_t render_picture(void); + int32_t render_sequence(void); + int32_t update_RefPicList(void); + void slice_header(bitstream *bs); + void pps_rbsp(bitstream *bs); + void sps_rbsp(bitstream *bs); + int32_t calc_poc(int32_t pic_order_cnt_lsb); + void render_packedslice(); + int32_t build_packed_slice_buffer(uint8_t **header_buffer); + int32_t build_packed_seq_buffer(uint8_t **header_buffer); + int32_t build_packed_pic_buffer(uint8_t **header_buffer); + int32_t update_ReferenceFrames(void); + int32_t upload_surface_yuv(VASurfaceID surface_id, int32_t src_width,int32_t src_height, uint8_t *src_Y, uint8_t *src_U,uint8_t *src_V); + int32_t upload_surface_yuv(VADisplay va_dpy, VASurfaceID surface_id, int32_t src_fourcc, int32_t src_width, int32_t src_height, uint8_t *src_Y, + uint8_t *src_U, uint8_t *src_V); 
+private:
+
+    void closeDevice();
+    //    int32_t build_packed_seq_buffer(uint8_t **header_buffer)
+    YangLoadLib m_lib,m_lib1;
+    void loadLib();
+    void unloadLib();
+
+    VAStatus (*yang_vaGetConfigAttributes) (VADisplay dpy, VAProfile profile, VAEntrypoint entrypoint, VAConfigAttrib *attrib_list, int32_t num_attribs);
+    VAStatus (*yang_vaDeriveImage) (VADisplay dpy,VASurfaceID surface,VAImage *image);
+    VAStatus (*yang_vaDestroyImage) (VADisplay dpy,VAImageID image);
+    VADisplay (*yang_vaGetDisplayDRM)(int32_t fd);
+    VAStatus (*yang_vaInitialize) ( VADisplay dpy, int32_t *major_version,int32_t *minor_version);
+    VAStatus (*yang_vaDestroyContext) (VADisplay dpy,VAContextID context);
+    VAStatus (*yang_vaDestroySurfaces) (VADisplay dpy,VASurfaceID *surfaces,int32_t num_surfaces);
+    VAStatus (*yang_vaDestroyConfig) (VADisplay dpy,VAConfigID config_id);
+    VAStatus (*yang_vaTerminate) ( VADisplay dpy);
+
+
+    VAStatus (*yang_vaCreateConfig) (VADisplay dpy, VAProfile profile, VAEntrypoint entrypoint, VAConfigAttrib *attrib_list, int32_t num_attribs, VAConfigID *config_id );
+
+    VAStatus (*yang_vaCreateSurfaces)(VADisplay dpy,uint32_t format,uint32_t width,
+            uint32_t height,VASurfaceID *surfaces,uint32_t num_surfaces, VASurfaceAttrib *attrib_list,
+            uint32_t num_attribs);
+
+    VAStatus (*yang_vaCreateContext) (VADisplay dpy,VAConfigID config_id, int32_t picture_width,
+            int32_t picture_height, int32_t flag, VASurfaceID *render_targets,
+            int32_t num_render_targets,VAContextID *context);
+    VAStatus (*yang_vaCreateBuffer) (VADisplay dpy,VAContextID context, VABufferType type,
+            uint32_t size, uint32_t num_elements, void *data, VABufferID *buf_id
+    );
+    VAStatus (*yang_vaBeginPicture) (VADisplay dpy,VAContextID context,VASurfaceID render_target);
+    VAStatus (*yang_vaRenderPicture) (VADisplay dpy,VAContextID context, VABufferID *buffers,int32_t num_buffers);
+
+    VAStatus (*yang_vaCreateImage) (VADisplay dpy,VAImageFormat *format, int32_t width, int32_t height, VAImage *image);
+    VAStatus (*yang_vaEndPicture) (VADisplay dpy,VAContextID context);
+    VAStatus (*yang_vaGetImage) (VADisplay dpy,VASurfaceID surface, int32_t x,
+            int32_t y,uint32_t width, uint32_t height,VAImageID image);
+    VAStatus (*yang_vaMapBuffer) (VADisplay dpy,VABufferID buf_id, void **pbuf);
+    VAStatus (*yang_vaSyncSurface) (VADisplay dpy,VASurfaceID render_target);
+    VAStatus (*yang_vaUnmapBuffer) (VADisplay dpy,VABufferID buf_id);
+};
+#endif
+#endif
+#endif // YangH264EncoderIntel_H
diff --git a/libmetartc3/src/yangencoder/YangH264EncoderIntel1.h b/libmetartc3/src/yangencoder/YangH264EncoderIntel1.h
new file mode 100755
index 00000000..e1a3e4cc
--- /dev/null
+++ b/libmetartc3/src/yangencoder/YangH264EncoderIntel1.h
@@ -0,0 +1,795 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "YangH264EncoderIntel.h"
+#ifndef _WIN32
+#include "memory.h"
+#include <iostream>
+#include "string.h"
+#include "sys/ioctl.h"
+#include "fcntl.h"
+#include "memory.h"
+#include "yangutil/yang_unistd.h"
+//#include
+//#include
+#include "string.h"
+#include "stdio.h"
+#if YangLibva
+#include "xf86drm.h"
+
+//#include "yangutil/video/yangconvert.h"
+using namespace std;
+#define CHECK_VASTATUS(X, MSG) {if ((X) != VA_STATUS_SUCCESS) {cout <<"\n_ERROR:"<<MSG<<endl;}}
+#define BITSTREAM_ALLOCATE_STEPPING 4096
+
+static uint32_t va_swap32(uint32_t val) {
+    uint8_t *pval = (uint8_t *) &val;
+    return ((pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | (pval[3] << 0));
+}
+
+void bitstream_start(bitstream *bs) {
+    bs->max_size_in_dword = BITSTREAM_ALLOCATE_STEPPING;
+    bs->buffer = (unsigned int*) calloc(bs->max_size_in_dword * sizeof(int), 1);
+    bs->bit_offset = 0;
+}
+
+void bitstream_end(bitstream *bs) {
+    int32_t pos = (bs->bit_offset >> 5);
+    int32_t bit_offset = (bs->bit_offset & 0x1f);
+    int32_t bit_left = 32 -
bit_offset; + + if (bit_offset) { + bs->buffer[pos] = va_swap32((bs->buffer[pos] << bit_left)); + } +} + +void bitstream_put_ui(bitstream *bs, uint32_t val, int32_t size_in_bits) { + int32_t pos = (bs->bit_offset >> 5); + int32_t bit_offset = (bs->bit_offset & 0x1f); + int32_t bit_left = 32 - bit_offset; + + if (!size_in_bits) + return; + + bs->bit_offset += size_in_bits; + + if (bit_left > size_in_bits) { + bs->buffer[pos] = (bs->buffer[pos] << size_in_bits | val); + } else { + size_in_bits -= bit_left; + bs->buffer[pos] = (bs->buffer[pos] << bit_left) | (val >> size_in_bits); + bs->buffer[pos] = va_swap32(bs->buffer[pos]); + + if (pos + 1 == bs->max_size_in_dword) { + bs->max_size_in_dword += BITSTREAM_ALLOCATE_STEPPING; + bs->buffer = (unsigned int*) realloc(bs->buffer, + bs->max_size_in_dword * sizeof(unsigned int)); + } + + bs->buffer[pos + 1] = val; + } +} + +void bitstream_put_ue(bitstream *bs, uint32_t val) { + int32_t size_in_bits = 0; + int32_t tmp_val = ++val; + + while (tmp_val) { + tmp_val >>= 1; + size_in_bits++; + } + + bitstream_put_ui(bs, 0, size_in_bits - 1); // leading zero + bitstream_put_ui(bs, val, size_in_bits); +} + +void bitstream_put_se(bitstream *bs, int32_t val) { + uint32_t new_val; + + if (val <= 0) + new_val = -2 * val; + else + new_val = 2 * val - 1; + + bitstream_put_ue(bs, new_val); +} + +void bitstream_byte_aligning(bitstream *bs, int32_t bit) { + int32_t bit_offset = (bs->bit_offset & 0x7); + int32_t bit_left = 8 - bit_offset; + int32_t new_val; + + if (!bit_offset) + return; + + //assert(bit == 0 || bit == 1); + + if (bit) + new_val = (1 << bit_left) - 1; + else + new_val = 0; + + bitstream_put_ui(bs, new_val, bit_left); +} + +void rbsp_trailing_bits(bitstream *bs) { + bitstream_put_ui(bs, 1, 1); + bitstream_byte_aligning(bs, 0); +} + +void nal_start_code_prefix(bitstream *bs) { + bitstream_put_ui(bs, 0x00000001, 32); +} + +void nal_header(bitstream *bs, int32_t nal_ref_idc, int32_t nal_unit_type) { + bitstream_put_ui(bs, 0, 1); /* forbidden_zero_bit: 0 */ + bitstream_put_ui(bs, nal_ref_idc, 2); + bitstream_put_ui(bs, nal_unit_type, 5); +} + +int32_t build_packed_sei_buffer_timing(uint32_t init_cpb_removal_length, + uint32_t init_cpb_removal_delay, + uint32_t init_cpb_removal_delay_offset, + uint32_t cpb_removal_length, uint32_t cpb_removal_delay, + uint32_t dpb_output_length, uint32_t dpb_output_delay, + uint8_t **sei_buffer) { + uint8_t *byte_buf; + int32_t bp_byte_size, i, pic_byte_size; + + bitstream nal_bs; + bitstream sei_bp_bs, sei_pic_bs; + + bitstream_start(&sei_bp_bs); + bitstream_put_ue(&sei_bp_bs, 0); /*seq_parameter_set_id*/ + bitstream_put_ui(&sei_bp_bs, init_cpb_removal_delay, cpb_removal_length); + bitstream_put_ui(&sei_bp_bs, init_cpb_removal_delay_offset, + cpb_removal_length); + if (sei_bp_bs.bit_offset & 0x7) { + bitstream_put_ui(&sei_bp_bs, 1, 1); + } + bitstream_end(&sei_bp_bs); + bp_byte_size = (sei_bp_bs.bit_offset + 7) / 8; + + bitstream_start(&sei_pic_bs); + bitstream_put_ui(&sei_pic_bs, cpb_removal_delay, cpb_removal_length); + bitstream_put_ui(&sei_pic_bs, dpb_output_delay, dpb_output_length); + if (sei_pic_bs.bit_offset & 0x7) { + bitstream_put_ui(&sei_pic_bs, 1, 1); + } + bitstream_end(&sei_pic_bs); + pic_byte_size = (sei_pic_bs.bit_offset + 7) / 8; + + bitstream_start(&nal_bs); + nal_start_code_prefix(&nal_bs); + nal_header(&nal_bs, NAL_REF_IDC_NONE, NAL_SEI); + + /* Write the SEI buffer period data */ + bitstream_put_ui(&nal_bs, 0, 8); + bitstream_put_ui(&nal_bs, bp_byte_size, 8); + + byte_buf = (uint8_t *) 
sei_bp_bs.buffer; + for (i = 0; i < bp_byte_size; i++) { + bitstream_put_ui(&nal_bs, byte_buf[i], 8); + } + free(byte_buf); + /* write the SEI timing data */ + bitstream_put_ui(&nal_bs, 0x01, 8); + bitstream_put_ui(&nal_bs, pic_byte_size, 8); + + byte_buf = (uint8_t *) sei_pic_bs.buffer; + for (i = 0; i < pic_byte_size; i++) { + bitstream_put_ui(&nal_bs, byte_buf[i], 8); + } + free(byte_buf); + + rbsp_trailing_bits(&nal_bs); + bitstream_end(&nal_bs); + + *sei_buffer = (uint8_t *) nal_bs.buffer; + + return nal_bs.bit_offset; +} + + +char *rc_to_string(int32_t rcmode) { + switch (rcmode) { + case VA_RC_NONE: + return "NONE"; + case VA_RC_CBR: + return "CBR"; + case VA_RC_VBR: + return "VBR"; + case VA_RC_VCM: + return "VCM"; + case VA_RC_CQP: + return "CQP"; + case VA_RC_VBR_CONSTRAINED: + return "VBR_CONSTRAINED"; + default: + return "Unknown"; + } +} +/** + void YangH264EncoderIntel::initSlicePara(VASliceParameterBufferH264 *slice264, + uint8_t *p_data, int32_t p_len) { + //initNaluPara(&m_Nalu,p_data); + + + YangSliceContext *sl = h264header.sl; + int32_t ret = (h264header.m_sps.mb_aff + && (sl->picture_structure == Yang_PICT_FRAME)) + || sl->picture_structure != Yang_PICT_FRAME; + slice264->slice_data_size = p_len; + slice264->slice_data_offset = 0; + slice264->slice_data_flag = VA_SLICE_DATA_FLAG_ALL; + slice264->slice_data_bit_offset = h264header.get_bits_count(&sl->gb); + slice264->first_mb_in_slice = (sl->mb_y >> ret) * h264header.m_sps.mb_width + + sl->mb_x; + slice264->slice_type = yang_get_slice_type(sl); //sl->slice_type;//m_Nalu.nal_reference_idc;//ff_h264_get_slice_type(sl); + slice264->direct_spatial_mv_pred_flag = + sl->slice_type == Yang_PICTURE_TYPE_B ? + sl->direct_spatial_mv_pred : 0; + slice264->num_ref_idx_l0_active_minus1 = + sl->list_count > 0 ? sl->ref_count[0] - 1 : 0; + slice264->num_ref_idx_l1_active_minus1 = + sl->list_count > 1 ? sl->ref_count[1] - 1 : 0; + slice264->cabac_init_idc = sl->cabac_init_idc; + slice264->slice_qp_delta = sl->qscale - h264header.m_pps.init_qp; + slice264->disable_deblocking_filter_idc = + sl->deblocking_filter < 2 ? + !sl->deblocking_filter : sl->deblocking_filter; + slice264->slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2; + slice264->slice_beta_offset_div2 = sl->slice_beta_offset / 2; + slice264->luma_log2_weight_denom = sl->pwt.luma_log2_weight_denom; + slice264->chroma_log2_weight_denom = sl->pwt.chroma_log2_weight_denom; + sl = NULL; + if(slice264->slice_type==2) + { + slice264->chroma_weight_l0[0][0]=0; + slice264->chroma_weight_l0[0][1]=0; + slice264->chroma_weight_l1[0][0]=0; + slice264->chroma_weight_l1[0][1]=0; + slice264->RefPicList0[0].picture_id = 0xffffffff; + }else{ + slice264->chroma_weight_l0[0][0]=1; + slice264->chroma_weight_l0[0][1]=1; + slice264->chroma_weight_l1[0][0]=1; + slice264->chroma_weight_l1[0][1]=1; + } + + for (int32_t i = 0; i < 32; i++) { + slice264->RefPicList0[i].flags = VA_PICTURE_H264_INVALID; + slice264->RefPicList1[i].flags = VA_PICTURE_H264_INVALID; + slice264->RefPicList0[i].picture_id = 0xffffffff; + slice264->RefPicList1[i].picture_id = 0xffffffff; + } + + }**/ +/** + void YangH264EncoderIntel::cachePic(VAPictureH264 *p_vpic, int32_t p_pocind) { + + if (p_pocind > 0) { + int32_t i = (p_pocind > m_ref_count_m1 - 1 ? 
m_ref_count_m1 : p_pocind) - 1; + + for (int32_t i = p_pocind - 1; i > 0; i--) + memcpy(&m_pic_param.ReferenceFrames[i], + &m_pic_param.ReferenceFrames[i - 1], m_vap_size); + } + memcpy(&m_pic_param.ReferenceFrames[0], p_vpic, m_vap_size); + + m_pic_param.ReferenceFrames[0].flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE; + + }**/ +/**void initPicPara_1(VAPictureParameterBufferH264 *p_pic_para,YangH264Header *header){ + //Yang_SPS *sps, Yang_PPS *pps) { + Yang_SPS *sps=&header->m_sps; + Yang_PPS *pps=&header->m_pps; + //YangSliceContext *sl=header->sl; + //(*p_pic_para) = (VAPictureParameterBufferH264){ + p_pic_para->picture_width_in_mbs_minus1 = sps->mb_width-1;//conf->Video_Width_Zb - 1; + p_pic_para->picture_height_in_mbs_minus1 = sps->mb_height-1;//conf->Video_Height_Zb - 1; + + p_pic_para->bit_depth_luma_minus8 = sps->bit_depth_luma-8; + p_pic_para->bit_depth_chroma_minus8 = sps->bit_depth_chroma-8; + p_pic_para->num_ref_frames = sps->ref_frame_count; + //.seq_fields.bits = { + p_pic_para->seq_fields.bits.chroma_format_idc = sps->chroma_format_idc; + p_pic_para->seq_fields.bits.residual_colour_transform_flag = sps->residual_color_transform_flag; + p_pic_para->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = + sps->gaps_in_frame_num_allowed_flag; + p_pic_para->seq_fields.bits.frame_mbs_only_flag = sps->frame_mbs_only_flag; + p_pic_para->seq_fields.bits.mb_adaptive_frame_field_flag = + sps->mb_aff; + p_pic_para->seq_fields.bits.direct_8x8_inference_flag = + sps->direct_8x8_inference_flag; + p_pic_para->seq_fields.bits.MinLumaBiPredSize8x8 = sps->level_idc >= 31; //Ap_pic_para->seq_fields.bits.3p_pic_para->seq_fields.bits.3p_pic_para->seq_fields.bits.2 + p_pic_para->seq_fields.bits.log2_max_frame_num_minus4 = + sps->log2_max_frame_num - 4; + p_pic_para->seq_fields.bits.pic_order_cnt_type = sps->poc_type; + p_pic_para->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = + sps->log2_max_poc_lsb - 4; + p_pic_para->seq_fields.bits.delta_pic_order_always_zero_flag = + sps->delta_pic_order_always_zero_flag; + // }; + p_pic_para->num_slice_groups_minus1 = pps->slice_group_count - 1; + p_pic_para->slice_group_map_type = pps->mb_slice_group_map_type; + p_pic_para->slice_group_change_rate_minus1 =0; // + p_pic_para->pic_init_qp_minus26 = pps->init_qp-26; //init_qp - 26; + p_pic_para->pic_init_qs_minus26 = pps->init_qs-26; //init_qs - 26; + p_pic_para->chroma_qp_index_offset = pps->chroma_qp_index_offset[0]; + p_pic_para->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1]; + //.pic_fields.bits = { + p_pic_para->pic_fields.bits.entropy_coding_mode_flag =pps->cabac; + p_pic_para->pic_fields.bits.weighted_pred_flag = pps->weighted_pred; + p_pic_para->pic_fields.bits.weighted_bipred_idc = pps->weighted_bipred_idc; + p_pic_para->pic_fields.bits.transform_8x8_mode_flag =pps->transform_8x8_mode; + p_pic_para->pic_fields.bits.field_pic_flag =0;//sl->picture_structure != Yang_PICT_FRAME; + p_pic_para->pic_fields.bits.constrained_intra_pred_flag = + pps->constrained_intra_pred; + p_pic_para->pic_fields.bits.pic_order_present_flag = + pps->pic_order_present; + p_pic_para->pic_fields.bits.deblocking_filter_control_present_flag = + pps->deblocking_filter_parameters_present; + p_pic_para->pic_fields.bits.redundant_pic_cnt_present_flag = + pps->redundant_pic_cnt_present; + p_pic_para->pic_fields.bits.reference_pic_flag = 1; //h->nal_ref_idc != 0; + // }, + p_pic_para->frame_num = 0; + // }; + + } + int32_t yang_get_slice_type( YangSliceContext *sl) + { + switch (sl->slice_type) { + case 
Yang_PICTURE_TYPE_P:
+        return 0;
+    case Yang_PICTURE_TYPE_B:
+        return 1;
+    case Yang_PICTURE_TYPE_I:
+        return 2;
+    case Yang_PICTURE_TYPE_SP:
+        return 3;
+    case Yang_PICTURE_TYPE_SI:
+        return 4;
+    default:
+        return -1;
+    }
+}**/
+
+void encoding2display_order(uint64_t encoding_order, int32_t intra_period,
+        int32_t intra_idr_period, int32_t ip_period,
+        uint64_t *displaying_order, int32_t *frame_type) {
+    int32_t encoding_order_gop = 0;
+
+    if (intra_period == 1) { /* all are I/IDR frames */
+        *displaying_order = encoding_order;
+        if (intra_idr_period == 0)
+            *frame_type = (encoding_order == 0) ? FRAME_IDR : FRAME_I;
+        else
+            *frame_type =
+                    (encoding_order % intra_idr_period == 0) ?
+                            FRAME_IDR : FRAME_I;
+        return;
+    }
+
+    if (intra_period == 0)
+        intra_idr_period = 0;
+
+    /* new sequence like
+     * IDR PPPPP IPPPPP
+     * IDR (PBB)(PBB)(IBB)(PBB)
+     */
+    encoding_order_gop =
+            (intra_idr_period == 0) ?
+                    encoding_order :
+                    (encoding_order
+                            % (intra_idr_period + ((ip_period == 1) ? 0 : 1)));
+
+    if (encoding_order_gop == 0) { /* the first frame */
+        *frame_type = FRAME_IDR;
+        *displaying_order = encoding_order;
+    } else if (((encoding_order_gop - 1) % ip_period) != 0) { /* B frames */
+        *frame_type = FRAME_B;
+        *displaying_order = encoding_order - 1;
+    } else if ((intra_period != 0) && /* have I frames */
+            (encoding_order_gop >= 2)
+            && ((ip_period == 1 && encoding_order_gop % intra_period == 0)
+                    || /* for IDR PPPPP IPPPP */
+                    /* for IDR (PBB)(PBB)(IBB) */
+                    (ip_period >= 2
+                            && ((encoding_order_gop - 1) / ip_period
+                                    % (intra_period / ip_period)) == 0))) {
+        *frame_type = FRAME_I;
+        *displaying_order = encoding_order + ip_period - 1;
+    } else {
+        *frame_type = FRAME_P;
+        *displaying_order = encoding_order + ip_period - 1;
+    }
+}
+
+#define partition(ref, field, key, ascending) \
+    while (i <= j) { \
+        if (ascending) { \
+            while (ref[i].field < key) \
+                i++; \
+            while (ref[j].field > key) \
+                j--; \
+        } else { \
+            while (ref[i].field > key) \
+                i++; \
+            while (ref[j].field < key) \
+                j--; \
+        } \
+        if (i <= j) { \
+            tmp = ref[i]; \
+            ref[i] = ref[j]; \
+            ref[j] = tmp; \
+            i++; \
+            j--; \
+        } \
+    }
+
+void sort_one(VAPictureH264 ref[], int32_t left, int32_t right, int32_t ascending,
+        int32_t frame_idx) {
+    int32_t i = left, j = right;
+    uint32_t key;
+    VAPictureH264 tmp;
+
+    if (frame_idx) {
+        key = ref[(left + right) / 2].frame_idx;
+        partition(ref, frame_idx, key, ascending);
+    } else {
+        key = ref[(left + right) / 2].TopFieldOrderCnt;
+        partition(ref, TopFieldOrderCnt, (int32_t) key, ascending);
+    }
+
+    /* recursion */
+    if (left < j)
+        sort_one(ref, left, j, ascending, frame_idx);
+
+    if (i < right)
+        sort_one(ref, i, right, ascending, frame_idx);
+}
+
+void sort_two(VAPictureH264 ref[], int32_t left, int32_t right, uint32_t key,
+        uint32_t frame_idx, int32_t partition_ascending, int32_t list0_ascending,
+        int32_t list1_ascending) {
+    int32_t i = left, j = right;
+    VAPictureH264 tmp;
+
+    if (frame_idx) {
+        partition(ref, frame_idx, key, partition_ascending);
+    } else {
+        partition(ref, TopFieldOrderCnt, (int32_t) key, partition_ascending);
+    }
+
+    sort_one(ref, left, i - 1, list0_ascending, frame_idx);
+    sort_one(ref, j + 1, right, list1_ascending, frame_idx);
+}
+
+void YangH264EncoderIntel::sps_rbsp(bitstream *bs) {
+    int32_t profile_idc = PROFILE_IDC_BASELINE;
+
+    //    if (h264_profile == VAProfileH264High)
+    //        profile_idc = PROFILE_IDC_HIGH;
+    //    else if (h264_profile == VAProfileH264Main)
+    //profile_idc = PROFILE_IDC_MAIN;
+
+    bitstream_put_ui(bs,
profile_idc, 8); /* profile_idc */ + bitstream_put_ui(bs, !!(m_ymd.constraint_set_flag & 1), 1); /* constraint_set0_flag */ + bitstream_put_ui(bs, !!(m_ymd.constraint_set_flag & 2), 1); /* constraint_set1_flag */ + bitstream_put_ui(bs, !!(m_ymd.constraint_set_flag & 4), 1); /* constraint_set2_flag */ + bitstream_put_ui(bs, !!(m_ymd.constraint_set_flag & 8), 1); /* constraint_set3_flag */ + bitstream_put_ui(bs, 0, 4); /* reserved_zero_4bits */ + bitstream_put_ui(bs, m_seq_param.level_idc, 8); /* level_idc */ + bitstream_put_ue(bs, m_seq_param.seq_parameter_set_id); /* seq_parameter_set_id */ + + if (profile_idc == PROFILE_IDC_HIGH) { + bitstream_put_ue(bs, 1); /* chroma_format_idc = 1, 4:2:0 */ + bitstream_put_ue(bs, 0); /* bit_depth_luma_minus8 */ + bitstream_put_ue(bs, 0); /* bit_depth_chroma_minus8 */ + bitstream_put_ui(bs, 0, 1); /* qpprime_y_zero_transform_bypass_flag */ + bitstream_put_ui(bs, 0, 1); /* seq_scaling_matrix_present_flag */ + } + + bitstream_put_ue(bs, m_seq_param.seq_fields.bits.log2_max_frame_num_minus4); /* log2_max_frame_num_minus4 */ + bitstream_put_ue(bs, m_seq_param.seq_fields.bits.pic_order_cnt_type); /* pic_order_cnt_type */ + + if (m_seq_param.seq_fields.bits.pic_order_cnt_type == 0) + bitstream_put_ue(bs, + m_seq_param.seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4); /* log2_max_pic_order_cnt_lsb_minus4 */ + else { + //assert(0); + } + + bitstream_put_ue(bs, m_seq_param.max_num_ref_frames); /* num_ref_frames */ + bitstream_put_ui(bs, 0, 1); /* gaps_in_frame_num_value_allowed_flag */ + + bitstream_put_ue(bs, m_seq_param.picture_width_in_mbs - 1); /* pic_width_in_mbs_minus1 */ + bitstream_put_ue(bs, m_seq_param.picture_height_in_mbs - 1); /* pic_height_in_map_units_minus1 */ + bitstream_put_ui(bs, m_seq_param.seq_fields.bits.frame_mbs_only_flag, 1); /* frame_mbs_only_flag */ + + if (!m_seq_param.seq_fields.bits.frame_mbs_only_flag) { + // assert(0); + } + + bitstream_put_ui(bs, m_seq_param.seq_fields.bits.direct_8x8_inference_flag, + 1); /* direct_8x8_inference_flag */ + bitstream_put_ui(bs, m_seq_param.frame_cropping_flag, 1); /* frame_cropping_flag */ + + if (m_seq_param.frame_cropping_flag) { + bitstream_put_ue(bs, m_seq_param.frame_crop_left_offset); /* frame_crop_left_offset */ + bitstream_put_ue(bs, m_seq_param.frame_crop_right_offset); /* frame_crop_right_offset */ + bitstream_put_ue(bs, m_seq_param.frame_crop_top_offset); /* frame_crop_top_offset */ + bitstream_put_ue(bs, m_seq_param.frame_crop_bottom_offset); /* frame_crop_bottom_offset */ + } + + //if ( frame_bit_rate < 0 ) { //TODO EW: the vui header isn't correct + //if (!m_seq_param.vui_parameters_present_flag) { + if (1) { + bitstream_put_ui(bs, 0, 1); /* vui_parameters_present_flag */ + } else { + bitstream_put_ui(bs, m_seq_param.vui_parameters_present_flag, 1); /* vui_parameters_present_flag */ + bitstream_put_ui(bs, m_seq_param.vui_fields.bits.aspect_ratio_info_present_flag, 1); /* aspect_ratio_info_present_flag */ + bitstream_put_ui(bs, 0, 1); /* overscan_info_present_flag */ + bitstream_put_ui(bs, 0, 1); /* video_signal_type_present_flag */ + bitstream_put_ui(bs, 0, 1); /* chroma_loc_info_present_flag */ + bitstream_put_ui(bs, m_seq_param.vui_fields.bits.timing_info_present_flag, 1); /* timing_info_present_flag */ + if(m_seq_param.vui_fields.bits.timing_info_present_flag){ + bitstream_put_ui(bs, 15, 32); + bitstream_put_ui(bs, 50, 32); + bitstream_put_ui(bs, m_seq_param.vui_fields.bits.fixed_frame_rate_flag, 1); + } + bitstream_put_ui(bs, 0, 1); /* nal_hrd_parameters_present_flag */ + if(0){ 
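+            // Dead branch kept for reference: this is the NAL HRD parameter
+            // layout that would be written if CBR signalling were enabled.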
+ // hrd_parameters + bitstream_put_ue(bs, 0); /* cpb_cnt_minus1 */ + bitstream_put_ui(bs, 4, 4); /* bit_rate_scale */ + bitstream_put_ui(bs, 6, 4); /* cpb_size_scale */ + + bitstream_put_ue(bs, m_ymd.frame_bitrate - 1); /* bit_rate_value_minus1[0] */ + bitstream_put_ue(bs, m_ymd.frame_bitrate * 8 - 1); /* cpb_size_value_minus1[0] */ + bitstream_put_ui(bs, 1, 1); /* cbr_flag[0] */ + + bitstream_put_ui(bs, 23, 5); /* initial_cpb_removal_delay_length_minus1 */ + bitstream_put_ui(bs, 23, 5); /* cpb_removal_delay_length_minus1 */ + bitstream_put_ui(bs, 23, 5); /* dpb_output_delay_length_minus1 */ + bitstream_put_ui(bs, 23, 5); /* time_offset_length */ + } + bitstream_put_ui(bs, 0, 1); /* vcl_hrd_parameters_present_flag */ + bitstream_put_ui(bs, 0, 1); /* low_delay_hrd_flag */ + + bitstream_put_ui(bs, 0, 1); /* pic_struct_present_flag */ + bitstream_put_ui(bs, 0, 1); /* bitstream_restriction_flag */ + } + + rbsp_trailing_bits(bs); /* rbsp_trailing_bits */ +} + +int32_t YangH264EncoderIntel::build_packed_pic_buffer( + uint8_t **header_buffer) { + bitstream bs; + + bitstream_start(&bs); + nal_start_code_prefix(&bs); + nal_header(&bs, NAL_REF_IDC_HIGH, NAL_PPS); + pps_rbsp(&bs); + bitstream_end(&bs); + + *header_buffer = (uint8_t *) bs.buffer; + return bs.bit_offset; +} + +int32_t YangH264EncoderIntel::build_packed_slice_buffer( + uint8_t **header_buffer) { + bitstream bs; + int32_t is_idr = !!m_pic_param.pic_fields.bits.idr_pic_flag; + int32_t is_ref = !!m_pic_param.pic_fields.bits.reference_pic_flag; + + bitstream_start(&bs); + nal_start_code_prefix(&bs); + + if (IS_I_SLICE(m_slice_param.slice_type)) { + nal_header(&bs, NAL_REF_IDC_HIGH, is_idr ? NAL_IDR : NAL_NON_IDR); + } else if (IS_P_SLICE(m_slice_param.slice_type)) { + nal_header(&bs, NAL_REF_IDC_MEDIUM, NAL_NON_IDR); + } else { + //assert(IS_B_SLICE(slice_param.slice_type)); + nal_header(&bs, is_ref ? 
NAL_REF_IDC_LOW : NAL_REF_IDC_NONE, + NAL_NON_IDR); + } + + slice_header(&bs); + bitstream_end(&bs); + + *header_buffer = (uint8_t *) bs.buffer; + return bs.bit_offset; +} + +int32_t YangH264EncoderIntel::build_packed_seq_buffer( + uint8_t **header_buffer) { + bitstream bs; + + bitstream_start(&bs); + nal_start_code_prefix(&bs); + nal_header(&bs, NAL_REF_IDC_HIGH, NAL_SPS); + sps_rbsp(&bs); + bitstream_end(&bs); + + *header_buffer = (uint8_t *) bs.buffer; + return bs.bit_offset; +} + +void YangH264EncoderIntel::pps_rbsp(bitstream *bs) { + bitstream_put_ue(bs, m_pic_param.pic_parameter_set_id); /* m_pic_parameter_set_id */ + bitstream_put_ue(bs, m_pic_param.seq_parameter_set_id); /* seq_parameter_set_id */ + + bitstream_put_ui(bs, m_pic_param.pic_fields.bits.entropy_coding_mode_flag, + 1); /* entropy_coding_mode_flag */ + + bitstream_put_ui(bs, 0, 1); /* pic_order_present_flag: 0 */ + + bitstream_put_ue(bs, 0); /* num_slice_groups_minus1 */ + + bitstream_put_ue(bs, m_pic_param.num_ref_idx_l0_active_minus1); /* num_ref_idx_l0_active_minus1 */ + bitstream_put_ue(bs, m_pic_param.num_ref_idx_l1_active_minus1); /* num_ref_idx_l1_active_minus1 1 */ + + bitstream_put_ui(bs, m_pic_param.pic_fields.bits.weighted_pred_flag, 1); /* weighted_pred_flag: 0 */ + bitstream_put_ui(bs, m_pic_param.pic_fields.bits.weighted_bipred_idc, 2); /* weighted_bipred_idc: 0 */ + + bitstream_put_se(bs, m_pic_param.pic_init_qp - 26); /* pic_init_qp_minus26 */ + bitstream_put_se(bs, 0); /* pic_init_qs_minus26 */ + bitstream_put_se(bs, 0); /* chroma_qp_index_offset */ + + bitstream_put_ui(bs, + m_pic_param.pic_fields.bits.deblocking_filter_control_present_flag, + 1); /* deblocking_filter_control_present_flag */ + bitstream_put_ui(bs, 0, 1); /* constrained_intra_pred_flag */ + bitstream_put_ui(bs, 0, 1); /* redundant_pic_cnt_present_flag */ + + /* more_rbsp_data */ + bitstream_put_ui(bs, m_pic_param.pic_fields.bits.transform_8x8_mode_flag, + 1); /*transform_8x8_mode_flag */ + bitstream_put_ui(bs, 0, 1); /* pic_scaling_matrix_present_flag */ + bitstream_put_se(bs, m_pic_param.second_chroma_qp_index_offset); /*second_chroma_qp_index_offset */ + + rbsp_trailing_bits(bs); +} + +void YangH264EncoderIntel::slice_header(bitstream *bs) { + int32_t first_mb_in_slice = m_slice_param.macroblock_address; + + bitstream_put_ue(bs, first_mb_in_slice); /* first_mb_in_slice: 0 */ + bitstream_put_ue(bs, m_slice_param.slice_type); /* slice_type */ + bitstream_put_ue(bs, m_slice_param.pic_parameter_set_id); /* m_pic_parameter_set_id: 0 */ + bitstream_put_ui(bs, m_pic_param.frame_num, + m_seq_param.seq_fields.bits.log2_max_frame_num_minus4 + 4); /* frame_num */ + + /* frame_mbs_only_flag == 1 */ + if (!m_seq_param.seq_fields.bits.frame_mbs_only_flag) { + /* FIXME: */ + // assert(0); + } + + if (m_pic_param.pic_fields.bits.idr_pic_flag) + bitstream_put_ue(bs, m_slice_param.idr_pic_id); /* idr_pic_id: 0 */ + + if (m_seq_param.seq_fields.bits.pic_order_cnt_type == 0) { + bitstream_put_ui(bs, m_pic_param.CurrPic.TopFieldOrderCnt, + m_seq_param.seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 + + 4); + /* pic_order_present_flag == 0 */ + } else { + /* FIXME: */ + // assert(0); + } + + /* redundant_pic_cnt_present_flag == 0 */ + /* slice type */ + if (IS_P_SLICE(m_slice_param.slice_type)) { + bitstream_put_ui(bs, m_slice_param.num_ref_idx_active_override_flag, 1); /* num_ref_idx_active_override_flag: */ + + if (m_slice_param.num_ref_idx_active_override_flag) + bitstream_put_ue(bs, m_slice_param.num_ref_idx_l0_active_minus1); + + /* 
ref_pic_list_reordering */ + bitstream_put_ui(bs, 0, 1); /* ref_pic_list_reordering_flag_l0: 0 */ + } else if (IS_B_SLICE(m_slice_param.slice_type)) { + bitstream_put_ui(bs, m_slice_param.direct_spatial_mv_pred_flag, 1); /* direct_spatial_mv_pred: 1 */ + + bitstream_put_ui(bs, m_slice_param.num_ref_idx_active_override_flag, 1); /* num_ref_idx_active_override_flag: */ + + if (m_slice_param.num_ref_idx_active_override_flag) { + bitstream_put_ue(bs, m_slice_param.num_ref_idx_l0_active_minus1); + bitstream_put_ue(bs, m_slice_param.num_ref_idx_l1_active_minus1); + } + + /* ref_pic_list_reordering */ + bitstream_put_ui(bs, 0, 1); /* ref_pic_list_reordering_flag_l0: 0 */ + bitstream_put_ui(bs, 0, 1); /* ref_pic_list_reordering_flag_l1: 0 */ + } + + if ((m_pic_param.pic_fields.bits.weighted_pred_flag + && IS_P_SLICE(m_slice_param.slice_type)) + || ((m_pic_param.pic_fields.bits.weighted_bipred_idc == 1) + && IS_B_SLICE(m_slice_param.slice_type))) { + /* FIXME: fill weight/offset table */ + // assert(0); + } + + /* dec_ref_pic_marking */ + if (m_pic_param.pic_fields.bits.reference_pic_flag) { /* nal_ref_idc != 0 */ + uint8_t no_output_of_prior_pics_flag = 0; + uint8_t long_term_reference_flag = 0; + uint8_t adaptive_ref_pic_marking_mode_flag = 0; + + if (m_pic_param.pic_fields.bits.idr_pic_flag) { + bitstream_put_ui(bs, no_output_of_prior_pics_flag, 1); /* no_output_of_prior_pics_flag: 0 */ + bitstream_put_ui(bs, long_term_reference_flag, 1); /* long_term_reference_flag: 0 */ + } else { + bitstream_put_ui(bs, adaptive_ref_pic_marking_mode_flag, 1); /* adaptive_ref_pic_marking_mode_flag: 0 */ + } + } + + if (m_pic_param.pic_fields.bits.entropy_coding_mode_flag + && !IS_I_SLICE(m_slice_param.slice_type)) + bitstream_put_ue(bs, m_slice_param.cabac_init_idc); /* cabac_init_idc: 0 */ + + bitstream_put_se(bs, m_slice_param.slice_qp_delta); /* slice_qp_delta: 0 */ + + /* ignore for SP/SI */ + + if (m_pic_param.pic_fields.bits.deblocking_filter_control_present_flag) { + bitstream_put_ue(bs, m_slice_param.disable_deblocking_filter_idc); /* disable_deblocking_filter_idc: 0 */ + + if (m_slice_param.disable_deblocking_filter_idc != 1) { + bitstream_put_se(bs, m_slice_param.slice_alpha_c0_offset_div2); /* slice_alpha_c0_offset_div2: 2 */ + bitstream_put_se(bs, m_slice_param.slice_beta_offset_div2); /* slice_beta_offset_div2: 2 */ + } + } + + if (m_pic_param.pic_fields.bits.entropy_coding_mode_flag) { + bitstream_byte_aligning(bs, 1); + } +} +#endif +#endif diff --git a/libmetartc3/src/yangencoder/YangH264EncoderMeta.cpp b/libmetartc3/src/yangencoder/YangH264EncoderMeta.cpp new file mode 100755 index 00000000..5e143303 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangH264EncoderMeta.cpp @@ -0,0 +1,121 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangH264EncoderMeta.h" +#include +#include +#include +#include +#include +#include "YangH264EncoderSoft.h" +YangH264EncoderMeta::YangH264EncoderMeta() { + m_lib.loadObject("libx264"); + loadLib(); +} +YangH264EncoderMeta::~YangH264EncoderMeta() { + unloadLib(); + m_lib.unloadObject(); +} +void YangH264EncoderMeta::loadLib() { + yang_x264_param_default = (void (*)(x264_param_t*)) m_lib.loadFunction( + "x264_param_default"); + yang_x264_param_default_preset = (int32_t (*)(x264_param_t*, const char *preset, + const char *tune)) m_lib.loadFunction("x264_param_default_preset"); + char s[30]; + memset(s, 0, 30); + sprintf(s, "x264_encoder_open_%d", X264_BUILD); + yang_x264_encoder_open = (x264_t* (*)(x264_param_t*)) m_lib.loadFunction(s); + 
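// x264 exports its open entry point as x264_encoder_open_<X264_BUILD>, so the
+    // symbol name is composed with sprintf above before being resolved. +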
yang_x264_param_apply_profile = + (int32_t (*)(x264_param_t*, const char *profile)) m_lib.loadFunction( + "x264_param_apply_profile"); + yang_x264_picture_alloc = (int32_t (*)(x264_picture_t *pic, int32_t i_csp, + int32_t i_width, int32_t i_height)) m_lib.loadFunction( + "x264_picture_alloc"); + yang_x264_encoder_headers = (int32_t (*)(x264_t*, x264_nal_t **pp_nal, + int32_t *pi_nal)) m_lib.loadFunction("x264_encoder_headers"); + yang_x264_picture_clean = + (void (*)(x264_picture_t *pic)) m_lib.loadFunction( + "x264_picture_clean"); + yang_x264_encoder_close = (void (*)(x264_t*)) m_lib.loadFunction( + "x264_encoder_close"); +} +void YangH264EncoderMeta::unloadLib() { + yang_x264_param_default_preset = NULL; + yang_x264_encoder_open = NULL; + yang_x264_param_default = NULL; + yang_x264_param_apply_profile = NULL; + yang_x264_picture_alloc = NULL; + yang_x264_encoder_headers = NULL; + yang_x264_picture_clean = NULL; + yang_x264_encoder_close = NULL; +} +#define HEX2BIN(a) (((a)&0x40)?((a)&0xf)+9:((a)&0xf)) + +void YangH264EncoderMeta::yang_getSpsPps(YangH2645Conf *p264, + YangVideoInfo *config, YangVideoEncInfo *penc) { + x264_t *p264Handle = NULL; + x264_param_t *param = new x264_param_t(); + if (penc->preset < 5) + yang_x264_param_default_preset(param, x264_preset_names[penc->preset], + "zerolatency"); + else + yang_x264_param_default(param); + + YangH264EncoderSoft::initX264Param(config, penc, param); + yang_x264_param_apply_profile(param, x264_profile_names[0]); + if ((p264Handle = yang_x264_encoder_open(param)) == NULL) { + printf("x264_encoder_open failed/n"); +#ifdef _MSC_VER + ExitProcess(1); +#else + _exit(1); + #endif + + } + int32_t iNal = 0; + x264_nal_t *p264Nal = NULL; + + yang_x264_encoder_headers(p264Handle, &p264Nal, &iNal); + + + for (int32_t i = 0; i < iNal; ++i) { + switch (p264Nal[i].i_type) { + case NAL_SPS: + + p264->spsLen = p264Nal[i].i_payload - 4; + //spsBuf + memcpy(p264->sps, p264Nal[i].p_payload + 4, p264->spsLen); + // put_be32((char*)spsBuf,sps_len); + + break; + case NAL_PPS: + p264->ppsLen = p264Nal[i].i_payload - 4; + + memcpy(p264->pps, p264Nal[i].p_payload + 4, p264->ppsLen); + // put_be32((char*)ppsBuf,pps_len); + + break; + default: + break; + } + } + + yang_x264_encoder_close(p264Handle); + p264Handle = NULL; + delete param; + param = NULL; + p264Nal = NULL; + +} + +void YangH264EncoderMeta::yang_initVmd(YangVideoMeta *p_vmd, + YangVideoInfo *p_config, YangVideoEncInfo *penc) { + if (!p_vmd->isInit) { + yang_getSpsPps(&p_vmd->mp4Meta, p_config, penc); + yang_getConfig_Flv_H264(&p_vmd->mp4Meta, p_vmd->livingMeta.buffer, + &p_vmd->livingMeta.bufLen); + + p_vmd->isInit = 1; + } +} + diff --git a/libmetartc3/src/yangencoder/YangH264EncoderMeta.h b/libmetartc3/src/yangencoder/YangH264EncoderMeta.h new file mode 100755 index 00000000..36f28ed6 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangH264EncoderMeta.h @@ -0,0 +1,37 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangH264VmdHandle__ +#define __YangH264VmdHandle__ +#include "yangutil/sys/YangLoadLib.h" +#include +#include +#include + +#include "x264.h" +class YangH264EncoderMeta:public YangVideoEncoderMeta{ +public: + YangH264EncoderMeta(); + ~YangH264EncoderMeta(); + void yang_initVmd(YangVideoMeta *p_vmd, YangVideoInfo *p_config, YangVideoEncInfo *penc); +private: + void yang_getH264Config(uint8_t *p_configBuf, int32_t *p_configLen, + YangVideoInfo *p_config); + //void yang_getH264Config_Flv(YangH2645Conf *p_264, uint8_t *configBuf, int32_t *p_configLen); + //void 
yang_getH264Config_1(YangVideoParam *p_config, YangH2645Conf *p264); + void yang_getSpsPps(YangH2645Conf *p264, YangVideoInfo *config, YangVideoEncInfo *penc); + + YangLoadLib m_lib; + void loadLib(); + void unloadLib(); + void (*yang_x264_param_default)(x264_param_t*); + int32_t (* yang_x264_param_default_preset)( x264_param_t *, const char *preset, const char *tune ); + int32_t (*yang_x264_param_apply_profile)(x264_param_t*, const char *profile); + int32_t (*yang_x264_picture_alloc)(x264_picture_t *pic, int32_t i_csp, int32_t i_width, int32_t i_height); + int32_t (*yang_x264_encoder_headers)(x264_t*, x264_nal_t **pp_nal, int32_t *pi_nal); + void (*yang_x264_picture_clean)(x264_picture_t *pic); + void (*yang_x264_encoder_close)(x264_t*); + x264_t* (*yang_x264_encoder_open)(x264_param_t*); +}; + +#endif diff --git a/libmetartc3/src/yangencoder/YangH264EncoderSoft.cpp b/libmetartc3/src/yangencoder/YangH264EncoderSoft.cpp new file mode 100755 index 00000000..1f4568f6 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangH264EncoderSoft.cpp @@ -0,0 +1,220 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangH264EncoderSoft.h" +#include +#include +#include + + +#define HEX2BIN(a) (((a)&0x40)?((a)&0xf)+9:((a)&0xf)) + +int32_t YangH264EncoderSoft::hex2bin(char *str, char **hex) { + char *ptr; + int32_t i, l = strlen(str); + if (l & 1) + return 0; + *hex = (char*) malloc(l / 2); + ptr = *hex; + if (!ptr) + return 0; + for (i = 0; i < l; i += 2) + *ptr++ = (HEX2BIN(str[i]) << 4) | HEX2BIN(str[i + 1]); + return l / 2; +} + +void YangH264EncoderSoft::loadLib() { + yang_x264_param_default = (void (*)(x264_param_t*)) m_lib.loadFunction( + "x264_param_default"); + yang_x264_param_default_preset = (int32_t (*)(x264_param_t*, const char *preset, + const char *tune)) m_lib.loadFunction("x264_param_default_preset"); + yang_x264_param_apply_profile = + (int32_t (*)(x264_param_t*, const char *profile)) m_lib.loadFunction( + "x264_param_apply_profile"); + char s[30]; + memset(s, 0, 30); + sprintf(s, "x264_encoder_open_%d", X264_BUILD); + yang_x264_encoder_open = (x264_t* (*)(x264_param_t*)) m_lib.loadFunction(s); + yang_x264_picture_alloc = (int32_t (*)(x264_picture_t *pic, int32_t i_csp, + int32_t i_width, int32_t i_height)) m_lib.loadFunction( + "x264_picture_alloc"); + yang_x264_encoder_encode = + (int32_t (*)(x264_t*, x264_nal_t **pp_nal, int32_t *pi_nal, + x264_picture_t *pic_in, x264_picture_t *pic_out)) m_lib.loadFunction( + "x264_encoder_encode"); + yang_x264_picture_clean = + (void (*)(x264_picture_t *pic)) m_lib.loadFunction( + "x264_picture_clean"); + yang_x264_encoder_close = (void (*)(x264_t*)) m_lib.loadFunction( + "x264_encoder_close"); +} +void YangH264EncoderSoft::unloadLib() { + yang_x264_param_default_preset = NULL; + yang_x264_param_default = NULL; + yang_x264_param_apply_profile = NULL; + yang_x264_picture_alloc = NULL; + yang_x264_encoder_encode = NULL; + yang_x264_picture_clean = NULL; + yang_x264_encoder_close = NULL; + yang_x264_encoder_open = NULL; +} +YangH264EncoderSoft::YangH264EncoderSoft() { + m_nal = NULL; + m_264Nal = NULL; + m_sendKeyframe=0; + m_264Pic = NULL; + m_264Handle = NULL; + nal_len = 0; + destLength = 0; + m_i264Nal = 0; + nal_len = 0; + m_hasHeader=false; + unloadLib(); +} + +YangH264EncoderSoft::~YangH264EncoderSoft(void) { + yang_x264_picture_clean(m_264Pic); + yang_x264_encoder_close(m_264Handle); + m_264Pic = NULL; + m_264Handle = NULL; + m_264Nal = NULL; + + m_nal = NULL; + unloadLib(); + m_lib.unloadObject(); + +} +void 
YangH264EncoderSoft::sendKeyFrame(){ + m_sendKeyframe=1; +} + +void YangH264EncoderSoft::setVideoMetaData(YangVideoMeta *pvmd) { + +} +void YangH264EncoderSoft::initX264Param(YangVideoInfo *pvp, + YangVideoEncInfo *penc, x264_param_t *param) { + param->i_width = pvp->outWidth; //set frame width + param->i_height = pvp->outHeight; //set frame height + param->i_bframe = 0; + param->b_cabac = 0; + param->i_bitdepth = pvp->bitDepth; + + + param->b_interlaced = 0; + //param->rc.i_rc_method = X264_RC_ABR; //X264_RC_CQP ;////X264_RC_CQP X264_RC_CRF + param->i_level_idc = penc->level_idc; + //param->rc.i_bitrate = pvp->rate;//512; + + if (penc->preset > 4) { + param->rc.i_qp_min = 10; + param->rc.i_qp_max = 30; + param->rc.i_qp_constant = 26; + } + + param->i_fps_num = pvp->frame; //30; + param->i_fps_den = 1; + param->i_keyint_max = pvp->frame * 6; + param->i_keyint_min = pvp->frame; + param->i_log_level = X264_LOG_ERROR; //X264_LOG_NONE;//X264_LOG_DEBUG;//X264_LOG_NONE; + param->i_threads=penc->enc_threads; + + param->rc.b_mb_tree = 0; + +} +int32_t YangH264EncoderSoft::init(YangVideoInfo *pvp, YangVideoEncInfo *penc) { + if (m_isInit == 1) return Yang_Ok; + m_lib.loadObject("libx264"); + loadLib(); + setVideoPara(pvp, penc); + x264_param_t *param = new x264_param_t(); + if (penc->preset < 5) + yang_x264_param_default_preset(param, x264_preset_names[penc->preset], + "zerolatency"); + else + yang_x264_param_default(param); + //if (penc->preset < 5) + // yang_x264_param_default_preset(param, x264_preset_names[penc->preset], x264_tune_names[7]); + yang_x264_param_apply_profile(param, x264_profile_names[0]); + initX264Param(pvp, penc, param); + m_hasHeader=!penc->createMeta; + param->b_repeat_headers=m_hasHeader?1:0; + + m_264Pic = new x264_picture_t(); + memset(m_264Pic, 0, sizeof(x264_picture_t)); + //set default param + param->b_sliced_threads = 0; + //param->i_threads = penc->enc_threads; + if ((m_264Handle = yang_x264_encoder_open(param)) == NULL) { + printf("RE init x264_encoder_open failed\n"); + exit(1); + } + int32_t x264Format = pvp->bitDepth == 8 ? 
X264_CSP_I420 : X264_CSP_HIGH_DEPTH; + //x264_picture_alloc + yang_x264_picture_alloc(m_264Pic, x264Format,param->i_width, param->i_height); + m_264Pic->i_type = X264_TYPE_AUTO; + + m_isInit = 1; + delete param; + param = NULL; + return Yang_Ok; + +} + +int32_t YangH264EncoderSoft::encode(YangFrame* pframe, YangEncoderCallback* pcallback) { + int nalutype=0; + bool isKeyFrame=false; + memcpy(m_264Pic->img.plane[0], pframe->payload,pframe->nb); + //m_nal = m_vbuffer; + destLength = 0; + int32_t frametype = YANG_Frametype_P; + if(m_sendKeyframe==1) { + m_sendKeyframe=2; + m_264Pic->i_type = X264_TYPE_IDR;//X264_TYPE_AUTO; + } + if (yang_x264_encoder_encode(m_264Handle, &m_264Nal, &m_i264Nal, m_264Pic, + &pic_out) < 0) { + fprintf( stderr, "x264_encoder_encode failed/n"); + } + + for (int32_t i = 0; i < m_i264Nal; i++) { + m_nal = m_264Nal[i].p_payload; + nalutype=m_nal[4]& kNalTypeMask; + if ((m_nal[3]& kNalTypeMask) == YangAvcNaluTypeIDR) { + nal_len = 3; + isKeyFrame=true; + + } else { + if ((m_nal[4] & 0x60) == 0 || nalutype == YangAvcNaluTypeSPS || nalutype == YangAvcNaluTypePPS){ + if(!m_hasHeader) continue; + isKeyFrame=true; + } + nal_len = 4; + + } + + if(m_hasHeader&&isKeyFrame) { + yang_put_be32((char*)(m_vbuffer + destLength),(uint32_t)(m_264Nal[i].i_payload-4)); + memcpy(m_vbuffer + destLength+4, m_nal + nal_len, m_264Nal[i].i_payload - nal_len); + destLength += (m_264Nal[i].i_payload - nal_len+4); + }else{ + memcpy(m_vbuffer + destLength, m_nal + nal_len, m_264Nal[i].i_payload - nal_len); + destLength += (m_264Nal[i].i_payload - nal_len); + } + if (m_264Nal[i].i_type == NAL_SLICE_IDR) frametype = YANG_Frametype_I; + + + } + pframe->payload=m_vbuffer; + pframe->frametype=frametype; + pframe->nb=destLength; + if(pcallback) pcallback->onVideoData(pframe); + if(m_sendKeyframe==2) { + m_264Pic->i_type = X264_TYPE_AUTO;//X264_TYPE_AUTO; + m_sendKeyframe=0; + yang_trace("\n********************sendkey.......................frametype==%d\n",frametype); + } + + return 1; +} + + diff --git a/libmetartc3/src/yangencoder/YangH264EncoderSoft.h b/libmetartc3/src/yangencoder/YangH264EncoderSoft.h new file mode 100755 index 00000000..1b1cb955 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangH264EncoderSoft.h @@ -0,0 +1,58 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef __YangH264EncoderSoft__ +#define __YangH264EncoderSoft__ +#include +#include +#include +#include +#include +#include + + + +class YangH264EncoderSoft: public YangVideoEncoder { +public: + YangH264EncoderSoft(); + ~YangH264EncoderSoft(void); + static void initX264Param(YangVideoInfo *pvp,YangVideoEncInfo *penc,x264_param_t *param); + int32_t init(YangVideoInfo *pvp,YangVideoEncInfo *penc); + + void setVideoMetaData(YangVideoMeta *pvmd); + void parseRtmpHeader(uint8_t *p, int32_t pLen, int32_t *pwid, int32_t *phei, int32_t *pfps); + void sendKeyFrame(); +private: + + int32_t m_sendKeyframe; + int32_t encode(YangFrame* pframe, YangEncoderCallback* pcallback); +protected: + int32_t hex2bin(char *str, char **hex); + + uint8_t *m_nal; + int32_t nal_len, destLength; + x264_nal_t *m_264Nal; + int32_t m_i264Nal; + x264_picture_t *m_264Pic; + x264_t *m_264Handle; + x264_picture_t pic_out; + YangLoadLib m_lib; + + void saveFile(char *fileName, uint8_t *pBuffer, int32_t BufferLen); + +private: + + bool m_hasHeader; + + void loadLib(); + void unloadLib(); + void (*yang_x264_param_default)( x264_param_t * ); + int32_t (*yang_x264_param_default_preset)( x264_param_t *, const char *preset, const char *tune ); + int32_t 
(*yang_x264_param_apply_profile)( x264_param_t *, const char *profile ); + int32_t (*yang_x264_picture_alloc)( x264_picture_t *pic, int32_t i_csp, int32_t i_width, int32_t i_height ); + int32_t (*yang_x264_encoder_encode)( x264_t *, x264_nal_t **pp_nal, int32_t *pi_nal, x264_picture_t *pic_in, x264_picture_t *pic_out ); + void (*yang_x264_picture_clean)( x264_picture_t *pic ); + void (*yang_x264_encoder_close)( x264_t * ); + x264_t* (*yang_x264_encoder_open)( x264_param_t * ); +}; +#endif diff --git a/libmetartc3/src/yangencoder/YangH265EncoderMeta.cpp b/libmetartc3/src/yangencoder/YangH265EncoderMeta.cpp new file mode 100755 index 00000000..0979bb01 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangH265EncoderMeta.cpp @@ -0,0 +1,119 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangH265EncoderMeta.h" +#include +#include +#include "YangH265EncoderSoft.h" + +YangH265EncoderMeta::YangH265EncoderMeta() { + + m_lib.loadObject("libx265"); + loadLib(); +} + +YangH265EncoderMeta::~YangH265EncoderMeta() { + unloadLib(); + m_lib.unloadObject(); +} + +void YangH265EncoderMeta::loadLib() { + + yang_x265_param_default = (void (*)(x265_param*)) m_lib.loadFunction( + "x265_param_default"); + yang_x265_param_default_preset=(int32_t (*)(x265_param *, const char *preset, const char *tune))m_lib.loadFunction("x265_param_default_preset"); + char s[30]; + memset(s, 0, 30); + sprintf(s, "x265_encoder_open_%d", X265_BUILD); + //sprintf(s,"x265_encoder_open"); + yang_x265_encoder_open = + (x265_encoder* (*)(x265_param*)) m_lib.loadFunction(s); + yang_x265_param_alloc = (x265_param* (*)(void)) m_lib.loadFunction( + "x265_param_alloc"); + yang_x265_param_free = (void (*)(x265_param*)) m_lib.loadFunction( + "x265_param_free"); + yang_x265_param_apply_profile = + (int32_t (*)(x265_param*, const char *profile)) m_lib.loadFunction( + "x265_param_apply_profile"); +// yang_x265_picture_alloc=(int32_t (*)( x265_picture *pic, int32_t i_csp, int32_t i_width, int32_t i_height ))m_lib.loadFunction("x265_picture_alloc"); + yang_x265_encoder_headers = (int32_t (*)(x265_encoder*, x265_nal **pp_nal, + int32_t *pi_nal)) m_lib.loadFunction("x265_encoder_headers"); + //yang_x265_picture_clean=(void (*)( x265_picture_t *pic ))m_lib.loadFunction("x265_picture_clean"); + yang_x265_encoder_close = (void (*)(x265_encoder*)) m_lib.loadFunction( + "x265_encoder_close"); +} +void YangH265EncoderMeta::unloadLib() { + yang_x265_param_default_preset=NULL; + yang_x265_encoder_open = NULL; + yang_x265_param_alloc = NULL; + yang_x265_param_free = NULL; + yang_x265_param_default = NULL; + yang_x265_param_apply_profile = NULL; + //yang_x265_picture_alloc=NULL; + yang_x265_encoder_headers = NULL; + //yang_x265_picture_clean=NULL; + yang_x265_encoder_close = NULL; +} +#define HEX2BIN(a) (((a)&0x40)?((a)&0xf)+9:((a)&0xf)) +; + +void YangH265EncoderMeta::yang_getSpsPps(YangH2645Conf *p264, + YangVideoInfo *p_yvp, YangVideoEncInfo *penc) { + x265_encoder *p265Handle = NULL; + x265_param *param = yang_x265_param_alloc(); + yang_x265_param_default(param); //set default param + if(penc->preset<5) yang_x265_param_default_preset(param,x265_preset_names[penc->preset],x265_tune_names[3]); + YangH265EncoderSoft::initX265Param(p_yvp,penc,param); + yang_x265_param_apply_profile(param, x265_profile_names[0]); + if ((p265Handle = yang_x265_encoder_open(param)) == NULL) { + printf("x265_encoder_open failed/n"); + return; + } + int32_t iNal = 0; + x265_nal *p264Nal = NULL; +//int32_t iResult = + yang_x265_encoder_headers(p265Handle, &p264Nal, 
&iNal); +//spsBuf=new uint8_t[128]; + + for (int32_t i = 0; i < iNal; ++i) { + switch (p264Nal[i].type) { + case NAL_UNIT_VPS: + p264->vpsLen = p264Nal[i].sizeBytes - 4; + memcpy(p264->vps, p264Nal[i].payload + 4, p264->vpsLen); + + break; + case NAL_UNIT_SPS: + p264->spsLen = p264Nal[i].sizeBytes - 4; + memcpy(p264->sps, p264Nal[i].payload + 4, p264->spsLen); + + break; + case NAL_UNIT_PPS: + p264->ppsLen = p264Nal[i].sizeBytes - 4; + memcpy(p264->pps, p264Nal[i].payload + 4, p264->ppsLen); + + break; + default: + break; + } + } + + yang_x265_encoder_close(p265Handle); + p265Handle = NULL; + yang_x265_param_free(param); + param = NULL; + + p264Nal = NULL; + +} + + +void YangH265EncoderMeta::yang_initVmd(YangVideoMeta *p_vmd, + YangVideoInfo *p_config, YangVideoEncInfo *penc) { + if (!p_vmd->isInit) { + yang_getSpsPps(&p_vmd->mp4Meta, p_config,penc); + yang_getConfig_Flv_H265(&p_vmd->mp4Meta, p_vmd->livingMeta.buffer, + &p_vmd->livingMeta.bufLen); + // yang_getH265Config_Flv(&p_vmd->mp4Meta, p_vmd->flvMeta.buffer, &p_vmd->flvMeta.bufLen); + p_vmd->isInit = 1; + } +} diff --git a/libmetartc3/src/yangencoder/YangH265EncoderMeta.h b/libmetartc3/src/yangencoder/YangH265EncoderMeta.h new file mode 100755 index 00000000..a54d093d --- /dev/null +++ b/libmetartc3/src/yangencoder/YangH265EncoderMeta.h @@ -0,0 +1,40 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGENCODER_SRC_YANGH265ENCODERMETA_H_ +#define YANGENCODER_SRC_YANGH265ENCODERMETA_H_ +#include "yangutil/sys/YangLoadLib.h" +#include +#include + +#include "x265.h" +#include + +class YangH265EncoderMeta:public YangVideoEncoderMeta { +public: + YangH265EncoderMeta(); + virtual ~YangH265EncoderMeta(); + void yang_initVmd(YangVideoMeta *p_vmd, YangVideoInfo *p_config, YangVideoEncInfo *penc); +private: + void yang_getH265Config(uint8_t *p_configBuf, int32_t *p_configLen, + YangVideoInfo *p_config); + //void yang_getH265Config_Flv(YangH2645Conf *p_265, uint8_t *configBuf, int32_t *p_configLen); + //void yang_getH265Config_1(YangVideoParam *p_config, YangH2645Conf *p265); + void yang_getSpsPps(YangH2645Conf *p265, YangVideoInfo *config, YangVideoEncInfo *penc); + //void initParam(x265_param *param,YangVideoParam *yvp, int32_t p_rc_method); + YangLoadLib m_lib; + void loadLib(); + void unloadLib(); + x265_param *(*yang_x265_param_alloc)(void); + void (*yang_x265_param_free)(x265_param *); + void (*yang_x265_param_default)(x265_param*); + int32_t (*yang_x265_param_default_preset)(x265_param *, const char *preset, const char *tune); + int32_t (*yang_x265_param_apply_profile)(x265_param*, const char *profile); + //int32_t (*yang_x265_picture_alloc)(x265_picture_t *pic, int32_t i_csp, int32_t i_width, int32_t i_height); + int32_t (*yang_x265_encoder_headers)(x265_encoder*, x265_nal **pp_nal, int32_t *pi_nal); + //void (*yang_x265_picture_clean)(x265_picture *pic); + void (*yang_x265_encoder_close)(x265_encoder*); + x265_encoder* (*yang_x265_encoder_open)(x265_param*); +}; + +#endif /* YANGENCODER_SRC_YANGH265ENCODERMETA_H_ */ diff --git a/libmetartc3/src/yangencoder/YangH265EncoderSoft.cpp b/libmetartc3/src/yangencoder/YangH265EncoderSoft.cpp new file mode 100755 index 00000000..9739d2a5 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangH265EncoderSoft.cpp @@ -0,0 +1,220 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangH265EncoderSoft.h" +#include +#include +#include +#include + + + + +#define HEX2BIN(a) (((a)&0x40)?((a)&0xf)+9:((a)&0xf)) + +int32_t YangH265EncoderSoft::hex2bin(char *str, char **hex) { + char *ptr; + 
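// hex2bin packs two hex digits into each output byte; it returns 0 for
+    // odd-length input or a failed allocation, else the output byte count. +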
+	int32_t i, l = strlen(str);
+	if (l & 1)
+		return 0;
+	*hex = (char*) malloc(l / 2);
+	ptr = *hex;
+	if (!ptr)
+		return 0;
+	for (i = 0; i < l; i += 2)
+		*ptr++ = (HEX2BIN(str[i]) << 4) | HEX2BIN(str[i + 1]);
+	return l / 2;
+}
+
+void YangH265EncoderSoft::loadLib(){
+	yang_x265_param_default=(void (*)( x265_param * ))m_lib.loadFunction("x265_param_default");
+	yang_x265_param_apply_profile=(int32_t (*)( x265_param *, const char *profile ))m_lib.loadFunction("x265_param_apply_profile");
+	yang_x265_param_default_preset=(int32_t (*)(x265_param *, const char *preset, const char *tune))m_lib.loadFunction("x265_param_default_preset");
+	//x265 exports its encoder entry point with the API version baked into the
+	//symbol name (x265.h maps x265_encoder_open to x265_encoder_open_<X265_BUILD>)
+	char s[30];
+	memset(s,0,sizeof(s));
+	snprintf(s,sizeof(s),"x265_encoder_open_%d",X265_BUILD);
+	yang_x265_param_alloc=(x265_param *(*)(void))m_lib.loadFunction("x265_param_alloc");
+	yang_x265_param_free=(void (*)(x265_param *))m_lib.loadFunction("x265_param_free");
+	yang_x265_encoder_open=(x265_encoder* (*)( x265_param * ))m_lib.loadFunction(s);
+	yang_x265_picture_alloc=(x265_picture* (*)())m_lib.loadFunction("x265_picture_alloc");
+	yang_x265_picture_init=(void (*)(x265_param *param, x265_picture *pic))m_lib.loadFunction("x265_picture_init");
+	yang_x265_encoder_encode=(int32_t (*)( x265_encoder *, x265_nal **pp_nal,uint32_t *pi_nal, x265_picture *pic_in, x265_picture *pic_out ))m_lib.loadFunction("x265_encoder_encode");
+	yang_x265_picture_free=(void (*)( x265_picture *pic ))m_lib.loadFunction("x265_picture_free");
+	yang_x265_encoder_close=(void (*)( x265_encoder * ))m_lib.loadFunction("x265_encoder_close");
+}
+void YangH265EncoderSoft::unloadLib(){
+	yang_x265_param_default_preset=NULL;
+	yang_x265_param_alloc=NULL;
+	yang_x265_param_free=NULL;
+	yang_x265_param_default=NULL;
+	yang_x265_param_apply_profile=NULL;
+	yang_x265_picture_alloc=NULL;
+	yang_x265_picture_init=NULL;
+	yang_x265_encoder_encode=NULL;
+	yang_x265_picture_free=NULL;
+	yang_x265_encoder_close=NULL;
+	yang_x265_encoder_open=NULL;
+}
+YangH265EncoderSoft::YangH265EncoderSoft() {
+	m_nal = NULL;
+	m_265Nal = NULL;
+	m_265Pic = NULL;
+	m_265Handle = NULL;
+	nal_len = 0;
+	destLength = 0;
+	m_i265Nal = 0;
+	m_frametype=0;
+	m_sendKeyframe=0; //keyframe request flag, read by encode()
+	m_hasHeader=false;
+	m_buffer=NULL;
+	unloadLib();
+}
+
+YangH265EncoderSoft::~YangH265EncoderSoft(void) {
+	yang_free(m_buffer);
+	if (m_265Pic) {
+		//the planes alias m_buffer (freed above); detach them before
+		//handing the picture back to x265
+		m_265Pic->planes[0]=NULL;
+		m_265Pic->planes[1]=NULL;
+		m_265Pic->planes[2]=NULL;
+		if (yang_x265_picture_free) yang_x265_picture_free(m_265Pic);
+	}
+	if (m_265Handle && yang_x265_encoder_close) yang_x265_encoder_close(m_265Handle);
+	m_265Pic = NULL;
+	m_265Handle = NULL;
+	m_265Nal = NULL;
+
+	m_nal = NULL;
+
+	unloadLib();
+	m_lib.unloadObject();
+}
+void YangH265EncoderSoft::sendKeyFrame(){
+	m_sendKeyframe=1;
+}
+void YangH265EncoderSoft::setVideoMetaData(YangVideoMeta *pvmd) {
+
+}
+void YangH265EncoderSoft::initX265Param(YangVideoInfo *pvp,YangVideoEncInfo *penc,x265_param *param){
+
+	param->sourceBitDepth=pvp->bitDepth;
+	//param->bRepeatHeaders=0;//write sps/pps in-band
+	param->sourceWidth = pvp->outWidth; //set frame width
+	param->sourceHeight = pvp->outHeight; //set frame height
+	param->interlaceMode=0;
+
+	param->bframes = 0;
+
+	param->internalCsp=X265_CSP_I420;
+	param->levelIdc = penc->level_idc;//31;
+	//param->rc.bitrate = pvp->rate;
+	if(penc->preset>4){
+		param->rc.qpMin = 10;
+		param->rc.qpMax = 30;
+		//param->rc.qp = 26;
+	}
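+	// The settings below express the frame rate as the rational
+	// fpsNum/fpsDenom (e.g. 30/1) and cap the keyframe interval at three
+	// seconds' worth of frames, so a receiver joining mid-stream waits at
+	// most ~3 s for an IDR. The 3x multiplier is this encoder's choice,
+	// not an x265 requirement.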
+	param->fpsNum = pvp->frame; //e.g. 30
+	param->fpsDenom=1;
+
+	param->keyframeMax = pvp->frame * 3;
+	param->logLevel = X265_LOG_NONE;
+	//param->rc.b_mb_tree = 0;
+
+}
+int32_t YangH265EncoderSoft::init(YangVideoInfo *pvp,YangVideoEncInfo *penc) {
+	if (m_isInit == 1)
+		return Yang_Ok;
+	m_lib.loadObject("libx265");
+	loadLib();
+	setVideoPara(pvp,penc);
+	x265_param *param = yang_x265_param_alloc();
+	yang_x265_param_default(param); //set default param
+	if(penc->preset<5) yang_x265_param_default_preset(param,x265_preset_names[penc->preset],x265_tune_names[3]);
+	initX265Param(pvp,penc,param);
+	m_hasHeader=!penc->createMeta;
+	param->bRepeatHeaders=m_hasHeader?1:0;
+	int32_t profileIndex=0; //x265_profile_names: "main", "main10", "main12", ...
+	if(pvp->bitDepth==10) profileIndex=1;
+	if(pvp->bitDepth==12) profileIndex=2;
+
+	yang_x265_param_apply_profile(param, x265_profile_names[profileIndex]);
+	if ((m_265Handle = yang_x265_encoder_open(param)) == NULL) {
+		yang_x265_param_free(param);
+		return yang_error_wrap(1,"RE init x265_encoder_open failed");
+	}
+	//x265 owns the picture: x265_picture_alloc() here pairs with
+	//x265_picture_free() in the destructor
+	m_265Pic=yang_x265_picture_alloc();
+	yang_x265_picture_init(param,m_265Pic);
+	int32_t bitLen=param->sourceBitDepth==8?1:2;
+	int32_t fileLen=bitLen*param->sourceWidth*param->sourceHeight*3/2;
+	int32_t allLen=bitLen*param->sourceWidth*param->sourceHeight;
+	m_buffer=(uint8_t*)malloc(fileLen);
+
+	m_265Pic->planes[0]=m_buffer;
+	m_265Pic->planes[1]=m_buffer+allLen;
+	m_265Pic->planes[2]=m_buffer+allLen*5/4;
+	//x265 strides are in bytes, so scale by the sample size for 10/12-bit input
+	m_265Pic->stride[0]=bitLen*param->sourceWidth;
+	m_265Pic->stride[1]=bitLen*param->sourceWidth/2;
+	m_265Pic->stride[2]=bitLen*param->sourceWidth/2;
+
+	m_isInit = 1;
+	yang_x265_param_free(param);
+	param = NULL;
+	return Yang_Ok;
+
+}
+
+int32_t YangH265EncoderSoft::encode(YangFrame* pframe, YangEncoderCallback* pcallback){
+	memcpy(m_265Pic->planes[0], pframe->payload, pframe->nb);
+	destLength = 0;
+
+	m_frametype = YANG_Frametype_P;
+	if (m_sendKeyframe == 1) {
+		m_sendKeyframe = 2;
+		m_265Pic->sliceType = X265_TYPE_IDR; //X265_TYPE_AUTO;
+	}
+	if (yang_x265_encoder_encode(m_265Handle, &m_265Nal, &m_i265Nal, m_265Pic,
+			NULL) < 0) {
+		fprintf(stderr, "x265_encoder_encode failed\n");
+	}
+
+	for (uint32_t i = 0; i < m_i265Nal; i++) {
+		m_nal = m_265Nal[i].payload;
+		if (m_265Nal[i].type >= YANG_NAL_UNIT_CODED_SLICE_BLA && m_265Nal[i].type <= YANG_NAL_UNIT_CODED_SLICE_CRA)
+			m_frametype = YANG_Frametype_I;
+		//assumes every NAL carries a 4-byte Annex-B start code
+		memcpy(m_vbuffer + destLength, m_nal + 4, m_265Nal[i].sizeBytes - 4);
+		destLength += (m_265Nal[i].sizeBytes - 4);
+
+	}
+	pframe->payload = m_vbuffer;
+	pframe->frametype = m_frametype;
+	pframe->nb = destLength;
+
+	if (pcallback)
+		pcallback->onVideoData(pframe);
+	if (m_sendKeyframe == 2) {
+		m_265Pic->sliceType = X265_TYPE_AUTO;
+		m_sendKeyframe = 0;
+	}
+	return 1;
+}
+
diff --git a/libmetartc3/src/yangencoder/YangH265EncoderSoft.h b/libmetartc3/src/yangencoder/YangH265EncoderSoft.h
new file mode 100755
index 00000000..c44d68b0
--- /dev/null
+++ b/libmetartc3/src/yangencoder/YangH265EncoderSoft.h
@@ -0,0 +1,71 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+
+#ifndef YANGENCODER_SRC_YANGH265ENCODERSOFT_H_
+#define YANGENCODER_SRC_YANGH265ENCODERSOFT_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+class YangH265EncoderSoft : public
YangVideoEncoder { +public: + YangH265EncoderSoft(); + virtual ~YangH265EncoderSoft(); + static void initX265Param(YangVideoInfo *pvp,YangVideoEncInfo *penc,x265_param *param); + int32_t init(YangVideoInfo *pvp,YangVideoEncInfo *penc); + void setVideoMetaData(YangVideoMeta *pvmd); + void parseRtmpHeader(uint8_t *p, int32_t pLen, int32_t *pwid, int32_t *phei, int32_t *pfps); + void sendKeyFrame(); +private: + int32_t m_inWidth; + int32_t m_inHeight; + int32_t m_outWidth; + int32_t m_outHeight; + int32_t m_sendKeyframe; + + int32_t encode(YangFrame* pframe, YangEncoderCallback* pcallback); +protected: + bool m_hasHeader; + int32_t hex2bin(char *str, char **hex); + uint8_t *m_nal; + int32_t nal_len, destLength; + //void saveFile(char *fileName, uint8_t *pBuffer, int32_t BufferLen); + +private: + uint8_t *m_buffer; + + int32_t m_frametype; + + x265_nal *m_265Nal; + uint32_t m_i265Nal; + x265_picture *m_265Pic; + x265_encoder *m_265Handle; + +private: + YangLoadLib m_lib; + void loadLib(); + void unloadLib(); + x265_param* (*yang_x265_param_alloc)(void); + void (*yang_x265_param_free)(x265_param*); + void (*yang_x265_param_default)(x265_param*); + int32_t (*yang_x265_param_default_preset)(x265_param*, const char *preset, const char *tune); + int32_t (*yang_x265_param_apply_profile)(x265_param*, const char *profile); + + x265_picture* (*yang_x265_picture_alloc)(); + void (*yang_x265_picture_init)(x265_param *param, x265_picture *pic); + int32_t (*yang_x265_encoder_encode)(x265_encoder*, x265_nal **pp_nal,uint32_t *pi_nal, x265_picture *pic_in, x265_picture *pic_out); + void (*yang_x265_picture_free)(x265_picture *pic); + //x265_picture_free + void (*yang_x265_encoder_close)(x265_encoder*); + x265_encoder* (*yang_x265_encoder_open)(x265_param*); +}; + +#endif /* YANGENCODER_SRC_YANGH265ENCODERSOFT_H_ */ diff --git a/libmetartc3/src/yangencoder/YangVideoEncoder.cpp b/libmetartc3/src/yangencoder/YangVideoEncoder.cpp new file mode 100755 index 00000000..7d047587 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangVideoEncoder.cpp @@ -0,0 +1,23 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include +#include + +YangVideoEncoder::YangVideoEncoder() { + m_isInit=0; + memset(&m_videoInfo,0,sizeof(YangVideoInfo)); + m_vbuffer=new uint8_t[YANG_VIDEO_ENCODE_BUFFER_LEN]; +} + +YangVideoEncoder::~YangVideoEncoder() { + if(m_vbuffer) delete m_vbuffer; + m_vbuffer=NULL; +} + +void YangVideoEncoder::setVideoPara(YangVideoInfo *pap,YangVideoEncInfo *penc){ + memcpy(&m_videoInfo,pap,sizeof(YangVideoInfo)); + memcpy(&m_enc,penc,sizeof(YangVideoEncInfo)); +} + diff --git a/libmetartc3/src/yangencoder/YangVideoEncoderFfmpeg.cpp b/libmetartc3/src/yangencoder/YangVideoEncoderFfmpeg.cpp new file mode 100755 index 00000000..cfdbda06 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangVideoEncoderFfmpeg.cpp @@ -0,0 +1,322 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangVideoEncoderFfmpeg.h" +#include "YangH264EncHeader.h" +#if Yang_Ffmpeg_UsingSo +void YangVideoEncoderFfmpeg::loadLib() { + yang_av_buffer_unref = (void (*)(AVBufferRef **buf)) m_lib1.loadFunction( + "av_buffer_unref"); + yang_av_hwframe_ctx_init = (int32_t (*)(AVBufferRef *ref)) m_lib1.loadFunction( + "av_hwframe_ctx_init"); + yang_av_frame_alloc = (AVFrame* (*)(void)) m_lib1.loadFunction( + "av_frame_alloc"); + yang_av_image_get_buffer_size = (int32_t (*)(enum AVPixelFormat pix_fmt, + int32_t width, int32_t height, int32_t align)) m_lib1.loadFunction( + "av_image_get_buffer_size"); + yang_av_hwdevice_ctx_create = (int32_t 
(*)(AVBufferRef **device_ctx, + enum AVHWDeviceType type, const char *device, AVDictionary *opts, + int32_t flags)) m_lib1.loadFunction("av_hwdevice_ctx_create"); + yang_av_hwframe_transfer_data = (int32_t (*)(AVFrame *dst, const AVFrame *src, + int32_t flags)) m_lib1.loadFunction("av_hwframe_transfer_data"); + yang_av_free = (void (*)(void *ptr)) m_lib1.loadFunction("av_free"); + yang_av_frame_free = (void (*)(AVFrame **frame)) m_lib1.loadFunction( + "av_frame_free"); + yang_av_buffer_ref = + (AVBufferRef* (*)(AVBufferRef *buf)) m_lib1.loadFunction( + "av_buffer_ref"); + yang_av_image_fill_arrays = (int32_t (*)(uint8_t *dst_data[4], + int32_t dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, + int32_t width, int32_t height, int32_t align)) m_lib1.loadFunction( + "av_image_fill_arrays"); + yang_av_hwframe_ctx_alloc = + (AVBufferRef* (*)(AVBufferRef *device_ctx)) m_lib1.loadFunction( + "av_hwframe_ctx_alloc"); + yang_av_hwframe_get_buffer = (int32_t (*)(AVBufferRef *hwframe_ctx, + AVFrame *frame, int32_t flags)) m_lib1.loadFunction( + "av_hwframe_get_buffer"); + yang_av_malloc = (void* (*)(size_t size)) m_lib1.loadFunction("av_malloc"); + + yang_avcodec_alloc_context3 = + (AVCodecContext* (*)(const AVCodec *codec)) m_lib.loadFunction( + "avcodec_alloc_context3"); + yang_av_init_packet = (void (*)(AVPacket *pkt)) m_lib.loadFunction( + "av_init_packet"); + yang_avcodec_find_encoder_by_name = + (AVCodec* (*)(const char *name)) m_lib.loadFunction( + "avcodec_find_encoder_by_name"); + yang_avcodec_open2 = (int32_t (*)(AVCodecContext *avctx, const AVCodec *codec, + AVDictionary **options)) m_lib.loadFunction("avcodec_open2"); + yang_avcodec_send_frame = (int32_t (*)(AVCodecContext *avctx, + const AVFrame *frame)) m_lib.loadFunction("avcodec_send_frame"); + yang_avcodec_receive_packet = (int32_t (*)(AVCodecContext *avctx, + AVPacket *avpkt)) m_lib.loadFunction("avcodec_receive_packet"); + yang_avcodec_close = (int32_t (*)(AVCodecContext *avctx)) m_lib.loadFunction( + "avcodec_close"); +} +void YangVideoEncoderFfmpeg::unloadLib() { + yang_av_hwframe_ctx_alloc = NULL; + yang_av_hwframe_ctx_init = NULL; + yang_av_buffer_unref = NULL; + yang_avcodec_find_encoder_by_name = NULL; + yang_av_hwdevice_ctx_create = NULL; + yang_av_frame_alloc = NULL; + yang_avcodec_open2 = NULL; + yang_av_image_get_buffer_size = NULL; + yang_av_malloc = NULL; + yang_av_image_fill_arrays = NULL; + yang_av_init_packet = NULL; + yang_av_hwframe_get_buffer = NULL; + yang_av_hwframe_transfer_data = NULL; + yang_avcodec_send_frame = NULL; + yang_avcodec_receive_packet = NULL; + yang_av_frame_free = NULL; + yang_avcodec_close = NULL; + yang_av_free = NULL; +} +#endif +enum AVPixelFormat get_hw_format(AVCodecContext *ctx, + const enum AVPixelFormat *pix_fmts) { + if(YangVideoEncoderFfmpeg::g_hwType==YangV_Hw_Intel) return AV_PIX_FMT_VAAPI; + if(YangVideoEncoderFfmpeg::g_hwType==YangV_Hw_Nvdia) return AV_PIX_FMT_CUDA; + if(YangVideoEncoderFfmpeg::g_hwType==YangV_Hw_Android) return AV_PIX_FMT_MEDIACODEC; + return AV_PIX_FMT_VAAPI; + /** const enum AVPixelFormat *p; + + for (p = pix_fmts; *p != -1; p++) { + if (*p == hw_pix_fmt) + return *p; + } + + fprintf(stderr, "Failed to get HW surface format.\n"); + return AV_PIX_FMT_NONE;**/ +} +YangVideoHwType YangVideoEncoderFfmpeg::g_hwType=YangV_Hw_Intel; +int32_t YangVideoEncoderFfmpeg::set_hwframe_ctx(AVPixelFormat ctxformat,AVPixelFormat swformat,YangVideoInfo *yvp,AVCodecContext *ctx, + AVBufferRef *hw_device_ctx, int32_t pwid, int32_t phei) { + AVBufferRef 
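+	/* The body below follows FFmpeg's standard hw-frame recipe: allocate an
+	 * AVHWFramesContext bound to the device, describe the GPU surface pool
+	 * (format = the hardware pixel format, sw_format = the uploadable CPU
+	 * format, plus dimensions and pool depth), initialize it, then give the
+	 * codec context its own references. initial_pool_size = 20 is this
+	 * encoder's chosen pool depth, not an FFmpeg constant. */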
*hw_frames_ref;
+	AVHWFramesContext *frames_ctx = NULL;
+	int32_t err = 0;
+
+	if (!(hw_frames_ref = yang_av_hwframe_ctx_alloc(hw_device_ctx))) {
+		printf("Failed to create VAAPI frame context.\n");
+		return -1;
+	}
+	frames_ctx = (AVHWFramesContext*) (hw_frames_ref->data);
+	frames_ctx->format = ctxformat;
+	frames_ctx->sw_format = swformat;
+	frames_ctx->width = pwid;
+	frames_ctx->height = phei;
+	frames_ctx->initial_pool_size = 20;
+	if ((err = yang_av_hwframe_ctx_init(hw_frames_ref)) < 0) {
+		printf("Failed to initialize VAAPI frame context. Error code: %d\n",
+				err);
+		yang_av_buffer_unref(&hw_frames_ref);
+		return err;
+	}
+	ctx->hw_frames_ctx = yang_av_buffer_ref(hw_frames_ref);
+	ctx->hw_device_ctx = yang_av_buffer_ref(hw_device_ctx);
+	// ctx->hwaccel_flags=1;
+	if (!ctx->hw_frames_ctx)
+		err = AVERROR(ENOMEM);
+
+	yang_av_buffer_unref(&hw_frames_ref);
+	return err;
+}
+
+YangVideoEncoderFfmpeg::YangVideoEncoderFfmpeg(int32_t ptype,int32_t phwtype) {
+	usingVaapi = 1; //pcontext->usingHwDec == 2 ? 1 : 0;
+	m_encoderType=(YangVideoCodec)ptype;
+	g_hwType=(YangVideoHwType)phwtype;
+	m_frame = NULL;
+	yLen = m_videoInfo.outWidth * m_videoInfo.outHeight;
+	uLen = yLen / 4;
+	allLen = yLen * 3 / 2;
+	buffer = NULL;
+	ret = 0;
+	m_codec = NULL;
+	hw_device_ctx = NULL;
+	frame_mem_gpu = NULL;
+	m_usingHw=true;
+	m_sendKeyframe=0; //keyframe request flag, read by encode()
+#if Yang_Ffmpeg_UsingSo
+	unloadLib();
+#endif
+
+}
+YangVideoEncoderFfmpeg::~YangVideoEncoderFfmpeg() {
+	encode_close();
+#if Yang_Ffmpeg_UsingSo
+	unloadLib();
+	m_lib.unloadObject();
+	m_lib1.unloadObject();
+#endif
+
+}
+void YangVideoEncoderFfmpeg::sendKeyFrame(){
+	m_sendKeyframe=1;
+}
+void YangVideoEncoderFfmpeg::setVideoMetaData(YangVideoMeta *pvmd) {
+
+	/**setZbVmd_f(m_codecCtx->extradata, m_codecCtx->extradata_size,
+	 pvmd->livingMeta.buffer, &pvmd->livingMeta.bufLen);**/
+	//printf("\n....%d\n",m_codecCtx->extradata_size);
+	//for(int32_t i=0;i<m_codecCtx->extradata_size;i++){
+	//	printf("%02x,",*(m_codecCtx->extradata+i));
+	//}
+}
+void YangVideoEncoderFfmpeg::initParam(AVCodecContext *p_codecCtx,YangVideoInfo *pvp,YangVideoEncInfo *penc){
+	p_codecCtx->bit_rate = 1000 * pvp->rate;
+	//p_codecCtx->global_quality
+	printf("\nbitrate===%d\n",(int)p_codecCtx->bit_rate);
+	p_codecCtx->width = pvp->outWidth;
+	p_codecCtx->height = pvp->outHeight;
+	p_codecCtx->profile = pvp->videoEncoderType==Yang_VED_264?FF_PROFILE_H264_CONSTRAINED_BASELINE:FF_PROFILE_HEVC_MAIN; //66;
+	//p_codecCtx->gop_size = pvp->frame;
+	//p_codecCtx->level = penc->level_idc;
+
+	//time_base is the duration of one tick (1/fps); framerate is fps/1
+	p_codecCtx->time_base.num = 1;
+	p_codecCtx->time_base.den = pvp->frame; // = (AVRational){1, pvp->frame }
+	p_codecCtx->framerate.num = pvp->frame; // = (AVRational){pvp->frame, 1}
+	p_codecCtx->framerate.den = 1;
+	if(pvp->videoEncoderType==Yang_VED_264) p_codecCtx->has_b_frames=0;
+
+}
+int32_t YangVideoEncoderFfmpeg::init(YangVideoInfo *pvp,YangVideoEncInfo *penc) {
+	//m_codec = avcodec_find_encoder_by_name("h264_vaapi");//avcodec_find_encoder(AV_CODEC_ID_H264);
+#if Yang_Ffmpeg_UsingSo
+	m_lib.loadObject("libavcodec");
+	m_lib1.loadObject("libavutil");
+	loadLib();
+#endif
+	setVideoPara(pvp,penc);
+	usingVaapi=1;
+	yLen = m_videoInfo.outWidth * m_videoInfo.outHeight;
+	uLen = yLen / 4;
+	allLen = yLen * 3 / 2;
+	//hevc_vaapi nvenc nvdec vdpau h264_nvenc
+	if(m_encoderType==Yang_VED_264){
+		if(g_hwType==YangV_Hw_Intel) m_codec =
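+			// Hardware encoders are looked up by FFmpeg codec *name*, not by
+			// AVCodecID: h264_vaapi/hevc_vaapi (Intel VA-API),
+			// h264_nvenc/hevc_nvenc (NVIDIA NVENC) and
+			// h264_mediacodec/hevc_mediacodec (Android MediaCodec). The lookup
+			// returns NULL when the local FFmpeg build was configured without
+			// the matching hwaccel, so m_codec should be checked before use.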
yang_avcodec_find_encoder_by_name("h264_vaapi");//avcodec_find_encoder(AV_CODEC_ID_H264); + if(g_hwType==YangV_Hw_Nvdia) { + m_codec = yang_avcodec_find_encoder_by_name("h264_nvenc"); + + } + if(g_hwType==YangV_Hw_Android) m_codec = yang_avcodec_find_encoder_by_name("h264_mediacodec"); + }else if(m_encoderType==Yang_VED_265){ + if(g_hwType==YangV_Hw_Intel) { + m_codec = yang_avcodec_find_encoder_by_name("hevc_vaapi"); + } + if(g_hwType==YangV_Hw_Nvdia) m_codec = yang_avcodec_find_encoder_by_name("hevc_nvenc"); + if(g_hwType==YangV_Hw_Android) m_codec = yang_avcodec_find_encoder_by_name("hevc_mediacodec"); + } + + m_codecCtx = yang_avcodec_alloc_context3(m_codec); + initParam(m_codecCtx,pvp,penc); + m_codecCtx->get_format = get_hw_format; // AV_PIX_FMT_NV12;//get_hw_format; + if(g_hwType==YangV_Hw_Intel){ + + ret = yang_av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,"/dev/dri/renderD128", NULL, 0); + m_codecCtx->pix_fmt = AV_PIX_FMT_VAAPI; + }else if(g_hwType==YangV_Hw_Nvdia){ + ret = yang_av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_CUDA,"CUDA", NULL, 0); + m_codecCtx->pix_fmt = AV_PIX_FMT_CUDA; + }else if(g_hwType==YangV_Hw_Android){ + ret = yang_av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_MEDIACODEC,"MEDIACODEC", NULL, 0); + m_codecCtx->pix_fmt = AV_PIX_FMT_MEDIACODEC; + } + //AVPixelFormat format= pvp->videoEncoderFormat==0?AV_PIX_FMT_NV12:AV_PIX_FMT_YUV420P; + AVPixelFormat format= AV_PIX_FMT_NV12;//AV_PIX_FMT_YUV420P; + if(pvp->bitDepth==10) format = AV_PIX_FMT_P010; + if(pvp->bitDepth==16) format = AV_PIX_FMT_P016; + + if ((ret = set_hwframe_ctx(m_codecCtx->pix_fmt,format,&m_videoInfo,m_codecCtx, hw_device_ctx, m_videoInfo.outWidth, + m_videoInfo.outHeight)) < 0) { + printf("Failed to set hwframe context.\n"); + //goto close; + } + + m_codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; + + ret = yang_avcodec_open2(m_codecCtx, m_codec, NULL); + if (ret < 0){ + printf("\navcodec_open2 failure................\n"); + exit(1); + } + + m_frame = yang_av_frame_alloc(); + m_frame->width = m_codecCtx->width; + m_frame->height = m_codecCtx->height; + m_frame->format =format; + // int32_t numBytes = avpicture_get_size(AV_PIX_FMT_YUV420P,m_width,m_height); + int32_t numBytes = yang_av_image_get_buffer_size(format, m_videoInfo.outWidth,m_videoInfo.outHeight, 1); + buffer = (uint8_t*) yang_av_malloc(numBytes * sizeof(uint8_t)); + yang_av_image_fill_arrays(m_frame->data, m_frame->linesize, buffer, + format, m_videoInfo.outWidth, m_videoInfo.outHeight, 1); + yang_av_init_packet(&packet); + frame_mem_gpu = yang_av_frame_alloc(); + frame_mem_gpu->format = m_codecCtx->pix_fmt; + frame_mem_gpu->width = m_codecCtx->width; + frame_mem_gpu->height = m_codecCtx->height; + ret = yang_av_hwframe_get_buffer(m_codecCtx->hw_frames_ctx, frame_mem_gpu, + 0); + if (ret < 0) + printf("\nERROR:av_hwframe_get_buffer failure!\n"); + + m_isInit = 1; + return Yang_Ok; + +} +int32_t YangVideoEncoderFfmpeg::encode(YangFrame* pframe, YangEncoderCallback* pcallback) { + if(!m_codecCtx) return Yang_Ok; + if(m_sendKeyframe==1) { + m_sendKeyframe=2; + m_frame->pict_type=AV_PICTURE_TYPE_I; + } + memcpy(m_frame->data[0], pframe->payload, yLen); + memcpy(m_frame->data[1], pframe->payload + yLen, uLen * 2); + yang_av_hwframe_transfer_data(frame_mem_gpu, m_frame, 0); + ret = yang_avcodec_send_frame(m_codecCtx, frame_mem_gpu); + ret = yang_avcodec_receive_packet(m_codecCtx, &packet); + int32_t destLen=0; + if (ret != 0) { + //destLen = 0; + return Yang_Ok; + } + destLen = packet.size-4; + int32_t 
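+	// The frametype probe below reads the first NAL header byte after the
+	// stripped 4-byte start code: 0x67 is H.264 nal_ref_idc=3 with
+	// nal_unit_type=7 (SPS); 0x40 is H.265 nal_unit_type 32 (VPS) in bits
+	// 6..1 of the header byte. This assumes the encoder still emits
+	// parameter sets in-band ahead of IDR frames; checking
+	// packet.flags & AV_PKT_FLAG_KEY would be the more robust signal.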
frametype=YANG_Frametype_P; + memcpy(m_vbuffer, packet.data+4, destLen); + if(m_encoderType==Yang_VED_264) frametype=m_vbuffer[0]==0x67?YANG_Frametype_I:YANG_Frametype_P; + if(m_encoderType==Yang_VED_265) frametype=m_vbuffer[0]==0x40?YANG_Frametype_I:YANG_Frametype_P; + pframe->payload=m_vbuffer; + pframe->frametype=frametype; + pframe->nb=destLen; + if(pcallback) pcallback->onVideoData(pframe); + if(m_sendKeyframe==2) { + m_frame->pict_type=AV_PICTURE_TYPE_NONE; + m_sendKeyframe=0; + } + return 1; +} + +void YangVideoEncoderFfmpeg::encode_close() { +// av_parser_close(pCodecParserCtx); + if (usingVaapi) { + yang_av_buffer_unref(&hw_device_ctx); + yang_av_frame_free(&frame_mem_gpu); + } + yang_av_frame_free(&m_frame); + m_frame = NULL; + if (m_codecCtx){ + yang_avcodec_close(m_codecCtx); + yang_av_free(m_codecCtx); + } + m_codecCtx = NULL; + if(buffer) yang_av_free(buffer); + buffer = NULL; + +} + diff --git a/libmetartc3/src/yangencoder/YangVideoEncoderFfmpeg.h b/libmetartc3/src/yangencoder/YangVideoEncoderFfmpeg.h new file mode 100755 index 00000000..90d7b333 --- /dev/null +++ b/libmetartc3/src/yangencoder/YangVideoEncoderFfmpeg.h @@ -0,0 +1,102 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGH264DECODERFFMPEG_H +#define YANGH264DECODERFFMPEG_H +#include +#include +#include +extern "C"{ +#include +#include +#include + +} +class YangVideoEncoderFfmpeg : public YangVideoEncoder +{ + public: + YangVideoEncoderFfmpeg(int32_t pencType,int32_t phwtype); + ~YangVideoEncoderFfmpeg(); + int32_t init(YangVideoInfo *pvp,YangVideoEncInfo *penc); + static void initParam(AVCodecContext *p_codecCtx,YangVideoInfo *pvp,YangVideoEncInfo *penc); + static YangVideoHwType g_hwType; + //void encode(uint8_t *src,int32_t buflen, uint8_t *dest, int32_t *destLen, int32_t *frametype); + //int32_t encode(uint8_t *buf, int32_t buflen, uint8_t *dest, int32_t *destLen,int32_t *frametype); + int32_t encode(YangFrame* pframe, YangEncoderCallback* pcallback); + void setVideoMetaData(YangVideoMeta *pvmd); + void sendKeyFrame(); + void parseRtmpHeader(uint8_t *p,int32_t pLen,int32_t *pwid,int32_t *phei,int32_t *pfps); + protected: + + private: + int32_t ret; + YangVideoCodec m_encoderType; + int32_t m_sendKeyframe; + AVCodec *m_codec; + AVCodecContext *m_codecCtx = NULL; + AVPacket packet; + AVFrame *m_frame; + bool m_usingHw; + + int32_t yLen; + int32_t uLen; + int32_t allLen; + uint8_t* buffer =NULL; + int32_t usingVaapi; + AVBufferRef *hw_device_ctx; + AVFrame *frame_mem_gpu; + int32_t set_hwframe_ctx(AVPixelFormat ctxformat,AVPixelFormat swformat,YangVideoInfo *yvp,AVCodecContext *ctx, AVBufferRef *hw_device_ctx,int32_t pwid,int32_t phei); + private: + void encode_close(); +#if Yang_Ffmpeg_UsingSo + YangLoadLib m_lib,m_lib1; + void loadLib(); + void unloadLib(); + AVBufferRef *(*yang_av_hwframe_ctx_alloc)(AVBufferRef *device_ctx); + int32_t (*yang_av_hwframe_ctx_init)(AVBufferRef *ref); + void (*yang_av_buffer_unref)(AVBufferRef **buf); + AVCodec *(*yang_avcodec_find_encoder_by_name)(const char *name); + int32_t (*yang_av_hwdevice_ctx_create)(AVBufferRef **device_ctx, enum AVHWDeviceType type, + const char *device, AVDictionary *opts, int32_t flags); + AVFrame *(*yang_av_frame_alloc)(void); + int32_t (*yang_avcodec_open2)(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); + int32_t (*yang_av_image_get_buffer_size)(enum AVPixelFormat pix_fmt, int32_t width, int32_t height, int32_t align); + void *(*yang_av_malloc)(size_t size); + int32_t (*yang_av_image_fill_arrays)(uint8_t 
*dst_data[4], int32_t dst_linesize[4], + const uint8_t *src, + enum AVPixelFormat pix_fmt, int32_t width, int32_t height, int32_t align); + void (*yang_av_init_packet)(AVPacket *pkt); + int32_t (*yang_av_hwframe_get_buffer)(AVBufferRef *hwframe_ctx, AVFrame *frame, int32_t flags); + int32_t (*yang_av_hwframe_transfer_data)(AVFrame *dst, const AVFrame *src, int32_t flags); + int32_t (*yang_avcodec_send_frame)(AVCodecContext *avctx, const AVFrame *frame); + int32_t (*yang_avcodec_receive_packet)(AVCodecContext *avctx, AVPacket *avpkt); + void (*yang_av_frame_free)(AVFrame **frame); + int32_t (*yang_avcodec_close)(AVCodecContext *avctx); + void (*yang_av_free)(void *ptr); + AVBufferRef *(*yang_av_buffer_ref)(AVBufferRef *buf); + AVCodecContext *(*yang_avcodec_alloc_context3)(const AVCodec *codec); +#else +#define yang_av_hwframe_ctx_alloc av_hwframe_ctx_alloc +#define yang_av_hwframe_ctx_init av_hwframe_ctx_init +#define yang_av_buffer_unref av_buffer_unref +#define yang_avcodec_find_encoder_by_name avcodec_find_encoder_by_name +#define yang_av_hwdevice_ctx_create av_hwdevice_ctx_create +#define yang_av_frame_alloc av_frame_alloc +#define yang_avcodec_open2 avcodec_open2 +#define yang_av_image_get_buffer_size av_image_get_buffer_size +#define yang_av_malloc av_malloc +#define yang_av_image_fill_arrays av_image_fill_arrays +#define yang_av_init_packet av_init_packet +#define yang_av_hwframe_get_buffer av_hwframe_get_buffer +#define yang_av_hwframe_transfer_data av_hwframe_transfer_data +#define yang_avcodec_send_frame avcodec_send_frame +#define yang_avcodec_receive_packet avcodec_receive_packet +#define yang_av_frame_free av_frame_free +#define yang_avcodec_close avcodec_close +#define yang_av_free av_free +#define yang_av_buffer_ref av_buffer_ref +#define yang_avcodec_alloc_context3 avcodec_alloc_context3 +#endif +}; + +#endif // YANGH264DECODERFFMPEG_H diff --git a/libmetartc3/src/yangencoder/YangVideoEncoderHandle.cpp b/libmetartc3/src/yangencoder/YangVideoEncoderHandle.cpp new file mode 100755 index 00000000..20de504e --- /dev/null +++ b/libmetartc3/src/yangencoder/YangVideoEncoderHandle.cpp @@ -0,0 +1,194 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include + +#include +#include +#include +#include + +#include +#include + + + +YangVideoEncoderHandle::YangVideoEncoderHandle(YangVideoInfo *pcontext, + YangVideoEncInfo *enc) { + m_isInit = 0; + m_isStart = 0; + m_isConvert = 1; + m_in_videoBuffer = NULL; + m_out_videoBuffer = NULL; + m_para = pcontext; + m_enc = enc; + m_vmd = NULL; + m_sendKeyframe=0; + m_uid=0; +} + +YangVideoEncoderHandle::~YangVideoEncoderHandle(void) { + if (m_isConvert) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + m_para = NULL; + m_enc = NULL; + m_in_videoBuffer = NULL; + m_out_videoBuffer = NULL; + m_vmd = NULL; + +} +void YangVideoEncoderHandle::stop() { + stopLoop(); +} + +void YangVideoEncoderHandle::run() { + m_isStart = 1; + startLoop(); + m_isStart = 0; +} +void YangVideoEncoderHandle::sendKeyframe(){ + m_sendKeyframe=1; +} +void YangVideoEncoderHandle::setVideoMetaData(YangVideoMeta *pvmd) { + m_vmd = pvmd; +} + +void YangVideoEncoderHandle::init() { + m_isInit = 1; + +} + +void YangVideoEncoderHandle::setInVideoBuffer(YangVideoBuffer *plist) { + m_in_videoBuffer = plist; +} +void YangVideoEncoderHandle::setOutVideoBuffer(YangVideoEncoderBuffer *plist) { + m_out_videoBuffer = plist; +} +void YangVideoEncoderHandle::onVideoData(YangFrame* pframe){ + + if (pframe->nb > 4) { + // yang_trace("\n%d:",pframe->nb); + //for(int 
i=0;i<50;i++) yang_trace("%02x,",pframe->payload[i]); + m_out_videoBuffer->putEVideo(pframe); + } +} +void YangVideoEncoderHandle::onAudioData(YangFrame* pframe){ + +} +void YangVideoEncoderHandle::startLoop() { + m_isConvert = 1; + YangVideoInfo para; + YangYuvConvert yuv; + memcpy(¶, m_para, sizeof(YangVideoInfo)); + //int32_t is12bit = yvp.videoCaptureFormat > 0 ? 1 : 0; + int32_t isTrans = (para.width != para.outWidth ? 1 : 0); + int32_t isHw=m_para->videoEncHwType; + //int64_t videoTimestamp = 0; + int32_t bitLen = para.bitDepth == 8 ? 1 : 2; + int32_t m_in_fileSize = bitLen * para.width * para.height * 3 / 2; + int32_t m_out_fileSize = bitLen * para.outWidth * para.outHeight * 3 / 2; + + uint8_t *outVideoSrc = NULL; + if (isTrans) { + outVideoSrc = new uint8_t[para.outWidth * para.outHeight * 3 / 2]; + } + + if (m_in_videoBuffer != NULL) + m_in_videoBuffer->resetIndex(); + if (m_out_videoBuffer != NULL) + m_out_videoBuffer->resetIndex(); + + YangEncoderFactory ydf; + YangVideoEncoder *t_Encoder = ydf.createVideoEncoder(m_para); + + t_Encoder->init(¶, m_enc); + t_Encoder->setVideoMetaData(m_vmd); + + uint8_t* nv12Src=NULL; + //uint8_t* tmpNv12=NULL; + if(isHw) nv12Src=new uint8_t[m_in_fileSize]; + int64_t t_preTimestamp=0; + int32_t wid=m_para->width; + int32_t hei=m_para->height; + YangFrame videoFrame; + memset(&videoFrame,0,sizeof(YangFrame)); + + while (m_isConvert == 1) { + if (m_in_videoBuffer->size() == 0) { + yang_usleep(20000); + continue; + } + uint8_t *tmp=NULL; + uint8_t *tmpsrc=NULL; + + tmpsrc=m_in_videoBuffer->getVideoRef(&videoFrame); + + if(!tmpsrc) continue; + + if(isHw){ + + if(tmpsrc){ + //tmpNv12=m_in_videoBuffer->getVideoIn(&videoTimestamp); + if(para.videoEncoderFormat==YangI420) { + yuv.i420tonv12(tmpsrc,nv12Src,wid,hei); + tmp=nv12Src; + } + if(para.videoEncoderFormat==YangArgb){ + tmp=tmpsrc; + } + } + + }else{ + tmp=tmpsrc; + //tmp=m_in_videoBuffer->getVideoIn(&videoTimestamp); + } + + + if(t_preTimestamp){ + if(videoFrame.pts<=t_preTimestamp){ + tmp=NULL; + continue; + } + } + t_preTimestamp=videoFrame.pts; + if(m_sendKeyframe==1){ + t_Encoder->sendKeyFrame(); + m_sendKeyframe=0; + } + //videoFrame.timestamp=frame->timestamp; + videoFrame.uid=m_uid; + + if (isTrans) { + yuv.scaleI420(tmp, + outVideoSrc, para.width, para.height, para.outWidth, + para.outHeight); + videoFrame.payload=outVideoSrc; + videoFrame.nb=m_out_fileSize; + + t_Encoder->encode(&videoFrame,this); + } else { + videoFrame.payload=tmp; + videoFrame.nb=m_in_fileSize; + t_Encoder->encode(&videoFrame,this); + + } + tmp=NULL; + + } + //tmpNv12=NULL; + yang_deleteA(outVideoSrc); + yang_deleteA(nv12Src); + yang_delete(t_Encoder); + + +} + +void YangVideoEncoderHandle::stopLoop() { + m_isConvert = 0; + +} diff --git a/libmetartc3/src/yangencoder/lame.h b/libmetartc3/src/yangencoder/lame.h new file mode 100755 index 00000000..c3694b3b --- /dev/null +++ b/libmetartc3/src/yangencoder/lame.h @@ -0,0 +1,1342 @@ +/* + * Interface to MP3 LAME encoding engine + * + * Copyright (c) 1999 Mark Taylor + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +/* $Id: lame.h,v 1.192 2017/08/31 14:14:46 robert Exp $ */ + +#ifndef LAME_LAME_H +#define LAME_LAME_H + +/* for size_t typedef */ +#include +/* for va_list typedef */ +#include +/* for FILE typedef, TODO: remove when removing lame_mp3_tags_fid */ +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef void (*lame_report_function)(const char *format, va_list ap); + +#if defined(WIN32) || defined(_WIN32) +#undef CDECL +#define CDECL __cdecl +#else +#define CDECL +#endif + +#define DEPRECATED_OR_OBSOLETE_CODE_REMOVED 1 + +typedef enum vbr_mode_e { + vbr_off=0, + vbr_mt, /* obsolete, same as vbr_mtrh */ + vbr_rh, + vbr_abr, + vbr_mtrh, + vbr_max_indicator, /* Don't use this! It's used for sanity checks. */ + vbr_default=vbr_mtrh /* change this to change the default VBR mode of LAME */ +} vbr_mode; + + +/* MPEG modes */ +typedef enum MPEG_mode_e { + STEREO = 0, + JOINT_STEREO, + DUAL_CHANNEL, /* LAME doesn't supports this! */ + MONO, + NOT_SET, + MAX_INDICATOR /* Don't use this! It's used for sanity checks. */ +} MPEG_mode; + +/* Padding types */ +typedef enum Padding_type_e { + PAD_NO = 0, + PAD_ALL, + PAD_ADJUST, + PAD_MAX_INDICATOR /* Don't use this! It's used for sanity checks. */ +} Padding_type; + + + +/*presets*/ +typedef enum preset_mode_e { + /*values from 8 to 320 should be reserved for abr bitrates*/ + /*for abr I'd suggest to directly use the targeted bitrate as a value*/ + ABR_8 = 8, + ABR_320 = 320, + + V9 = 410, /*Vx to match Lame and VBR_xx to match FhG*/ + VBR_10 = 410, + V8 = 420, + VBR_20 = 420, + V7 = 430, + VBR_30 = 430, + V6 = 440, + VBR_40 = 440, + V5 = 450, + VBR_50 = 450, + V4 = 460, + VBR_60 = 460, + V3 = 470, + VBR_70 = 470, + V2 = 480, + VBR_80 = 480, + V1 = 490, + VBR_90 = 490, + V0 = 500, + VBR_100 = 500, + + + + /*still there for compatibility*/ + R3MIX = 1000, + STANDARD = 1001, + EXTREME = 1002, + INSANE = 1003, + STANDARD_FAST = 1004, + EXTREME_FAST = 1005, + MEDIUM = 1006, + MEDIUM_FAST = 1007 +} preset_mode; + + +/*asm optimizations*/ +typedef enum asm_optimizations_e { + MMX = 1, + AMD_3DNOW = 2, + SSE = 3 +} asm_optimizations; + + +/* psychoacoustic model */ +typedef enum Psy_model_e { + PSY_GPSYCHO = 1, + PSY_NSPSYTUNE = 2 +} Psy_model; + + +/* buffer considerations */ +typedef enum buffer_constraint_e { + MDB_DEFAULT=0, + MDB_STRICT_ISO=1, + MDB_MAXIMUM=2 +} buffer_constraint; + + +struct lame_global_struct; +typedef struct lame_global_struct lame_global_flags; +typedef lame_global_flags *lame_t; + + + + +/*********************************************************************** + * + * The LAME API + * These functions should be called, in this order, for each + * MP3 file to be encoded. See the file "API" for more documentation + * + ***********************************************************************/ + + +/* + * REQUIRED: + * initialize the encoder. sets default for all encoder parameters, + * returns NULL if some malloc()'s failed + * otherwise returns pointer to structure needed for all future + * API calls. 
+ */ +lame_global_flags * CDECL lame_init(void); +#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED +#else +/* obsolete version */ +int32_t CDECL lame_init_old(lame_global_flags *); +#endif + +/* + * OPTIONAL: + * set as needed to override defaults + */ + +/******************************************************************** + * input stream description + ***********************************************************************/ +/* number of samples. default = 2^32-1 */ +int32_t CDECL lame_set_num_samples(lame_global_flags *, unsigned long); +unsigned long CDECL lame_get_num_samples(const lame_global_flags *); + +/* input sample rate in Hz. default = 44100hz */ +int32_t CDECL lame_set_in_samplerate(lame_global_flags *, int); +int32_t CDECL lame_get_in_samplerate(const lame_global_flags *); + +/* number of channels in input stream. default=2 */ +int32_t CDECL lame_set_num_channels(lame_global_flags *, int); +int32_t CDECL lame_get_num_channels(const lame_global_flags *); + +/* + scale the input by this amount before encoding. default=1 + (not used by decoding routines) +*/ +int32_t CDECL lame_set_scale(lame_global_flags *, float); +float CDECL lame_get_scale(const lame_global_flags *); + +/* + scale the channel 0 (left) input by this amount before encoding. default=1 + (not used by decoding routines) +*/ +int32_t CDECL lame_set_scale_left(lame_global_flags *, float); +float CDECL lame_get_scale_left(const lame_global_flags *); + +/* + scale the channel 1 (right) input by this amount before encoding. default=1 + (not used by decoding routines) +*/ +int32_t CDECL lame_set_scale_right(lame_global_flags *, float); +float CDECL lame_get_scale_right(const lame_global_flags *); + +/* + output sample rate in Hz. default = 0, which means LAME picks best value + based on the amount of compression. MPEG only allows: + MPEG1 32, 44.1, 48khz + MPEG2 16, 22.05, 24 + MPEG2.5 8, 11.025, 12 + (not used by decoding routines) +*/ +int32_t CDECL lame_set_out_samplerate(lame_global_flags *, int); +int32_t CDECL lame_get_out_samplerate(const lame_global_flags *); + + +/******************************************************************** + * general control parameters + ***********************************************************************/ +/* 1=cause LAME to collect data for an MP3 frame analyzer. default=0 */ +int32_t CDECL lame_set_analysis(lame_global_flags *, int); +int32_t CDECL lame_get_analysis(const lame_global_flags *); + +/* + 1 = write a Xing VBR header frame. + default = 1 + this variable must have been added by a Hungarian notation Windows programmer :-) +*/ +int32_t CDECL lame_set_bWriteVbrTag(lame_global_flags *, int); +int32_t CDECL lame_get_bWriteVbrTag(const lame_global_flags *); + +/* 1=decode only. use lame/mpglib to convert mp3/ogg to wav. default=0 */ +int32_t CDECL lame_set_decode_only(lame_global_flags *, int); +int32_t CDECL lame_get_decode_only(const lame_global_flags *); + +#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED +#else +/* 1=encode a Vorbis .ogg file. default=0 */ +/* DEPRECATED */ +int32_t CDECL lame_set_ogg(lame_global_flags *, int); +int32_t CDECL lame_get_ogg(const lame_global_flags *); +#endif + +/* + internal algorithm selection. True quality is determined by the bitrate + but this variable will effect quality by selecting expensive or cheap algorithms. + quality=0..9. 0=best (very slow). 9=worst. 
+ recommended: 2 near-best quality, not too slow + 5 good quality, fast + 7 ok quality, really fast +*/ +int32_t CDECL lame_set_quality(lame_global_flags *, int); +int32_t CDECL lame_get_quality(const lame_global_flags *); + +/* + mode = 0,1,2,3 = stereo, jstereo, dual channel (not supported), mono + default: lame picks based on compression ration and input channels +*/ +int32_t CDECL lame_set_mode(lame_global_flags *, MPEG_mode); +MPEG_mode CDECL lame_get_mode(const lame_global_flags *); + +#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED +#else +/* + mode_automs. Use a M/S mode with a switching threshold based on + compression ratio + DEPRECATED +*/ +int32_t CDECL lame_set_mode_automs(lame_global_flags *, int); +int32_t CDECL lame_get_mode_automs(const lame_global_flags *); +#endif + +/* + force_ms. Force M/S for all frames. For testing only. + default = 0 (disabled) +*/ +int32_t CDECL lame_set_force_ms(lame_global_flags *, int); +int32_t CDECL lame_get_force_ms(const lame_global_flags *); + +/* use free_format? default = 0 (disabled) */ +int32_t CDECL lame_set_free_format(lame_global_flags *, int); +int32_t CDECL lame_get_free_format(const lame_global_flags *); + +/* perform ReplayGain analysis? default = 0 (disabled) */ +int32_t CDECL lame_set_findReplayGain(lame_global_flags *, int); +int32_t CDECL lame_get_findReplayGain(const lame_global_flags *); + +/* decode on the fly. Search for the peak sample. If the ReplayGain + * analysis is enabled then perform the analysis on the decoded data + * stream. default = 0 (disabled) + * NOTE: if this option is set the build-in decoder should not be used */ +int32_t CDECL lame_set_decode_on_the_fly(lame_global_flags *, int); +int32_t CDECL lame_get_decode_on_the_fly(const lame_global_flags *); + +#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED +#else +/* DEPRECATED: now does the same as lame_set_findReplayGain() + default = 0 (disabled) */ +int32_t CDECL lame_set_ReplayGain_input(lame_global_flags *, int); +int32_t CDECL lame_get_ReplayGain_input(const lame_global_flags *); + +/* DEPRECATED: now does the same as + lame_set_decode_on_the_fly() && lame_set_findReplayGain() + default = 0 (disabled) */ +int32_t CDECL lame_set_ReplayGain_decode(lame_global_flags *, int); +int32_t CDECL lame_get_ReplayGain_decode(const lame_global_flags *); + +/* DEPRECATED: now does the same as lame_set_decode_on_the_fly() + default = 0 (disabled) */ +int32_t CDECL lame_set_findPeakSample(lame_global_flags *, int); +int32_t CDECL lame_get_findPeakSample(const lame_global_flags *); +#endif + +/* counters for gapless encoding */ +int32_t CDECL lame_set_nogap_total(lame_global_flags*, int); +int32_t CDECL lame_get_nogap_total(const lame_global_flags*); + +int32_t CDECL lame_set_nogap_currentindex(lame_global_flags* , int); +int32_t CDECL lame_get_nogap_currentindex(const lame_global_flags*); + + +/* + * OPTIONAL: + * Set printf like error/debug/message reporting functions. + * The second argument has to be a pointer to a function which looks like + * void my_debugf(const char *format, va_list ap) + * { + * (void) vfprintf(stdout, format, ap); + * } + * If you use NULL as the value of the pointer in the set function, the + * lame buildin function will be used (prints to stderr). + * To quiet any output you have to replace the body of the example function + * with just "return;" and use it in the set function. 
+ */ +int32_t CDECL lame_set_errorf(lame_global_flags *, lame_report_function); +int32_t CDECL lame_set_debugf(lame_global_flags *, lame_report_function); +int32_t CDECL lame_set_msgf (lame_global_flags *, lame_report_function); + + + +/* set one of brate compression ratio. default is compression ratio of 11. */ +int32_t CDECL lame_set_brate(lame_global_flags *, int); +int32_t CDECL lame_get_brate(const lame_global_flags *); +int32_t CDECL lame_set_compression_ratio(lame_global_flags *, float); +float CDECL lame_get_compression_ratio(const lame_global_flags *); + + +int32_t CDECL lame_set_preset( lame_global_flags* gfp, int32_t ); +int32_t CDECL lame_set_asm_optimizations( lame_global_flags* gfp, int, int32_t ); + + + +/******************************************************************** + * frame params + ***********************************************************************/ +/* mark as copyright. default=0 */ +int32_t CDECL lame_set_copyright(lame_global_flags *, int); +int32_t CDECL lame_get_copyright(const lame_global_flags *); + +/* mark as original. default=1 */ +int32_t CDECL lame_set_original(lame_global_flags *, int); +int32_t CDECL lame_get_original(const lame_global_flags *); + +/* error_protection. Use 2 bytes from each frame for CRC checksum. default=0 */ +int32_t CDECL lame_set_error_protection(lame_global_flags *, int); +int32_t CDECL lame_get_error_protection(const lame_global_flags *); + +#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED +#else +/* padding_type. 0=pad no frames 1=pad all frames 2=adjust padding(default) */ +int32_t CDECL lame_set_padding_type(lame_global_flags *, Padding_type); +Padding_type CDECL lame_get_padding_type(const lame_global_flags *); +#endif + +/* MP3 'private extension' bit Meaningless. default=0 */ +int32_t CDECL lame_set_extension(lame_global_flags *, int); +int32_t CDECL lame_get_extension(const lame_global_flags *); + +/* enforce strict ISO compliance. default=0 */ +int32_t CDECL lame_set_strict_ISO(lame_global_flags *, int); +int32_t CDECL lame_get_strict_ISO(const lame_global_flags *); + + +/******************************************************************** + * quantization/noise shaping + ***********************************************************************/ + +/* disable the bit reservoir. For testing only. default=0 */ +int32_t CDECL lame_set_disable_reservoir(lame_global_flags *, int); +int32_t CDECL lame_get_disable_reservoir(const lame_global_flags *); + +/* select a different "best quantization" function. default=0 */ +int32_t CDECL lame_set_quant_comp(lame_global_flags *, int); +int32_t CDECL lame_get_quant_comp(const lame_global_flags *); +int32_t CDECL lame_set_quant_comp_short(lame_global_flags *, int); +int32_t CDECL lame_get_quant_comp_short(const lame_global_flags *); + +int32_t CDECL lame_set_experimentalX(lame_global_flags *, int); /* compatibility*/ +int32_t CDECL lame_get_experimentalX(const lame_global_flags *); + +/* another experimental option. for testing only */ +int32_t CDECL lame_set_experimentalY(lame_global_flags *, int); +int32_t CDECL lame_get_experimentalY(const lame_global_flags *); + +/* another experimental option. for testing only */ +int32_t CDECL lame_set_experimentalZ(lame_global_flags *, int); +int32_t CDECL lame_get_experimentalZ(const lame_global_flags *); + +/* Naoki's psycho acoustic model. 
default=0 */ +int32_t CDECL lame_set_exp_nspsytune(lame_global_flags *, int); +int32_t CDECL lame_get_exp_nspsytune(const lame_global_flags *); + +void CDECL lame_set_msfix(lame_global_flags *, double); +float CDECL lame_get_msfix(const lame_global_flags *); + + +/******************************************************************** + * VBR control + ***********************************************************************/ +/* Types of VBR. default = vbr_off = CBR */ +int32_t CDECL lame_set_VBR(lame_global_flags *, vbr_mode); +vbr_mode CDECL lame_get_VBR(const lame_global_flags *); + +/* VBR quality level. 0=highest 9=lowest */ +int32_t CDECL lame_set_VBR_q(lame_global_flags *, int); +int32_t CDECL lame_get_VBR_q(const lame_global_flags *); + +/* VBR quality level. 0=highest 9=lowest, Range [0,...,10[ */ +int32_t CDECL lame_set_VBR_quality(lame_global_flags *, float); +float CDECL lame_get_VBR_quality(const lame_global_flags *); + +/* Ignored except for VBR=vbr_abr (ABR mode) */ +int32_t CDECL lame_set_VBR_mean_bitrate_kbps(lame_global_flags *, int); +int32_t CDECL lame_get_VBR_mean_bitrate_kbps(const lame_global_flags *); + +int32_t CDECL lame_set_VBR_min_bitrate_kbps(lame_global_flags *, int); +int32_t CDECL lame_get_VBR_min_bitrate_kbps(const lame_global_flags *); + +int32_t CDECL lame_set_VBR_max_bitrate_kbps(lame_global_flags *, int); +int32_t CDECL lame_get_VBR_max_bitrate_kbps(const lame_global_flags *); + +/* + 1=strictly enforce VBR_min_bitrate. Normally it will be violated for + analog silence +*/ +int32_t CDECL lame_set_VBR_hard_min(lame_global_flags *, int); +int32_t CDECL lame_get_VBR_hard_min(const lame_global_flags *); + +/* for preset */ +#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED +#else +int32_t CDECL lame_set_preset_expopts(lame_global_flags *, int); +#endif + +/******************************************************************** + * Filtering control + ***********************************************************************/ +/* freq in Hz to apply lowpass. Default = 0 = lame chooses. -1 = disabled */ +int32_t CDECL lame_set_lowpassfreq(lame_global_flags *, int); +int32_t CDECL lame_get_lowpassfreq(const lame_global_flags *); +/* width of transition band, in Hz. Default = one polyphase filter band */ +int32_t CDECL lame_set_lowpasswidth(lame_global_flags *, int); +int32_t CDECL lame_get_lowpasswidth(const lame_global_flags *); + +/* freq in Hz to apply highpass. Default = 0 = lame chooses. -1 = disabled */ +int32_t CDECL lame_set_highpassfreq(lame_global_flags *, int); +int32_t CDECL lame_get_highpassfreq(const lame_global_flags *); +/* width of transition band, in Hz. 
Default = one polyphase filter band */ +int32_t CDECL lame_set_highpasswidth(lame_global_flags *, int); +int32_t CDECL lame_get_highpasswidth(const lame_global_flags *); + + +/******************************************************************** + * psycho acoustics and other arguments which you should not change + * unless you know what you are doing + ***********************************************************************/ + +/* only use ATH for masking */ +int32_t CDECL lame_set_ATHonly(lame_global_flags *, int); +int32_t CDECL lame_get_ATHonly(const lame_global_flags *); + +/* only use ATH for short blocks */ +int32_t CDECL lame_set_ATHshort(lame_global_flags *, int); +int32_t CDECL lame_get_ATHshort(const lame_global_flags *); + +/* disable ATH */ +int32_t CDECL lame_set_noATH(lame_global_flags *, int); +int32_t CDECL lame_get_noATH(const lame_global_flags *); + +/* select ATH formula */ +int32_t CDECL lame_set_ATHtype(lame_global_flags *, int); +int32_t CDECL lame_get_ATHtype(const lame_global_flags *); + +/* lower ATH by this many db */ +int32_t CDECL lame_set_ATHlower(lame_global_flags *, float); +float CDECL lame_get_ATHlower(const lame_global_flags *); + +/* select ATH adaptive adjustment type */ +int32_t CDECL lame_set_athaa_type( lame_global_flags *, int); +int32_t CDECL lame_get_athaa_type( const lame_global_flags *); + +#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED +#else +/* select the loudness approximation used by the ATH adaptive auto-leveling */ +int32_t CDECL lame_set_athaa_loudapprox( lame_global_flags *, int); +int32_t CDECL lame_get_athaa_loudapprox( const lame_global_flags *); +#endif + +/* adjust (in dB) the point32_t below which adaptive ATH level adjustment occurs */ +int32_t CDECL lame_set_athaa_sensitivity( lame_global_flags *, float); +float CDECL lame_get_athaa_sensitivity( const lame_global_flags* ); + +#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED +#else +/* OBSOLETE: predictability limit (ISO tonality formula) */ +int32_t CDECL lame_set_cwlimit(lame_global_flags *, int); +int32_t CDECL lame_get_cwlimit(const lame_global_flags *); +#endif + +/* + allow blocktypes to differ between channels? + default: 0 for jstereo, 1 for stereo +*/ +int32_t CDECL lame_set_allow_diff_short(lame_global_flags *, int); +int32_t CDECL lame_get_allow_diff_short(const lame_global_flags *); + +/* use temporal masking effect (default = 1) */ +int32_t CDECL lame_set_useTemporal(lame_global_flags *, int); +int32_t CDECL lame_get_useTemporal(const lame_global_flags *); + +/* use temporal masking effect (default = 1) */ +int32_t CDECL lame_set_interChRatio(lame_global_flags *, float); +float CDECL lame_get_interChRatio(const lame_global_flags *); + +/* disable short blocks */ +int32_t CDECL lame_set_no_short_blocks(lame_global_flags *, int); +int32_t CDECL lame_get_no_short_blocks(const lame_global_flags *); + +/* force short blocks */ +int32_t CDECL lame_set_force_short_blocks(lame_global_flags *, int); +int32_t CDECL lame_get_force_short_blocks(const lame_global_flags *); + +/* Input PCM is emphased PCM (for instance from one of the rarely + emphased CDs), it is STRONGLY not recommended to use this, because + psycho does not take it into account, and last but not least many decoders + ignore these bits */ +int32_t CDECL lame_set_emphasis(lame_global_flags *, int); +int32_t CDECL lame_get_emphasis(const lame_global_flags *); + + + +/************************************************************************/ +/* internal variables, cannot be set... 
*/ +/* provided because they may be of use to calling application */ +/************************************************************************/ +/* version 0=MPEG-2 1=MPEG-1 (2=MPEG-2.5) */ +int32_t CDECL lame_get_version(const lame_global_flags *); + +/* encoder delay */ +int32_t CDECL lame_get_encoder_delay(const lame_global_flags *); + +/* + padding appended to the input to make sure decoder can fully decode + all input. Note that this value can only be calculated during the + call to lame_encoder_flush(). Before lame_encoder_flush() has + been called, the value of encoder_padding = 0. +*/ +int32_t CDECL lame_get_encoder_padding(const lame_global_flags *); + +/* size of MPEG frame */ +int32_t CDECL lame_get_framesize(const lame_global_flags *); + +/* number of PCM samples buffered, but not yet encoded to mp3 data. */ +int32_t CDECL lame_get_mf_samples_to_encode( const lame_global_flags* gfp ); + +/* + size (bytes) of mp3 data buffered, but not yet encoded. + this is the number of bytes which would be output by a call to + lame_encode_flush_nogap. NOTE: lame_encode_flush() will return + more bytes than this because it will encode the reamining buffered + PCM samples before flushing the mp3 buffers. +*/ +int32_t CDECL lame_get_size_mp3buffer( const lame_global_flags* gfp ); + +/* number of frames encoded so far */ +int32_t CDECL lame_get_frameNum(const lame_global_flags *); + +/* + lame's estimate of the total number of frames to be encoded + only valid if calling program set num_samples +*/ +int32_t CDECL lame_get_totalframes(const lame_global_flags *); + +/* RadioGain value. Multiplied by 10 and rounded to the nearest. */ +int32_t CDECL lame_get_RadioGain(const lame_global_flags *); + +/* AudiophileGain value. Multipled by 10 and rounded to the nearest. */ +int32_t CDECL lame_get_AudiophileGain(const lame_global_flags *); + +/* the peak sample */ +float CDECL lame_get_PeakSample(const lame_global_flags *); + +/* Gain change required for preventing clipping. The value is correct only if + peak sample searching was enabled. If negative then the waveform + already does not clip. The value is multiplied by 10 and rounded up. */ +int32_t CDECL lame_get_noclipGainChange(const lame_global_flags *); + +/* user-specified scale factor required for preventing clipping. Value is + correct only if peak sample searching was enabled and no user-specified + scaling was performed. If negative then either the waveform already does + not clip or the value cannot be determined */ +float CDECL lame_get_noclipScale(const lame_global_flags *); + +/* returns the limit of PCM samples, which one can pass in an encode call + under the constrain of a provided buffer of size buffer_size */ +int32_t CDECL lame_get_maximum_number_of_samples(lame_t gfp, size_t buffer_size); + + + + +/* + * REQUIRED: + * sets more internal configuration based on data provided above. + * returns -1 if something failed. + */ +int32_t CDECL lame_init_params(lame_global_flags *); + + +/* + * OPTIONAL: + * get the version number, in a string. of the form: + * "3.63 (beta)" or just "3.63". + */ +const char* CDECL get_lame_version ( void ); +const char* CDECL get_lame_short_version ( void ); +const char* CDECL get_lame_very_short_version ( void ); +const char* CDECL get_psy_version ( void ); +const char* CDECL get_lame_url ( void ); +const char* CDECL get_lame_os_bitness ( void ); + +/* + * OPTIONAL: + * get the version numbers in numerical form. 
+ */ +typedef struct { + /* generic LAME version */ + int32_t major; + int32_t minor; + int32_t alpha; /* 0 if not an alpha version */ + int32_t beta; /* 0 if not a beta version */ + + /* version of the psy model */ + int32_t psy_major; + int32_t psy_minor; + int32_t psy_alpha; /* 0 if not an alpha version */ + int32_t psy_beta; /* 0 if not a beta version */ + + /* compile time features */ + const char *features; /* Don't make assumptions about the contents! */ +} lame_version_t; +void CDECL get_lame_version_numerical(lame_version_t *); + + +/* + * OPTIONAL: + * print32_t internal lame configuration to message handler + */ +void CDECL lame_print_config(const lame_global_flags* gfp); + +void CDECL lame_print_internals( const lame_global_flags *gfp); + + +/* + * input pcm data, output (maybe) mp3 frames. + * This routine handles all buffering, resampling and filtering for you. + * + * return code number of bytes output in mp3buf. Can be 0 + * -1: mp3buf was too small + * -2: malloc() problem + * -3: lame_init_params() not called + * -4: psycho acoustic problems + * + * The required mp3buf_size can be computed from num_samples, + * samplerate and encoding rate, but here is a worst case estimate: + * + * mp3buf_size in bytes = 1.25*num_samples + 7200 + * + * I think a tighter bound could be: (mt, March 2000) + * MPEG1: + * num_samples*(bitrate/8)/samplerate + 4*1152*(bitrate/8)/samplerate + 512 + * MPEG2: + * num_samples*(bitrate/8)/samplerate + 4*576*(bitrate/8)/samplerate + 256 + * + * but test first if you use that! + * + * set mp3buf_size = 0 and LAME will not check if mp3buf_size is + * large enough. + * + * NOTE: + * if gfp->num_channels=2, but gfp->mode = 3 (mono), the L & R channels + * will be averaged into the L channel before encoding only the L channel + * This will overwrite the data in buffer_l[] and buffer_r[]. + * +*/ +int32_t CDECL lame_encode_buffer ( + lame_global_flags* gfp, /* global context handle */ + const int16_t buffer_l [], /* PCM data for left channel */ + const int16_t buffer_r [], /* PCM data for right channel */ + const int32_t nsamples, /* number of samples per channel */ + uint8_t* mp3buf, /* pointer to encoded MP3 stream */ + const int32_t mp3buf_size ); /* number of valid octets in this + stream */ + +/* + * as above, but input has L & R channel data interleaved. + * NOTE: + * num_samples = number of samples in the L (or R) + * channel, not the total number of samples in pcm[] + */ +int32_t CDECL lame_encode_buffer_interleaved( + lame_global_flags* gfp, /* global context handlei */ + int16_t pcm[], /* PCM data for left and right + channel, interleaved */ + int32_t num_samples, /* number of samples per channel, + _not_ number of samples in + pcm[] */ + uint8_t* mp3buf, /* pointer to encoded MP3 stream */ + int32_t mp3buf_size ); /* number of valid octets in this + stream */ + + +/* as lame_encode_buffer, but for 'float's. + * !! NOTE: !! data must still be scaled to be in the same range as + * short int, +/- 32768 + */ +int32_t CDECL lame_encode_buffer_float( + lame_global_flags* gfp, /* global context handle */ + const float pcm_l [], /* PCM data for left channel */ + const float pcm_r [], /* PCM data for right channel */ + const int32_t nsamples, /* number of samples per channel */ + uint8_t* mp3buf, /* pointer to encoded MP3 stream */ + const int32_t mp3buf_size ); /* number of valid octets in this + stream */ + +/* as lame_encode_buffer, but for 'float's. + * !! NOTE: !! 
+ */
+int32_t CDECL lame_encode_buffer_ieee_float(
+        lame_t          gfp,
+        const float     pcm_l [],        /* PCM data for left channel     */
+        const float     pcm_r [],        /* PCM data for right channel    */
+        const int32_t   nsamples,
+        uint8_t *       mp3buf,
+        const int32_t   mp3buf_size);
+int32_t CDECL lame_encode_buffer_interleaved_ieee_float(
+        lame_t          gfp,
+        const float     pcm[],           /* PCM data for left and right
+                                            channel, interleaved */
+        const int32_t   nsamples,
+        uint8_t *       mp3buf,
+        const int32_t   mp3buf_size);
+
+/* as lame_encode_buffer, but for 'double's.
+ * !! NOTE: !! data must be scaled to +/- 1 full scale
+ */
+int32_t CDECL lame_encode_buffer_ieee_double(
+        lame_t          gfp,
+        const double    pcm_l [],        /* PCM data for left channel     */
+        const double    pcm_r [],        /* PCM data for right channel    */
+        const int32_t   nsamples,
+        uint8_t *       mp3buf,
+        const int32_t   mp3buf_size);
+int32_t CDECL lame_encode_buffer_interleaved_ieee_double(
+        lame_t          gfp,
+        const double    pcm[],           /* PCM data for left and right
+                                            channel, interleaved */
+        const int32_t   nsamples,
+        uint8_t *       mp3buf,
+        const int32_t   mp3buf_size);
+
+/* as lame_encode_buffer, but for long's
+ * !! NOTE: !! data must still be scaled to be in the same range as
+ * short int, +/- 32768
+ *
+ * This scaling was a mistake (it doesn't allow one to exploit the full
+ * precision of type 'long'). Use lame_encode_buffer_long2() instead.
+ *
+ */
+int32_t CDECL lame_encode_buffer_long(
+        lame_global_flags* gfp,          /* global context handle         */
+        const long      buffer_l [],     /* PCM data for left channel     */
+        const long      buffer_r [],     /* PCM data for right channel    */
+        const int32_t   nsamples,        /* number of samples per channel */
+        uint8_t*        mp3buf,          /* pointer to encoded MP3 stream */
+        const int32_t   mp3buf_size );   /* number of valid octets in this
+                                            stream */
+
+/* Same as lame_encode_buffer_long(), but with correct scaling.
+ * !! NOTE: !! data must still be scaled to be in the same range as
+ * type 'long'. Data should be in the range: +/- 2^(8*sizeof(long)-1)
+ *
+ */
+int32_t CDECL lame_encode_buffer_long2(
+        lame_global_flags* gfp,          /* global context handle         */
+        const long      buffer_l [],     /* PCM data for left channel     */
+        const long      buffer_r [],     /* PCM data for right channel    */
+        const int32_t   nsamples,        /* number of samples per channel */
+        uint8_t*        mp3buf,          /* pointer to encoded MP3 stream */
+        const int32_t   mp3buf_size );   /* number of valid octets in this
+                                            stream */
+
+/* as lame_encode_buffer, but for int's
+ * !! NOTE: !! input should be scaled to the maximum range of 'int'
+ * If int32_t is 4 bytes, then the values should range from
+ * -2147483648 to 2147483647.
+ *
+ * This routine does not (and cannot, without losing precision) use
+ * the same scaling as the rest of the lame_encode_buffer() routines.
+ *
+ */
+int32_t CDECL lame_encode_buffer_int(
+        lame_global_flags* gfp,          /* global context handle         */
+        const int32_t   buffer_l [],     /* PCM data for left channel     */
+        const int32_t   buffer_r [],     /* PCM data for right channel    */
+        const int32_t   nsamples,        /* number of samples per channel */
+        uint8_t*        mp3buf,          /* pointer to encoded MP3 stream */
+        const int32_t   mp3buf_size );   /* number of valid octets in this
+                                            stream */
+
+/*
+ * as above, but for interleaved data.
+ * !! NOTE: !! data must still be scaled to be in the same range as
+ * type 'int32_t'. Data should be in the range: +/- 2^(8*sizeof(int32_t)-1)
+ * NOTE:
+ * num_samples = number of samples in the L (or R)
+ * channel, not the total number of samples in pcm[]
+ */
+int
+lame_encode_buffer_interleaved_int(
+        lame_t          gfp,
+        const int32_t   pcm [],          /* PCM data for left and right
+                                            channel, interleaved */
+        const int32_t   nsamples,        /* number of samples per channel,
+                                            _not_ number of samples in
+                                            pcm[] */
+        uint8_t*        mp3buf,          /* pointer to encoded MP3 stream */
+        const int32_t   mp3buf_size );   /* number of valid octets in this
+                                            stream */
+
+
+
+/*
+ * REQUIRED:
+ * lame_encode_flush will flush the internal PCM buffers, padding with
+ * 0's to make sure the final frame is complete, and then flush
+ * the internal MP3 buffers, and thus may return a
+ * final few mp3 frames. 'mp3buf' should be at least 7200 bytes long
+ * to hold all possible emitted data.
+ *
+ * will also write id3v1 tags (if any) into the bitstream
+ *
+ * return code = number of bytes output to mp3buf. Can be 0
+ */
+int32_t CDECL lame_encode_flush(
+        lame_global_flags * gfp,   /* global context handle                 */
+        uint8_t*  mp3buf,          /* pointer to encoded MP3 stream         */
+        int32_t   size);           /* number of valid octets in this stream */
+
+/*
+ * OPTIONAL:
+ * lame_encode_flush_nogap will flush the internal mp3 buffers and pad
+ * the last frame with ancillary data so it is a complete mp3 frame.
+ *
+ * 'mp3buf' should be at least 7200 bytes long
+ * to hold all possible emitted data.
+ *
+ * After a call to this routine, the output mp3 data is complete, but
+ * you may continue to encode new PCM samples and write future mp3 data
+ * to a different file. The two mp3 files will play back with no gaps
+ * if they are concatenated together.
+ *
+ * This routine will NOT write id3v1 tags into the bitstream.
+ *
+ * return code = number of bytes output to mp3buf. Can be 0
+ */
+int32_t CDECL lame_encode_flush_nogap(
+        lame_global_flags * gfp,   /* global context handle                 */
+        uint8_t*  mp3buf,          /* pointer to encoded MP3 stream         */
+        int32_t   size);           /* number of valid octets in this stream */
+
+/*
+ * OPTIONAL:
+ * Normally, this is called by lame_init_params(). It writes id3v2 and
+ * Xing headers into the front of the bitstream, and sets frame counters
+ * and bitrate histogram data to 0. You can also call this after
+ * lame_encode_flush_nogap().
+ */
+int32_t CDECL lame_init_bitstream(
+        lame_global_flags * gfp);  /* global context handle */
+
+
+
+/*
+ * OPTIONAL: some simple statistics
+ * a bitrate histogram to visualize the distribution of used frame sizes
+ * a stereo mode histogram to visualize the distribution of used stereo
+ *   modes, useful in joint-stereo mode only
+ *   0: LR    left-right encoded
+ *   1: LR-I  left-right and intensity encoded (currently not supported)
+ *   2: MS    mid-side encoded
+ *   3: MS-I  mid-side and intensity encoded (currently not supported)
+ *
+ * attention: don't call them after lame_encode_finish
+ * suggested: lame_encode_flush -> lame_*_hist -> lame_close
+ */
+
+void CDECL lame_bitrate_hist(
+        const lame_global_flags * gfp,
+        int32_t bitrate_count[14] );
+void CDECL lame_bitrate_kbps(
+        const lame_global_flags * gfp,
+        int32_t bitrate_kbps [14] );
+void CDECL lame_stereo_mode_hist(
+        const lame_global_flags * gfp,
+        int32_t stereo_mode_count[4] );
+
+void CDECL lame_bitrate_stereo_mode_hist (
+        const lame_global_flags * gfp,
+        int32_t bitrate_stmode_count[14][4] );
+
+void CDECL lame_block_type_hist (
+        const lame_global_flags * gfp,
+        int32_t btype_count[6] );
+
+void CDECL lame_bitrate_block_type_hist (
+        const lame_global_flags * gfp,
+        int32_t bitrate_btype_count[14][6] );
+
+#if (DEPRECATED_OR_OBSOLETE_CODE_REMOVED && 0)
+#else
+/*
+ * OPTIONAL:
+ * lame_mp3_tags_fid will rewrite a Xing VBR tag to the mp3 file with file
+ * pointer fid. These calls perform forward and backwards seeks, so make
+ * sure fid is a real file. Make sure lame_encode_flush has been called,
+ * and all mp3 data has been written to the file before calling this
+ * function.
+ * NOTE:
+ * if VBR tags are turned off by the user, or turned off by LAME because
+ * the output is not a regular file, this call does nothing
+ * NOTE:
+ * LAME wants to read from the file to skip an optional ID3v2 tag, so
+ * make sure you opened the file for writing and reading.
+ * NOTE:
+ * You can call lame_get_lametag_frame instead, if you want to insert
+ * the lametag yourself.
+*/
+void CDECL lame_mp3_tags_fid(lame_global_flags *, FILE* fid);
+#endif
+
+/*
+ * OPTIONAL:
+ * lame_get_lametag_frame copies the final LAME-tag into 'buffer'.
+ * The function returns the number of bytes copied into buffer, or
+ * the required buffer size, if the provided buffer is too small.
+ * The function failed if the return value is larger than 'size'!
+ * Make sure lame_encode_flush() has been called before calling this
+ * function.
+ * NOTE:
+ * if VBR tags are turned off by the user, or turned off by LAME,
+ * this call does nothing and returns 0.
+ * NOTE:
+ * LAME inserts an empty frame at the beginning of the mp3 audio data,
+ * which you have to replace with the final LAME-tag frame after encoding.
+ * In case there is no ID3v2 tag, usually this frame will be the very first
+ * data in your mp3 file. If you put some other leading data into your
+ * file, you'll have to do some bookkeeping about where to write this buffer.
+ */
+size_t CDECL lame_get_lametag_frame(
+        const lame_global_flags *, uint8_t* buffer, size_t size);
+
+/*
+ * REQUIRED:
+ * final call to free all remaining buffers
+ */
+int32_t CDECL lame_close (lame_global_flags *);
+
+#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED
+#else
+/*
+ * OBSOLETE:
+ * lame_encode_finish combines lame_encode_flush() and lame_close() in
+ * one call.
However, once this call is made, the statistics routines + * will no longer work because the data will have been cleared, and + * lame_mp3_tags_fid() cannot be called to add data to the VBR header + */ +int32_t CDECL lame_encode_finish( + lame_global_flags* gfp, + uint8_t* mp3buf, + int32_t size ); +#endif + + + + + + +/********************************************************************* + * + * decoding + * + * a simple interface to mpglib, part of mpg123, is also included if + * libmp3lame is compiled with HAVE_MPGLIB + * + *********************************************************************/ + +struct hip_global_struct; +typedef struct hip_global_struct hip_global_flags; +typedef hip_global_flags *hip_t; + + +typedef struct { + int32_t header_parsed; /* 1 if header was parsed and following data was + computed */ + int32_t stereo; /* number of channels */ + int32_t samplerate; /* sample rate */ + int32_t bitrate; /* bitrate */ + int32_t mode; /* mp3 frame type */ + int32_t mode_ext; /* mp3 frame type */ + int32_t framesize; /* number of samples per mp3 frame */ + + /* this data is only computed if mpglib detects a Xing VBR header */ + unsigned long nsamp; /* number of samples in mp3 file. */ + int32_t totalframes; /* total number of frames in mp3 file */ + + /* this data is not currently computed by the mpglib routines */ + int32_t framenum; /* frames decoded counter */ +} mp3data_struct; + +/* required call to initialize decoder */ +hip_t CDECL hip_decode_init(void); + +/* cleanup call to exit decoder */ +int32_t CDECL hip_decode_exit(hip_t gfp); + +/* HIP reporting functions */ +void CDECL hip_set_errorf(hip_t gfp, lame_report_function f); +void CDECL hip_set_debugf(hip_t gfp, lame_report_function f); +void CDECL hip_set_msgf (hip_t gfp, lame_report_function f); + +/********************************************************************* + * input 1 mp3 frame, output (maybe) pcm data. + * + * nout = hip_decode(hip, mp3buf,len,pcm_l,pcm_r); + * + * input: + * len : number of bytes of mp3 data in mp3buf + * mp3buf[len] : mp3 data to be decoded + * + * output: + * nout: -1 : decoding error + * 0 : need more data before we can complete the decode + * >0 : returned 'nout' samples worth of data in pcm_l,pcm_r + * pcm_l[nout] : left channel data + * pcm_r[nout] : right channel data + * + *********************************************************************/ +int32_t CDECL hip_decode( hip_t gfp + , uint8_t * mp3buf + , size_t len + , short pcm_l[] + , short pcm_r[] + ); + +/* same as hip_decode, and also returns mp3 header data */ +int32_t CDECL hip_decode_headers( hip_t gfp + , uint8_t* mp3buf + , size_t len + , short pcm_l[] + , short pcm_r[] + , mp3data_struct* mp3data + ); + +/* same as hip_decode, but returns at most one frame */ +int32_t CDECL hip_decode1( hip_t gfp + , uint8_t* mp3buf + , size_t len + , short pcm_l[] + , short pcm_r[] + ); + +/* same as hip_decode1, but returns at most one frame and mp3 header data */ +int32_t CDECL hip_decode1_headers( hip_t gfp + , uint8_t* mp3buf + , size_t len + , short pcm_l[] + , short pcm_r[] + , mp3data_struct* mp3data + ); + +/* same as hip_decode1_headers, but also returns enc_delay and enc_padding + from VBR Info tag, (-1 if no info tag was found) */ +int32_t CDECL hip_decode1_headersB( hip_t gfp + , uint8_t* mp3buf + , size_t len + , short pcm_l[] + , short pcm_r[] + , mp3data_struct* mp3data + , int32_t *enc_delay + , int32_t *enc_padding + ); + + + +/* OBSOLETE: + * lame_decode... 
functions are there to keep old code working + * but it is strongly recommended to replace calls by hip_decode... + * function calls, see above. + */ +#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED +#else +int32_t CDECL lame_decode_init(void); +int32_t CDECL lame_decode( + uint8_t * mp3buf, + int32_t len, + short pcm_l[], + short pcm_r[] ); +int32_t CDECL lame_decode_headers( + uint8_t* mp3buf, + int32_t len, + short pcm_l[], + short pcm_r[], + mp3data_struct* mp3data ); +int32_t CDECL lame_decode1( + uint8_t* mp3buf, + int32_t len, + short pcm_l[], + short pcm_r[] ); +int32_t CDECL lame_decode1_headers( + uint8_t* mp3buf, + int32_t len, + short pcm_l[], + short pcm_r[], + mp3data_struct* mp3data ); +int32_t CDECL lame_decode1_headersB( + uint8_t* mp3buf, + int32_t len, + short pcm_l[], + short pcm_r[], + mp3data_struct* mp3data, + int32_t *enc_delay, + int32_t *enc_padding ); +int32_t CDECL lame_decode_exit(void); + +#endif /* obsolete lame_decode API calls */ + + +/********************************************************************* + * + * id3tag stuff + * + *********************************************************************/ + +/* + * id3tag.h -- Interface to write ID3 version 1 and 2 tags. + * + * Copyright (C) 2000 Don Melton. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
+ */
+
+/* utility to obtain alphabetically sorted list of genre names with numbers */
+void CDECL id3tag_genre_list(
+        void (*handler)(int, const char *, void *),
+        void* cookie);
+
+void CDECL id3tag_init     (lame_t gfp);
+
+/* force addition of version 2 tag */
+void CDECL id3tag_add_v2   (lame_t gfp);
+
+/* add only a version 1 tag */
+void CDECL id3tag_v1_only  (lame_t gfp);
+
+/* add only a version 2 tag */
+void CDECL id3tag_v2_only  (lame_t gfp);
+
+/* pad version 1 tag with spaces instead of nulls */
+void CDECL id3tag_space_v1 (lame_t gfp);
+
+/* pad version 2 tag with extra 128 bytes */
+void CDECL id3tag_pad_v2   (lame_t gfp);
+
+/* pad version 2 tag with extra n bytes */
+void CDECL id3tag_set_pad  (lame_t gfp, size_t n);
+
+void CDECL id3tag_set_title(lame_t gfp, const char* title);
+void CDECL id3tag_set_artist(lame_t gfp, const char* artist);
+void CDECL id3tag_set_album(lame_t gfp, const char* album);
+void CDECL id3tag_set_year(lame_t gfp, const char* year);
+void CDECL id3tag_set_comment(lame_t gfp, const char* comment);
+
+/* returns -1 if the track number is out of ID3v1 range,
+   in which case it is ignored for ID3v1 */
+int32_t CDECL id3tag_set_track(lame_t gfp, const char* track);
+
+/* returns a non-zero result if the genre name or number is invalid
+   result  0: OK
+   result -1: genre number out of range
+   result -2: no valid ID3v1 genre name, mapped to ID3v1 'Other'
+              but taken as-is for the ID3v2 genre tag */
+int32_t CDECL id3tag_set_genre(lame_t gfp, const char* genre);
+
+/* returns a non-zero result if the field name is invalid */
+int32_t CDECL id3tag_set_fieldvalue(lame_t gfp, const char* fieldvalue);
+
+/* returns a non-zero result if the image type is invalid */
+int32_t CDECL id3tag_set_albumart(lame_t gfp, const char* image, size_t size);
+
+/* lame_get_id3v1_tag copies the ID3v1 tag into buffer.
+ * The function returns the number of bytes copied into buffer, or the
+ * number of bytes required if the buffer 'size' is too small.
+ * The function fails if the returned value is larger than 'size'.
+ * NOTE:
+ * This function does nothing if the ID3v1 tag was disabled by the user
+ * or by LAME.
+ */
+size_t CDECL lame_get_id3v1_tag(lame_t gfp, uint8_t* buffer, size_t size);
+
+/* lame_get_id3v2_tag copies the ID3v2 tag into buffer.
+ * The function returns the number of bytes copied into buffer, or the
+ * number of bytes required if the buffer 'size' is too small.
+ * The function fails if the returned value is larger than 'size'.
+ * NOTE:
+ * This function does nothing if the ID3v2 tag was disabled by the user
+ * or by LAME.
+ */
+size_t CDECL lame_get_id3v2_tag(lame_t gfp, uint8_t* buffer, size_t size);
+
+/* Normally lame_init_params() writes ID3v2 tags into the audio stream.
+ * Call lame_set_write_id3tag_automatic(gfp, 0) before lame_init_params()
+ * to turn off this behaviour, then fetch the ID3v2 tag with the function
+ * above and write it into your file yourself.
+ */ +void CDECL lame_set_write_id3tag_automatic(lame_global_flags * gfp, int); +int32_t CDECL lame_get_write_id3tag_automatic(lame_global_flags const* gfp); + +/* experimental */ +int32_t CDECL id3tag_set_textinfo_latin1(lame_t gfp, char const *id, char const *text); + +/* experimental */ +int32_t CDECL id3tag_set_comment_latin1(lame_t gfp, char const *lang, char const *desc, char const *text); + +#if DEPRECATED_OR_OBSOLETE_CODE_REMOVED +#else +/* experimental */ +int32_t CDECL id3tag_set_textinfo_ucs2(lame_t gfp, char const *id, unsigned short const *text); + +/* experimental */ +int32_t CDECL id3tag_set_comment_ucs2(lame_t gfp, char const *lang, + unsigned short const *desc, unsigned short const *text); + +/* experimental */ +int32_t CDECL id3tag_set_fieldvalue_ucs2(lame_t gfp, const unsigned short *fieldvalue); +#endif + +/* experimental */ +int32_t CDECL id3tag_set_fieldvalue_utf16(lame_t gfp, const unsigned short *fieldvalue); + +/* experimental */ +int32_t CDECL id3tag_set_textinfo_utf16(lame_t gfp, char const *id, unsigned short const *text); + +/* experimental */ +int32_t CDECL id3tag_set_comment_utf16(lame_t gfp, char const *lang, unsigned short const *desc, unsigned short const *text); + + +/*********************************************************************** +* +* list of valid bitrates [kbps] & sample frequencies [Hz]. +* first index: 0: MPEG-2 values (sample frequencies 16...24 kHz) +* 1: MPEG-1 values (sample frequencies 32...48 kHz) +* 2: MPEG-2.5 values (sample frequencies 8...12 kHz) +***********************************************************************/ + +extern const int32_t bitrate_table [3][16]; +extern const int32_t samplerate_table [3][ 4]; + +/* access functions for use in DLL, global vars are not exported */ +int32_t CDECL lame_get_bitrate(int32_t mpeg_version, int32_t table_index); +int32_t CDECL lame_get_samplerate(int32_t mpeg_version, int32_t table_index); + + +/* maximum size of albumart image (128KB), which affects LAME_MAXMP3BUFFER + as well since lame_encode_buffer() also returns ID3v2 tag data */ +#define LAME_MAXALBUMART (128 * 1024) + +/* maximum size of mp3buffer needed if you encode at most 1152 samples for + each call to lame_encode_buffer. 
see lame_encode_buffer() below + (LAME_MAXMP3BUFFER is now obsolete) */ +#define LAME_MAXMP3BUFFER (16384 + LAME_MAXALBUMART) + + +typedef enum { + LAME_OKAY = 0, + LAME_NOERROR = 0, + LAME_GENERICERROR = -1, + LAME_NOMEM = -10, + LAME_BADBITRATE = -11, + LAME_BADSAMPFREQ = -12, + LAME_INTERNALERROR = -13, + + FRONTEND_READERROR = -80, + FRONTEND_WRITEERROR = -81, + FRONTEND_FILETOOLARGE = -82 + +} lame_errorcodes_t; + +#if defined(__cplusplus) +} +#endif +#endif /* LAME_LAME_H */ + diff --git a/libmetartc3/src/yangplayer/YangPlayFactory.cpp b/libmetartc3/src/yangplayer/YangPlayFactory.cpp new file mode 100755 index 00000000..8558febc --- /dev/null +++ b/libmetartc3/src/yangplayer/YangPlayFactory.cpp @@ -0,0 +1,30 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + + +#include + +#include +YangPlayFactory::YangPlayFactory() { + // TODO Auto-generated constructor stub + +} + +YangPlayFactory::~YangPlayFactory() { + // TODO Auto-generated destructor stub +} + +YangAudioPlay* YangPlayFactory::createAudioPlay(YangAudioInfo *pcontext){ +#ifndef _WIN32 + return NULL;//if(pcontext->audioPlayType==1) return new YangAudioPlayAlsa(pcontext); +#endif + return NULL;// new YangAudioPlaySdl(pcontext); +} + +YangAudioPlay *YangPlayFactory::createAudioPlay(YangAudioPlayType paet,YangAudioInfo *pcontext){ +#ifndef _WIN32 + return NULL;//if(paet==Yang_AP_ALSA) return new YangAudioPlayAlsa(pcontext); +#endif + return NULL;//new YangAudioPlaySdl(pcontext); +} diff --git a/libmetartc3/src/yangplayer/YangPlayReceive.cpp b/libmetartc3/src/yangplayer/YangPlayReceive.cpp new file mode 100755 index 00000000..68396104 --- /dev/null +++ b/libmetartc3/src/yangplayer/YangPlayReceive.cpp @@ -0,0 +1,188 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include + +#include +#include +#include + +void g_playrecv_receiveAudio(void* user,YangFrame *audioFrame){ + if(user==NULL) return; + YangPlayReceive* rtcHandle=(YangPlayReceive*)user; + rtcHandle->receiveAudio(audioFrame); +} +void g_playrecv_receiveVideo(void* user,YangFrame *videoFrame){ + if(user==NULL) return; + YangPlayReceive* rtcHandle=(YangPlayReceive*)user; + rtcHandle->receiveVideo(videoFrame); +} + + +YangPlayReceive::YangPlayReceive(YangContext* pcontext) { + m_context=pcontext; + m_isStart = 0; + m_out_videoBuffer = NULL; + m_out_audioBuffer = NULL; + isReceived = 0; + isReceiveConvert = 0; + m_headLen = 1; //pcontext->audio.audioDecoderType == 0 ? 
2 : 1;
+	m_recv = NULL;
+	m_recvCallback.receiveAudio=g_playrecv_receiveAudio;
+	m_recvCallback.receiveVideo=g_playrecv_receiveVideo;
+	m_recvCallback.context=this;
+}
+
+YangPlayReceive::~YangPlayReceive() {
+	disConnect();
+	if (isReceiveConvert) {
+		stop();
+		while (m_isStart) {
+			yang_usleep(1000);
+		}
+	}
+
+	if (m_recv)
+		delete m_recv;
+	m_recv = NULL;
+
+	m_out_audioBuffer = NULL;
+	m_out_videoBuffer = NULL;
+	m_context=NULL;
+}
+
+void YangPlayReceive::disConnect() {
+	if(m_recv) m_recv->disConnectServer(m_recv->context);
+	yang_destroy_streamHandle(m_recv);
+	yang_free(m_recv);
+}
+
+void YangPlayReceive::setBuffer(YangAudioEncoderBuffer *al, YangVideoDecoderBuffer *vl) {
+	m_out_audioBuffer = al;
+	m_out_videoBuffer = vl;
+}
+
+void YangPlayReceive::receiveAudio(YangFrame *audioFrame) {
+	// strip the audio tag header (m_headLen bytes) before queueing the payload
+	audioFrame->payload += m_headLen;
+	audioFrame->nb -= m_headLen;
+	m_out_audioBuffer->putPlayAudio(audioFrame);
+}
+
+void YangPlayReceive::receiveVideo(YangFrame *videoFrame) {
+	if(videoFrame==NULL||videoFrame->payload==NULL) return;
+	uint8_t *temp = videoFrame->payload;
+	int videoLen=videoFrame->nb;
+
+	// FLV video tag header: byte 0 packs frame type and codec id
+	// (0x17/0x27 = AVC key/inter frame, 0x1c/0x2c = HEVC key/inter frame),
+	// byte 1 is the packet type (0x00 = sequence header, 0x01 = NALU);
+	// the 5-byte header (type, packet type, composition time) is skipped.
+	if( (temp[0] == 0x27|| temp[0] == 0x2c)&&temp[1] == 0x01){
+		videoFrame->payload = temp + 5;
+		videoFrame->nb -= 5;
+		videoFrame->frametype = YANG_Frametype_P;
+		if(yang_hasH264Pframe(videoFrame->payload)) m_out_videoBuffer->putEVideo(videoFrame);
+		return;
+	}
+	if ((temp[0] == 0x17 || temp[0] == 0x1c) ) {
+		if(temp[1] == 0x00){
+			// sequence header: forward the SPS/PPS metadata as-is
+			videoFrame->frametype = YANG_Frametype_Spspps;
+			m_out_videoBuffer->putEVideo(videoFrame);
+			return;
+		}
+		if(temp[1] == 0x01){
+			// keyframe NALU: SPS/PPS may be prepended to the IDR data,
+			// so split it off and queue it as a separate frame first
+			YangH264NaluData nalu;
+			videoFrame->payload=temp+5;
+			videoFrame->nb=videoLen-5;
+
+			yang_parseH264Nalu(videoFrame,&nalu);
+
+			if(nalu.spsppsPos>-1){
+				uint8_t meta[200] = { 0 };
+				videoFrame->payload=meta;
+				yang_getH264SpsppseNalu(videoFrame,temp+5+nalu.spsppsPos);
+				videoFrame->frametype = YANG_Frametype_Spspps;
+				m_out_videoBuffer->putEVideo(videoFrame);
+			}
+			if(nalu.keyframePos>-1){
+				videoFrame->payload = temp + 5+nalu.keyframePos;
+				videoFrame->nb = videoLen-5-nalu.keyframePos;
+				videoFrame->frametype =YANG_Frametype_I;
+				m_out_videoBuffer->putEVideo(videoFrame);
+			}
+		}
+	}
+}
+
+int32_t YangPlayReceive::init(int32_t nettype, string server, int32_t pport,
+		string stream) {
+	if (!m_recv){
+		m_recv = (YangStreamHandle*)calloc(1,sizeof(YangStreamHandle));
+		YangStreamConfig streamConf;
+		memset(&streamConf,0,sizeof(streamConf));
+
+		strcpy(streamConf.app,"live");
+		streamConf.streamOptType = Yang_Stream_Play;
+		streamConf.uid = 0;
+
+		memset(streamConf.localIp,0,sizeof(streamConf.localIp));
+		strcpy(streamConf.localIp,"127.0.0.1");
+		streamConf.localPort = 8100;
+		//streamConf.serverIp = server;
+		strcpy(streamConf.serverIp,server.c_str());
+		streamConf.serverPort = pport;
+		//streamConf.stream = stream;
+		strcpy(streamConf.stream,stream.c_str());
+
+		yang_create_streamHandle(nettype,m_recv, 0,&streamConf,&m_context->avinfo,&m_context->stream,&m_recvCallback);
+		// the handle's context is only valid after yang_create_streamHandle(),
+		// so set the stream flags afterwards
+		m_recv->context->videoStream = 1;
+		m_recv->context->uid = 0;
+	}
+
+	int32_t ret = m_recv->connectServer(m_recv->context);
+	if (ret)
+		return ret;
+	m_recv->context->streamInit = 1;
+	return ret;
+}
+
+void YangPlayReceive::stop() {
+	isReceiveConvert = 0;
+}
+void YangPlayReceive::run() {
+	m_isStart = 1;
+	startLoop();
+	m_isStart = 0;
+}
+void YangPlayReceive::startLoop() {
+	yang_reindex(m_out_audioBuffer);
+	yang_reindex(m_out_videoBuffer);
+
+	int32_t bufLen = 0;
+	int32_t retCode = Yang_Ok;
+	isReceiveConvert = 1;
+	isReceived = 1;
+
+	while 
(isReceiveConvert == 1) { + if (!m_recv) { + yang_usleep(10000); + continue; + } + + if (!m_recv->context->streamInit) + continue; + bufLen = 0; + //tuid=m_players.at(i)->m_uid; + retCode = m_recv->receiveData(m_recv->context,&bufLen); + + + //if (retCode) { + // yang_error("Receive Data Error:%d", retCode); + // break; + //} + if (bufLen == 0) + yang_usleep(2000); + } //end while + + isReceived = 0; +} diff --git a/libmetartc3/src/yangplayer/YangPlayerBase.cpp b/libmetartc3/src/yangplayer/YangPlayerBase.cpp new file mode 100755 index 00000000..67c303d3 --- /dev/null +++ b/libmetartc3/src/yangplayer/YangPlayerBase.cpp @@ -0,0 +1,68 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include + +YangPlayerBase::YangPlayerBase() +{ + + m_ydb=NULL; + m_ypb=NULL; + +} + +YangPlayerBase::~YangPlayerBase() +{ + + yang_delete(m_ydb); + yang_delete(m_ypb); +} + +void YangPlayerBase::stopAll(){ + if(m_ydb) m_ydb->stopAll(); + if(m_ypb) m_ypb->stopAll(); +} + + +void YangPlayerBase::init(YangContext* pcontext){ + //YangAudioInfo* audio=&pcontext->audio; + if(m_ydb==NULL) { + m_ydb=new YangPlayerDecoder(pcontext); + //m_ydb->m_audio.sample=audio->sample; + //m_ydb->m_audio.channel=audio->channel; + //m_ydb->m_audio.usingMono=audio->usingMono; + //m_ydb->m_audio.audioDecoderType=audio->audioDecoderType; + m_ydb->initAudioDecoder(); + m_ydb->initVideoDecoder(); + } + + if(m_ypb==NULL) { + m_ypb=new YangPlayerPlay(); + m_ypb->initAudioPlay(pcontext); + //m_ypb->initVideoPlay(m_ydb->m_videoDec); + //m_ydb->m_videoDec->m_yvp=m_ypb-> + m_ypb->setInAudioList(m_ydb->getOutAudioBuffer()); + //m_ypb->setInVideoList(m_ydb->getOutVideoBuffer()); + } + +} + +void YangPlayerBase::startAudioDecoder(YangAudioEncoderBuffer *prr){ + m_ydb->setInAudioBuffer(prr); + m_ydb->startAudioDecoder(); +} +void YangPlayerBase::startVideoDecoder(YangVideoDecoderBuffer *prr){ + m_ydb->setInVideoBuffer(prr); + m_ydb->startVideoDecoder(); +} + +void YangPlayerBase::startAudioPlay(YangContext* paudio) { + //m_ydb->startDecoder(); + if(m_ypb){ + m_ypb->initAudioPlay(paudio); + m_ypb->startAudioPlay(); + } +} + + + diff --git a/libmetartc3/src/yangplayer/YangPlayerDecoder.cpp b/libmetartc3/src/yangplayer/YangPlayerDecoder.cpp new file mode 100755 index 00000000..80f47704 --- /dev/null +++ b/libmetartc3/src/yangplayer/YangPlayerDecoder.cpp @@ -0,0 +1,114 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include "pthread.h" +#include +#include +#include +#include "yangdecoder/YangDecoderFactory.h" + +YangPlayerDecoder::YangPlayerDecoder(YangContext* pcontext) { + m_context=pcontext; + m_out_audioBuffer=NULL; + m_out_videoBuffer=NULL; + m_audioDec=NULL; + m_videoDec=NULL; + /** + m_audio.audioCacheNum=50; + m_audio.audioDecoderType=0; + m_audio.usingMono=0; + m_audio.channel=2; + m_audio.sample=44100; + + m_video.bitDepth=8; + m_video.videoCacheNum=20; + m_video.evideoCacheNum=20; + m_video.videoDecoderType=0; + m_video.videoDecHwType=0;**/ +} + +YangPlayerDecoder::~YangPlayerDecoder() { + if(m_audioDec&&m_audioDec->m_isStart){ + m_audioDec->stop(); + while(m_audioDec->m_isStart){ + yang_usleep(1000); + } + } + if(m_videoDec&&m_videoDec->m_isStart){ + m_videoDec->stop(); + while(m_videoDec->m_isStart){ + yang_usleep(1000); + } + } + //yang_usleep(50000); + yang_delete(m_audioDec); + yang_delete(m_videoDec); + //int32_t i=0; + if(m_out_videoBuffer){ + delete m_out_videoBuffer;m_out_videoBuffer=NULL; + } + if(m_out_audioBuffer){ + delete m_out_audioBuffer;m_out_audioBuffer=NULL; + } + +} +void YangPlayerDecoder::stopAll(){ + 
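+	// signal both decoder threads to exit; the destructor busy-waits on
+	// each handle's m_isStart flag before deleting them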
if(m_audioDec) m_audioDec->stop(); + if(m_videoDec) m_videoDec->stop(); +} + + +void YangPlayerDecoder::initAudioDecoder(){ + if(m_out_audioBuffer==NULL) { + m_out_audioBuffer=new YangAudioPlayBuffer(); + + } + if(m_audioDec==NULL) { + // YangDecoderFactory df; + //YangAudioParam audio={0}; + + m_audioDec=new YangAudioDecoderHandle(m_context); + m_audioDec->init(); + m_audioDec->setOutAudioBuffer(m_out_audioBuffer); + } +} + +void YangPlayerDecoder::initVideoDecoder(){ + //YangConfig *p_config = m_context; + if(m_out_videoBuffer==NULL) { + m_out_videoBuffer=new YangVideoBuffer(1); + //m_out_videoBuffer->m_syn=&m_syn; + } + //m_context->videoBuffers=m_out_videoBuffer; +// YangDecoderFactory df; + if(m_videoDec==NULL) { + + m_videoDec=new YangVideoDecoderHandle(m_context); + m_videoDec->init(); + m_videoDec->setOutVideoBuffer(m_out_videoBuffer); + } + +} + +void YangPlayerDecoder::startAudioDecoder(){ + if(m_audioDec&&!m_audioDec->m_isStart) if(m_audioDec) m_audioDec->start(); +} + +void YangPlayerDecoder::startVideoDecoder(){ + if(m_videoDec&&!m_videoDec->m_isStart) m_videoDec->start(); +} + +void YangPlayerDecoder::setInVideoBuffer(YangVideoDecoderBuffer *pvel){ + if(m_videoDec!=NULL) m_videoDec->setInVideoBuffer(pvel); +} +void YangPlayerDecoder::setInAudioBuffer(YangAudioEncoderBuffer *pael){ + if(m_audioDec!=NULL) m_audioDec->setInAudioBuffer(pael); +} +YangVideoBuffer* YangPlayerDecoder::getOutVideoBuffer(){ + return m_out_videoBuffer; +} +YangAudioPlayBuffer* YangPlayerDecoder::getOutAudioBuffer(){ + return m_out_audioBuffer; +} + diff --git a/libmetartc3/src/yangplayer/YangPlayerHandleImpl.cpp b/libmetartc3/src/yangplayer/YangPlayerHandleImpl.cpp new file mode 100755 index 00000000..a10cbd06 --- /dev/null +++ b/libmetartc3/src/yangplayer/YangPlayerHandleImpl.cpp @@ -0,0 +1,151 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangPlayerHandleImpl.h" +#include +//#include +#include +#include +#include + + +YangPlayerHandle* YangPlayerHandle::createPlayerHandle(YangContext* pcontext,YangSysMessageI* pmessage){ + return new YangPlayerHandleImpl(pcontext,pmessage); +} +YangPlayerHandleImpl::YangPlayerHandleImpl(YangContext* pcontext,YangSysMessageI* pmessage) { + m_context=pcontext; + m_message=pmessage; + m_recv = NULL; + m_play = NULL; + m_rtcRecv=NULL; + m_outVideoBuffer = NULL; + m_outAudioBuffer = NULL; + m_url.netType=0; + m_url.port=1935; +} + +YangPlayerHandleImpl::~YangPlayerHandleImpl() { + if(m_rtcRecv) m_rtcRecv->disConnect(); + yang_delete(m_recv); + yang_delete(m_play); + yang_delete(m_rtcRecv); + yang_delete(m_outVideoBuffer); + yang_delete(m_outAudioBuffer); + +} + +void YangPlayerHandleImpl::stopPlay(){ + if(m_rtcRecv) { + m_rtcRecv->disConnect(); + } + if(m_recv){ + m_recv->disConnect(); + } + if(m_play) m_play->stopAll(); + yang_stop(m_rtcRecv); + yang_stop_thread(m_rtcRecv); + yang_delete(m_rtcRecv); + + //yang_delete(m_play); + yang_stop(m_recv); + yang_stop_thread(m_recv); + yang_delete(m_recv); + + yang_delete(m_play); + +} +int YangPlayerHandleImpl::play(string url,int32_t localPort) { + //m_url.app=""; + memset(m_url.server,0,sizeof(m_url.server)); + //m_url.server=""; + //m_url.stream=""; + m_url.port=0; + if(yang_srs_url_parse((char*)url.c_str(),&m_url)) return 1; + + stopPlay(); + printf("\nnetType==%d,server=%s,port=%d,app=%s,stream=%s\n",m_url.netType,m_url.server,m_url.port,m_url.app,m_url.stream); + m_context->avinfo.sys.transType=m_url.netType; + if(m_context->streams.m_playBuffer) m_context->streams.m_playBuffer->setTranstype(m_url.netType); 
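+	// Route on the transport type parsed from the URL: a WebRTC URL is handed
+	// to playRtc(), which in this build assumes SRS-style signaling on port
+	// 1985; every other transport uses the YangPlayReceive pipeline below.
+	// Rough sketch of the flow: play("webrtc://host/live/stream", port)
+	// ends up as playRtc(0, host, port, host, 1985, "live", "stream").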
+ if(m_url.netType ==Yang_Webrtc){ + + return playRtc(0,m_url.server,localPort,m_url.server,1985,m_url.app,m_url.stream); + + } + + if (!m_play) { + m_play = new YangPlayerBase(); + + m_context->avinfo.audio.sample=44100; + m_context->avinfo.audio.channel=2; + m_context->avinfo.audio.audioDecoderType=0; + m_context->avinfo.audio.usingMono=0; + m_context->avinfo.audio.aIndex=-1; + m_play->init(m_context); + } + initList(); + m_play->startAudioDecoder(m_outAudioBuffer); + m_play->startVideoDecoder(m_outVideoBuffer); + m_play->startAudioPlay(m_context); + //m_play->startVideoPlay(); + + + if (!m_recv) { + + m_recv = new YangPlayReceive(m_context); //sf.createStreamBase(m_url.netType,0,m_context); + m_recv->setBuffer(m_outAudioBuffer, m_outVideoBuffer); + } + if(m_recv->init(m_url.netType, m_url.server, m_url.port, m_url.stream)){ + printf("\n connect server failure!"); + return 1; + } + + m_recv->start(); + return Yang_Ok; + + +} + +int32_t YangPlayerHandleImpl::playRtc(int32_t puid,std::string localIp,int32_t localPort, std::string server, int32_t pport,std::string app,std::string stream){ + + stopPlay(); + if (!m_play) { + m_play = new YangPlayerBase(); + + m_context->avinfo.audio.sample=48000; + m_context->avinfo.audio.channel=2; + m_context->avinfo.audio.audioDecoderType=Yang_AED_OPUS;//3; + m_context->avinfo.audio.usingMono=0; + m_context->avinfo.audio.aIndex=-1; + m_play->init(m_context); + } + initList(); + m_play->startAudioDecoder(m_outAudioBuffer); + m_play->startVideoDecoder(m_outVideoBuffer); + + m_play->startAudioPlay(m_context); + + + if(m_rtcRecv==NULL) { + m_rtcRecv=new YangRtcReceive(m_context,m_message); + m_rtcRecv->setBuffer(m_outAudioBuffer, m_outVideoBuffer); + m_rtcRecv->init(puid,localIp,localPort,server,pport,app,stream); + } + + m_rtcRecv->start(); + + return Yang_Ok; +} + +YangVideoBuffer* YangPlayerHandleImpl::getVideoBuffer(){ + if(m_play) return m_play->m_ydb->getOutVideoBuffer(); + return NULL; +} + +void YangPlayerHandleImpl::initList() { + if (m_outAudioBuffer == NULL) { + m_outAudioBuffer = new YangAudioEncoderBuffer(10); + } + if (m_outVideoBuffer == NULL) + m_outVideoBuffer = new YangVideoDecoderBuffer(); + +} diff --git a/libmetartc3/src/yangplayer/YangPlayerHandleImpl.h b/libmetartc3/src/yangplayer/YangPlayerHandleImpl.h new file mode 100755 index 00000000..c4712b1f --- /dev/null +++ b/libmetartc3/src/yangplayer/YangPlayerHandleImpl.h @@ -0,0 +1,39 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGMEETING_INCLUDE_YANGPLAYERHANDLE_H_ +#define YANGMEETING_INCLUDE_YANGPLAYERHANDLE_H_ +#include +#include +#include +#include +#include +#include +#include "YangRtcReceive.h" + + +class YangPlayerHandleImpl :public YangPlayerHandle{ +public: + YangPlayerHandleImpl(YangContext* pcontext,YangSysMessageI* pmessage); + virtual ~YangPlayerHandleImpl(); + YangVideoBuffer* getVideoBuffer(); + int play(string url,int32_t localport); + + int32_t playRtc(int32_t puid,std::string localIp,int32_t localPort, std::string server, int32_t pport,std::string app,std::string stream); + void stopPlay(); +protected: + + YangUrlData m_url; + void initList(); + YangPlayReceive *m_recv; + YangPlayerBase *m_play; + YangRtcReceive *m_rtcRecv; + +private: + YangContext* m_context; + YangVideoDecoderBuffer* m_outVideoBuffer; + YangAudioEncoderBuffer* m_outAudioBuffer; + YangSysMessageI* m_message; +}; + +#endif /* YANGMEETING_INCLUDE_YANGPLAYERHANDLE_H_ */ diff --git a/libmetartc3/src/yangplayer/YangPlayerPlay.cpp b/libmetartc3/src/yangplayer/YangPlayerPlay.cpp new file mode 
100755 index 00000000..7fe198dd --- /dev/null +++ b/libmetartc3/src/yangplayer/YangPlayerPlay.cpp @@ -0,0 +1,76 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include +#include + + + +YangPlayerPlay::YangPlayerPlay() { + m_audioPlay=NULL; + vm_audio_player_start=0; +} + +YangPlayerPlay::~YangPlayerPlay() { + //m_context=NULL; + yang_stop(m_audioPlay); + yang_stop_thread(m_audioPlay); + +} +void YangPlayerPlay::stopAll(){ + if(m_audioPlay) m_audioPlay->stop(); + +} +void YangPlayerPlay::initAudioPlay(YangContext* paudio){ + + if (m_audioPlay == NULL) { +#ifdef _WIN32 + m_audioPlay = new YangWinAudioApiRender(paudio); +#else + m_audioPlay = new YangAudioPlayAlsa(paudio); +#endif + m_audioPlay->init(); + + } + +} +/** +void YangPlayerPlay::initVideoPlay(YangVideoDecoderHandle *handle){ + if (vm_videoPlay == NULL) { + vm_videoPlay = new YangVideoPlay(); + vm_videoPlay->init(); + //handle->m_yvp=vm_videoPlay; + } + +} + +void YangPlayerPlay::setInVideoList(vector *pvideoList){ + if(vm_videoPlay!=NULL) vm_videoPlay->setVideoList(pvideoList);//setAudioList(paudioList); +} +void YangPlayerPlay::startVideoPlay(){ + //if(vm_audio_player_start) return; + vm_videoPlay->start(); + //vm_audio_player_start=1; +} +int32_t YangPlayerPlay::getIsAecInit(){ + if(vm_audioPlay!=NULL) return vm_audioPlay->m_aecInit; + return 0; +} +//F_initSdlWin YangPlayApp::getInitSdlWin(){ +// return vm_contexttSdlWin; +//} + +void YangPlayerPlay::setAec(YangRtcAec *paec){ + if(vm_audioPlay!=NULL) vm_audioPlay->setAecBase(paec); +} +**/ +void YangPlayerPlay::startAudioPlay(){ + if(vm_audio_player_start) return; + m_audioPlay->start(); + vm_audio_player_start=1; +} +void YangPlayerPlay::setInAudioList(YangAudioPlayBuffer *paudioList){ + if(m_audioPlay!=NULL) m_audioPlay->setAudioBuffer(paudioList); +} + diff --git a/libmetartc3/src/yangplayer/YangRtcReceive.cpp b/libmetartc3/src/yangplayer/YangRtcReceive.cpp new file mode 100755 index 00000000..260ac1b6 --- /dev/null +++ b/libmetartc3/src/yangplayer/YangRtcReceive.cpp @@ -0,0 +1,150 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangRtcReceive.h" + +#include +#include + +void g_rtcrecv_receiveAudio(void* user,YangFrame *audioFrame){ + if(user==NULL) return; + YangRtcReceive* rtcHandle=(YangRtcReceive*)user; + rtcHandle->receiveAudio(audioFrame); +} +void g_rtcrecv_receiveVideo(void* user,YangFrame *videoFrame){ + if(user==NULL) return; + YangRtcReceive* rtcHandle=(YangRtcReceive*)user; + rtcHandle->receiveVideo(videoFrame); +} + +YangRtcReceive::YangRtcReceive(YangContext* pcontext,YangSysMessageI* pmessage) { + m_message=pmessage; + m_context=pcontext; + m_isStart = 0; + m_out_videoBuffer = NULL; + m_out_audioBuffer = NULL; + m_isReceived = 0; + m_loops = 0; + m_headLen = 1; //pcontext->audio.audioDecoderType == 0 ? 
2 : 1; + m_recv = NULL; + m_waitState = 0; + pthread_mutex_init(&m_lock,NULL); + pthread_cond_init(&m_cond_mess,NULL); +// m_lock = PTHREAD_MUTEX_INITIALIZER; +// m_cond_mess = PTHREAD_COND_INITIALIZER; + m_recvCallback.receiveAudio=g_rtcrecv_receiveAudio; + m_recvCallback.receiveVideo=g_rtcrecv_receiveVideo; + m_recvCallback.context=this; + +} + +YangRtcReceive::~YangRtcReceive() { + disConnect(); + if (m_loops) { + while (m_isStart) { + yang_usleep(1000); + } + } + yang_delete(m_recv); + + + m_out_audioBuffer = NULL; + m_out_videoBuffer = NULL; + m_message=NULL; + pthread_mutex_destroy(&m_lock); + pthread_cond_destroy(&m_cond_mess); +} + +void YangRtcReceive::disConnect() { + if (m_recv) + m_recv->disconnectServer(m_recv->context); + stop(); + yang_destroy_rtcstream_handle(m_recv); + yang_free(m_recv); + +} +void YangRtcReceive::setBuffer(YangAudioEncoderBuffer *al,YangVideoDecoderBuffer *vl) { + m_out_audioBuffer = al; + m_out_videoBuffer = vl; +} +void YangRtcReceive::setMediaConfig(int32_t puid, YangAudioParam *audio,YangVideoParam *video){ + +} +void YangRtcReceive::receiveAudio(YangFrame *audioFrame) { + if(audioFrame==NULL||!audioFrame->payload) return; + m_out_audioBuffer->putPlayAudio(audioFrame); +} +void YangRtcReceive::receiveVideo(YangFrame *videoFrame) { + if(videoFrame==NULL||videoFrame->payload==NULL) return; + m_out_videoBuffer->putEVideo(videoFrame); + + +} + +int32_t YangRtcReceive::init(int32_t puid, string localIp, int32_t localPort, + string server, int32_t pport, string app, string stream) { + //m_conf.localIp = localIp; + //memset(m_conf.localIp,0,sizeof(m_conf.localIp)); + memset(&m_conf,0,sizeof(m_conf)); + strcpy(m_conf.localIp,localIp.c_str()); + m_conf.localPort = localPort; + strcpy(m_conf.serverIp,server.c_str()); + m_conf.serverPort = pport; + strcpy(m_conf.app,app.c_str()); + strcpy(m_conf.stream,stream.c_str()); + m_conf.uid = puid; + m_conf.streamOptType = Yang_Stream_Play; + if (!m_recv){ + m_recv=(YangRtcHandle*)calloc(sizeof(YangRtcHandle),1); + yang_create_rtcstream_handle(m_recv, &m_context->avinfo,&m_context->stream); + } + m_recv->recvcb=&m_recvCallback; + m_recv->init(m_recv,m_recv->context,&m_conf); + return Yang_Ok; + +} +void YangRtcReceive::stop() { + m_loops = 0; + if (m_recv) + m_recv->disconnectServer(m_recv->context); + //pthread_mutex_unlock(&m_lock); + + if (m_waitState) { + pthread_mutex_lock(&m_lock); + pthread_cond_signal(&m_cond_mess); + pthread_mutex_unlock(&m_lock); + + } +} +void YangRtcReceive::run() { + m_isStart = 1; + startLoop(); + m_isStart = 0; +} +void YangRtcReceive::startLoop() { + + yang_reindex(m_out_audioBuffer); + yang_reindex(m_out_videoBuffer); + m_loops = 1; + m_isReceived = 1; + int err=Yang_Ok; + if ((err=m_recv->connectRtcServer(m_recv->context))!=Yang_Ok) { + m_loops=0; + if(m_message) m_message->failure(err); + }else{ + if(m_message) m_message->success(); + } + + pthread_mutex_lock(&m_lock); + while (m_loops == 1) { + m_waitState = 1; + + pthread_cond_wait(&m_cond_mess, &m_lock); + m_waitState = 0; + } //end while + + if (m_recv) + m_recv->disconnectServer(m_recv->context); + m_isReceived = 0; + pthread_mutex_unlock(&m_lock); +} diff --git a/libmetartc3/src/yangplayer/YangRtcReceive.h b/libmetartc3/src/yangplayer/YangRtcReceive.h new file mode 100755 index 00000000..875b7049 --- /dev/null +++ b/libmetartc3/src/yangplayer/YangRtcReceive.h @@ -0,0 +1,58 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef SRC_YANGPLAYER_SRC_YANGRTCRECEIVE_H_ +#define SRC_YANGPLAYER_SRC_YANGRTCRECEIVE_H_ +#include 
+#include +#include +#include +#include +#include +#include +#include + + + +using namespace std; +class YangRtcReceive : public YangThread,public YangMediaConfigCallback{ +public: + YangRtcReceive(YangContext* pcontext,YangSysMessageI* pmessage); + virtual ~YangRtcReceive(); + void receiveAudio(YangFrame* audioFrame); + void receiveVideo(YangFrame* videoFrame); + //void handleKeyframe(YangFrame* videoFrame); + int32_t init(int32_t puid,string localIp,int32_t localPort, string server, int32_t pport,string app, string stream); + void setBuffer(YangAudioEncoderBuffer *al,YangVideoDecoderBuffer *vl); + void disConnect(); + void play(char* pserverStr,char *streamName); + void setMediaConfig(int32_t puid, YangAudioParam *audio,YangVideoParam *video); + YangRtcHandle *m_recv; + int32_t m_isReceived; //,isHandled; + int32_t m_loops; //,isHandleAllInvoke; + int32_t m_isStart; + void stop(); +protected: + void run(); + void startLoop(); + YangContext* m_context; + YangSysMessageI* m_message; + + + +private: + pthread_mutex_t m_lock; + pthread_cond_t m_cond_mess; + YangStreamConfig m_conf; + int32_t m_waitState; + int32_t m_headLen; + YangAudioEncoderBuffer *m_out_audioBuffer; + YangVideoDecoderBuffer *m_out_videoBuffer; + YangRtcInfo m_rtcinfo; + YangH264NaluData m_nalu; + YangReceiveCallback m_recvCallback; + + //uint8_t* m_keyBuf; +}; + +#endif /* SRC_YANGPLAYER_SRC_YANGRTCRECEIVE_H_ */ diff --git a/libmetartc3/src/yangpush/YangPushCapture.cpp b/libmetartc3/src/yangpush/YangPushCapture.cpp new file mode 100755 index 00000000..5ef1b358 --- /dev/null +++ b/libmetartc3/src/yangpush/YangPushCapture.cpp @@ -0,0 +1,331 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#include +//#include +//#include +#include + + +YangPushCapture::YangPushCapture(YangContext *pcontext) { + m_context=pcontext; + + m_out_audioBuffer = NULL; + m_screenCapture=NULL; + m_videoCapture=NULL; + m_audioCapture=NULL; + + + m_out_videoBuffer=NULL; + m_screen_pre_videoBuffer=NULL; + m_screen_out_videoBuffer=NULL; + m_pre_videoBuffer=new YangVideoBuffer(pcontext->avinfo.video.bitDepth==8?1:2); + m_pre_videoBuffer->isPreview=1; + m_pre_videoBuffer->m_frames=pcontext->avinfo.video.frame; + m_isConvert=0; + m_isStart=0; +#if Yang_HaveVr + m_out_vr_pre_videoBuffer=NULL; +#endif +} + +YangPushCapture::~YangPushCapture() { + m_context=NULL; + stopAll(); + yang_stop_thread(this); + yang_stop_thread(m_audioCapture); + yang_stop_thread(m_videoCapture); + yang_stop_thread(m_screenCapture); + + + yang_delete(m_audioCapture); + yang_delete(m_videoCapture); + yang_delete(m_screenCapture); + + yang_delete(m_out_audioBuffer); + yang_delete(m_pre_videoBuffer); + yang_delete(m_out_videoBuffer); +#if Yang_HaveVr + yang_delete(m_out_vr_pre_videoBuffer); +#endif + m_screen_pre_videoBuffer=NULL; + m_screen_out_videoBuffer=NULL; + //yang_delete(m_screen_pre_videoBuffer); + +} + + +void YangPushCapture::startAudioCaptureState() { + if(m_audioCapture) m_audioCapture->setCatureStart(); +} +void YangPushCapture::stopAudioCaptureState() { + if(m_audioCapture) m_audioCapture->setCatureStop(); +} +void YangPushCapture::setAec(YangRtcAec *paec) { + if (m_audioCapture) { + m_audioCapture->setAec(paec); + } +} +void YangPushCapture::setInAudioBuffer(vector *pbuf){ + if(m_audioCapture!=NULL) m_audioCapture->setInAudioBuffer(pbuf); +} +void YangPushCapture::startAudioCapture() { + if (m_audioCapture && !m_audioCapture->m_isStart) + m_audioCapture->start(); +} +YangAudioBuffer* YangPushCapture::getOutAudioBuffer() { + return 
m_out_audioBuffer; +} +int32_t YangPushCapture::initAudio(YangPreProcess *pp) { + if (m_out_audioBuffer == NULL) { + if (m_context->avinfo.audio.usingMono) + m_out_audioBuffer = new YangAudioBuffer(m_context->avinfo.audio.audioCacheNum); + else + m_out_audioBuffer = new YangAudioBuffer(m_context->avinfo.audio.audioCacheNum); + } + if (m_audioCapture == NULL) { + YangCaptureFactory m_capture; + m_audioCapture = m_capture.createRecordAudioCapture(m_context); //new YangAudioCapture(m_context); + int32_t ret=m_audioCapture->init(); + if(ret){ + if(ret==ERROR_SYS_NoAudioDevice||ret==ERROR_SYS_NoAudioCaptureDevice) { + yang_error("ERROR_SYS_NoAudioDevice"); + return ret; + } + + } + m_audioCapture->setPreProcess(pp); + m_audioCapture->setOutAudioBuffer(m_out_audioBuffer); + + m_audioCapture->setCatureStop(); + } + stopAudioCaptureState(); + return Yang_Ok; +} + + + + +void YangPushCapture::stopAll(){ + stop(); + yang_stop(m_audioCapture); + yang_stop(m_videoCapture); + yang_stop(m_screenCapture); + +} + + +void YangPushCapture::startVideoCaptureState() { + m_videoCapture->initstamp(); + m_videoCapture->setVideoCaptureStart(); +} + +void YangPushCapture::startScreenCaptureState() { + + m_screenCapture->setVideoCaptureStart(); +} + +void YangPushCapture::stopVideoCaptureState() { + if(m_videoCapture) m_videoCapture->setVideoCaptureStop(); + +} +void YangPushCapture::stopScreenCaptureState(){ + if(m_screenCapture) m_screenCapture->setVideoCaptureStop(); +} +void YangPushCapture::change(int32_t st) { + +} + + +int32_t YangPushCapture::initVideo(){ + if(m_out_videoBuffer==NULL) m_out_videoBuffer = new YangVideoBuffer(m_context->avinfo.video.bitDepth==8?1:2); + int32_t err=Yang_Ok; + if (m_videoCapture == NULL) { + YangCaptureFactory cf; + + m_videoCapture = cf.createRecordVideoCapture(&m_context->avinfo.video);//new YangVideoCapture(m_context); + if((err=m_videoCapture->init())!=Yang_Ok){ + return yang_error_wrap(err,"video capture init fail!"); + } + + m_out_videoBuffer->init(m_context->avinfo.video.width,m_context->avinfo.video.height,m_context->avinfo.video.videoEncoderFormat); + m_pre_videoBuffer->init(m_context->avinfo.video.width,m_context->avinfo.video.height,m_context->avinfo.video.videoEncoderFormat); + m_videoCapture->setOutVideoBuffer(m_out_videoBuffer); + m_videoCapture->setPreVideoBuffer(m_pre_videoBuffer); + //m_videoCapture->setVideoCaptureStart(); + } + stopVideoCaptureState(); + return err; + +} + +int32_t YangPushCapture::initScreen(){ + int32_t err=Yang_Ok; + if (m_screenCapture == NULL) { + YangCaptureFactory cf; + + m_screenCapture = cf.createScreenCapture(m_context);//new YangVideoCapture(m_context); + + if((err=m_screenCapture->init())!=Yang_Ok){ + return yang_error_wrap(err,"screen capture fail...."); + } + + m_screen_out_videoBuffer=m_screenCapture->getOutVideoBuffer(); + m_screen_pre_videoBuffer=m_screenCapture->getPreVideoBuffer(); + + //m_screenCapture->setVideoCaptureStart(); + } + stopVideoCaptureState(); + return err; +} + +void YangPushCapture::startVideoCapture(){ + if(m_videoCapture&&!m_videoCapture->m_isStart) m_videoCapture->start(); +} +void YangPushCapture::startScreenCapture(){ + if(m_screenCapture&&!m_screenCapture->m_isStart) m_screenCapture->start(); +} +YangVideoBuffer * YangPushCapture::getOutVideoBuffer(){ + + return m_out_videoBuffer; +} + +YangVideoBuffer * YangPushCapture::getPreVideoBuffer(){ + + return m_pre_videoBuffer; +} +void YangPushCapture::run() { + m_isStart = 1; + startLoop(); + m_isStart = 0; +} +void YangPushCapture::stop() { + stopLoop(); 
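+	// stop() is asynchronous: stopLoop() just clears m_isConvert, and the
+	// VR conversion loop exits on its next pass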
+} +void YangPushCapture::stopLoop() { + m_isConvert = 0; +} +#if Yang_HaveVr +#include +void YangPushCapture::startLoop() { + + m_isConvert = 1; + int32_t inWidth = m_context->avinfo.video.width; + int32_t inHeight = m_context->avinfo.video.height; + int64_t prestamp = 0; + long az = inWidth * inHeight * 2; + uint8_t *srcData=new uint8_t[az];// { 0 }; + //if (is12bits) + az = inWidth * inHeight * 3 / 2; + + //mf.getYangMatImage(); + + uint8_t *matDst=new uint8_t[inWidth * inHeight * 2]; + uint8_t *matSrcRgb=new uint8_t [inWidth * inHeight * 3]; + uint8_t *matSrcBgr=new uint8_t [inWidth * inHeight * 3]; + YangYuvConvert yuv; + + + YangMatImageCv *mat = new YangMatImageCv(); + mat->initImg(m_context->avinfo.bgFilename, m_context->avinfo.video.width, m_context->avinfo.video.height, 3); + yang_trace("bgfilename===%s",m_context->avinfo.bgFilename); + YangFrame videoFrame; + memset(&videoFrame,0,sizeof(YangFrame)); + while (m_isConvert == 1) { + + if (m_out_vr_pre_videoBuffer->size() == 0) { + yang_usleep(1000); + continue; + } + videoFrame.payload=srcData; + videoFrame.nb=az; + m_out_vr_pre_videoBuffer->getVideo(&videoFrame); + yuv.I420torgb24(srcData, matSrcRgb, inWidth, inHeight); + yang_rgbtobgr(matSrcRgb, matSrcBgr, inWidth, inHeight); + mat->matImage(matSrcBgr, matDst); + + if (videoFrame.timestamp - prestamp <= 0) { + prestamp = videoFrame.timestamp; + continue; + } + prestamp = videoFrame.timestamp; + videoFrame.payload=matDst; + videoFrame.nb=az; + if (m_videoCapture->getVideoCaptureState()) + m_out_videoBuffer->putVideo(&videoFrame); + m_pre_videoBuffer->putVideo(&videoFrame); + + } + + yang_delete(mat); + yang_deleteA(srcData); + yang_deleteA(matDst); + yang_deleteA(matSrcRgb); + yang_deleteA(matSrcBgr); + + +} +void YangPushCapture::addVr(){ + if (m_out_vr_pre_videoBuffer == NULL) + m_out_vr_pre_videoBuffer = new YangVideoBuffer(m_context->avinfo.video.width, + m_context->avinfo.video.height, 12, m_context->avinfo.video.bitDepth == 8 ? 
1 : 2); + + m_out_vr_pre_videoBuffer->resetIndex(); + + m_pre_videoBuffer->resetIndex(); + m_out_videoBuffer->resetIndex(); + m_videoCapture->setPreVideoBuffer(m_out_vr_pre_videoBuffer); + m_videoCapture->setOutVideoBuffer(NULL);//(m_out_vr_pre_videoBuffer); + start(); +} +void YangPushCapture::delVr(){ + stop(); + while(m_isStart){ + yang_usleep(1000); + } + m_pre_videoBuffer->resetIndex(); + m_out_videoBuffer->resetIndex(); + m_videoCapture->setOutVideoBuffer(m_out_videoBuffer); + m_videoCapture->setPreVideoBuffer(m_pre_videoBuffer); +} +#endif +YangVideoBuffer* YangPushCapture::getScreenOutVideoBuffer() { + return m_screen_out_videoBuffer; +} + +YangVideoBuffer* YangPushCapture::getScreenPreVideoBuffer() { + return m_screen_pre_videoBuffer; +} + +void YangPushCapture::startCamera() { + initVideo(); + startVideoCapture(); +} + +void YangPushCapture::startScreen() { + initScreen(); + startScreenCapture(); +} + +void YangPushCapture::stopCamera() { + yang_stop(m_videoCapture); + yang_stop_thread(m_videoCapture); + yang_delete(m_videoCapture); +} + +void YangPushCapture::stopScreen() { + yang_stop(m_screenCapture); + yang_stop_thread(m_screenCapture); + yang_delete(m_screenCapture); +} + +void YangPushCapture::setScreenInterval(int32_t pinterval) { + if(m_screenCapture) m_screenCapture->setInterval(pinterval); +} + +void YangPushCapture::setDrawmouse(bool isDraw) { + if(m_screenCapture) m_screenCapture->setDrawmouse(isDraw); +} diff --git a/libmetartc3/src/yangpush/YangPushEncoder.cpp b/libmetartc3/src/yangpush/YangPushEncoder.cpp new file mode 100755 index 00000000..775ab5d3 --- /dev/null +++ b/libmetartc3/src/yangpush/YangPushEncoder.cpp @@ -0,0 +1,113 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "pthread.h" +#include +#include +#include +#include "yangencoder/YangEncoderFactory.h" + +YangPushEncoder::YangPushEncoder(YangContext *pcontext) { + m_ae=NULL; + m_ve=NULL; + m_out_videoBuffer = NULL; + m_out_auidoBuffer = NULL; + + m_context = pcontext; + m_videoInfo=&pcontext->avinfo.video; + m_vmd = NULL; +} + +YangPushEncoder::~YangPushEncoder() { + stopAll(); + yang_stop_thread(m_ae); + yang_stop_thread(m_ve); + + yang_delete(m_ae); + yang_delete(m_ve); + + yang_delete(m_out_videoBuffer); //=NULL; + yang_delete(m_out_auidoBuffer); //=NULL; + + yang_free(m_vmd); + m_context = NULL; +} +void YangPushEncoder::stopAll() { + yang_stop(m_ae); + yang_stop(m_ve); + +} + +void YangPushEncoder::deleteVideoEncoder(){ + yang_stop(m_ve); + yang_stop_thread(m_ve); + yang_delete(m_ve); + yang_free(m_vmd); + m_vmd=NULL; +} +YangVideoMeta* YangPushEncoder::getOutVideoMetaData() { + return m_vmd; +} +void YangPushEncoder::setVideoInfo(YangVideoInfo* pvideo){ + if(pvideo) m_videoInfo=pvideo; +} +void YangPushEncoder::initAudioEncoder() { + if (m_out_auidoBuffer == NULL) + m_out_auidoBuffer = new YangAudioEncoderBuffer(m_context->avinfo.audio.audioCacheNum); + if (m_ae == NULL) { + // YangEncoderFactory yf; + m_ae = new YangAudioEncoderHandle(&m_context->avinfo.audio); + m_ae->setOutAudioBuffer(m_out_auidoBuffer); + m_ae->init(); + } + +} +void YangPushEncoder::initVideoEncoder() { + if (m_out_videoBuffer == NULL) + m_out_videoBuffer = new YangVideoEncoderBuffer(m_context->avinfo.video.evideoCacheNum); + if(m_context&&m_context->avinfo.enc.createMeta){ + if (m_vmd == NULL) + m_vmd = (YangVideoMeta*) calloc(1, sizeof(YangVideoMeta)); + YangEncoderFactory fac; + YangVideoEncoderMeta *yvh = fac.createVideoEncoderMeta(m_videoInfo); + yvh->yang_initVmd(m_vmd, m_videoInfo,&m_context->avinfo.enc); + 
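+		// the meta helper only exists to fill m_vmd (the codec metadata,
+		// e.g. SPS/PPS for H.264); release it once m_vmd is populated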
yang_delete(yvh); + } + if (m_ve == NULL) { + // YangEncoderFactory yf; + m_ve = new YangVideoEncoderHandle(m_videoInfo, &m_context->avinfo.enc); + m_ve->setOutVideoBuffer(m_out_videoBuffer); + m_ve->init(); + m_ve->setVideoMetaData(m_vmd); + } +} +void YangPushEncoder::sendKeyframe(){ + if(m_ve) m_ve->sendKeyframe(); +} +void YangPushEncoder::startAudioEncoder() { + if (m_ae && !m_ae->m_isStart) { + m_ae->start(); + yang_usleep(1000); + } +} +void YangPushEncoder::startVideoEncoder() { + if (m_ve && !m_ve->m_isStart) { + m_ve->start(); + yang_usleep(2000); + } +} +void YangPushEncoder::setInAudioBuffer(YangAudioBuffer *pbuf) { + if (m_ae != NULL) + m_ae->setInAudioBuffer(pbuf); +} +void YangPushEncoder::setInVideoBuffer(YangVideoBuffer *pbuf) { + if (m_ve != NULL) + m_ve->setInVideoBuffer(pbuf); +} +YangAudioEncoderBuffer* YangPushEncoder::getOutAudioBuffer() { + return m_out_auidoBuffer; +} +YangVideoEncoderBuffer* YangPushEncoder::getOutVideoBuffer() { + return m_out_videoBuffer; +} + diff --git a/libmetartc3/src/yangpush/YangPushEncoder.h b/libmetartc3/src/yangpush/YangPushEncoder.h new file mode 100755 index 00000000..87248ecd --- /dev/null +++ b/libmetartc3/src/yangpush/YangPushEncoder.h @@ -0,0 +1,46 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGAPP_YangPushEncoder_H_ +#define YANGAPP_YangPushEncoder_H_ + +#include "yangencoder/YangAudioEncoderHandle.h" +#include "yangencoder/YangVideoEncoderHandle.h" +#include "yangutil/buffer/YangAudioBuffer.h" +#include "yangutil/buffer/YangAudioEncoderBuffer.h" +#include "yangutil/buffer/YangVideoBuffer.h" +#include "yangutil/buffer/YangVideoEncoderBuffer.h" +#include "yangutil/sys/YangIni.h" +class YangPushEncoder { +public: + YangPushEncoder(YangContext *pcontext); + virtual ~YangPushEncoder(); +public: + void setVideoInfo(YangVideoInfo* pvideo); + void initVideoEncoder(); + void initAudioEncoder(); + void startAudioEncoder(); + void startVideoEncoder(); + void setInAudioBuffer(YangAudioBuffer *pal); + void setInVideoBuffer(YangVideoBuffer *pvl); + YangAudioEncoderBuffer * getOutAudioBuffer(); + YangVideoEncoderBuffer * getOutVideoBuffer(); + YangVideoMeta * getOutVideoMetaData(); + void stopAll(); + void sendKeyframe(); + + void deleteVideoEncoder(); +private: + + YangVideoEncoderHandle *m_ve; + YangAudioEncoderHandle *m_ae; + + YangAudioEncoderBuffer *m_out_auidoBuffer; + YangVideoEncoderBuffer *m_out_videoBuffer; + + YangContext *m_context; + YangVideoMeta *m_vmd; + YangVideoInfo* m_videoInfo; +}; + +#endif /* YANGAPP_YANGENCODERAPP_H_ */ diff --git a/libmetartc3/src/yangpush/YangPushFactory.cpp b/libmetartc3/src/yangpush/YangPushFactory.cpp new file mode 100755 index 00000000..260d660b --- /dev/null +++ b/libmetartc3/src/yangpush/YangPushFactory.cpp @@ -0,0 +1,39 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include "YangPushMessageHandle.h" +YangPushFactory::YangPushFactory() { + + +} + +YangPushFactory::~YangPushFactory() { + +} + +YangPushHandle* YangPushFactory::createPushHandle(bool hasAudio,bool initVideo,int pvideotype, YangVideoInfo *screenvideo, YangVideoInfo *outvideo, YangContext *pcontext, + YangSysMessageI *pmessage) { + return new YangPushHandleImpl(hasAudio,initVideo,pvideotype,screenvideo,outvideo,pcontext, pmessage); +} + +YangSysMessageHandle* YangPushFactory::createPushMessageHandle(bool hasAudio,bool initVideo, + int pvideotype, YangVideoInfo *screenvideo, YangVideoInfo *outvideo, YangContext *pcontext, + YangSysMessageI *pmessage,YangSysMessageHandleI* pmessagehandle) { + return new 
YangPushMessageHandle(hasAudio,pvideotype,screenvideo,outvideo,pcontext, pmessage,pmessagehandle); +} + +YangVideoBuffer* YangPushFactory::getPreVideoBuffer(YangSysMessageHandle* pmessageHandle){ + if(!pmessageHandle) return NULL; + YangPushMessageHandle* mess=dynamic_cast(pmessageHandle); + if(mess&&mess->m_push) return mess->m_push->getPreVideoBuffer(); + return NULL; + +} + +YangSendVideoI* YangPushFactory::getSendVideo(YangSysMessageHandle* pmessageHandle){ + if(!pmessageHandle) return NULL; + YangPushMessageHandle* mess=dynamic_cast(pmessageHandle); + if(mess&&mess->m_push) return mess->m_push->getSendVideo(); + return NULL; +} diff --git a/libmetartc3/src/yangpush/YangPushHandleImpl.cpp b/libmetartc3/src/yangpush/YangPushHandleImpl.cpp new file mode 100755 index 00000000..a7d3bf83 --- /dev/null +++ b/libmetartc3/src/yangpush/YangPushHandleImpl.cpp @@ -0,0 +1,241 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include + +#include + +YangPushHandle::YangPushHandle() { + +} +YangPushHandle::~YangPushHandle() { + +} + +YangPushHandleImpl::YangPushHandleImpl(bool phasAudio,bool initVideo,int pvideotype,YangVideoInfo* screenvideo,YangVideoInfo* outvideo,YangContext* pcontext,YangSysMessageI* pmessage) { + m_pub = NULL; + m_videoState=pvideotype; + m_screenInfo=screenvideo; + m_outInfo=outvideo; + m_context = pcontext; + m_message = pmessage; + m_cap = new YangPushPublish(m_context); + m_cap->setCaptureType(m_videoState); + m_hasAudio = phasAudio; + m_isInit=initVideo; + init(); + m_send=NULL; + m_rec=NULL; + + +} + +YangPushHandleImpl::~YangPushHandleImpl() { + if (m_pub) + m_pub->disConnectMediaServer(); + if (m_cap) + m_cap->stopAll(); + yang_delete(m_pub); + yang_delete(m_cap); + yang_delete(m_rec); + +} +void YangPushHandleImpl::disconnect() { + if (m_cap) { + if(m_hasAudio) m_cap->stopAudioCaptureState(); + m_cap->stopVideoCaptureState(); + m_cap->stopScreenCaptureState(); + } + stopPublish(); +} +void YangPushHandleImpl::init() { + if(m_isInit) return; + changeSrc(m_videoState,true); + m_isInit=true; +} +void YangPushHandleImpl::startCapture() { + +} +YangSendVideoI* YangPushHandleImpl::getSendVideo(){ + if(m_send==NULL&&m_videoState==Yang_VideoSrc_OutInterface) { + m_send=new YangSendVideoImpl(); + if(m_cap){ + m_send->m_outVideoBuffer=m_cap->getOutVideoBuffer(); + m_send->m_outPreVideoBuffer=m_cap->getOutPreVideoBuffer(); + } + } + return m_send; +} +void YangPushHandleImpl::switchToCamera(bool pisinit) { + m_videoState = Yang_VideoSrc_Camera; + if(m_cap) m_cap->setCaptureType(m_videoState); + if(m_cap) m_cap->setVideoInfo(&m_context->avinfo.video); + if(!pisinit) stopScreen(); + + startCamera(); +} + +void YangPushHandleImpl::switchToScreen(bool pisinit) { + + m_videoState = Yang_VideoSrc_Screen; + if(m_cap) m_cap->setCaptureType(m_videoState); + if(m_cap) m_cap->setVideoInfo(m_screenInfo); + + if(!pisinit) stopCamera(); + + startScreen(); + +} + +void YangPushHandleImpl::recordFile(char *filename) { + m_context->avinfo.enc.createMeta=1; + if(m_rec==NULL) m_rec=new YangRecordHandle(m_context); + if(m_hasAudio) { + m_hasAudio=bool(m_cap->startAudioCapture()==Yang_Ok); + } + m_rec->init(m_cap->getPushCapture()); + m_rec->recordFile(filename); + if (m_hasAudio) + m_cap->startAudioCaptureState(); + if (m_videoState == Yang_VideoSrc_Camera) + m_cap->startVideoCaptureState(); + else if(m_videoState == Yang_VideoSrc_Screen){ + m_cap->startScreenCaptureState(); + } + +} + +void YangPushHandleImpl::stopRecord() { + if(m_rec) m_rec->stopRecord(); + 
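	// Stop before free: stopRecord() lets the recorder flush and close the
	// file before the handle goes away, and deleteVideoEncoding() below drops
	// the video encoder so the next recordFile()/publish() rebuilds it.
	// yang_delete() is assumed to delete and then NULL the pointer; publish()
	// relies on exactly that when it re-creates m_pub after stopPublish().
	// A minimal caller sketch:
	//
	//   handle->recordFile((char*)"out.mp4"); // capture + encoder + muxer
	//   ...
	//   handle->stopRecord();                 // flush file, free recorder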
yang_delete(m_rec); + if(m_cap) m_cap->deleteVideoEncoding(); +} + +void YangPushHandleImpl::switchToOutside(bool pisinit){ + if(m_cap) m_cap->setCaptureType(m_videoState); + if(m_cap) m_cap->setVideoInfo(m_outInfo); + +} +void YangPushHandleImpl::changeSrc(int videoSrcType,bool pisinit){ + m_videoState=videoSrcType; + if(m_videoState==Yang_VideoSrc_Camera){ + switchToCamera(pisinit); + }else if(m_videoState==Yang_VideoSrc_Screen){ + switchToScreen(pisinit); + }else if(m_videoState==Yang_VideoSrc_OutInterface) { + switchToOutside(pisinit); + } +} +void YangPushHandleImpl::setScreenInterval(int32_t pinterval) { + if(m_cap) m_cap->setScreenInterval(pinterval); +} + +void YangPushHandleImpl::setDrawmouse(bool isDraw) { + if(m_cap) m_cap->setDrawmouse(isDraw); +} + +void YangPushHandleImpl::stopPublish() { + if (m_pub) { + m_pub->disConnectMediaServer(); + } + yang_stop(m_pub); + yang_stop_thread(m_pub); + yang_delete(m_pub); + if(m_cap) m_cap->deleteVideoEncoding(); +} +YangVideoBuffer* YangPushHandleImpl::getPreVideoBuffer() { + if (m_videoState == Yang_VideoSrc_Camera) { + if (m_cap) return m_cap->getPreVideoBuffer(); + } else if (m_videoState == Yang_VideoSrc_Screen) { + if (m_cap) return m_cap->getScreenPreVideoBuffer(); + }else if (m_videoState == Yang_VideoSrc_OutInterface) { + if(m_cap) return m_cap->getOutPreVideoBuffer(); + } + return NULL; +} +void YangPushHandleImpl::setScreenVideoInfo(int videoSrcType,YangVideoInfo* pvideo){ + m_videoState = videoSrcType; + if (videoSrcType == Yang_VideoSrc_Screen) { + m_screenInfo = pvideo; + } else if (m_videoState == Yang_VideoSrc_OutInterface) { + m_outInfo = pvideo; + } +} + + + +int YangPushHandleImpl::publish(string url, string localIp, int32_t localPort) { + + int err = Yang_Ok; + memset(&m_url,0,sizeof(m_url)); + if (yang_srs_url_parse((char*) url.c_str(), &m_url)) return 1; + if(m_url.netType==Yang_Rtmp){ + + } + + stopPublish(); + yang_trace("\nnetType==%d,server=%s,port=%d,app=%s,stream=%s\n", + m_url.netType, m_url.server, m_url.port, m_url.app, + m_url.stream); + + if (m_pub == NULL) { + m_pub = new YangRtcPublish(m_context); + + } + if(m_hasAudio) { + m_hasAudio=bool(m_cap->startAudioCapture()==Yang_Ok); + } + if (m_hasAudio) { + + m_cap->initAudioEncoding(); + } + + m_cap->initVideoEncoding(); + m_cap->setNetBuffer(m_pub); + + if (m_hasAudio) + m_cap->startAudioEncoding(); + m_cap->startVideoEncoding(); + if ((err = m_pub->init(m_url.netType, m_url.server, localIp, localPort, + 1985, m_url.app, m_url.stream)) != Yang_Ok) { + return yang_error_wrap(err, " connect server failure!"); + } + + m_pub->start(); + if (m_hasAudio) + m_cap->startAudioCaptureState(); + if (m_videoState == Yang_VideoSrc_Camera) + m_cap->startVideoCaptureState(); + else if(m_videoState == Yang_VideoSrc_Screen){ + m_cap->startScreenCaptureState(); + } + return err; + +} + +void YangPushHandleImpl::startCamera() { + if(m_cap) m_cap->startCamera(); +} + +void YangPushHandleImpl::startScreen() { + if(m_cap) m_cap->startScreen(); +} + +void YangPushHandleImpl::stopCamera() { + if(m_cap) m_cap->stopCamera(); +} + +void YangPushHandleImpl::stopScreen() { + if(m_cap) m_cap->stopScreen(); +} +#if Yang_HaveVr +void YangPushHandleImpl::addVr(){ + if(m_cap) m_cap->addVr(); +} +void YangPushHandleImpl::delVr(){ + if(m_cap) m_cap->delVr(); + +} +#endif diff --git a/libmetartc3/src/yangpush/YangPushHandleImpl.h b/libmetartc3/src/yangpush/YangPushHandleImpl.h new file mode 100755 index 00000000..baf656d1 --- /dev/null +++ b/libmetartc3/src/yangpush/YangPushHandleImpl.h @@ -0,0 
+1,61 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGPUSH_YANGPUSHHANDLEIMPL_H_ +#define YANGPUSH_YANGPUSHHANDLEIMPL_H_ +#include +#include +#include +#include +#include +#include +class YangPushHandleImpl :public YangPushHandle{ +public: + YangPushHandleImpl(bool hasAudio,bool initVideo,int pvideotype,YangVideoInfo* screenvideo,YangVideoInfo* outvideo,YangContext* pcontext,YangSysMessageI* pmessage); + virtual ~YangPushHandleImpl(); + void init(); + void startCapture(); + int publish(string url,string localIp,int32_t localport); + void setScreenVideoInfo(int videoSrcType,YangVideoInfo* pvideo); + void setScreenInterval(int32_t pinterval); + YangVideoBuffer* getPreVideoBuffer(); + void recordFile(char* filename); + void stopRecord(); + void disconnect(); + void changeSrc(int videoSrcType,bool pisinit); + void setDrawmouse(bool isDraw); + void addVr(); + void delVr(); + + YangSendVideoI* getSendVideo(); + YangSendVideoImpl* m_send; + +private: + void startCamera(); + void startScreen(); + void stopCamera(); + void stopScreen(); + void stopPublish(); + + void switchToCamera(bool pisinit); + void switchToScreen(bool pisinit); + void switchToOutside(bool pisinit); + +private: + bool m_hasAudio; + int m_videoState; + bool m_isInit; + + YangPushPublish* m_cap; + YangRtcPublish* m_pub; + YangRecordHandle *m_rec; + YangContext* m_context; + YangUrlData m_url; + YangSysMessageI* m_message; + YangVideoInfo* m_screenInfo; + + YangVideoInfo* m_outInfo; + +}; + +#endif /* YANGPUSH_YANGPUSHHANDLEIMPL_H_ */ diff --git a/libmetartc3/src/yangpush/YangPushMessageHandle.cpp b/libmetartc3/src/yangpush/YangPushMessageHandle.cpp new file mode 100755 index 00000000..479f1f21 --- /dev/null +++ b/libmetartc3/src/yangpush/YangPushMessageHandle.cpp @@ -0,0 +1,118 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangPushMessageHandle.h" +#include +#include +#include +#include +#include +#include +#include +#include + +YangPushMessageHandle::YangPushMessageHandle(bool hasAudio,int pvideotype,YangVideoInfo* screenvideo,YangVideoInfo* outvideo,YangContext* pcontext,YangSysMessageI* pmessage,YangSysMessageHandleI* pmessageHandle) { + +// m_receive = prec; + m_context=pcontext; + m_receive= pmessageHandle; + m_push = new YangPushHandleImpl(hasAudio,false,pvideotype,screenvideo,outvideo,pcontext,pmessage); + +} + +YangPushMessageHandle::~YangPushMessageHandle() { + deleteAll(); +} +void YangPushMessageHandle::initAll(){ + +} +void YangPushMessageHandle::deleteAll(){ + m_context=NULL; + yang_delete(m_push); +} +int32_t YangPushMessageHandle::pushPublish(char* user){ + if(m_push==NULL) return 1; + char url[256]={0}; + yang_getLocalInfo(url); + return m_push->publish(user,url,m_context->avinfo.sys.rtcLocalPort); +} +void YangPushMessageHandle::handleMessage(YangSysMessage *mss) { + + + int32_t ret = Yang_Ok; + + switch (mss->messageId) { + case YangM_Push_StartVideoCapture: + { + if(m_push) m_push->changeSrc(Yang_VideoSrc_Camera,false); + break; + } + case YangM_Push_StartScreenCapture: + { + if(m_push) m_push->changeSrc(Yang_VideoSrc_Screen,false); + break; + } + case YangM_Push_StartOutCapture: + { + if(m_push) m_push->changeSrc(Yang_VideoSrc_OutInterface,false); + break; + } + case YangM_Push_Publish_Start: + { + if(mss->user&&m_push) ret = pushPublish((char*)mss->user); + break; + } + case YangM_Push_Publish_Stop: + { + if(m_push) m_push->disconnect(); + break; + } + case YangM_Push_Record_Start: + { + if(mss->user&&m_push) m_push->recordFile((char*)mss->user); + break; + } + case 
YangM_Push_Record_Stop: + { + if(m_push) m_push->stopRecord(); + break; + } + case YangM_Push_SwitchToCamera: + { + if(m_push) + m_push->changeSrc(Yang_VideoSrc_Camera,false); + break; + } + case YangM_Push_SwitchToScreen: + { + if(m_push) + m_push->changeSrc(Yang_VideoSrc_Screen,false); + break; + } + case YangM_Sys_Setvr: + ret =Yang_Ok; +#if Yang_HaveVr + if(m_push) m_push->addVr(); +#endif + break; + case YangM_Sys_UnSetvr: + ret = Yang_Ok; +#if Yang_HaveVr + if(m_push) m_push->delVr(); +#endif + break; + + + } + + if (mss->handle) { + if (ret) + mss->handle->failure(ret); + else + mss->handle->success(); + } + + if (m_receive) + m_receive->receiveSysMessage(mss,ret); +} + diff --git a/libmetartc3/src/yangpush/YangPushMessageHandle.h b/libmetartc3/src/yangpush/YangPushMessageHandle.h new file mode 100755 index 00000000..1869e8b6 --- /dev/null +++ b/libmetartc3/src/yangpush/YangPushMessageHandle.h @@ -0,0 +1,32 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#ifndef SRC_YANGMEETING_SRC_YangPushMessageHandle_H_ +#define SRC_YANGMEETING_SRC_YangPushMessageHandle_H_ +#include "YangPushHandleImpl.h" + +#include +#include +#include +#include "yangutil/sys/YangThread.h" + +using namespace std; + +class YangPushMessageHandle :public YangSysMessageHandle{ +public: + YangPushMessageHandle(bool hasAudio,int pvideotype,YangVideoInfo* screenvideo,YangVideoInfo* outvideo,YangContext* pcontext,YangSysMessageI* pmessage,YangSysMessageHandleI* pmessageHandle); + virtual ~YangPushMessageHandle(); + + void initAll(); + void deleteAll(); + void handleMessage(YangSysMessage* mss); + YangPushHandleImpl* m_push; +private: + int32_t pushPublish(char* user); + YangContext *m_context; + YangSysMessageHandleI *m_receive; + +}; + +#endif /* SRC_YANGMEETING_SRC_YangPushMessageHandle_H_ */ diff --git a/libmetartc3/src/yangpush/YangPushPublish.cpp b/libmetartc3/src/yangpush/YangPushPublish.cpp new file mode 100755 index 00000000..0410c40b --- /dev/null +++ b/libmetartc3/src/yangpush/YangPushPublish.cpp @@ -0,0 +1,219 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include + + +YangPushPublish::YangPushPublish(YangContext *pcontext) { + m_context = pcontext; + m_context->streams.setSendRequestCallback(this); + m_videoInfo=&pcontext->avinfo.video; + m_encoder = NULL; + m_capture = NULL; + m_outPreVideoBuffer=NULL; + m_outVideoBuffer=NULL; + isStartAudioCapture = 0, isStartVideoCapture = 0,isStartScreenCapture=0; + isStartAudioEncoder = 0, isStartVideoEncoder = 0; + m_captureType=Yang_VideoSrc_Camera; + +} + +YangPushPublish::~YangPushPublish() { + stopAll(); + m_context = NULL; + + yang_delete(m_encoder); + yang_delete(m_capture); + yang_delete(m_outPreVideoBuffer); + yang_delete(m_outVideoBuffer); +} +void YangPushPublish::sendRequest(int32_t puid,uint32_t ssrc,YangRequestType req){ + if(req==Yang_Req_Sendkeyframe) { + sendKeyframe(); + } + if(req==Yang_Req_Connected) { + + + } +} +void YangPushPublish::setCaptureType(int pct){ + m_captureType=pct; +} +void YangPushPublish::setVideoInfo(YangVideoInfo* pvideo){ + m_videoInfo=pvideo; + if(m_captureType==Yang_VideoSrc_OutInterface) { + if(m_outVideoBuffer==NULL) m_outVideoBuffer=new YangVideoBuffer(pvideo->width,pvideo->height,pvideo->videoEncoderFormat,m_context->avinfo.video.bitDepth==8?1:2); + if(m_outPreVideoBuffer==NULL) m_outPreVideoBuffer=new YangVideoBuffer(pvideo->width,pvideo->height,pvideo->videoEncoderFormat,m_context->avinfo.video.bitDepth==8?1:2); + } +} +void YangPushPublish::stopAll(){ + if(m_capture) m_capture->stopAll(); + 
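	// Shutdown counterpart of the start-up path in YangPushHandleImpl::publish():
	// capture is stopped first, then the encoders. A hedged sketch of the whole
	// pipeline from the caller's side (pub is an assumed, already constructed
	// YangPushPublish*):
	//
	//   pub->startVideoCapture();   // guarded by the isStartVideoCapture flag
	//   pub->initVideoEncoding();   // wires the capture buffer into the encoder
	//   pub->startVideoEncoding();
	//   ...
	//   pub->stopAll();             // stops capture, then encoder threads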
if(m_encoder) m_encoder->stopAll(); +} +void YangPushPublish::sendKeyframe(){ + if(m_encoder) m_encoder->sendKeyframe(); +} + +YangPushCapture* YangPushPublish::getPushCapture(){ + return m_capture; +} + +int32_t YangPushPublish::startAudioCapture() { + if (isStartAudioCapture == 1) return Yang_Ok; + if (m_capture == NULL) m_capture=new YangPushCapture(m_context); + int32_t err=Yang_Ok; + if((err=m_capture->initAudio(NULL))!=Yang_Ok) return yang_error_wrap(err,"init audioCapture fail"); + + m_capture->startAudioCapture(); + isStartAudioCapture = 1; + return err; + +} +int32_t YangPushPublish::startVideoCapture() { + if (isStartVideoCapture == 1) return Yang_Ok; + if (m_capture == NULL) m_capture=new YangPushCapture(m_context); + int32_t err=Yang_Ok; + if((err=m_capture->initVideo())!=Yang_Ok) return yang_error_wrap(err,"init videoCapture fail"); + m_capture->startVideoCapture(); + isStartVideoCapture = 1; + return err; +} + +int32_t YangPushPublish::startScreenCapture(){ + if (isStartScreenCapture == 1) return Yang_Ok; + if (m_capture == NULL) m_capture = new YangPushCapture(m_context); + m_capture->initScreen(); + m_capture->startScreenCapture(); + isStartScreenCapture = 1; + return Yang_Ok; +} +void YangPushPublish::setNetBuffer(YangRtcPublish *prr){ + yang_reindex(m_encoder->getOutAudioBuffer()); + yang_reindex(m_encoder->getOutVideoBuffer()); + m_encoder->getOutVideoBuffer()->resetIndex(); + prr->setInAudioList(m_encoder->getOutAudioBuffer()); + prr->setInVideoList(m_encoder->getOutVideoBuffer()); + prr->setInVideoMetaData(m_encoder->getOutVideoMetaData()); +} +void YangPushPublish::initAudioEncoding() { + + if (isStartAudioEncoder == 1) return; + if (m_encoder == NULL) + m_encoder = new YangPushEncoder(m_context); + m_encoder->initAudioEncoder(); + m_encoder->setInAudioBuffer(m_capture->getOutAudioBuffer()); + + isStartAudioEncoder = 1; +} + +void YangPushPublish::change(int32_t st){ + if(m_capture) m_capture->change(st); +} +void YangPushPublish::setInAudioBuffer(vector *pbuf){ + if(m_capture) m_capture->setInAudioBuffer(pbuf); +} +void YangPushPublish::initVideoEncoding() { + if (isStartVideoEncoder == 1) return; + if (m_encoder == NULL) + m_encoder = new YangPushEncoder(m_context); + m_encoder->setVideoInfo(m_videoInfo); + + m_encoder->initVideoEncoder(); + if(m_captureType==Yang_VideoSrc_Camera){ + m_encoder->setInVideoBuffer(m_capture->getOutVideoBuffer()); + }else if(m_captureType==Yang_VideoSrc_Screen){ + m_encoder->setInVideoBuffer(m_capture->getScreenOutVideoBuffer()); + }else if(m_captureType==Yang_VideoSrc_OutInterface){ + m_encoder->setInVideoBuffer(m_outVideoBuffer); + } + isStartVideoEncoder = 1; +} +void YangPushPublish::startAudioEncoding() { + if (m_encoder) + m_encoder->startAudioEncoder(); +} +void YangPushPublish::startVideoEncoding() { + if (m_encoder) + m_encoder->startVideoEncoder(); +} +void YangPushPublish::deleteVideoEncoding(){ + if(m_encoder) m_encoder->deleteVideoEncoder(); + isStartVideoEncoder=0; + +} +void YangPushPublish::startAudioCaptureState() { + if (m_capture ) + m_capture->startAudioCaptureState(); +} +YangVideoBuffer* YangPushPublish::getPreVideoBuffer(){ + if (m_capture ) return m_capture->getPreVideoBuffer(); + return NULL; +} + +void YangPushPublish::startVideoCaptureState() { + if (m_capture ) + m_capture->startVideoCaptureState(); +} +void YangPushPublish::startScreenCaptureState(){ + if (m_capture ) + m_capture->startScreenCaptureState(); +} +void YangPushPublish::stopAudioCaptureState() { + if (m_capture ) + 
m_capture->stopAudioCaptureState(); +} +void YangPushPublish::stopVideoCaptureState() { + if (m_capture ) + m_capture->stopVideoCaptureState(); +} +void YangPushPublish::stopScreenCaptureState(){ + if (m_capture ) + m_capture->stopScreenCaptureState(); +} +#if Yang_HaveVr +void YangPushPublish::addVr(){ + if (m_capture ) m_capture->addVr(); +} +void YangPushPublish::delVr(){ + if (m_capture ) m_capture->delVr(); +} +#endif +YangVideoBuffer* YangPushPublish::getScreenPreVideoBuffer() { + if (m_capture ) return m_capture->getScreenPreVideoBuffer(); + return NULL; +} +YangVideoBuffer* YangPushPublish::getScreenOutVideoBuffer() { + if (m_capture ) return m_capture->getScreenOutVideoBuffer(); + return NULL; +} +YangVideoBuffer* YangPushPublish::getOutPreVideoBuffer(){ + return m_outPreVideoBuffer; +} + YangVideoBuffer* YangPushPublish::getOutVideoBuffer(){ + return m_outVideoBuffer; + + } +void YangPushPublish::startCamera() { + startVideoCapture(); +} + +void YangPushPublish::startScreen() { + startScreenCapture(); +} + +void YangPushPublish::stopCamera() { + if (m_capture ) m_capture->stopCamera(); +} + +void YangPushPublish::stopScreen() { + if (m_capture ) m_capture->stopScreen(); +} + +void YangPushPublish::setScreenInterval(int32_t pinterval) { + if(m_capture) m_capture->setScreenInterval(pinterval); +} + +void YangPushPublish::setDrawmouse(bool isDraw) { + if(m_capture) m_capture->setDrawmouse(isDraw); +} diff --git a/libmetartc3/src/yangpush/YangRtcPublish.cpp b/libmetartc3/src/yangpush/YangRtcPublish.cpp new file mode 100755 index 00000000..9dd9574e --- /dev/null +++ b/libmetartc3/src/yangpush/YangRtcPublish.cpp @@ -0,0 +1,257 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#include + +#include +#include +#include +#include +#include +YangRtcPublish::YangRtcPublish(YangContext *pcontext) { + m_context = pcontext; + + m_in_videoBuffer = NULL; + m_in_audioBuffer = NULL; + m_isStart = 0; + m_isConvert = 0; + m_vmd = NULL; + m_audioEncoderType = m_context->avinfo.audio.audioEncoderType; + m_netState = 1; + m_isInit = 0; + isPublished = 0; + + m_transType=Yang_Webrtc; + notifyState=0; +} + +YangRtcPublish::~YangRtcPublish() { + if (m_isConvert) { + stop(); + while (m_isStart) { + yang_usleep(1000); + } + } + m_context = NULL; + m_in_videoBuffer = NULL; + m_in_audioBuffer = NULL; + m_vmd = NULL; +} + + +int32_t YangRtcPublish::connectServer(int32_t puid){ + return m_pushs.back()->connectServer(m_pushs.back()->context); +} + + + +int32_t YangRtcPublish::reconnectMediaServer() { + return m_pushs.back()->reconnect(m_pushs.back()->context); + +} + +int32_t YangRtcPublish::init(int32_t nettype, string server, string localIp,int32_t localPort,int32_t pport, + string app,string stream) { + + int32_t ret = 0; + YangStreamConfig streamConf; + memset(&streamConf,0,sizeof(streamConf)); + + strcpy(streamConf.app,app.c_str()); + streamConf.streamOptType=Yang_Stream_Publish; + + strcpy(streamConf.serverIp,server.c_str()); + streamConf.serverPort=pport; + + strcpy(streamConf.stream,stream.c_str()); + streamConf.uid=0;//0 singleuser 1 multiuser + memset(streamConf.localIp,0,sizeof(streamConf.localIp)); + strcpy(streamConf.localIp,localIp.c_str()); + + streamConf.localPort=localPort; + + if (m_pushs.size() == 0) { + YangStreamHandle* sh=(YangStreamHandle*)calloc(sizeof(YangStreamHandle),1); + yang_create_streamHandle(m_context->avinfo.sys.transType,sh,0,&streamConf,&m_context->avinfo,&m_context->stream,NULL); + m_pushs.push_back(sh); + } + + + if 
(m_pushs.back()->isconnected(m_pushs.back()->context)) return Yang_Ok; + ret = m_pushs.back()->connectServer(m_pushs.back()->context); + + if (ret) return ret; + m_pushs.back()->context->streamInit = 1; + yang_reindex(m_in_audioBuffer); + yang_reindex(m_in_videoBuffer); + return Yang_Ok; + +} +int32_t YangRtcPublish::connectMediaServer() { + if(m_pushs.size()>0) return Yang_Ok; + + return Yang_Ok; +} +int32_t YangRtcPublish::disConnectMediaServer() { + if (m_pushs.size() > 0) { + m_pushs.back()->context->streamInit = 0; + yang_destroy_streamHandle(m_pushs.back()); + yang_free(m_pushs.back()); + + m_pushs.clear(); + } + return Yang_Ok; +} +void YangRtcPublish::stop() { + m_isConvert = 0; +} + +void YangRtcPublish::run() { + m_isStart = 1; + startLoop(); + m_isStart = 0; +} + +void YangRtcPublish::setInAudioList(YangAudioEncoderBuffer *pbuf) { + m_in_audioBuffer = pbuf; +} +void YangRtcPublish::setInVideoList(YangVideoEncoderBuffer *pbuf) { + m_in_videoBuffer = pbuf; +} +void YangRtcPublish::setInVideoMetaData(YangVideoMeta *pvmd) { + m_vmd = pvmd; +} + +void YangRtcPublish::startLoop() { + + isPublished = 0; + m_isConvert = 1; + + YangStreamCapture data; + memset(&data,0,sizeof(YangStreamCapture)); + yang_create_streamCapture(&data); + YangFrame audioFrame; + YangFrame videoFrame; + memset(&audioFrame,0,sizeof(YangFrame)); + memset(&videoFrame,0,sizeof(YangFrame)); + data.initAudio(data.context,m_context->avinfo.sys.transType,m_context->avinfo.audio.sample, m_context->avinfo.audio.channel, + (YangAudioCodec) m_context->avinfo.audio.audioEncoderType); + data.initVideo(data.context,m_context->avinfo.sys.transType); + YangVideoCodec videoType = (YangVideoCodec) m_context->avinfo.video.videoEncoderType; + + int32_t ret = Yang_Ok; + isPublished = 1; + notifyState=1; + YangVideoMeta* vmd=NULL; + if(!m_context->avinfo.enc.createMeta){ + vmd=new YangVideoMeta(); + } + //uint8_t* tmp=NULL; + YangH264NaluData nalu; + //uint8_t meta[200] = { 0 }; + + while (m_isConvert == 1) { + + + if ((m_in_videoBuffer && m_in_videoBuffer->size() == 0) + && (m_in_audioBuffer && m_in_audioBuffer->size() == 0)) { + yang_usleep(2000); + continue; + } + if (m_pushs.size() == 0 || !m_pushs.back()->context->streamInit) { + yang_usleep(500); + continue; + } + YangStreamHandle* stream=m_pushs.back(); + + if(stream->isconnected(stream->context)){ + if(notifyState&&m_transTypestreams.sendRequest(0,0,Yang_Req_Connected); + notifyState=0; + } + + }else{ + yang_usleep(500); + continue; + } + + if (m_in_audioBuffer && m_in_audioBuffer->size() > 0) { + + audioFrame.payload = m_in_audioBuffer->getAudioRef(&audioFrame); + data.setAudioData(data.context,&audioFrame); + + //for (i = 0; i < m_pushs.size(); i++) { + ret = stream->publishAudioData(stream->context,&data); + if (ret&&!stream->isconnected(stream->context)) { + stream->context->streamInit = 0; + stream->disConnectServer(stream->context); + //yang_post_message(YangM_Sys_PushMediaServerError,m_pushs.back()->m_uid,NULL); + } + //} + } + + if (m_in_videoBuffer && m_in_videoBuffer->size() > 0) { + + videoFrame.payload = m_in_videoBuffer->getEVideoRef(&videoFrame); + + if (videoFrame.frametype == YANG_Frametype_I) { + + if (m_vmd) { + data.setVideoMeta(data.context,m_vmd->livingMeta.buffer, + m_vmd->livingMeta.bufLen, videoType); + //if (ret) continue; + } else { + if (!vmd->isInit) { + if (videoType == Yang_VED_264) { + yang_createH264Meta(vmd, &videoFrame); + yang_getConfig_Flv_H264(&vmd->mp4Meta, + vmd->livingMeta.buffer, + &vmd->livingMeta.bufLen); + } else if (videoType == 
Yang_VED_265) { + yang_createH265Meta(vmd, &videoFrame); + yang_getConfig_Flv_H265(&vmd->mp4Meta, + vmd->livingMeta.buffer, + &vmd->livingMeta.bufLen); + } + } + data.setVideoMeta(data.context,vmd->livingMeta.buffer, + vmd->livingMeta.bufLen, videoType); + } + data.setVideoFrametype(data.context,YANG_Frametype_Spspps); + ret = stream->publishVideoData(stream->context,&data); + + + + if (!m_context->avinfo.enc.createMeta) { + memset(&nalu, 0, sizeof(YangH264NaluData)); + yang_parseH264Nalu(&videoFrame, &nalu); + if (nalu.keyframePos > -1) { + videoFrame.payload += nalu.keyframePos + 4; + videoFrame.nb -= (nalu.keyframePos + 4); + + } else { + videoFrame.payload = NULL; + continue; + } + } + } + + data.setVideoData(data.context,&videoFrame, videoType); + //for (i = 0; i < m_pushs.size(); i++) { + ret = stream->publishVideoData(stream->context,&data); + + if (ret && !stream->isconnected(stream->context)) { + stream->context->streamInit = 0; + stream->disConnectServer(stream->context); + + } + + } //end + } + isPublished = 0; + yang_destroy_streamCapture(&data); + yang_delete(vmd); +} diff --git a/libmetartc3/src/yangpush/YangSendVideoImpl.cpp b/libmetartc3/src/yangpush/YangSendVideoImpl.cpp new file mode 100755 index 00000000..39170225 --- /dev/null +++ b/libmetartc3/src/yangpush/YangSendVideoImpl.cpp @@ -0,0 +1,44 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include + +YangSendVideoImpl::YangSendVideoImpl() { + m_outVideoBuffer=NULL; + m_outPreVideoBuffer=NULL; + m_width=1920; + m_height=1080; + m_buf=NULL; + m_len=0; + memset(&m_videoFrame,0,sizeof(YangFrame)); + +} + +YangSendVideoImpl::~YangSendVideoImpl() { + yang_deleteA(m_buf); +} +void YangSendVideoImpl::init(int32_t wid,int32_t hei){ + m_width=wid; + m_height=hei; +} +void YangSendVideoImpl::putVideoRgba(uint8_t *data, int len,int64_t timestamp) { + if(m_buf==NULL){ + m_len=m_width*m_height*3/2; + m_buf=new uint8_t[m_len]; + + } + + m_yuv.bgratoI420(data,m_buf,m_width,m_height); + putVideoI420(m_buf,m_len,timestamp); + +} + +void YangSendVideoImpl::putVideoI420(uint8_t *data, int len,int64_t timestamp) { + m_videoFrame.payload=data; + m_videoFrame.nb=len; + m_videoFrame.pts=timestamp; + if(m_outVideoBuffer) m_outVideoBuffer->putVideo(&m_videoFrame); + if(m_outPreVideoBuffer) m_outPreVideoBuffer->putVideo(&m_videoFrame); + + +} diff --git a/libmetartc3/src/yangpush/YangSendVideoImpl.h b/libmetartc3/src/yangpush/YangSendVideoImpl.h new file mode 100755 index 00000000..e424e8a5 --- /dev/null +++ b/libmetartc3/src/yangpush/YangSendVideoImpl.h @@ -0,0 +1,32 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef SRC_YANGPUSH_YANGSENDVIDEOIMPL_H_ +#define SRC_YANGPUSH_YANGSENDVIDEOIMPL_H_ +#include +#include +#include +class YangSendVideoImpl:public YangSendVideoI { +public: + YangSendVideoImpl(); + virtual ~YangSendVideoImpl(); + void putVideoRgba(uint8_t* data,int len,int64_t timestamp); + void putVideoI420(uint8_t* data,int len,int64_t timestamp); + void init(int32_t wid,int32_t hei); + + + YangVideoBuffer* m_outPreVideoBuffer; + YangVideoBuffer* m_outVideoBuffer; +private: + YangYuvConvert m_yuv; + YangFrame m_videoFrame; + uint8_t* m_buf; + + int32_t m_width; + int32_t m_height; + int32_t m_len; + + +}; + +#endif /* SRC_YANGPUSH_YANGSENDVIDEOIMPL_H_ */ diff --git a/libmetartc3/src/yangrecord/YangFlvWrite.cpp b/libmetartc3/src/yangrecord/YangFlvWrite.cpp new file mode 100755 index 00000000..54e0a041 --- /dev/null +++ b/libmetartc3/src/yangrecord/YangFlvWrite.cpp @@ -0,0 +1,486 @@ +// +// Copyright (c) 2019-2022 yanggaofeng 
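//
// The writer below emits FLV tags by hand. Each tag is preceded by the
// previous tag's size (UI32) and starts with an 11-byte header:
//
//   byte 0      TagType            (0x08 audio, 0x09 video, 0x12 script data)
//   bytes 1-3   DataSize           (UI24, payload size after this header)
//   bytes 4-6   Timestamp          (UI24, milliseconds)
//   byte 7      TimestampExtended  (UI8, upper 8 bits of the timestamp)
//   bytes 8-10  StreamID           (UI24, always 0)
//
// WriteAudioPacket()/WriteVideoPacket() fill these fields in tag_hdr before
// appending the codec-specific bytes and the payload.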
+// +#include + +#include +#include +#include +#include +#include +#include "yangutil/yang_unistd.h" +#include "yangutil/sys/YangAmf.h" +#define REFERENCE_TIME int64_t +//#define _T(x) x +typedef uint8_t uint8; + + +//----------------------------------------------------------------------------- +// +// FLVWriter class +// +//----------------------------------------------------------------------------- + +YangFlvWriter::YangFlvWriter(char* fileName, YangAudioInfo *paudio,YangVideoInfo *pvideo) + +{ + m_audio=paudio; + m_video=pvideo; + m_Video_Bit_Count = m_video->videoCaptureFormat==0?16:12; + m_Video_Width = m_video->width; + m_Video_Height = m_video->height; + + video_codec = FLV_CODEC_H264; + audio_codec = m_audio->audioEncoderType==0?FLV_CODEC_AAC:FLV_CODEC_MP3; + + video_raw_size = 0; + time_first_ms = 0; + time_last_ms = 0; + video_frames = 0; + audio_channels = 0; + audio_samplerate = 0; + is_first = true; + duration_ms = 0; + video_fps = 0; + file_size = 0; + vcount = 0; + vtime = 0; + atime = 0; + perSt = (m_audio->audioEncoderType==0?1024.0:1152.0) * 1000.0 / 44100.0; + atime1 = 0; + //vtime1 = 0; + vtcou = 0; + file = fopen(fileName,"wb"); + pre_vt=0; + framerate=30; + pre_at=0; + metadatapos=0; + i_bitrate=0; + i_level_idc=0; + last_tag_size=0; + + +} + +YangFlvWriter::~YangFlvWriter() { + m_audio=NULL; + m_video=NULL; + + Reset(); +} + +int32_t YangFlvWriter::Reset() { + + video_raw_size = 0; + video_frames = 0; + audio_channels = 0; + audio_samplerate = 0; + last_tag_size = 0; + video_fps = 0; + file_size = 0; + + // no streams... + video_codec = FLV_CODEC_H264; + audio_codec = m_audio->audioEncoderType==0?FLV_CODEC_AAC:FLV_CODEC_MP3; + + time_first_ms = 0; + time_last_ms = 0; + duration_ms = 0; + is_first = true; + vtime = 0; + atime = 0; + + return 0; +} + +int32_t YangFlvWriter::Start() { + + // write the file header + BYTE header[9] = { 'F', 'L', 'V', // FLV file signature + 0x01, // FLV file version = 1 + 0, // Flags - modified later + 0, 0, 0, 9 // size of the header + }; + + header[4] |= 0x01; + //header[4] |= 0x04; + //rewind(file); + //io->Seek(0); + file_size = 0; + fwrite(header, 1, sizeof(header), file); + + metadatapos = ftell(file); + WriteMetaData(); + + file_size = ftell(file); + metadatapos = file_size; + return 0; +} + +int32_t YangFlvWriter::Stop() { + + BYTE prev[4]; + //long ttt=vtime; + prev[0] = (last_tag_size >> 24) & 0xff; + prev[1] = (last_tag_size >> 16) & 0xff; + prev[2] = (last_tag_size >> 8) & 0xff; + prev[3] = (last_tag_size >> 0) & 0xff; + fwrite(prev, 1, 4, file); + uint8_t last[] = { 0x09, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x17, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x10 }; + last[4] = (vtime >> 16) & 0xff; // TimeStamp UI24 + last[5] = (vtime >> 8) & 0xff; + last[6] = (vtime >> 0) & 0xff; + last[7] = (vtime >> 24) & 0xff; + file_size += (4 + sizeof(last)); + fwrite(last, 1, sizeof(last), file); + //printf("\nvtime==%d,atime=%d,filesieze=%d\n", vtime, atime,file_size); + rewind(file); + fseek(file, 9, SEEK_SET); + WriteMetaData(); + + return 0; +} +void YangFlvWriter::Close() { + Stop(); + fflush(file); + fclose(file); + file = NULL; +} +int32_t YangFlvWriter::MakeAVCc(char* data, int32_t size, char *output_data, + int32_t output_size) { + if (!data || size <= 0) + return -1; + int32_t ps_size = (data[0] << 8) | (data[1]); + int32_t ss_size = (data[ps_size + 2] << 8) | (data[ps_size + 3]); + int32_t buf_size = 6 + ps_size + 2 + 1 + ss_size + 2; + + if (buf_size > output_size) + return -1; + + char* temp = 
data; + char* output_temp = output_data; + + output_temp[0] = 1; + output_temp[1] = temp[3]; + output_temp[2] = temp[4]; + output_temp[3] = temp[5]; + output_temp[4] = 0xff; + output_temp[5] = 0xe1; + output_temp += 6; + + memcpy(output_temp, temp, ps_size + 2); + output_temp += ps_size + 2; + temp += ps_size + 2; + + output_temp[0] = 1; + output_temp += 1; + + memcpy(output_temp, temp, ss_size + 2); + + return buf_size; +} + +void YangFlvWriter::WriteVideoInfo(uint8_t* buf1, int32_t buflen) { + + /**BYTE prev[4]; + BYTE tag_hdr[16]; + int32_t tag_data_size(0); + int32_t towrite(0); + BYTE *data(NULL); + int32_t size(0);**/ + + memset(tag_hdr, 0, sizeof(tag_hdr)); + //memset(prev,0,4); + prev[0] = (last_tag_size >> 24) & 0xff; + prev[1] = (last_tag_size >> 16) & 0xff; + prev[2] = (last_tag_size >> 8) & 0xff; + prev[3] = (last_tag_size >> 0) & 0xff; + fwrite(prev, 1, 4, file); + + //int32_t extradata_size = MakeAVCc(video_extradata.data, video_extradata.size, extradata, sizeof(extradata)); + //tag_data_size = 5 + extradata_size; + tag_hdr[0] = 0x09; + tag_hdr[11] = 0x17; + //towrite = 16; + //data = (BYTE*)extradata; + //size = extradata_size; + + tag_hdr[1] = (buflen >> 16) & 0xff; + tag_hdr[2] = (buflen >> 8) & 0xff; + tag_hdr[3] = (buflen >> 0) & 0xff; + + //last_tag_size = tag_data_size + 11; + fwrite(tag_hdr, 1, 11, file); + fwrite(buf1, 1, buflen, file); + +} + +int32_t YangFlvWriter::WriteAudioPacket(YangFrame* audioFrame) { + if (!file) + return 0; + //printf("%d,",p_len); + //BYTE tag_hdr[20]; + memset(tag_hdr, 0, sizeof(tag_hdr)); + tag_hdr[0] = 0x08; + int p_len=audioFrame->nb; + int32_t tag_data_size = p_len; + tag_data_size += 1; + tag_hdr[1] = ((p_len + 1) >> 16) & 0xff; + tag_hdr[2] = ((p_len + 1) >> 8) & 0xff; + tag_hdr[3] = ((p_len + 1) >> 0) & 0xff; + + //__int64 timestamp_ms = 0; + pre_at = atime; + + + vtcou++; + atime1 += perSt; + atime = (unsigned int) atime1; + tag_hdr[4] = (pre_at >> 16) & 0xff; // TimeStamp UI24 + tag_hdr[5] = (pre_at >> 8) & 0xff; + tag_hdr[6] = (pre_at >> 0) & 0xff; + tag_hdr[7] = (pre_at >> 24) & 0xff; // TimestampExtended UI8 + + // keep track of the last timestamp + time_last_ms = pre_at; + duration_ms = time_last_ms; // for now we consider the last timestamp duration + + // StreamID = always 0 + tag_hdr[8] = 0; + tag_hdr[9] = 0; + tag_hdr[10] = 0; + + /* + Now write the TAG + */ + + // 1. 
previous tag size + //BYTE prev[4]; + prev[0] = (last_tag_size >> 24) & 0xff; + prev[1] = (last_tag_size >> 16) & 0xff; + prev[2] = (last_tag_size >> 8) & 0xff; + prev[3] = (last_tag_size >> 0) & 0xff; + + fwrite(prev, 1, 4, file); + tag_hdr[11] = 0x2f; + fwrite(tag_hdr, 1, 12, file); + fwrite(audioFrame->payload, 1, p_len, file); + last_tag_size = p_len + 12; + file_size = ftell(file); //io->GetPosition(); + return 1; + +} +int32_t YangFlvWriter::WriteVideoPacket(YangFrame* videoFrame) { + if (!file) + return 0; + + + memset(tag_hdr, 0, sizeof(tag_hdr)); + // we support only two streams + + tag_hdr[0] = 0x09; + //printf("-a%d-", vtime); + // tag size + int32_t len=videoFrame->nb; + int32_t tag_data_size = len; + + // VIDEO DATA follows after the tag + tag_data_size += 5; + + tag_hdr[1] = ((len + 9) >> 16) & 0xff; + tag_hdr[2] = ((len + 9) >> 8) & 0xff; + tag_hdr[3] = ((len + 9) >> 0) & 0xff; + + time_first_ms = 0; // we will offset all timestamps by this value + pre_vt=vtime; + vtime = videoFrame->pts; + //printf("%d,",p_timestamp); + //vcount++; + + tag_hdr[4] = (pre_vt >> 16) & 0xff; // TimeStamp UI24 + tag_hdr[5] = (pre_vt >> 8) & 0xff; + tag_hdr[6] = (pre_vt >> 0) & 0xff; + tag_hdr[7] = (pre_vt >> 24) & 0xff; // TimestampExtended UI8 + + // keep track of the last timestamp + time_last_ms = pre_vt; + duration_ms = time_last_ms; // for now we consider the last timestamp duration + + // StreamID = always 0 + tag_hdr[8] = 0; + tag_hdr[9] = 0; + tag_hdr[10] = 0; + + /* + Now write the TAG + */ + + // 1. previous tag size + + prev[0] = (last_tag_size >> 24) & 0xff; + prev[1] = (last_tag_size >> 16) & 0xff; + prev[2] = (last_tag_size >> 8) & 0xff; + prev[3] = (last_tag_size >> 0) & 0xff; + + fwrite(prev, 1, 4, file); + + if (videoFrame->frametype == 1) + tag_hdr[11] = 0x17; + else + tag_hdr[11] = 0x27; + + tag_hdr[12] = 0x01; + //int32_t diff=33; + // tag_hdr[15]=0x42; + tag_hdr[19] = len & 0xff; + tag_hdr[18] = len >> 8; + tag_hdr[17] = len >> 16; + tag_hdr[16] = len >> 24; + + fwrite(tag_hdr, 1, 20, file); + fwrite(videoFrame->payload, 1, len, file); + + video_raw_size += len; //packet->size; + video_frames += 1; + + last_tag_size = len + 20; + file_size = ftell(file); //io->GetPosition(); + return 0; +} + +int32_t YangFlvWriter::WriteMetaData() { + + /* + We assemble some basic onMetaData structure. 
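	 On the wire this is AMF0: a string marker plus "onMetaData", then an AMF
	 object of key/value pairs in which every key is a length-prefixed string
	 and every value is a string or a Number (an 8-byte big-endian IEEE-754
	 double; put_amf_double() reverses the byte order on little-endian hosts
	 for this reason). The object is closed with an empty key followed by the
	 AMF_OBJECT_END marker, exactly as the code below does.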
+ */ + //Flash::AS_String name; + //Flash::AS_ECMA_Array vals; + + //name.value = _T("onMetaData"); + + /* + We create the following metadata + */ + char meta[1024]; + char * szTmp=meta; + + // char * szTmp=(char *)temp; + + // szTmp=yang_put_byte(szTmp, AMF_STRING ); + // szTmp=put_amf_string(szTmp, "@setDataFrame" ); + szTmp=yang_put_byte(szTmp, AMF_STRING ); + szTmp=put_amf_string(szTmp, "onMetaData" ); + szTmp=yang_put_byte(szTmp, AMF_OBJECT ); + + szTmp=put_amf_string( szTmp, "metadatacreator" ); + szTmp=yang_put_byte(szTmp, AMF_STRING ); + szTmp=put_amf_string( szTmp, "FLV Mux" ); + + szTmp=put_amf_string( szTmp, "duration" ); + szTmp=put_amf_double( szTmp, vtime/1000 ); + + szTmp=put_amf_string( szTmp, "filesize" ); + szTmp=put_amf_double( szTmp, file_size ); + + szTmp=put_amf_string( szTmp, "lasttimestamp" ); + szTmp=put_amf_double( szTmp, time_last_ms/1000 ); + + + szTmp=put_amf_string( szTmp, "hasVideo" ); + szTmp=put_amf_double( szTmp, 1 ); + + + + //szTmp=put_amf_string( szTmp, "Custom" ); + + szTmp=put_amf_string( szTmp, "width" ); + szTmp=put_amf_double( szTmp, m_Video_Width ); + + szTmp=put_amf_string( szTmp, "height" ); + szTmp=put_amf_double( szTmp, m_Video_Height ); + + szTmp=put_amf_string( szTmp, "framerate" ); + szTmp=put_amf_double( szTmp, m_video->frame); + + szTmp=put_amf_string( szTmp, "videocodecid" ); + szTmp=yang_put_byte(szTmp, AMF_STRING ); + szTmp=put_amf_string( szTmp, "avc1" ); + + szTmp=put_amf_string( szTmp, "videodatarate" ); + szTmp=put_amf_double( szTmp, i_bitrate ); + + szTmp=put_amf_string( szTmp, "avclevel" ); + szTmp=put_amf_double( szTmp, i_level_idc ); + + szTmp=put_amf_string( szTmp, "avcprofile" ); + szTmp=put_amf_double( szTmp, 0x42 ); + + szTmp=put_amf_string( szTmp, "videokeyframe_frequency" ); + szTmp=put_amf_double( szTmp, 3 ); + // szTmp=put_amf_string( szTmp, "" ); + + szTmp=put_amf_string( szTmp, "hasAudio" ); + szTmp=put_amf_double( szTmp, 1); + + szTmp=put_amf_string( szTmp, "audiocodecid" ); + szTmp=put_amf_double( szTmp, 2 ); + + + szTmp=put_amf_string( szTmp, "audiosamplesize" ); + szTmp=put_amf_double( szTmp, 16 ); + + szTmp=put_amf_string( szTmp, "stereo" ); + szTmp=put_amf_double( szTmp, 1 ); + + szTmp=put_amf_string( szTmp, "audiosamplerate" ); + szTmp=put_amf_double( szTmp, 44100 ); + + szTmp=put_amf_string( szTmp, "" ); + szTmp=yang_put_byte( szTmp, AMF_OBJECT_END ); + + int32_t total_size=szTmp-meta; + BYTE tag_hdr[] = { 0, 0, 0, 0, // previous tag size + 0x12, // Type UI8 = Script Data Tag, + (total_size >> 16) & 0xff, // DataSize UI24 + (total_size >> 8) & 0xff, (total_size >> 0) & 0xff, 0, 0, 0, 0, // TimeStamp UI24 + TimestampExtended UI8 + 0, 0, 0 // StreamID UI24 (always 0) + }; + + fwrite(tag_hdr, 1, sizeof(tag_hdr), file); + fwrite(meta, 1, total_size, file); + last_tag_size = (sizeof(tag_hdr) + total_size - 4); // the first 4 bytes don't count + szTmp=NULL; + + + return 0; +} + + +char * YangFlvWriter::put_amf_string( char *c, const char *str ) +{ + uint16_t len = strlen( str ); + c=yang_put_be16( c, len ); + memcpy(c,str,len); + return c+len; +} +char * YangFlvWriter::put_amf_double( char *c, double d ) +{ + *c++ = 0; /* type: Number */ + { + uint8_t *ci, *co; + ci = (uint8_t *)&d; + co = (uint8_t *)c; + co[0] = ci[7]; + co[1] = ci[6]; + co[2] = ci[5]; + co[3] = ci[4]; + co[4] = ci[3]; + co[5] = ci[2]; + co[6] = ci[1]; + co[7] = ci[0]; + } + return c+8; +} + diff --git a/libmetartc3/src/yangrecord/YangMp4File.cpp b/libmetartc3/src/yangrecord/YangMp4File.cpp new file mode 100755 index 00000000..3df640c7 --- 
/dev/null +++ b/libmetartc3/src/yangrecord/YangMp4File.cpp @@ -0,0 +1,193 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +void YangMp4File::loadLib() { + yang_MP4TagsAlloc = (const MP4Tags* (*)(void)) m_lib.loadFunction("MP4TagsAlloc"); + yang_MP4TagsFree=(void (*)( const MP4Tags* tags )) m_lib.loadFunction("MP4TagsFree"); + yang_MP4Create =(MP4FileHandle (*)(const char *fileName, uint32_t flags)) m_lib.loadFunction("MP4Create"); + yang_MP4TagsFetch =(bool (*)(const MP4Tags *tags, MP4FileHandle hFile)) m_lib.loadFunction("MP4TagsFetch"); + yang_MP4TagsSetSortArtist = (bool (*)(const MP4Tags*, const char*)) m_lib.loadFunction("MP4TagsSetSortArtist"); + yang_MP4TagsStore =(bool (*)(const MP4Tags *tags, MP4FileHandle hFile)) m_lib.loadFunction("MP4TagsStore"); + yang_MP4SetTimeScale =(bool (*)(MP4FileHandle hFile, uint32_t value)) m_lib.loadFunction("MP4SetTimeScale"); + yang_MP4SetVideoProfileLevel =(void (*)(MP4FileHandle hFile, uint8_t value))m_lib.loadFunction("MP4SetVideoProfileLevel"); + yang_MP4SetAudioProfileLevel =(void (*)(MP4FileHandle hFile, uint8_t value))m_lib.loadFunction("MP4SetAudioProfileLevel"); + yang_MP4AddAudioTrack =(MP4TrackId (*)(MP4FileHandle hFile, uint32_t timeScale,MP4Duration sampleDuration, uint8_t audioType))m_lib.loadFunction("MP4AddAudioTrack"); + yang_MP4AddH264VideoTrack = (MP4TrackId (*)(MP4FileHandle hFile,uint32_t timeScale, MP4Duration sampleDuration, uint16_t width, + uint16_t height, uint8_t AVCProfileIndication,uint8_t profile_compat,uint8_t AVCLevelIndication, + uint8_t sampleLenFieldSizeMinusOne))m_lib.loadFunction("MP4AddH264VideoTrack"); + yang_MP4SetTrackESConfiguration =(bool (*)(MP4FileHandle hFile, MP4TrackId trackId,const uint8_t *pConfig, uint32_t configSize))m_lib.loadFunction("MP4SetTrackESConfiguration"); + yang_MP4AddH264SequenceParameterSet =(void (*)(MP4FileHandle hFile, MP4TrackId trackId,const uint8_t *pSequence, uint16_t sequenceLen)) m_lib.loadFunction( + "MP4AddH264SequenceParameterSet"); + yang_MP4AddH264PictureParameterSet =(void (*)(MP4FileHandle hFile, MP4TrackId trackId, + const uint8_t *pPict, uint16_t pictLen)) m_lib.loadFunction("MP4AddH264PictureParameterSet"); + yang_MP4WriteSample = (bool (*)(MP4FileHandle hFile, MP4TrackId trackId,const uint8_t *pBytes, uint32_t numBytes, MP4Duration duration, + MP4Duration renderingOffset, bool isSyncSample)) m_lib.loadFunction("MP4WriteSample"); + yang_MP4Close =(void (*)(MP4FileHandle hFile, uint32_t flags)) m_lib.loadFunction("MP4Close"); + + yang_MP4AddH265VideoTrack=(MP4TrackId (*)( + MP4FileHandle hFile, + uint32_t timeScale, + MP4Duration sampleDuration, + uint16_t width, + uint16_t height, + uint8_t isIso)) m_lib.loadFunction("MP4AddH265VideoTrack"); + yang_MP4AddH265SequenceParameterSet=(void (*)( + MP4FileHandle hFile, + MP4TrackId trackId, + const uint8_t* pSequence, + uint16_t sequenceLen )) m_lib.loadFunction("MP4AddH265SequenceParameterSet"); + yang_MP4AddH265PictureParameterSet=(void (*)( + MP4FileHandle hFile, + MP4TrackId trackId, + const uint8_t* pPict, + uint16_t pictLen )) m_lib.loadFunction("MP4AddH265PictureParameterSet"); + yang_MP4AddH265VideoParameterSet=(void (*) (MP4FileHandle hFile, + MP4TrackId trackId, + const uint8_t *pSequence, + uint16_t sequenceLen)) m_lib.loadFunction("MP4AddH265VideoParameterSet"); + yang_MP4GetTrackTimeScale=(uint32_t (*) ( + MP4FileHandle hFile, + MP4TrackId trackId )) m_lib.loadFunction("MP4GetTrackTimeScale"); + yang_MP4SetTrackTimeScale=(bool (*) ( + MP4FileHandle hFile, + MP4TrackId trackId, + uint32_t 
value )) m_lib.loadFunction("MP4SetTrackTimeScale"); +} + +void YangMp4File::unloadLib() { + yang_MP4AddH265VideoTrack=NULL; + yang_MP4AddH265SequenceParameterSet=NULL; + yang_MP4AddH265PictureParameterSet=NULL; + yang_MP4AddH265VideoParameterSet=NULL; + yang_MP4TagsAlloc = NULL; + yang_MP4TagsFree=NULL; + yang_MP4Create = NULL; + yang_MP4TagsFetch = NULL; + yang_MP4TagsSetSortArtist = NULL; + yang_MP4TagsStore = NULL; + yang_MP4SetTimeScale = NULL; + yang_MP4SetVideoProfileLevel = NULL; + yang_MP4SetAudioProfileLevel = NULL; + yang_MP4AddAudioTrack = NULL; + yang_MP4AddH264VideoTrack = NULL; + yang_MP4SetTrackESConfiguration = NULL; + yang_MP4AddH264SequenceParameterSet = NULL; + yang_MP4AddH264PictureParameterSet = NULL; + yang_MP4WriteSample = NULL; + yang_MP4Close = NULL; +} +YangMp4File::~YangMp4File(void) { + closeMp4(); + unloadLib(); + m_lib.unloadObject(); +} + +YangMp4File::YangMp4File(char *fileName, YangVideoInfo *pcontext) { + m_context = pcontext; + m_MP4hFile = MP4_INVALID_FILE_HANDLE; + m_mp4Audiotrack = 0; + m_mp4Videotrack = 0; + m_ntracks = 0, m_trackno = 0; + m_ndiscs = 0, m_discno = 0; + m_newtick = 0; + m_oldtick = 0; + m_newalltick = 0, m_oldalltick = 0; + m_tmptick = 0; + m_tick = 0; + m_cou = 0; + m_interval = 2 * (1000 / m_context->frame) * 90; + m_interval1 = 2 * (1000 / m_context->frame); + + m_artist = NULL; + + m_total_samples = 0; + m_encoded_samples = 0; + m_delay_samples = 0; + m_frameSize = 0; + //hEncoder=NULL; + + yang_trace("\ncreate mp4 file======%s", fileName); + m_lib.loadObject("libmp4v2"); + loadLib(); + //MP4Create + m_MP4hFile = yang_MP4Create(fileName, 0); + +} +void YangMp4File::init(uint8_t *p_spsBuf, int32_t p_spsLen) { + + const MP4Tags *tags = yang_MP4TagsAlloc(); + yang_MP4TagsFetch(tags, m_MP4hFile); + yang_MP4TagsStore(tags, m_MP4hFile); + yang_MP4SetTimeScale(m_MP4hFile, 90000); + m_mp4Audiotrack = yang_MP4AddAudioTrack(m_MP4hFile, 44100, 1024,MP4_MPEG4_AUDIO_TYPE); + // MP4SetAudioProfileLevel(MP4hFile, 0x0F); + //mp4Videotrack = MP4AddH264VideoTrack(MP4hFile, 90000, 90000/config.Frame_Num, config.Video_Width,config.Video_Height,0x42,0xc0,0x1f,3); + //MP4AddH265VideoTrack + if(m_context->videoEncoderType==1){ + //printf("\n**********MP4AddH265VideoTrack***********\n"); + m_mp4Videotrack = yang_MP4AddH265VideoTrack(m_MP4hFile, 90000, -1, + m_context->width, m_context->height, 0); + + }else{ + m_mp4Videotrack = yang_MP4AddH264VideoTrack(m_MP4hFile, 90000, -1, + m_context->width, m_context->height, //0x42, 0xc0, 0x1f, 3); + *(p_spsBuf+1), *(p_spsBuf+2), *(p_spsBuf+3), 3); + } + m_frameSize = 1024; + m_delay_samples = 1024; + if(tags) yang_MP4TagsFree(tags); + tags=NULL; + + +} +void YangMp4File::WriteVideoInfo(uint8_t *p_vpsBuf,int32_t p_vpsLen,uint8_t *p_spsBuf, int32_t p_spsLen,uint8_t *p_ppsBuf, int32_t p_ppsLen) { + //for(int32_t i=0;ivideoEncoderType==1){ + //MP4AddH265VideoParameterSet + //MP4SetVideoProfileLevel + // yang_MP4SetVideoProfileLevel(m_MP4hFile, *(p_spsBuf));//0x7f); + //printf("\n****************MP4AddH265VideoParameterSet**************************\n");0x08 + + yang_MP4SetVideoProfileLevel(m_MP4hFile, 0x08);//0x7f); + yang_MP4AddH265VideoParameterSet(m_MP4hFile, m_mp4Videotrack, p_vpsBuf,p_vpsLen); + yang_MP4AddH265SequenceParameterSet(m_MP4hFile, m_mp4Videotrack, p_spsBuf,p_spsLen); + yang_MP4AddH265PictureParameterSet(m_MP4hFile, m_mp4Videotrack, p_ppsBuf, p_ppsLen); + yang_MP4SetTrackTimeScale(m_MP4hFile, m_mp4Videotrack,90000); + //printf("\nvpsLen=%d,spsLen=%d,ppsLen=%d",p_vpsLen,p_spsLen,p_ppsLen); + + }else{ + 
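		// H.264 path: in an SPS NALU the byte after the NAL header is
		// profile_idc, then the constraint flags, then level_idc. That is why
		// init() passed *(p_spsBuf+1), *(p_spsBuf+2), *(p_spsBuf+3) to
		// MP4AddH264VideoTrack, and why the profile level set below reuses
		// *(p_spsBuf+1).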
yang_MP4SetVideoProfileLevel(m_MP4hFile, *(p_spsBuf+1));//0x7f); + yang_MP4AddH264SequenceParameterSet(m_MP4hFile, m_mp4Videotrack, p_spsBuf,p_spsLen); + yang_MP4AddH264PictureParameterSet(m_MP4hFile, m_mp4Videotrack, p_ppsBuf, p_ppsLen); + } + //printf("\n1****************timescale====%d**************************\n",yang_MP4GetTrackTimeScale(m_MP4hFile, m_mp4Videotrack)); + +} +void YangMp4File::WriteAudioInfo(uint8_t *pasc, unsigned long pasclen,uint8_t *buf1, int32_t buflen) { + yang_MP4SetAudioProfileLevel(m_MP4hFile, 0x0f); + yang_MP4SetTrackESConfiguration(m_MP4hFile, m_mp4Audiotrack, pasc, pasclen); +} +int32_t YangMp4File::WriteAudioPacket(YangFrame* audioFrame) { + yang_MP4WriteSample(m_MP4hFile, m_mp4Audiotrack, audioFrame->payload, audioFrame->nb, -1, 0, 0); + m_encoded_samples += m_frameSize; + return 1; +} + +int32_t YangMp4File::WriteVideoPacket(YangFrame* vidoeFrame) { + + yang_MP4WriteSample(m_MP4hFile, m_mp4Videotrack, vidoeFrame->payload, vidoeFrame->nb, vidoeFrame->pts, 0, 0); + + return 1; +} + +void YangMp4File::closeMp4() { + if (m_MP4hFile != NULL) + yang_MP4Close(m_MP4hFile, 0); + m_MP4hFile = NULL; +} diff --git a/libmetartc3/src/yangrecord/YangMp4FileApp.cpp b/libmetartc3/src/yangrecord/YangMp4FileApp.cpp new file mode 100755 index 00000000..c130df2e --- /dev/null +++ b/libmetartc3/src/yangrecord/YangMp4FileApp.cpp @@ -0,0 +1,76 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include + +#include + + + +YangMp4FileApp::YangMp4FileApp(YangAudioInfo *paudio,YangVideoInfo *pvideo,YangVideoEncInfo *penc) { + m_audio=paudio; + m_video=pvideo; + m_encPara=penc; + m_enc=NULL; + m_rec=NULL; + m_isPause=0; +} + +YangMp4FileApp::~YangMp4FileApp() { + m_audio=NULL; + m_video=NULL; + m_enc=NULL; + yang_delete(m_enc); + yang_delete(m_rec); +} +void YangMp4FileApp::init(){ + + if(!m_enc) { + m_enc=new YangRecEncoder(m_audio,m_video,m_encPara); + m_enc->initAudioEncoder(); + m_enc->initVideoEncoder(); + //m_enc->setInAudioBuffer(m_cap->m_out_al); + //m_enc->setInVideoBuffer(m_cap->m_out_vl); + } + if(!m_rec) { + m_rec=new YangRecordMp4(m_audio,m_video,m_encPara); + m_rec->setInAudioBuffer(m_enc->getOutAudioBuffer()); + m_rec->setInVideoBuffer(m_enc->getOutVideoBuffer()); + } +} +void YangMp4FileApp::setFileTimeLen(int32_t ptlen_min){ + if(m_rec) m_rec->setFileTimeLen(ptlen_min); +} +void YangMp4FileApp::setInAudioBuffer(YangAudioBuffer *pbuf){ + if(m_enc) m_enc->setInAudioBuffer(pbuf); +} + void YangMp4FileApp::setInVideoBuffer(YangVideoBuffer *pbuf){ + if(m_enc) m_enc->setInVideoBuffer(pbuf); + } + void YangMp4FileApp::pauseRecord(){ + if(m_rec) m_rec->pauseRec(); + m_isPause=1; + } + void YangMp4FileApp::resumeRecord(){ + if(!m_isPause) return; + if(m_rec) m_rec->resumeRec(); + m_isPause=0; + } +void YangMp4FileApp::startRecordMp4(char *filename0,int32_t p_module,int32_t p_isMp4) +{ + + if(m_rec){ + m_rec->initPara(m_enc->getOutVideoMetaData(),filename0,1); + m_rec->start(); + + } + if(m_enc) { + m_enc->startAudioEncoder(); + m_enc->startVideoEncoder(); + } + +} +void YangMp4FileApp::stopRecordMp4(){ + m_rec->stop(); + yang_sleep(1); +} diff --git a/libmetartc3/src/yangrecord/YangRecEncoder.cpp b/libmetartc3/src/yangrecord/YangRecEncoder.cpp new file mode 100755 index 00000000..8933aec7 --- /dev/null +++ b/libmetartc3/src/yangrecord/YangRecEncoder.cpp @@ -0,0 +1,105 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +//include + + +#include "pthread.h" +#include +#include +#include + + +YangRecEncoder::YangRecEncoder(YangAudioInfo *paudio, YangVideoInfo 
*pvideo, + YangVideoEncInfo *penc) { + m_ve=NULL; + m_ae=NULL; + m_audio = paudio; + m_video = pvideo; + m_enc = penc; + m_vmd = NULL; + m_out_videoBuffer = NULL; + m_out_auidoBuffer = NULL; + +} + +YangRecEncoder::~YangRecEncoder() { + if(m_ae) m_ae->stop(); + if(m_ve) m_ve->stop(); + if(m_ae){ + while(m_ae->m_isStart) yang_usleep(500); + } + if(m_ve){ + while(m_ve->m_isStart) yang_usleep(500); + } + yang_delete(m_ae); + yang_delete(m_ve); + m_audio = NULL; + m_video = NULL; + m_enc = NULL; + yang_free(m_vmd); + yang_delete(m_out_videoBuffer); //=NULL; + yang_delete(m_out_auidoBuffer); //=NULL; +} +YangVideoMeta* YangRecEncoder::getOutVideoMetaData() { + return m_vmd; +} + +void YangRecEncoder::initAudioEncoder() { + if (m_out_auidoBuffer == NULL) + m_out_auidoBuffer = new YangAudioEncoderBuffer(m_audio->audioCacheNum); + if (m_ae == NULL) { + // YangEncoderFactory yf; + m_ae = new YangAudioEncoderHandle(m_audio); + m_ae->setOutAudioBuffer(m_out_auidoBuffer); + m_ae->init(); + } + +} +void YangRecEncoder::initVideoEncoder() { + if (m_out_videoBuffer == NULL) + m_out_videoBuffer = new YangVideoEncoderBuffer(m_video->videoCacheNum); + if (m_vmd == NULL) + m_vmd = (YangVideoMeta*) calloc(1, sizeof(YangVideoMeta)); + YangEncoderFactory fac; + YangVideoEncoderMeta *yvh = fac.createVideoEncoderMeta(m_video); + yvh->yang_initVmd(m_vmd, m_video, m_enc); + yang_delete(yvh); + if (m_ve == NULL) { + // YangEncoderFactory yf; + m_ve = new YangVideoEncoderHandle(m_video, m_enc); + m_ve->setOutVideoBuffer(m_out_videoBuffer); + m_ve->init(); + m_ve->setVideoMetaData(m_vmd); + } +} +void YangRecEncoder::startAudioEncoder() { + if (m_ae && !m_ae->m_isStart) { + m_ae->start(); + yang_usleep(1000); + } +} +void YangRecEncoder::startVideoEncoder() { + if (m_ve && !m_ve->m_isStart) { + m_ve->start(); + yang_usleep(2000); + } +} +void YangRecEncoder::setInAudioBuffer(YangAudioBuffer *pbuf) { + if (m_ae != NULL) + m_ae->setInAudioBuffer(pbuf); +} +void YangRecEncoder::setInVideoBuffer(YangVideoBuffer *pbuf) { + //printf("\n.......................%u\n",m_ve); + if (m_ve != NULL) + m_ve->setInVideoBuffer(pbuf); +} +YangAudioEncoderBuffer* YangRecEncoder::getOutAudioBuffer() { + return m_out_auidoBuffer; +} +YangVideoEncoderBuffer* YangRecEncoder::getOutVideoBuffer() { + return m_out_videoBuffer; +} + diff --git a/libmetartc3/src/yangrecord/YangRecord.cpp b/libmetartc3/src/yangrecord/YangRecord.cpp new file mode 100755 index 00000000..9747e1d7 --- /dev/null +++ b/libmetartc3/src/yangrecord/YangRecord.cpp @@ -0,0 +1,192 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include +#include +#include +#include +#include +#include + +YangRecord::YangRecord(YangAudioInfo *paudio,YangVideoInfo *pvideo,YangVideoEncInfo *penc) { + m_audio=paudio; + m_video=pvideo; + m_enc=penc; + videoDestLen = 0; + preVideotimestamp = 0; + minusStamp = 0; + oldalltick = 0, alltick = 0; + isMp4 = 1; + curVideoTimestamp = 0; + basestamp=0; + m_prePauseTime=0,m_afterPauseTime=0; + m_pauseTime=0; + m_fileId=0; + m_alltime=0; + m_alltime1=0; + m_fileTimeLen=3600; + m_isCreateNewFile=0; + memset(&m_mp4Para,0,sizeof(m_mp4Para)); + mp4 = NULL; + flv = NULL; + +} + +YangRecord::~YangRecord(void) { + m_audio=NULL; + m_video=NULL; + m_enc=NULL; + yang_free(m_mp4Para.fileName); + m_mp4Para.vmd=NULL; + yang_delete(mp4); + yang_delete(flv); +} + + +void YangRecord::writeAudioData(YangFrame* audioFrame) { + if (audioFrame->nb < 0 || audioFrame->nb > 1000) return; + if (isMp4){ + mp4->WriteAudioPacket(audioFrame); + } + else + 
flv->WriteAudioPacket(audioFrame); +} + + +void YangRecord::writeVideoData(YangFrame* videoFrame) { + if(m_alltime>m_fileTimeLen){ + m_isCreateNewFile=1; + m_alltime=0; + m_alltime1=0; + } + if(m_isCreateNewFile&&videoFrame->frametype){ + // int64_t t1=m_time.getMilliTick(); + createNewfile(); + // int64_t t2=m_time.getMilliTick()-t1; + // printf("\n**************************differ==================%lld",t2); + m_isCreateNewFile=0; + m_alltime=0; + m_alltime1=0; + } + if (basestamp == 0) basestamp = videoFrame->pts; + if (isMp4) { + alltick = (videoFrame->pts - basestamp-m_pauseTime) * 9 / 100; + //if(m_pauseTime) printf("%lld-%lld-%lld,",videoTimestamp,basestamp,m_pauseTime); + minusStamp = alltick - oldalltick; + m_alltime1+=minusStamp; + m_alltime=m_alltime1/90000; + //printf("v%lld,",videoTimestamp); + yang_put_be32((char*)videoFrame->payload,videoFrame->nb); + videoFrame->nb+=4; + videoFrame->pts=minusStamp; + mp4->WriteVideoPacket(videoFrame); + //oldalltick += minusStamp; + oldalltick=alltick; + } else { + curVideoTimestamp = (videoFrame->pts - basestamp-m_pauseTime) / 1000; + m_alltime=curVideoTimestamp/1000; + //curVideoTimestamp = (videoTimestamp - basestamp-m_pauseTime) / 100; + videoFrame->pts=curVideoTimestamp; + flv->WriteVideoPacket(videoFrame); + } + +} + + +void YangRecord::pauseRec(){ + m_prePauseTime=yang_get_milli_tick();//m_time.getMilliTick(); +} + void YangRecord::resumeRec(){ + + m_afterPauseTime=yang_get_milli_tick();//m_time.getMilliTick(); + m_pauseTime+=m_afterPauseTime-m_prePauseTime; + //printf("") + } + void YangRecord::initRecPara(){ + //videoTimestamp = 0; + //videoBufLen = 0; + preVideotimestamp = 0; + minusStamp = 0; + oldalltick = 0, alltick = 0; + curVideoTimestamp = 0; + basestamp=0; + m_prePauseTime=0,m_afterPauseTime=0; + m_pauseTime=0; + //if(m_in_audioBuffer) m_in_audioBuffer->resetIndex(); + //if(m_in_videoBuffer) m_in_videoBuffer->resetIndex(); + } + void YangRecord::createNewfile(){ + closeRec(); + char filename1[128]; + memset(filename1,0,128); + memcpy(filename1,m_mp4Para.fileName,strlen(m_mp4Para.fileName)-4); + m_fileId++; + char filename[255]; + memset(filename,0,255); + sprintf(filename,"%s_%d.%s",filename1,m_fileId,isMp4?"mp4":"flv"); + createFile(filename); + initRecPara(); + + } + void YangRecord::createFile(char* filename){ + if (isMp4) { + mp4 = new YangMp4File(filename, m_video); + mp4->init(m_mp4Para.vmd->mp4Meta.sps, m_mp4Para.vmd->mp4Meta.spsLen); + uint8_t pasc[1024]; + int32_t pasclen=0; + YangAudioEncoderMeta yem; + yem.createMeta(pasc,&pasclen); + memset(m_mp4Para.asc,0,10); + memcpy(m_mp4Para.asc,pasc,pasclen); + m_mp4Para.ascLen=pasclen; + //printf("\n*************************aselen==%d\n",pasclen); + //for(int32_t i=0;iWriteAudioInfo(pasc,pasclen,NULL, 0); + mp4->WriteVideoInfo(m_mp4Para.vmd->mp4Meta.vps, m_mp4Para.vmd->mp4Meta.vpsLen,m_mp4Para.vmd->mp4Meta.sps, m_mp4Para.vmd->mp4Meta.spsLen, + m_mp4Para.vmd->mp4Meta.pps, m_mp4Para.vmd->mp4Meta.ppsLen); + } else { + flv = new YangFlvWriter(filename, m_audio,m_video); + flv->framerate = m_video->frame; + flv->i_bitrate = (double)m_video->rate; + flv->i_level_idc = 31; + flv->Start(); + + flv->WriteVideoInfo(m_mp4Para.vmd->livingMeta.buffer, m_mp4Para.vmd->livingMeta.bufLen); + //flv->WriteAudioHeadPacket(); + + } + + } +void YangRecord::initPara(YangVideoMeta *p_vmd,char *filename, int32_t p_isMp4) { + //printf("\n*************filename===%s",filename); + isMp4 = p_isMp4; + m_mp4Para.vmd=p_vmd; + int32_t flen=strlen(filename); + m_mp4Para.fileName=(char*)malloc(flen+1); + 
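	// The stored copy keeps its ".mp4"/".flv" suffix on purpose:
	// createNewfile() above strips the last four characters and appends
	// "_<fileId>.<ext>" when a segment reaches m_fileTimeLen. For example
	// (hypothetical name), initPara(vmd, "rec.mp4", 1) would roll over to
	// "rec_1.mp4" after the first segment.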
memset(m_mp4Para.fileName,0,flen+1); + memcpy(m_mp4Para.fileName,filename,flen); + createFile(filename); + +} +void YangRecord::setFileTimeLen(int32_t ptlen_min){ + m_fileTimeLen=ptlen_min*60; +} +void YangRecord::closeRec() { + if (mp4 != NULL) { + mp4->closeMp4(); + printf("................mp4 is closed!\n"); + yang_delete(mp4); + } + if (flv != NULL) { + flv->Close(); + printf("................flv is closed!\n"); + yang_delete(flv); + } + +} + + + + diff --git a/libmetartc3/src/yangrecord/YangRecordApp.cpp b/libmetartc3/src/yangrecord/YangRecordApp.cpp new file mode 100755 index 00000000..b42b1ec9 --- /dev/null +++ b/libmetartc3/src/yangrecord/YangRecordApp.cpp @@ -0,0 +1,67 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include +#include + +YangRecordApp::YangRecordApp(YangContext* pcontext) { + m_context=pcontext; + m_context->avinfo.video.videoCacheNum*=2; + m_context->avinfo.audio.audioCacheNum*=2; + m_cap=NULL; + m_rec=NULL; + +} + +YangRecordApp::~YangRecordApp() { + + yang_delete(m_cap); + yang_delete(m_rec); + m_context=NULL; +} + + +void YangRecordApp::init(){ + if(!m_cap){ + m_cap=new YangRecordCapture(m_context); + m_cap->initAudio(NULL); + m_cap->initVideo(); + m_cap->startAudioCapture(); + m_cap->startVideoCapture(); + } + if(!m_rec){ + m_rec=new YangMp4FileApp(&m_context->avinfo.audio,&m_context->avinfo.video,&m_context->avinfo.enc); + m_rec->init(); + m_rec->setInAudioBuffer(m_cap->getOutAudioBuffer()); + m_rec->setInVideoBuffer(m_cap->getOutVideoBuffer()); + //m_rec->setFileTimeLen(1); + } +} +void YangRecordApp::pauseRecord(){ + if(m_cap) m_cap->startPauseCaptureState(); + if(m_rec) m_rec->pauseRecord(); +} +void YangRecordApp::resumeRecord(){ + if(m_rec) m_rec->resumeRecord(); + if(m_cap) m_cap->stopPauseCaptureState(); + +} +void YangRecordApp::recordFile(char* filename){ + m_rec->startRecordMp4(filename,1,1); + yang_sleep(1); + m_cap->startVideoCaptureState(); + m_cap->startAudioCaptureState(); +} +void YangRecordApp::stopRecord(){ + m_cap->stopAudioCaptureState(); + m_cap->stopVideoCaptureState(); + m_cap->m_audioCapture->stop(); + m_cap->m_videoCapture->stop(); + m_rec->m_enc->m_ae->stop(); + m_rec->m_enc->m_ve->stop(); + //m_rec->m_enc-> + yang_sleep(1); + + m_rec->stopRecordMp4(); +} diff --git a/libmetartc3/src/yangrecord/YangRecordCapture.cpp b/libmetartc3/src/yangrecord/YangRecordCapture.cpp new file mode 100755 index 00000000..b8ec53ff --- /dev/null +++ b/libmetartc3/src/yangrecord/YangRecordCapture.cpp @@ -0,0 +1,337 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +//#include "yangavutil/video/YangMatImage.h" +//#include "yangavutil/vr/YangMatImageCv.h" +#include "yangavutil/video/YangYuvConvert.h" +#include "yangavutil/video/YangYuvUtil.h" +#include "yangavutil/video/YangBmp.h" +#include + +YangRecordCapture::YangRecordCapture(YangContext* pcontext) { + //m_audio = paudio; + //m_video = pvideo; + m_context=pcontext; + m_audioCapture = NULL; + m_videoCapture = NULL; + m_screenCapture = NULL; + + m_out_audioBuffer = NULL; + m_out_videoBuffer = NULL; + m_pre_videoBuffer = new YangVideoBuffer(m_context->avinfo.video.width, m_context->avinfo.video.height, m_context->avinfo.video.videoEncoderFormat, + m_context->avinfo.video.bitDepth == 8 ? 
1 : 2); + m_vr_videoBuffer = NULL; + m_screen_videoBuffer = NULL; + m_pre_videoBuffer->isPreview = 1; + m_isStart = 0; + m_isConvert = 0; + m_isScreen = 0; + +} + +YangRecordCapture::~YangRecordCapture() { + if (m_audioCapture) + m_audioCapture->stop(); + if (m_videoCapture) + m_videoCapture->stop(); + + if (m_audioCapture) { + while (m_audioCapture->m_isStart) { + yang_usleep(1000); + } + } + if (m_videoCapture) { + while (m_videoCapture->m_isStart) { + yang_usleep(1000); + } + } + m_context=NULL; + yang_delete(m_pre_videoBuffer); + yang_delete(m_out_videoBuffer); + yang_delete(m_out_audioBuffer); + //yang_delete(m_aecPlay_al); + //yang_usleep(5000); + + yang_delete(m_audioCapture); + yang_delete(m_videoCapture); + +} +void YangRecordCapture::stop() { + stopLoop(); +} + +void YangRecordCapture::run() { + m_isStart = 1; + if (m_isScreen) + startScreenLoop(); + else + startVrLoop(); + m_isStart = 0; +} +void YangRecordCapture::stopLoop() { + m_isConvert = 0; +} + +void YangRecordCapture::startAudioCaptureState() { + m_audioCapture->setCatureStart(); +} +void YangRecordCapture::startVideoCaptureState() { + m_videoCapture->initstamp(); + m_videoCapture->setVideoCaptureStart(); +} +void YangRecordCapture::startPauseCaptureState() { + m_audioCapture->setCatureStop(); + m_videoCapture->setVideoCaptureStop(); + +} +void YangRecordCapture::stopPauseCaptureState() { + m_videoCapture->setVideoCaptureStart(); + m_audioCapture->setCatureStart(); +} +void YangRecordCapture::stopAudioCaptureState() { + m_audioCapture->setCatureStop(); +} +void YangRecordCapture::stopVideoCaptureState() { + m_videoCapture->setVideoCaptureStop(); + +} + +void YangRecordCapture::initAudio(YangPreProcess *pp) { + if (m_out_audioBuffer == NULL) { + if (m_context->avinfo.audio.usingMono) + m_out_audioBuffer = new YangAudioBuffer(m_context->avinfo.audio.audioCacheNum); + else + m_out_audioBuffer = new YangAudioBuffer(m_context->avinfo.audio.audioCacheNum); + } + if (m_audioCapture == NULL) { + m_audioCapture = m_capture.createRecordAudioCapture(m_context); //.createAudioCapture(m_context);//new YangAudioCapture(m_context); + m_audioCapture->setPreProcess(pp); + m_audioCapture->setOutAudioBuffer(m_out_audioBuffer); + m_audioCapture->init(); + m_audioCapture->setCatureStop(); + } + stopAudioCaptureState(); +} +void YangRecordCapture::initVideo() { + if (m_out_videoBuffer == NULL) + m_out_videoBuffer = new YangVideoBuffer(m_context->avinfo.video.width, m_context->avinfo.video.height, + m_context->avinfo.video.videoEncoderFormat, m_context->avinfo.video.bitDepth == 8 ? 
1 : 2); + if (m_videoCapture == NULL) { + m_videoCapture = m_capture.createVideoCapture(&m_context->avinfo.video); //new YangVideoCapture(m_context); + m_videoCapture->setOutVideoBuffer(m_out_videoBuffer); + m_videoCapture->setPreVideoBuffer(m_pre_videoBuffer); + m_videoCapture->setVideoCaptureStart(); + m_videoCapture->init(); + } + stopVideoCaptureState(); +} +void YangRecordCapture::startAudioCapture() { + if (m_audioCapture && !m_audioCapture->m_isStart) + m_audioCapture->start(); +} +void YangRecordCapture::startVideoCapture() { + if (m_videoCapture && !m_videoCapture->m_isStart) + m_videoCapture->start(); +} +void YangRecordCapture::stopAudioCapture() { + if (m_audioCapture) + m_audioCapture->stop(); +} +void YangRecordCapture::stopVideoCapture() { + if (m_videoCapture) + m_videoCapture->stop(); +} +YangAudioBuffer* YangRecordCapture::getOutAudioBuffer() { + return m_out_audioBuffer; +} +YangVideoBuffer* YangRecordCapture::getOutVideoBuffer() { + return m_out_videoBuffer; +} + +YangVideoBuffer* YangRecordCapture::getPreVideoBuffer() { + return m_pre_videoBuffer; +} +void YangRecordCapture::setInAudioBuffer(vector *pbuf) { + if (m_audioCapture != NULL) + m_audioCapture->setInAudioBuffer(pbuf); +} +void YangRecordCapture::setAec(YangRtcAec *paec) { + if (m_audioCapture) { + m_audioCapture->setAec(paec); + // if(m_aecPlay_al==NULL){ + // m_aecPlay_al=new YangAudioBuffer(m_audio->usingMono?640:4096,m_audio->audioCacheNum); + // if(paec!=NULL){ + // m_ac->setAec(paec); + // m_ac->setPlayAudoBuffer(m_aecPlay_al); + // paec->setPlayBuffer(m_aecPlay_al); + // } + // } + } +} +void YangRecordCapture::startVr(char *pbg) { + m_bgFileName = string(pbg); + if (m_vr_videoBuffer == NULL) + m_vr_videoBuffer = new YangVideoBuffer(m_context->avinfo.video.width,m_context->avinfo.video.height, + m_context->avinfo.video.videoEncoderFormat, m_context->avinfo.video.bitDepth == 8 ? 1 : 2); + if (m_videoCapture) { + m_videoCapture->setPreVideoBuffer(m_vr_videoBuffer); + m_videoCapture->setOutVideoBuffer(NULL); + } + yang_reindex(m_pre_videoBuffer); + yang_reindex(m_out_videoBuffer); + start(); +} + +void YangRecordCapture::stopVr() { + stop(); + yang_reindex(m_pre_videoBuffer); + yang_reindex(m_out_videoBuffer); + if (m_videoCapture) { + m_videoCapture->setPreVideoBuffer(m_pre_videoBuffer); + m_videoCapture->setOutVideoBuffer(m_out_videoBuffer); + } + +} + +void YangRecordCapture::startScreen() { + m_isScreen = 1; + if (m_screen_videoBuffer == NULL) + m_screen_videoBuffer = new YangVideoBuffer(m_context->avinfo.video.width, + m_context->avinfo.video.height, m_context->avinfo.video.videoEncoderFormat, m_context->avinfo.video.bitDepth == 8 ? 1 : 2); + if (m_videoCapture) { + m_videoCapture->setPreVideoBuffer(NULL); + m_videoCapture->setOutVideoBuffer(NULL); + } + + if (m_screenCapture) { + //m_screenCapture->setPreVideoBuffer(m_screen_videoBuffer); + //m_screenCapture->setOutVideoBuffer(m_screen_videoBuffer); + } + yang_reindex(m_pre_videoBuffer); + yang_reindex(m_screen_videoBuffer); + start(); +} +void YangRecordCapture::stopScreen() { + m_isScreen = 0; + if (m_videoCapture) { + m_videoCapture->setPreVideoBuffer(m_pre_videoBuffer); + m_videoCapture->setOutVideoBuffer(m_out_videoBuffer); + } +} + +void YangRecordCapture::startVrLoop() { + /** + printf("\n***************start vr..................."); + m_isConvert = 1; + int32_t inWidth = m_video->width; + int32_t inHeight = m_video->height; + //int32_t is12bits = m_video->videoCaptureFormat > 0 ? 
1 : 0; + + //int64_t videoTimestamp = 0, + int64_t prestamp = 0; + //int64_t videoTimestamp1 = 0; + long az = inWidth * inHeight * 2; + uint8_t *srcData=new uint8_t[az];// { 0 }; + //if (is12bits) + az = inWidth * inHeight * 3 / 2; + + YangMatImageCv *mat = new YangMatImageCv(); //mf.getYangMatImage(); + + uint8_t *matDst=new uint8_t[m_video->width * m_video->height * 2]; + uint8_t *matSrcRgb=new uint8_t [m_video->width * m_video->height * 3]; + uint8_t *matSrcBgr=new uint8_t [m_video->width * m_video->height * 3]; + YangYuvConvert yuv; + + if (mat) + mat->initImg((char*) m_bgFileName.c_str(), m_video->width,m_video->height, 3); + //yang_rgbtobgr(mat->m_bkData, matSrcBgr, inWidth, inHeight); + //yuv.rgb24toI420(matSrcBgr, matDst, m_video->width, m_video->height); + //YangBmp bmp; + //bmp.create_bmpheader(inWidth,inHeight); + //yang_rgbtobgr(mat->m_bkData,matSrcBgr,inWidth,inHeight); + //bmp.save_bmp("/home/yang/test.bmp",(char*)matSrcBgr,inHeight*inWidth*3); + + // printf("\n***********************vr capture is starting...***********************\n"); + * + YangFrame videoFrame; +memset(&videoFrame,0,sizeof(YangFrame)); + while (m_isConvert == 1) { + if (mat) { + if (m_vr_videoBuffer->size() == 0) { + yang_usleep(1000); + continue; + } + videoFrame.payload=srcData; + videoFrame.nb=az; + m_vr_videoBuffer->getVideo(&videoFrame); + yuv.I420torgb24(srcData, matSrcRgb, inWidth, inHeight); + yang_rgbtobgr(matSrcRgb, matSrcBgr, inWidth, inHeight); + mat->matImage(matSrcBgr, matDst); + + } + if (videoFrame.timestamp - prestamp <= 0) { + prestamp = videoFrame.timestamp; + continue; + } + + prestamp = videoFrame.timestamp; + videoFrame.payload=matDst; + videoFrame.nb=az; + if (m_videoCapture->getVideoCaptureState()) m_out_videoBuffer->putVideo(&videoFrame); + m_pre_videoBuffer->putVideo(&videoFrame); + } + //if (videoTimestamp - prestamp <= 0) continue; + //prestamp = videoTimestamp; + + yang_delete(mat); + yang_deleteA(srcData); + yang_deleteA(matDst); + yang_deleteA(matSrcRgb); + yang_deleteA(matSrcBgr); + **/ +} +void YangRecordCapture::startScreenLoop() { + /** + + m_isConvert = 1; + int32_t inWidth = m_video->width; + int32_t inHeight = m_video->height; + //int32_t is12bits = m_video->videoCaptureFormat > 0 ? 
1 : 0; + + int64_t videoTimestamp = 0, prestamp = 0; + //int64_t videoTimestamp1 = 0; + long az = inWidth * inHeight * 2; + uint8_t srcData[az] = { 0 }; + //if (is12bits) + az = inWidth * inHeight * 3 / 2; + + YangMatImageCv *mat = new YangMatImageCv(); //mf.getYangMatImage(); + uint8_t matDst[m_video->width * m_video->height * 2]; + YangYuvConvert yuv; + + while (m_isConvert == 1) { + + if (m_screen_videoBuffer->size() == 0) { + yang_usleep(1000); + continue; + } + m_screen_videoBuffer->getVideo(srcData, az, &videoTimestamp); + + if (videoTimestamp - prestamp <= 0) { + prestamp = videoTimestamp; + continue; + } + + prestamp = videoTimestamp; + + if (m_videoCapture->getVideoCaptureState()) + m_out_videoBuffer->putVideo(matDst, az, videoTimestamp); + m_pre_videoBuffer->putVideo(matDst, az, videoTimestamp); + } + //if (videoTimestamp - prestamp <= 0) continue; + //prestamp = videoTimestamp; + yang_delete(mat); + **/ +} + diff --git a/libmetartc3/src/yangrecord/YangRecordHandle.cpp b/libmetartc3/src/yangrecord/YangRecordHandle.cpp new file mode 100755 index 00000000..d73e5ba0 --- /dev/null +++ b/libmetartc3/src/yangrecord/YangRecordHandle.cpp @@ -0,0 +1,63 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include +#include + +YangRecordHandle::YangRecordHandle(YangContext* pcontext) { + m_context=pcontext; + m_context->avinfo.video.videoCacheNum*=2; + m_context->avinfo.audio.audioCacheNum*=2; + m_cap=NULL; + m_rec=NULL; + +} + +YangRecordHandle::~YangRecordHandle() { + m_cap=NULL; + //yang_delete(m_cap); + yang_delete(m_rec); + m_context=NULL; +} + + +void YangRecordHandle::init(YangPushCapture* pcap){ + m_cap=pcap; + if(!m_rec){ + m_rec=new YangMp4FileApp(&m_context->avinfo.audio,&m_context->avinfo.video,&m_context->avinfo.enc); + m_rec->init(); + m_rec->setInAudioBuffer(m_cap->getOutAudioBuffer()); + m_rec->setInVideoBuffer(m_cap->getOutVideoBuffer()); + //m_rec->setFileTimeLen(1); + } +} +/** +void YangRecordHandle::pauseRecord(){ + if(m_cap) m_cap->startPauseCaptureState(); + if(m_rec) m_rec->pauseRecord(); +} +void YangRecordHandle::resumeRecord(){ + if(m_rec) m_rec->resumeRecord(); + if(m_cap) m_cap->stopPauseCaptureState(); + +} + +**/ +void YangRecordHandle::recordFile(char* filename){ + m_rec->startRecordMp4(filename,1,1); + yang_sleep(1); + //m_cap->startVideoCaptureState(); + //m_cap->startAudioCaptureState(); +} +void YangRecordHandle::stopRecord(){ + m_cap->stopAudioCaptureState(); + m_cap->stopVideoCaptureState(); +// m_cap->stopAll(); + m_rec->m_enc->m_ae->stop(); + m_rec->m_enc->m_ve->stop(); + //m_rec->m_enc-> + yang_sleep(1); + + m_rec->stopRecordMp4(); +} diff --git a/libmetartc3/src/yangrecord/YangRecordHandle.h b/libmetartc3/src/yangrecord/YangRecordHandle.h new file mode 100755 index 00000000..7c8ec075 --- /dev/null +++ b/libmetartc3/src/yangrecord/YangRecordHandle.h @@ -0,0 +1,29 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGRECORD_INCLUDE_YangRecordHandle_H_ +#define YANGRECORD_INCLUDE_YangRecordHandle_H_ +#include +#include +#include +#include + +class YangRecordHandle { +public: + YangRecordHandle(YangContext* pcontext); + virtual ~YangRecordHandle(); + YangPushCapture *m_cap; + YangMp4FileApp *m_rec; + void init(YangPushCapture* pcap); + void recordFile(char* filename); + void stopRecord(); + //void pauseRecord(); + //void resumeRecord(); + +private: + YangContext* m_context; + + +}; + +#endif /* YANGRECORD_INCLUDE_YangRecordHandle_H_ */ diff --git a/libmetartc3/src/yangrecord/YangRecordMp4.cpp b/libmetartc3/src/yangrecord/YangRecordMp4.cpp 
new file mode 100755 index 00000000..4b36f2c0 --- /dev/null +++ b/libmetartc3/src/yangrecord/YangRecordMp4.cpp @@ -0,0 +1,263 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#include "yangutil/sys/YangLog.h" +#include "time.h" +#include "yangutil/yang_unistd.h" +YangRecordMp4::YangRecordMp4(YangAudioInfo *paudio,YangVideoInfo *pvideo,YangVideoEncInfo *penc) { + m_audio=paudio; + m_video=pvideo; + m_enc=penc; + m_isConvert = 1; + m_isStart=0; + videoDestLen = 0; + frameType = 1; + //audioBufLen = 0; + m_videoTimestamp = 0; + //videoBufLen = 0; + m_preVideotimestamp = 0; + m_mp4Stamp = 0; + oldalltick = 0, alltick = 0; + isMp4 = 1; + curVideoTimestamp = 0; + m_startStamp=0; + m_prePauseTime=0,m_afterPauseTime=0; + m_pauseTime=0; + m_fileId=0; + m_alltime=0; + m_alltime1=0; + m_fileTimeLen=3600; + m_isCreateNewFile=0; + memset(&m_mp4Para,0,sizeof(m_mp4Para)); + srcVideoSource=NULL; + srcAudioSource=NULL; + m_in_audioBuffer=NULL; + m_in_videoBuffer=NULL; + mp4 = NULL; + flv = NULL; + memset(&m_audioFrame,0,sizeof(YangFrame)); + memset(&m_videoFrame,0,sizeof(YangFrame)); + +} + +YangRecordMp4::~YangRecordMp4(void) { + m_audio=NULL; + m_video=NULL; + m_enc=NULL; + yang_free(m_mp4Para.fileName); + m_mp4Para.vmd=NULL; + m_in_audioBuffer=NULL; + m_in_videoBuffer=NULL; + yang_deleteA(srcVideoSource); + yang_deleteA(srcAudioSource); + yang_delete(mp4); + yang_delete(flv); +} +void YangRecordMp4::stop() { + stopLoop(); +} + +void YangRecordMp4::run() { + m_isStart=1; + startLoop(); + m_isStart=0; +} +void YangRecordMp4::setInVideoBuffer(YangVideoEncoderBuffer *pbuf) { + m_in_videoBuffer = pbuf; +} +void YangRecordMp4::setInAudioBuffer(YangAudioEncoderBuffer *pbuf) { + m_in_audioBuffer = pbuf; +} + + +void YangRecordMp4::writeAudioData() { + + m_in_audioBuffer->getAudio(&m_audioFrame); +// printf("a%d,",audioBufLen); + if (m_audioFrame.nb < 0 || m_audioFrame.nb > 1000) return; + if (isMp4) + mp4->WriteAudioPacket(&m_audioFrame); + else + flv->WriteAudioPacket(&m_audioFrame); +} + +//int32_t tcou=0; +void YangRecordMp4::writeVideoData() { + m_videoFrame.payload=srcVideoSource+4; + m_in_videoBuffer->getEVideo(&m_videoFrame); + + m_videoTimestamp=m_videoFrame.pts; + if(m_alltime>m_fileTimeLen){ + m_isCreateNewFile=1; + m_alltime=0; + m_alltime1=0; + } + if(m_isCreateNewFile&&frameType){ + // int64_t t1=m_time.getMilliTick(); + createNewfile(); + m_isCreateNewFile=0; + m_alltime=0; + m_alltime1=0; + } + if (m_startStamp == 0) m_startStamp = m_videoTimestamp; + + if (isMp4) { + + alltick = (m_videoTimestamp - m_startStamp-m_pauseTime) * 9 / 100; + + m_mp4Stamp = alltick - oldalltick; + m_alltime1+=m_mp4Stamp; + m_alltime=m_alltime1/90000; + + yang_put_be32((char*)srcVideoSource,m_videoFrame.nb); + m_videoFrame.payload=srcVideoSource; + m_videoFrame.nb+=4; + m_videoFrame.pts=m_mp4Stamp; + + mp4->WriteVideoPacket(&m_videoFrame); + oldalltick=alltick; + } else { + curVideoTimestamp = (m_videoTimestamp - m_startStamp-m_pauseTime) / 1000; + m_alltime=curVideoTimestamp/1000; + m_videoFrame.payload=srcVideoSource; + m_videoFrame.pts=curVideoTimestamp; + //curVideoTimestamp = (videoTimestamp - basestamp-m_pauseTime) / 100; + flv->WriteVideoPacket(&m_videoFrame); + } + +} + + +void YangRecordMp4::pauseRec(){ + m_prePauseTime=yang_get_milli_tick(); +} + void YangRecordMp4::resumeRec(){ + + m_afterPauseTime=yang_get_milli_tick();//m_time.getMilliTick(); + m_pauseTime+=m_afterPauseTime-m_prePauseTime; + //printf("") + } + void YangRecordMp4::initRecPara(){ + m_preVideotimestamp = 0; + 
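+ // A hedged sketch of what yang_put_be32 is assumed to do in writeVideoData()
+ // above: MP4 stores length-prefixed NALUs, so the 4-byte Annex-B start code
+ // at the head of srcVideoSource is overwritten with the frame length in
+ // big-endian byte order. putBe32 is an illustrative name only.
+ auto putBe32 = [](uint8_t *p, uint32_t v) {
+ 	p[0] = (uint8_t) (v >> 24);
+ 	p[1] = (uint8_t) (v >> 16);
+ 	p[2] = (uint8_t) (v >> 8);
+ 	p[3] = (uint8_t) v;
+ };
+ (void) putBe32; // illustration only; initRecPara() does not use it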
m_mp4Stamp = 0; + oldalltick = 0, alltick = 0; + curVideoTimestamp = 0; + m_startStamp=0; + m_prePauseTime=0,m_afterPauseTime=0; + m_pauseTime=0; + //if(m_in_audioBuffer) m_in_audioBuffer->resetIndex(); + //if(m_in_videoBuffer) m_in_videoBuffer->resetIndex(); + } + void YangRecordMp4::createNewfile(){ + closeRec(); + char filename1[256]; + memset(filename1,0,256); + memcpy(filename1,m_mp4Para.fileName,strlen(m_mp4Para.fileName)-4); + m_fileId++; + char filename[300]; + memset(filename,0,300); + sprintf(filename,"%s_%d.%s",filename1,m_fileId,isMp4?"mp4":"flv"); + createFile(filename); + initRecPara(); + + } + void YangRecordMp4::createFile(char* filename){ + if (isMp4) { + mp4 = new YangMp4File(filename, m_video); + mp4->init(m_mp4Para.vmd->mp4Meta.sps, m_mp4Para.vmd->mp4Meta.spsLen); + uint8_t pasc[1024]; + int32_t pasclen=0; + YangAudioEncoderMeta yem; + yem.createMeta(pasc,&pasclen); + memset(m_mp4Para.asc,0,10); + memcpy(m_mp4Para.asc,pasc,pasclen); + m_mp4Para.ascLen=pasclen; + mp4->WriteAudioInfo(pasc,pasclen,NULL, 0); + mp4->WriteVideoInfo(m_mp4Para.vmd->mp4Meta.vps, m_mp4Para.vmd->mp4Meta.vpsLen,m_mp4Para.vmd->mp4Meta.sps, m_mp4Para.vmd->mp4Meta.spsLen, + m_mp4Para.vmd->mp4Meta.pps, m_mp4Para.vmd->mp4Meta.ppsLen); + } else { + flv = new YangFlvWriter(filename, m_audio,m_video); + flv->framerate = m_video->frame; + flv->i_bitrate = (double)m_video->rate; + flv->i_level_idc = 31; + flv->Start(); + + flv->WriteVideoInfo(m_mp4Para.vmd->livingMeta.buffer, m_mp4Para.vmd->livingMeta.bufLen); + //flv->WriteAudioHeadPacket(); + + } + + } +void YangRecordMp4::initPara(YangVideoMeta *p_vmd,char *filename, int32_t p_isMp4) { + isMp4 = p_isMp4; + m_mp4Para.vmd=p_vmd; + int32_t flen=strlen(filename); + m_mp4Para.fileName=(char*)malloc(flen+1); + memset(m_mp4Para.fileName,0,flen+1); + memcpy(m_mp4Para.fileName,filename,flen); + createFile(filename); + +} +void YangRecordMp4::setFileTimeLen(int32_t ptlen_min){ + m_fileTimeLen=ptlen_min*60; +} +void YangRecordMp4::closeRec() { + if (mp4 != NULL) { + mp4->closeMp4(); + yang_trace("\n................mp4 is closed!\n"); + yang_delete(mp4); + } + if (flv != NULL) { + flv->Close(); + yang_trace("\n................flv is closed!\n"); + yang_delete(flv); + } + +} + + +void YangRecordMp4::startLoop() { + m_isConvert = 1; + + if(srcVideoSource==NULL) srcVideoSource=new uint8_t[512*1024]; + if(srcAudioSource==NULL) srcAudioSource=new uint8_t[2048]; + m_audioFrame.payload=srcAudioSource; + m_videoFrame.payload=srcVideoSource; + srcVideoSource[0]=0x00; + srcVideoSource[1]=0x00; + srcVideoSource[2]=0x00; + srcVideoSource[3]=0x01; + while (m_isConvert == 1) { + if (m_in_videoBuffer->size() > 5) { + yang_info("write cache big..%d\n", m_in_videoBuffer->size()); + } + if (m_in_videoBuffer->size() ==0 && m_in_audioBuffer->size() ==0) { + + yang_usleep(2000); + continue; + } + + + if(m_in_audioBuffer->size()>1) writeAudioData(); + if(m_in_videoBuffer->size()){ + writeVideoData(); + + } + + } + //while (m_in_videoBuffer->size()> 1 || m_in_audioBuffer->size() > 0) { + // writeAudioData(); + //writeVideoData(); + //} + + closeRec(); + +} +void YangRecordMp4::stopLoop() { + m_isConvert = 0; +} + + diff --git a/libmetartc3/src/yangsrt/YangSrtBase.cpp b/libmetartc3/src/yangsrt/YangSrtBase.cpp new file mode 100755 index 00000000..4aaf7419 --- /dev/null +++ b/libmetartc3/src/yangsrt/YangSrtBase.cpp @@ -0,0 +1,409 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include + +#ifdef _WIN32 +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x602 +#endif +#include +//#pragma 
comment(lib,"Ws2_32.lib") +#else +#include + +#include +#include +#endif +#include +#include +#include +#include "string.h" + +#include "yangutil/sys/YangLog.h" + +#define POLLING_TIME 1 /// Time in milliseconds between interrupt check +#define TS_UDP_LEN 188*7 + +using namespace std; + +#define HAVE_GETTIMEOFDAY 1 + +int64_t sls_gettime(void) //rturn micro-second + { +#ifndef _WIN32 + struct timeval tv; + gettimeofday(&tv, NULL); + return (int64_t) tv.tv_sec * 1000000 + tv.tv_usec; +#else + FILETIME ft; + int64_t t; + GetSystemTimeAsFileTime(&ft); + t = (int64_t) ft.dwHighDateTime << 32 | ft.dwLowDateTime; + return t / 10 - 11644473600000000; // Jan 1, 1601 + + //return -1; +#endif +} +int64_t sls_gettime_ms(void) //rturn millisecond + { + return sls_gettime() / 1000; +} + +void YangSrtBase::loadLib() { + yang_srt_startup = (int32_t (*)(void)) m_lib.loadFunction("srt_startup"); + ; + yang_srt_setloglevel = (void (*)(int32_t ll)) m_lib.loadFunction( + "srt_setloglevel"); + + yang_srt_create_socket = (SRTSOCKET (*)(void)) m_lib.loadFunction( + "srt_create_socket"); + yang_srt_setsockopt = (int32_t (*)(SRTSOCKET u, int32_t level, SRT_SOCKOPT optname, + const void *optval, int32_t optlen)) m_lib.loadFunction( + "srt_setsockopt"); + yang_srt_setsockflag = (int32_t (*)(SRTSOCKET u, SRT_SOCKOPT opt, + const void *optval, int32_t optlen)) m_lib.loadFunction( + "srt_setsockflag"); + yang_srt_connect = (int32_t (*)(SRTSOCKET u, const struct sockaddr *name, + int32_t namelen)) m_lib.loadFunction("srt_connect"); + yang_srt_epoll_create = (int32_t (*)(void)) m_lib.loadFunction( + "srt_epoll_create"); + yang_srt_epoll_set = + (int32_t (*)(int32_t eid, int32_t flags)) m_lib.loadFunction( + "srt_epoll_set"); + yang_srt_epoll_add_usock = + (int32_t (*)(int32_t eid, SRTSOCKET u, const int32_t *events)) m_lib.loadFunction( + "srt_epoll_add_usock"); + yang_srt_epoll_remove_usock = + (int32_t (*)(int32_t eid, SRTSOCKET u)) m_lib.loadFunction( + "srt_epoll_remove_usock"); + yang_srt_epoll_release = (int32_t (*)(int32_t eid)) m_lib.loadFunction( + "srt_epoll_release"); + yang_srt_close = (int32_t (*)(SRTSOCKET u)) m_lib.loadFunction("srt_close"); + yang_srt_getsockstate = + (SRT_SOCKSTATUS (*)(SRTSOCKET u)) m_lib.loadFunction( + "srt_getsockstate"); + yang_srt_cleanup = (int32_t (*)(void)) m_lib.loadFunction("srt_cleanup"); + yang_srt_epoll_wait = (int32_t (*)(int32_t eid, SRTSOCKET *readfds, int32_t *rnum, + SRTSOCKET *writefds, int32_t *wnum, int64_t msTimeOut, SYSSOCKET *lrfds, + int32_t *lrnum, SYSSOCKET *lwfds, int32_t *lwnum)) m_lib.loadFunction( + "srt_epoll_wait"); + yang_srt_sendmsg = (int32_t (*)(SRTSOCKET u, const char *buf, int32_t len, + int32_t ttl/* = -1*/, int32_t inorder/* = false*/)) m_lib.loadFunction( + "srt_sendmsg"); + yang_srt_recvmsg = + (int32_t (*)(SRTSOCKET u, char *buf, int32_t len)) m_lib.loadFunction( + "srt_recvmsg"); + yang_srt_getlasterror = (int32_t (*)(int32_t *errno_loc)) m_lib.loadFunction( + "srt_getlasterror"); + yang_srt_getlasterror_str = (const char* (*)(void)) m_lib.loadFunction( + "srt_getlasterror_str"); +} + +void YangSrtBase::unloadLib() { //srt_cleanup + yang_srt_startup = NULL; + yang_srt_getsockstate = NULL; + yang_srt_setloglevel = NULL; + yang_srt_create_socket = NULL; + yang_srt_setsockopt = NULL; + yang_srt_setsockflag = NULL; + yang_srt_connect = NULL; + yang_srt_epoll_create = NULL; + yang_srt_epoll_set = NULL; + yang_srt_epoll_add_usock = NULL; + yang_srt_epoll_remove_usock = NULL; + yang_srt_epoll_release = NULL; + yang_srt_close = NULL; + 
yang_srt_cleanup = NULL; + yang_srt_epoll_wait = NULL; + yang_srt_sendmsg = NULL; + yang_srt_recvmsg = NULL; + yang_srt_getlasterror = NULL; + yang_srt_getlasterror_str = NULL; + +} +YangSrtBase::YangSrtBase() { + m_port = 9000; + m_sfd = -1; + m_eid = -1; + m_dataCount = 0; + m_beginTm = 0; + m_bitRate = 0; + m_errState = 0; + m_contextt = 0; + memset(m_server, 0, YangSrtBase_strlen); + unloadLib(); +} + +YangSrtBase::~YangSrtBase() { + closeSrt(); + yang_srt_cleanup(); + unloadLib(); + m_lib.unloadObject(); +} + +void YangSrtBase::closeSrt() { + if (!m_contextt) + return; + int32_t st = 0; + + if (m_eid > 0) { + st = yang_srt_epoll_remove_usock(m_eid, m_sfd); + yang_srt_epoll_release(m_eid); + + } + //srt_close + if (m_sfd > 0) { + st = yang_srt_close(m_sfd); + } + if (st == SRT_ERROR) { + yang_error("srt_close: %s", yang_srt_getlasterror_str()); + return; + } + m_sfd = 0; + //srt_cleanup(); + +} +int32_t YangSrtBase::getSrtSocketStatus() { + return yang_srt_getsockstate(m_sfd); +} +int32_t YangSrtBase::init(char *pserver, int32_t pport) { + if (m_contextt) + return Yang_Ok; +#ifdef _WIN32 + WORD wVersionRequested; + WSADATA wsaData; + wVersionRequested = MAKEWORD(2, 2); //create 16bit data + if (WSAStartup(wVersionRequested, &wsaData) != 0) { + printf("Load WinSock Failed!"); + exit(1); + return ERROR_SOCKET; + } +#endif + + m_lib.loadObject("libsrt"); + loadLib(); + strcpy(m_server, pserver); + m_port = pport; + yang_srt_startup(); + yang_srt_setloglevel(srt_logging::LogLevel::debug); + + m_errState = Yang_Ok; + m_contextt = 1; + return Yang_Ok; +} +#ifdef _WIN32 +int32_t YangSrtBase::yang_inet_pton(int32_t af, const char *src, void *dst) { + struct sockaddr_storage ss; + int32_t ssSize = sizeof(ss); + char srcCopy[INET6_ADDRSTRLEN + 1]; + + ZeroMemory(&ss, sizeof(ss)); + + // work around non-const API + strncpy(srcCopy, src, INET6_ADDRSTRLEN + 1); + srcCopy[INET6_ADDRSTRLEN] = '\0'; + + if (WSAStringToAddressA(srcCopy, af, NULL, (struct sockaddr*) &ss, &ssSize) + != 0) { + return 0; + } + + switch (af) { + case AF_INET: { + *(struct in_addr*) dst = ((struct sockaddr_in*) &ss)->sin_addr; + return 1; + } + case AF_INET6: { + *(struct in6_addr*) dst = ((struct sockaddr_in6*) &ss)->sin6_addr; + return 1; + } + default: { + // No-Op + } + } + + return 0; +} +#endif +int32_t YangSrtBase::initConnect(char *streamid) { + m_sfd = yang_srt_create_socket(); + if (m_sfd == SRT_ERROR) { + yang_error("srt_socket: %s", yang_srt_getlasterror_str()); + return ERROR_SRT_SocketConnectCreate; + } + int32_t no = 0; //,m_is_write=1; + // srt_setsockflag(m_sfd, SRTO_SENDER, &m_is_write, sizeof m_is_write); +#ifdef _WIN32 + //int32_t mss = 1052; + //yang_srt_setsockopt(m_sfd, 0, SRTO_MSS, &mss, sizeof(int)); +#endif + yang_srt_setsockopt(m_sfd, 0, SRTO_SNDSYN, &no, sizeof no); // for async write + yang_srt_setsockopt(m_sfd, 0, SRTO_RCVSYN, &no, sizeof no); + if (yang_srt_setsockopt(m_sfd, 0, SRTO_STREAMID, streamid, strlen(streamid)) + < 0) { + yang_error( + "[%p]CSLSRelay::open, srt_setsockopt SRTO_STREAMID failure. err=%s.", + this, yang_srt_getlasterror_str()); + return ERROR_SRT_StreamIdSetFailure; + } + + // int32_t minversion = SRT_VERSION_FEAT_HSv5; + // srt_setsockflag(m_sfd, SRTO_MINVERSION, &minversion, sizeof minversion); + + // Require also non-live message mode. 
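+ // Note: despite the comment above (carried over from SRT sample code), the
+ // line below selects SRTT_LIVE; the SRTT_FILE/message-mode variant stays
+ // commented out.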
+ int32_t file_mode = SRTT_LIVE; //SRTT_FILE; + // int32_t yes = 1; + yang_srt_setsockflag(m_sfd, SRTO_TRANSTYPE, &file_mode, sizeof file_mode); + // srt_setsockflag(m_sfd, SRTO_MESSAGEAPI, &yes, sizeof yes); + + addrinfo hints, *res; + char portstr[10] = { 0 }; + snprintf(portstr, sizeof(portstr), "%d", m_port); + memset(&hints, 0, sizeof(addrinfo)); + + hints.ai_socktype = SOCK_DGRAM; //SOCK_STREAM; + hints.ai_family = AF_INET; + struct sockaddr_in sa; + sa.sin_port = htons(m_port); + sa.sin_family = AF_INET; //AF_UNSPEC; + getaddrinfo(m_server, portstr, &hints, &res); +#ifdef _WIN32 + if (yang_inet_pton(AF_INET, (const char*) m_server, &sa.sin_addr) != 1) { + return ERROR_SRT_SocketConnect; + } +#else + if (inet_pton(AF_INET, (const char*) m_server, &sa.sin_addr) != 1) { + return ERROR_SRT_SocketConnect; + } +#endif + //srt_connect + int32_t st = yang_srt_connect(m_sfd, (struct sockaddr*) &sa, sizeof sa); + SRT_SOCKSTATUS status = yang_srt_getsockstate(m_sfd); + yang_info("srt connect status===%d", status); + if (st == SRT_ERROR) { + yang_error("srt_connect: %s", yang_srt_getlasterror_str()); + return ERROR_SRT_SocketConnect; + } +//#ifndef _WIN32 + m_eid = yang_srt_epoll_create(); + if (m_eid < 0) { + yang_error("work, srt_epoll_create failed."); + return ERROR_SRT_EpollCreateFailure; + // return CSLSSrt::libsrt_neterrno(); + } + //compatible with srt v1.4.0 when container is empty. + yang_srt_epoll_set(m_eid, SRT_EPOLL_ENABLE_EMPTY); + int32_t modes = SRT_EPOLL_IN | SRT_EPOLL_OUT | SRT_EPOLL_ERR; + + int32_t ret = yang_srt_epoll_add_usock(m_eid, m_sfd, &modes); + if (ret < 0) { + yang_error( + "srt_add_to_epoll, srt_epoll_add_usock failed, m_eid=%d, fd=%d, modes=%d.", + m_eid, m_sfd, modes); + return ERROR_SRT_EpollSetFailure; //libsrt_neterrno(); + } +//#endif + return Yang_Ok; +} +int32_t YangSrtBase::connectServer() { + int32_t srtRet = 0; + for (int32_t i = 0; i < 500; i++) { + srtRet = getSrtSocketStatus(); + if (srtRet == SRTS_CONNECTED) { + return Yang_Ok; + } + yang_usleep(1000); + } + + srtRet += Yang_SRTS_SocketBase; + if (srtRet == Yang_SRTS_CONNECTING) srtRet = Yang_SRTS_NONSRTSERVER; + return yang_error_wrap(srtRet,"srt connect error"); + +} +int32_t YangSrtBase::receive(char *szData, int32_t *plen) { + if (yang_srt_getsockstate(m_sfd) != SRTS_CONNECTED) + return ERROR_SRT_NotInit; + //char szData[TS_UDP_LEN]; + *plen = 0; + int32_t ret = 0; +//#ifndef _WIN32 + SRTSOCKET read_socks[1]; + SRTSOCKET write_socks[1]; + + int32_t read_len = 1; + int32_t write_len = 0; + ret = yang_srt_epoll_wait(m_eid, read_socks, &read_len, write_socks, + &write_len, POLLING_TIME, 0, 0, 0, 0); + + if (ret < 2) { + //yang_error("srt_epoll failure, n=%s.", yang_srt_getlasterror_str()); + return Yang_Ok; //ERROR_SRT_EpollSelectFailure; + } + if (0 >= read_socks[0]) { + //yang_error("srt_reader failure, n=%s.", yang_srt_getlasterror_str()); + return Yang_Ok; //ERROR_SRT_ReadSocket; + } +//#endif + //read data + ret = yang_srt_recvmsg(m_sfd, szData, TS_UDP_LEN); + + // if(ret>0) printf("a%d,",*plen); + + if (ret == SRT_ERROR) { + // int32_t err_no = srt_getlasterror(NULL); + if (getSrtSocketStatus() == SRTS_CONNECTED) + return Yang_Ok; + yang_error("read_data_handler, srt_read failure, errno=%d...err=%s", + yang_srt_getlasterror(NULL), yang_srt_getlasterror_str()); + return ERROR_SRT_PullFailure; + } + *plen = ret; + //*len=ret; + //update invalid begin time + //m_invalid_begin_tm = sls_gettime(); + //printf("%d,",ret); + m_dataCount += ret; + int64_t cur_tm = sls_gettime_ms(); + int32_t d = 
cur_tm - m_beginTm; + if (d >= 500) { + m_bitRate = m_dataCount * 8 / d; + m_dataCount = 0; + m_beginTm = sls_gettime_ms(); + } + return Yang_Ok; + +} + +int32_t YangSrtBase::publish(char *message, int32_t len) { +//#ifndef _WIN32 + if (yang_srt_getsockstate(m_sfd) != SRTS_CONNECTED) + return ERROR_SRT_NotInit; + SRTSOCKET read_socks[1]; + SRTSOCKET write_socks[1]; + int32_t read_len = 0; + int32_t write_len = 1; + + int32_t ret = yang_srt_epoll_wait(m_eid, read_socks, &read_len, write_socks, + &write_len, POLLING_TIME, 0, 0, 0, 0); + if (0 > ret) { + //return ERROR_SRT_EpollSelectFailure; + return Yang_Ok; + } + if (0 >= write_socks[0]) { + //yang_error("srt_write failure, n=%s.", yang_srt_getlasterror_str()); + return Yang_Ok; //ERROR_SRT_WriteSocket; + } +//#endif + //write data + int32_t n = yang_srt_sendmsg(m_sfd, message, len, -1, 0); + if (n == SRT_ERROR) { + yang_error("srt_write failure, n=%d.", n); + + return ERROR_SRT_PushFailure; + + } + // m_sync_clock.wait(tm_ms); + + return Yang_Ok; +} diff --git a/libmetartc3/src/yangsrt/YangTsBuffer.cpp b/libmetartc3/src/yangsrt/YangTsBuffer.cpp new file mode 100755 index 00000000..5ac10efb --- /dev/null +++ b/libmetartc3/src/yangsrt/YangTsBuffer.cpp @@ -0,0 +1,219 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#include +YangTsBuffer::YangTsBuffer() + : _pos(0) +{ + curPos=0; +} + +YangTsBuffer::YangTsBuffer(int32_t size, char value) + : _pos(0) +{ + curPos=0; +} + +YangTsBuffer::~YangTsBuffer() +{ +} + +void YangTsBuffer::write_1byte(char val) +{ + _data[curPos++]=val; +} + +void YangTsBuffer::write_2bytes(int16_t val) +{ + char *p = (char *)&val; + + for (int32_t i = 1; i >= 0; --i) { + _data[curPos++]=p[i]; + + } +} + +void YangTsBuffer::write_3bytes(int32_t val) +{ + char *p = (char *)&val; + + for (int32_t i = 2; i >= 0; --i) { + _data[curPos++]=p[i]; + + } +} + +void YangTsBuffer::write_4bytes(int32_t val) +{ + char *p = (char *)&val; + + for (int32_t i = 3; i >= 0; --i) { + _data[curPos++]=p[i]; + } +} + +void YangTsBuffer::write_8bytes(int64_t val) +{ + char *p = (char *)&val; + + for (int32_t i = 7; i >= 0; --i) { + _data[curPos++]=p[i]; + } +} +void YangTsBuffer::writeBytes(uint8_t* bytes,int32_t size){ + memcpy(_data+curPos,bytes,size); + curPos+=size; +} + + +void YangTsBuffer::append( uint8_t* bytes, int32_t size) +{ + if (!bytes || size <= 0) + return; + memcpy(_data+curPos,bytes,size); + curPos+=size; +} + +char YangTsBuffer::read_1byte() +{ + assert(require(1)); + + char val = _data[_pos]; + _pos++; + + return val; +} + +int16_t YangTsBuffer::read_2bytes() +{ + assert(require(2)); + + int16_t val = 0; + char *p = (char *)&val; + + for (int32_t i = 1; i >= 0; --i) { + p[i] = _data[_pos]; + _pos++; + } + + return val; +} + +int32_t YangTsBuffer::read_3bytes() +{ + assert(require(3)); + + int32_t val = 0; + char *p = (char *)&val; + + for (int32_t i = 2; i >= 0; --i) { + p[i] = _data[_pos];//_data.at(0 + _pos); + _pos++; + } + + return val; +} + +int32_t YangTsBuffer::read_4bytes() +{ + assert(require(4)); + + int32_t val = 0; + char *p = (char *)&val; + + for (int32_t i = 3; i >= 0; --i) { + p[i] = _data[_pos]; + _pos++; + } + + return val; +} + +int64_t YangTsBuffer::read_8bytes() +{ + assert(require(8)); + + int64_t val = 0; + char *p = (char *)&val; + + for (int32_t i = 7; i >= 0; --i) { + p[i] = _data[_pos]; + _pos++; + } + + return val; +} +void YangTsBuffer::readBytes(uint8_t *p,int32_t len){ + memcpy(p,_data+_pos,len); + _pos += len; +} +std::string YangTsBuffer::read_string(int32_t 
len) +{ + assert(require(len)); + + std::string val((char*)_data + _pos, len); + _pos += len; + + return val; +} + +void YangTsBuffer::skip(int32_t size) +{ + _pos += size; +} + +bool YangTsBuffer::require(int32_t required_size) +{ + assert(required_size >= 0); + + return required_size <= curPos-_pos; +} + +bool YangTsBuffer::empty() +{ + return _pos >= curPos; +} + +int32_t YangTsBuffer::size() +{ + return curPos; +} + +int32_t YangTsBuffer::pos() +{ + return _pos; +} + +uint8_t *YangTsBuffer::data() +{ + return (size() == 0) ? nullptr : _data; +} + +void YangTsBuffer::clear() +{ + _pos = 0; + curPos=0; + +} + +void YangTsBuffer::set_data(int32_t pos, const uint8_t *data, int32_t len) +{ + if (!data) + return; + + if (pos + len > size()) { + return; + } + + for (int32_t i = 0; i < len; i++) { + _data[pos + i] = data[i]; + } +} + +std::string YangTsBuffer::to_string() +{ + return std::string(_data, _data+curPos); +} diff --git a/libmetartc3/src/yangsrt/YangTsMuxer.cpp b/libmetartc3/src/yangsrt/YangTsMuxer.cpp new file mode 100755 index 00000000..333eb597 --- /dev/null +++ b/libmetartc3/src/yangsrt/YangTsMuxer.cpp @@ -0,0 +1,424 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include + +#include +#include +static const uint16_t MPEGTS_NULL_PACKET_PID = 0x1FFF; +static const uint16_t MPEGTS_PAT_PID = 0x00; +static const uint16_t MPEGTS_PMT_PID = 0x100; +static const uint16_t MPEGTS_PCR_PID = 0x110; + + +YangTsMuxer::YangTsMuxer() { + m_pmt_pid = MPEGTS_PMT_PID; + m_stream_pid_map[STREAM_TYPE_VIDEO_H264]=Yang_H264_PID; + m_stream_pid_map[STREAM_TYPE_VIDEO_HEVC]=Yang_H265_PID; + m_stream_pid_map[STREAM_TYPE_AUDIO_AAC]=Yang_AAC_PID; + m_stream_pid_map[STREAM_TYPE_AUDIO_OPUS]=Yang_OPUS_PID; + m_stream_pid_map[STREAM_TYPE_PRIVATE_DATA]=Yang_PRIVATE_PID; + + + /**m_stream_pid_map.insert( + pair(STREAM_TYPE_VIDEO_H264, Yang_H264_PID)); + m_stream_pid_map.insert( + pair(STREAM_TYPE_VIDEO_HEVC, Yang_H265_PID)); + m_stream_pid_map.insert( + pair(STREAM_TYPE_AUDIO_AAC, Yang_AAC_PID)); + m_stream_pid_map.insert( + pair(STREAM_TYPE_AUDIO_OPUS, Yang_OPUS_PID)); + m_stream_pid_map.insert( + pair(STREAM_TYPE_PRIVATE_DATA, Yang_PRIVATE_PID));**/ + current_index = 0; + +} + +YangTsMuxer::~YangTsMuxer() { + +} + +void YangTsMuxer::create_pat(YangTsBuffer *sb, uint16_t pmt_pid, uint8_t cc) { + YangTsBuffer pat_sb; + TsHeader ts_header; + ts_header.sync_byte = 0x47; + ts_header.transport_error_indicator = 0; + ts_header.payload_unit_start_indicator = 1; + ts_header.transport_priority = 0; + ts_header.pid = MPEGTS_PAT_PID; + ts_header.transport_scrambling_control = 0; + ts_header.adaptation_field_control = + MpegTsAdaptationFieldType::payload_only; + ts_header.continuity_counter = cc; + + AdaptationFieldHeader adapt_field; + + PATHeader pat_header; + pat_header.table_id = 0x00; + pat_header.section_syntax_indicator = 1; + pat_header.b0 = 0; + pat_header.reserved0 = 0x3; + pat_header.transport_stream_id = 0; + pat_header.reserved1 = 0x3; + pat_header.version_number = 0; + pat_header.current_next_indicator = 1; + pat_header.section_number = 0x0; + pat_header.last_section_number = 0x0; + + //program_number + uint16_t program_number = 0x0001; + //program_map_PID + uint16_t program_map_PID = 0xe000 | (pmt_pid & 0x1fff); + + uint32_t section_length = 4 + 4 + 5; + pat_header.section_length = section_length & 0x3ff; + + ts_header.encode(&pat_sb); + adapt_field.encode(&pat_sb); + pat_header.encode(&pat_sb); + pat_sb.write_2bytes(program_number); + pat_sb.write_2bytes(program_map_PID); + + // crc32 
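+ // Per ISO/IEC 13818-1, CRC_32 covers the PSI section from table_id onward.
+ // The "+ 5" below skips the 4-byte TS header plus the single zero byte
+ // emitted by AdaptationFieldHeader::encode, which doubles as the section's
+ // pointer_field; the CRC is then appended as the final 4 bytes.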
+ uint32_t crc_32 = crc32((uint8_t*) pat_sb.data() + 5, pat_sb.size() - 5);
+ pat_sb.write_4bytes(crc_32);
+
+ int32_t suffLen = 188 - pat_sb.size();
+ //uint8_t stuff[suffLen];
+ uint8_t* stuff=new uint8_t[suffLen];
+ YangAutoFreeA(uint8_t,stuff);
+ memset(stuff, 0xff, suffLen);
+ pat_sb.writeBytes(stuff, suffLen); // write suffLen stuffing bytes so the packet reaches the fixed 188-byte TS size
+ // memcpy()
+ //std::string stuff(188 - pat_sb.size(), 0xff);
+ //pat_sb.write_string(stuff);
+
+ sb->append(pat_sb.data(), pat_sb.size());
+}
+
+void YangTsMuxer::create_pmt(YangTsBuffer *sb, uint8_t cc) {
+ YangTsBuffer pmt_sb;
+ TsHeader ts_header;
+ ts_header.sync_byte = 0x47;
+ ts_header.transport_error_indicator = 0;
+ ts_header.payload_unit_start_indicator = 1;
+ ts_header.transport_priority = 0;
+ ts_header.pid = m_pmt_pid;
+ ts_header.transport_scrambling_control = 0;
+ ts_header.adaptation_field_control = MpegTsAdaptationFieldType::payload_only;
+ ts_header.continuity_counter = cc;
+
+ AdaptationFieldHeader adapt_field;
+
+ PMTHeader pmt_header;
+ pmt_header.table_id = 0x02;
+ pmt_header.section_syntax_indicator = 1;
+ pmt_header.b0 = 0;
+ pmt_header.reserved0 = 0x3;
+ pmt_header.section_length = 0;
+ pmt_header.program_number = 0x0001;
+ pmt_header.reserved1 = 0x3;
+ pmt_header.version_number = 0;
+ pmt_header.current_next_indicator = 1;
+ pmt_header.section_number = 0x00;
+ pmt_header.last_section_number = 0x00;
+ pmt_header.reserved2 = 0x7;
+ pmt_header.reserved3 = 0xf;
+ pmt_header.program_info_length = 0;
+ for (auto it = m_stream_pid_map.begin(); it != m_stream_pid_map.end();
+ it++) {
+ pmt_header.infos.push_back(
+ std::shared_ptr<PMTElementInfo>(
+ new PMTElementInfo(it->first, it->second)));
+ if (it->first == MpegTsStream::AVC) {
+ pmt_header.PCR_PID = it->second;
+ }
+ }
+
+ uint16_t section_length = pmt_header.size() - 3 + 4;
+ pmt_header.section_length = section_length & 0x3ff;
+
+ ts_header.encode(&pmt_sb);
+ adapt_field.encode(&pmt_sb);
+ pmt_header.encode(&pmt_sb);
+
+ // crc32
+ uint32_t crc_32 = crc32((uint8_t*) pmt_sb.data() + 5, pmt_sb.size() - 5);
+ pmt_sb.write_4bytes(crc_32);
+
+ int32_t suffLen = 188 - pmt_sb.size();
+ //uint8_t stuff[suffLen];
+ uint8_t* stuff=new uint8_t[suffLen];
+ YangAutoFreeA(uint8_t,stuff);
+ memset(stuff, 0xff, suffLen);
+ pmt_sb.writeBytes(stuff, suffLen); // write suffLen stuffing bytes to pad the packet to 188 bytes, as in create_pat
+ // std::string stuff(188 - pmt_sb.size(), 0xff);
+ // pmt_sb.write_string(stuff);
+
+ sb->append(pmt_sb.data(), pmt_sb.size());
+}
+void YangTsMuxer::create_pes(YangTsPes *frame, uint8_t *p, int32_t plen,
+ int32_t frametype, int64_t timestamp, YangTsStream pstreamType) {
+ YangTsBuffer packet;
+ PESHeader pes_header;
+ uint8_t streamType = STREAM_TYPE_VIDEO_H264;
+ if (pstreamType == TS_H264) {
+ streamType = PES_VIDEO_ID; //STREAM_TYPE_VIDEO_H264;
+ frame->pid = Yang_H264_PID;
+ frame->dts = timestamp;
+ frame->pts = frame->dts;
+ }
+ if (pstreamType == TS_H265) {
+ streamType = PES_VIDEO_ID; //STREAM_TYPE_VIDEO_H265;
+ frame->pid = Yang_H265_PID;
+ frame->dts = timestamp;
+ frame->pts = timestamp;
+ }
+ if (pstreamType == TS_AAC) {
+ streamType = PES_AUDIO_ID; //STREAM_TYPE_AUDIO_AAC;
+ frame->pid = Yang_AAC_PID;
+ frame->pts = frame->dts = timestamp;
+ }
+ if (pstreamType == TS_OPUS) {
+ streamType = PES_AUDIO_ID; //STREAM_TYPE_AUDIO_OPUS;
+ frame->pid = Yang_OPUS_PID;
+ frame->pts = frame->dts = timestamp;
+ }
+ if (pstreamType == TS_PRIVATE) {
+ streamType = STREAM_TYPE_PRIVATE_DATA;
+ frame->pid = Yang_PRIVATE_PID;
+
+ }
+
+ frame->stream_type = streamType;
+
+ pes_header.packet_start_code = 0x000001;
+ pes_header.stream_id = 
frame->stream_type; + pes_header.marker_bits = 0x02; + pes_header.original_or_copy = 0x01; + + if (frame->pts != frame->dts) { + pes_header.pts_dts_flags = 0x03; + pes_header.header_data_length = 0x0A; + } else { + pes_header.pts_dts_flags = 0x2; + pes_header.header_data_length = 0x05; + } + + uint32_t pes_size = (pes_header.header_data_length + frame->len + 3); + //printf("\n*****************pes=====%d,",pes_size); + pes_header.pes_packet_length = pes_size > 0xffff ? 0 : pes_size; + pes_header.encode(&packet); + + if (pes_header.pts_dts_flags == 0x03) { + write_pts(&packet, 3, frame->pts); + write_pts(&packet, 1, frame->dts); + } else { + write_pts(&packet, 2, frame->pts); + } + memcpy(frame->data, packet.data(), packet.size()); + memcpy(frame->data + packet.size(), p, plen); + frame->len = plen + packet.size(); +} +void YangTsMuxer::create_ts(YangTsPes *frame, vector *sb) { + bool first = true; + // while (!frame->_data->empty()) { + while (frame->pos < frame->len) { + YangTsBuffer packet; + + TsHeader ts_header; + ts_header.pid = frame->pid; + ts_header.adaptation_field_control = + MpegTsAdaptationFieldType::payload_only; + ts_header.continuity_counter = get_cc(frame->stream_type); + + if (first) { + ts_header.payload_unit_start_indicator = 0x01; + if (frame->stream_type == STREAM_TYPE_VIDEO_H264 + || frame->stream_type == STREAM_TYPE_VIDEO_HEVC) { + ts_header.adaptation_field_control |= 0x02; + AdaptationFieldHeader adapt_field_header; + adapt_field_header.adaptation_field_length = 0x07; + adapt_field_header.random_access_indicator = 0x01; + adapt_field_header.pcr_flag = 0x01; + + ts_header.encode(&packet); + adapt_field_header.encode(&packet); + write_pcr(&packet, frame->dts); + } else { + ts_header.encode(&packet); + } + //pes handle + + first = false; + } else { + ts_header.encode(&packet); + } + // printf("\n********tsheader===%d\n",packet.size()); + uint32_t pos = packet.size(); + uint32_t body_size = 188 - pos; + uint8_t* bodys=new uint8_t[body_size]; + YangAutoFreeA(uint8_t,bodys); + memset(bodys,0,body_size); + packet.writeBytes(bodys, body_size); + // packet.write_string(std::string(body_size, 0)); + packet.skip(pos); + uint32_t in_size = frame->len - frame->pos; + if (body_size <= in_size) { // MpegTsAdaptationFieldType::payload_only or MpegTsAdaptationFieldType::payload_adaption_both for AVC +// packet.write_string(frame->_data->read_string(body_size)); + // std::string body_string = frame->_data->read_string(body_size); + packet.set_data(pos, frame->data + frame->pos, body_size); + frame->pos += body_size; + } else { + uint16_t stuff_size = body_size - in_size; + if (ts_header.adaptation_field_control + == MpegTsAdaptationFieldType::adaption_only + || ts_header.adaptation_field_control + == MpegTsAdaptationFieldType::payload_adaption_both) { + uint8_t *base = packet.data() + 5 + packet.data()[4]; + packet.set_data(base - packet.data() + stuff_size, base, + packet.data() + packet.pos() - base); + memset(base, 0xff, stuff_size); + packet.skip(stuff_size); + packet.data()[4] += stuff_size; + } else { + // adaptation_field_control |= 0x20 == MpegTsAdaptationFieldType::payload_adaption_both + packet.data()[3] |= 0x20; + packet.set_data(188 - 4 - stuff_size, packet.data() + 4, + packet.pos() - 4); + packet.skip(stuff_size); + packet.data()[4] = stuff_size - 1; + if (stuff_size >= 2) { + packet.data()[5] = 0; + memset(&(packet.data()[6]), 0xff, stuff_size - 2); + } + } +// packet.write_string(frame->_data->read_string(in_size)); + // std::string body_string = 
frame->_data->read_string(in_size); + //packet.set_data(packet.pos(), body_string.c_str(), body_string.length()); + packet.set_data(packet.pos(), frame->data + frame->pos, in_size); + frame->pos += in_size; + } + sb->push_back(packet); + // sb->append(packet.data(), packet.size()); + } +} + +void YangTsMuxer::create_pcr(YangTsBuffer *sb) { + uint64_t pcr = 0; + TsHeader ts_header; + ts_header.sync_byte = 0x47; + ts_header.transport_error_indicator = 0; + ts_header.payload_unit_start_indicator = 0; + ts_header.transport_priority = 0; + ts_header.pid = MPEGTS_PCR_PID; + ts_header.transport_scrambling_control = 0; + ts_header.adaptation_field_control = + MpegTsAdaptationFieldType::adaption_only; + ts_header.continuity_counter = 0; + + AdaptationFieldHeader adapt_field; + adapt_field.adaptation_field_length = 188 - 4 - 1; + adapt_field.discontinuity_indicator = 0; + adapt_field.random_access_indicator = 0; + adapt_field.elementary_stream_priority_indicator = 0; + adapt_field.pcr_flag = 1; + adapt_field.opcr_flag = 0; + adapt_field.splicing_point_flag = 0; + adapt_field.transport_private_data_flag = 0; + adapt_field.adaptation_field_extension_flag = 0; + + // char *p = sb->data(); + ts_header.encode(sb); + adapt_field.encode(sb); + write_pcr(sb, pcr); +} + +void YangTsMuxer::create_null(YangTsBuffer *sb) { + TsHeader ts_header; + ts_header.sync_byte = 0x47; + ts_header.transport_error_indicator = 0; + ts_header.payload_unit_start_indicator = 0; + ts_header.transport_priority = 0; + ts_header.pid = MPEGTS_NULL_PACKET_PID; + ts_header.transport_scrambling_control = 0; + ts_header.adaptation_field_control = + MpegTsAdaptationFieldType::payload_only; + ts_header.continuity_counter = 0; + ts_header.encode(sb); +} + +void YangTsMuxer::encode(uint8_t *p, int32_t plen, int32_t frametype, + int64_t timestamp, YangTsStream streamType, + vector *sb) { + if (should_create_pat()) { + encodePmtWithoutData(sb); + } + YangTsPes *frame = (YangTsPes*) malloc(sizeof(YangTsPes)); + memset(frame, 0, sizeof(YangTsPes)); + frame->data = (uint8_t*) malloc(plen + 20); + frame->len = plen; + create_pes(frame, p, plen, frametype, timestamp, streamType); + create_ts(frame, sb); + free(frame->data); + frame->data = nullptr; + free(frame); + frame = nullptr; +} +void YangTsMuxer::encodeWithPmt(uint8_t *p, int32_t plen, int32_t frametype, + int64_t timestamp, YangTsStream streamType, + vector *sb) { + encodePmtWithoutData(sb); + + YangTsPes *frame = (YangTsPes*) malloc(sizeof(YangTsPes)); + memset(frame, 0, sizeof(YangTsPes)); + frame->data = (uint8_t*) malloc(plen + 20); + frame->len = plen; + create_pes(frame, p, plen, frametype, timestamp, streamType); + create_ts(frame, sb); + free(frame->data); + frame->data = nullptr; + free(frame); + frame = nullptr; +} +//YangTsMuxer::current_index=0; +//void YangTsMuxer::encodePmt(vector *sb) { +// if (should_create_pat()) { +// encodePmtWithoutData(sb); +// } +//} +uint8_t YangTsMuxer::get_cc(uint32_t with_pid) { + if (_pid_cc_map.find(with_pid) != _pid_cc_map.end()) { + _pid_cc_map[with_pid] = (_pid_cc_map[with_pid] + 1) & 0x0F; + return _pid_cc_map[with_pid]; + } + + _pid_cc_map[with_pid] = 0; + return 0; +} +void YangTsMuxer::encodePmtWithoutData(vector *sb){ + current_index=0; + sb->push_back(YangTsBuffer()); + uint8_t pat_pmt_cc = get_cc(0); + create_pat(&sb->at(0), m_pmt_pid, pat_pmt_cc); + sb->push_back(YangTsBuffer()); + create_pmt(&sb->at(1), pat_pmt_cc); +} +bool YangTsMuxer::should_create_pat() { + bool ret = false; + + if (current_index % pat_interval == 0) { + if 
(current_index > 0) {
+ current_index = 0;
+ }
+ ret = true;
+ }
+
+ current_index++;
+
+ return ret;
+}
+
diff --git a/libmetartc3/src/yangsrt/YangTsPacket.cpp b/libmetartc3/src/yangsrt/YangTsPacket.cpp
new file mode 100755
index 00000000..cafaca8f
--- /dev/null
+++ b/libmetartc3/src/yangsrt/YangTsPacket.cpp
@@ -0,0 +1,509 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include
+#include
+#include
+
+TsFrame::TsFrame(){
+ _data.reset(new YangTsBuffer);
+ pts=dts=pcr=0;
+ stream_type=stream_id=0;
+ completed=false;
+ pid=0;
+ expected_pes_packet_length=0;
+
+
+}
+
+TsFrame::TsFrame(uint8_t st){
+ stream_type=st;
+ pts=dts=pcr=0;
+ stream_id=0;
+ completed=false;
+ pid=0;
+ expected_pes_packet_length=0;
+ _data.reset(new YangTsBuffer);
+}
+
+bool TsFrame::empty()
+{
+ return _data->size() == 0;
+}
+
+void TsFrame::reset()
+{
+ pid = 0;
+ completed = false;
+ expected_pes_packet_length = 0;
+ _data.reset(new YangTsBuffer);
+}
+
+TsHeader::TsHeader()
+ : sync_byte(0x47)
+ , transport_error_indicator(0)
+ , payload_unit_start_indicator(0)
+ , transport_priority(0)
+ , pid(0)
+ , transport_scrambling_control(0)
+ , adaptation_field_control(0)
+ , continuity_counter(0)
+{
+}
+
+TsHeader::~TsHeader()
+{
+}
+
+void TsHeader::encode(YangTsBuffer *sb)
+{
+ sb->write_1byte(sync_byte);
+
+ uint16_t b1b2 = pid & 0x1FFF;
+ b1b2 |= (transport_priority << 13) & 0x2000;
+ b1b2 |= (payload_unit_start_indicator << 14) & 0x4000;
+ b1b2 |= (transport_error_indicator << 15) & 0x8000;
+ sb->write_2bytes(b1b2);
+
+ uint8_t b3 = continuity_counter & 0x0F;
+ b3 |= (adaptation_field_control << 4) & 0x30;
+ b3 |= (transport_scrambling_control << 6) & 0xC0;
+ sb->write_1byte(b3);
+}
+
+void TsHeader::decode(YangTsBuffer *sb)
+{
+ sync_byte = sb->read_1byte();
+
+ uint16_t b1b2 = sb->read_2bytes();
+ pid = b1b2 & 0x1FFF;
+ transport_priority = (b1b2 >> 13) & 0x01; // bit 13 carries transport_priority (cf. encode())
+ payload_unit_start_indicator = (b1b2 >> 14) & 0x01;
+ transport_error_indicator = (b1b2 >> 15) & 0x01;
+
+ uint8_t b3 = sb->read_1byte();
+ continuity_counter = b3 & 0x0F;
+ adaptation_field_control = (b3 >> 4) & 0x03;
+ transport_scrambling_control = (b3 >> 6) & 0x03;
+}
+
+PATHeader::PATHeader()
+ : table_id(0)
+ , section_syntax_indicator(0)
+ , b0(0)
+ , reserved0(0)
+ , section_length(0)
+ , transport_stream_id(0)
+ , reserved1(0)
+ , version_number(0)
+ , current_next_indicator(0)
+ , section_number(0)
+ , last_section_number(0)
+{
+
+}
+
+PATHeader::~PATHeader()
+{
+
+}
+
+void PATHeader::encode(YangTsBuffer *sb)
+{
+ sb->write_1byte(table_id);
+
+ uint16_t b1b2 = section_length & 0x0FFF;
+ b1b2 |= (reserved0 << 12) & 0x3000;
+ b1b2 |= (b0 << 14) & 0x4000;
+ b1b2 |= (section_syntax_indicator << 15) & 0x8000;
+ sb->write_2bytes(b1b2);
+
+ sb->write_2bytes(transport_stream_id);
+
+ uint8_t b5 = current_next_indicator & 0x01;
+ b5 |= (version_number << 1) & 0x3E;
+ b5 |= (reserved1 << 6) & 0xC0;
+ sb->write_1byte(b5);
+
+ sb->write_1byte(section_number);
+ sb->write_1byte(last_section_number);
+}
+
+void PATHeader::decode(YangTsBuffer *sb)
+{
+ table_id = sb->read_1byte();
+
+ uint16_t b1b2 = sb->read_2bytes();
+ section_syntax_indicator = (b1b2 >> 15) & 0x01;
+ b0 = (b1b2 >> 14) & 0x01;
+ section_length = b1b2 & 0x0FFF;
+
+ transport_stream_id = sb->read_2bytes();
+
+ uint8_t b5 = sb->read_1byte();
+ reserved1 = (b5 >> 6) & 0x03;
+ version_number = (b5 >> 1) & 0x1F;
+ current_next_indicator = b5 & 0x01;
+
+ section_number = sb->read_1byte();
+
+ last_section_number = sb->read_1byte();
+}
+
+void PATHeader::print()
+{
+ std::cout << 
"----------PAT information----------" << std::endl; + std::cout << "table_id: " << std::to_string(table_id) << std::endl; + std::cout << "section_syntax_indicator: " << std::to_string(section_syntax_indicator) << std::endl; + std::cout << "b0: " << std::to_string(b0) << std::endl; + std::cout << "reserved0: " << std::to_string(reserved0) << std::endl; + std::cout << "section_length: " << std::to_string(section_length) << std::endl; + std::cout << "transport_stream_id: " << std::to_string(transport_stream_id) << std::endl; + std::cout << "reserved1: " << std::to_string(reserved1) << std::endl; + std::cout << "version_number: " << std::to_string(version_number) << std::endl; + std::cout << "current_next_indicator: " << std::to_string(current_next_indicator) << std::endl; + std::cout << "section_number: " << std::to_string(section_number) << std::endl; + std::cout << "last_section_number: " << std::to_string(last_section_number) << std::endl; + std::cout << std::endl; + std::flush(std::cout); +} + +PMTElementInfo::PMTElementInfo(uint8_t st, uint16_t pid) + : stream_type(st) + , reserved0(0x7) + , elementary_PID(pid) + , reserved1(0xf) + , ES_info_length(0) +{ + +} + +PMTElementInfo::PMTElementInfo() + : PMTElementInfo(0, 0) +{ + +} + +PMTElementInfo::~PMTElementInfo() +{ + +} + +void PMTElementInfo::encode(YangTsBuffer *sb) +{ + sb->write_1byte(stream_type); + + uint16_t b1b2 = elementary_PID & 0x1FFF; + b1b2 |= (reserved0 << 13) & 0xE000; + sb->write_2bytes(b1b2); + + int16_t b3b4 = ES_info_length & 0x0FFF; + b3b4 |= (reserved1 << 12) & 0xF000; + sb->write_2bytes(b3b4); + + if (ES_info_length > 0) { + // TODO: + } +} + +void PMTElementInfo::decode(YangTsBuffer *sb) +{ + stream_type = sb->read_1byte(); + + uint16_t b1b2 = sb->read_2bytes(); + reserved0 = (b1b2 >> 13) & 0x07; + elementary_PID = b1b2 & 0x1FFF; + + uint16_t b3b4 = sb->read_2bytes(); + reserved1 = (b3b4 >> 12) & 0xF; + ES_info_length = b3b4 & 0xFFF; + + if (ES_info_length > 0) { + ES_info = sb->read_string(ES_info_length); + } +} + +uint16_t PMTElementInfo::size() +{ + return 5 + ES_info_length; +} + +void PMTElementInfo::print() +{ + std::cout << "**********PMTElement information**********" << std::endl; + std::cout << "stream_type: " << std::to_string(stream_type) << std::endl; + std::cout << "reserved0: " << std::to_string(reserved0) << std::endl; + std::cout << "elementary_PID: " << std::to_string(elementary_PID) << std::endl; + std::cout << "reserved1: " << std::to_string(reserved1) << std::endl; + std::cout << "ES_info_length: " << std::to_string(ES_info_length) << std::endl; + std::cout << "ES_info: " << ES_info << std::endl; + std::flush(std::cout); +} + +PMTHeader::PMTHeader() + : table_id(0x02) + , section_syntax_indicator(0) + , b0(0) + , reserved0(0) + , section_length(0) + , program_number(0) + , reserved1(0) + , version_number(0) + , current_next_indicator(0) + , section_number(0) + , last_section_number(0) + , reserved2(0) + , PCR_PID(0) + , reserved3(0) + , program_info_length(0) +{ + +} + +PMTHeader::~PMTHeader() +{ + +} + +void PMTHeader::encode(YangTsBuffer *sb) +{ + sb->write_1byte(table_id); + + uint16_t b1b2 = section_length & 0xFFFF; + b1b2 |= (reserved0 << 12) & 0x3000; + b1b2 |= (b0 << 14) & 0x4000; + b1b2 |= (section_syntax_indicator << 15) & 0x8000; + sb->write_2bytes(b1b2); + + sb->write_2bytes(program_number); + + uint8_t b5 = current_next_indicator & 0x01; + b5 |= (version_number << 1) & 0x3E; + b5 |= (reserved1 << 6) & 0xC0; + sb->write_1byte(b5); + + sb->write_1byte(section_number); + 
sb->write_1byte(last_section_number); + + uint16_t b8b9 = PCR_PID & 0x1FFF; + b8b9 |= (reserved2 << 13) & 0xE000; + sb->write_2bytes(b8b9); + + uint16_t b10b11 = program_info_length & 0xFFF; + b10b11 |= (reserved3 << 12) & 0xF000; + sb->write_2bytes(b10b11); + + for (int32_t i = 0; i < (int)infos.size(); i++) { + infos[i]->encode(sb); + } +} + +void PMTHeader::decode(YangTsBuffer *sb) +{ + table_id = sb->read_1byte(); + + uint16_t b1b2 = sb->read_2bytes(); + section_syntax_indicator = (b1b2 >> 15) & 0x01; + b0 = (b1b2 >> 14) & 0x01; + reserved0 = (b1b2 >> 12) & 0x03; + section_length = b1b2 & 0xFFF; + + program_number = sb->read_2bytes(); + + uint8_t b5 = sb->read_1byte(); + reserved1 = (b5 >> 6) & 0x03; + version_number = (b5 >> 1) & 0x1F; + current_next_indicator = b5 & 0x01; + + section_number = sb->read_1byte(); + last_section_number = sb->read_1byte(); + + uint16_t b8b9 = sb->read_2bytes(); + reserved2 = (b8b9 >> 13) & 0x07; + PCR_PID = b8b9 & 0x1FFF; + + uint16_t b10b11 = sb->read_2bytes(); + reserved3 = (b10b11 >> 12) & 0xF; + program_info_length = b10b11 & 0xFFF; + + if (program_info_length > 0) { + sb->read_string(program_info_length); + } + + int32_t remain_bytes = section_length - 4 - 9 - program_info_length; + while (remain_bytes > 0) { + std::shared_ptr element_info(new PMTElementInfo); + element_info->decode(sb); + infos.push_back(element_info); + remain_bytes -= element_info->size(); + } +} + +uint16_t PMTHeader::size() +{ + uint16_t ret = 12; + for (int32_t i = 0; i < (int)infos.size(); i++) { + ret += infos[i]->size(); + } + + return ret; +} + +void PMTHeader::print() +{ + std::cout << "----------PMT information----------" << std::endl; + std::cout << "table_id: " << std::to_string(table_id) << std::endl; + std::cout << "section_syntax_indicator: " << std::to_string(section_syntax_indicator) << std::endl; + std::cout << "b0: " << std::to_string(b0) << std::endl; + std::cout << "reserved0: " << std::to_string(reserved0) << std::endl; + std::cout << "section_length: " << std::to_string(section_length) << std::endl; + std::cout << "program_number: " << std::to_string(program_number) << std::endl; + std::cout << "reserved1: " << std::to_string(reserved1) << std::endl; + std::cout << "version_number: " << std::to_string(version_number) << std::endl; + std::cout << "current_next_indicator: " << std::to_string(current_next_indicator) << std::endl; + std::cout << "section_number: " << std::to_string(section_number) << std::endl; + std::cout << "last_section_number: " << std::to_string(last_section_number) << std::endl; + std::cout << "reserved2: " << std::to_string(reserved2) << std::endl; + std::cout << "PCR_PID: " << std::to_string(PCR_PID) << std::endl; + std::cout << "reserved3: " << std::to_string(reserved3) << std::endl; + std::cout << "program_info_length: " << std::to_string(program_info_length) << std::endl; + for (int32_t i = 0; i < (int)infos.size(); i++) { + infos[i]->print(); + } + std::cout << std::endl; + std::flush(std::cout); +} + +AdaptationFieldHeader::AdaptationFieldHeader() + : adaptation_field_length(0) + , adaptation_field_extension_flag(0) + , transport_private_data_flag(0) + , splicing_point_flag(0) + , opcr_flag(0) + , pcr_flag(0) + , elementary_stream_priority_indicator(0) + , random_access_indicator(0) + , discontinuity_indicator(0) +{ + +} + +AdaptationFieldHeader::~AdaptationFieldHeader() +{ + +} + +void AdaptationFieldHeader::encode(YangTsBuffer *sb) +{ + sb->write_1byte(adaptation_field_length); + if (adaptation_field_length != 0) { + uint8_t val = 
adaptation_field_extension_flag & 0x01; + val |= (transport_private_data_flag << 1) & 0x02; + val |= (splicing_point_flag << 2) & 0x04; + val |= (opcr_flag << 3) & 0x08; + val |= (pcr_flag << 4) & 0x10; + val |= (elementary_stream_priority_indicator << 5) & 0x20; + val |= (random_access_indicator << 6) & 0x40; + val |= (discontinuity_indicator << 7) & 0x80; + sb->write_1byte(val); + } +} + +void AdaptationFieldHeader::decode(YangTsBuffer *sb) +{ + adaptation_field_length = sb->read_1byte(); + if (adaptation_field_length != 0) { + uint8_t val = sb->read_1byte(); + adaptation_field_extension_flag = val & 0x01; + transport_private_data_flag = (val >> 1) & 0x01; + splicing_point_flag = (val >> 2) & 0x01; + opcr_flag = (val >> 3) & 0x01; + pcr_flag = (val >> 4) & 0x01; + elementary_stream_priority_indicator = (val >> 5) & 0x01; + random_access_indicator = (val >> 6) & 0x01; + discontinuity_indicator = (val >> 7) & 0x01; + } +} + +PESHeader::PESHeader() + : packet_start_code(0x000001) + , stream_id(0) + , pes_packet_length(0) + , original_or_copy(0) + , copyright(0) + , data_alignment_indicator(0) + , pes_priority(0) + , pes_scrambling_control(0) + , marker_bits(0x02) + , pes_ext_flag(0) + , pes_crc_flag(0) + , add_copy_info_flag(0) + , dsm_trick_mode_flag(0) + , es_rate_flag(0) + , escr_flag(0) + , pts_dts_flags(0) + , header_data_length(0) +{ + +} + +PESHeader::~PESHeader() +{ + +} + +void PESHeader::encode(YangTsBuffer *sb) +{ + uint32_t b0b1b2b3 = (packet_start_code << 8) & 0xFFFFFF00; + b0b1b2b3 |= stream_id & 0xFF; + sb->write_4bytes(b0b1b2b3); + + sb->write_2bytes(pes_packet_length); + + uint8_t b6 = original_or_copy & 0x01; + b6 |= (copyright << 1) & 0x02; + b6 |= (data_alignment_indicator << 2) & 0x04; + b6 |= (pes_priority << 3) & 0x08; + b6 |= (pes_scrambling_control << 4) & 0x30; + b6 |= (marker_bits << 6) & 0xC0; + sb->write_1byte(b6); + + uint8_t b7 = pes_ext_flag & 0x01; + b7 |= (pes_crc_flag << 1) & 0x02; + b7 |= (add_copy_info_flag << 2) & 0x04; + b7 |= (dsm_trick_mode_flag << 3) & 0x08; + b7 |= (es_rate_flag << 4) & 0x10; + b7 |= (escr_flag << 5) & 0x20; + b7 |= (pts_dts_flags << 6) & 0xC0; + sb->write_1byte(b7); + + sb->write_1byte(header_data_length); +} + +void PESHeader::decode(YangTsBuffer *sb) +{ + uint32_t b0b1b2b3 = sb->read_4bytes(); + packet_start_code = (b0b1b2b3 >> 8) & 0x00FFFFFF; + stream_id = (b0b1b2b3) & 0xFF; + + pes_packet_length = sb->read_2bytes(); + + uint8_t b6 = sb->read_1byte(); + original_or_copy = b6 & 0x01; + copyright = (b6 >> 1) & 0x01; + data_alignment_indicator = (b6 >> 2) & 0x01; + pes_priority = (b6 >> 3) & 0x01; + pes_scrambling_control = (b6 >> 4) & 0x03; + marker_bits = (b6 >> 6) & 0x03; + + uint8_t b7 = sb->read_1byte(); + pes_ext_flag = b7 & 0x01; + pes_crc_flag = (b7 >> 1) & 0x01; + add_copy_info_flag = (b7 >> 2) & 0x01; + dsm_trick_mode_flag = (b7 >> 3) & 0x01; + es_rate_flag = (b7 >> 4) & 0x01; + escr_flag = (b7 >> 5) & 0x01; + pts_dts_flags = (b7 >> 6) & 0x03; + + header_data_length = sb->read_1byte(); +} diff --git a/libmetartc3/src/yangsrt/YangTsdemux.cpp b/libmetartc3/src/yangsrt/YangTsdemux.cpp new file mode 100755 index 00000000..ec6a5805 --- /dev/null +++ b/libmetartc3/src/yangsrt/YangTsdemux.cpp @@ -0,0 +1,599 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include "memory.h" +using namespace std; + +YangTsdemux::YangTsdemux():_data_total(0) +,_last_pid(0) +,_last_dts(0) +,_last_pts(0) { + + +} + +YangTsdemux::~YangTsdemux() { + +} + +int32_t YangTsdemux::decode_unit(uint8_t* data_p, 
ts_media_data_callback_I* callback) +{ + int32_t pos = 0; + int32_t npos = 0; + ts_header ts_header_info; + + ts_header_info._sync_byte = data_p[pos]; + pos++; + + ts_header_info._transport_error_indicator = (data_p[pos]&0x80)>>7; + ts_header_info._payload_unit_start_indicator = (data_p[pos]&0x40)>>6; + ts_header_info._transport_priority = (data_p[pos]&0x20)>>5; + ts_header_info._PID = ((data_p[pos]<<8)|data_p[pos+1])&0x1FFF; + pos += 2; + + ts_header_info._transport_scrambling_control = (data_p[pos]&0xC0)>>6; + ts_header_info._adaptation_field_control = (data_p[pos]&0x30)>>4; + ts_header_info._continuity_counter = (data_p[pos]&0x0F); + pos++; + npos = pos; + + adaptation_field* field_p = &(ts_header_info._adaptation_field_info); + //printf("%02x-%hd,",ts_header_info._adaptation_field_control,ts_header_info._PID); + // adaptation field + // 0x01 No adaptation_field, payload only + // 0x02 Adaptation_field only, no payload + // 0x03 Adaptation_field followed by payload + if( ts_header_info._adaptation_field_control == 2 + || ts_header_info._adaptation_field_control == 3 ){ + // adaptation_field() + field_p->_adaptation_field_length = data_p[pos]; + pos++; + + if( field_p->_adaptation_field_length > 0 ){ + field_p->_discontinuity_indicator = (data_p[pos]&0x80)>>7; + field_p->_random_access_indicator = (data_p[pos]&0x40)>>6; + field_p->_elementary_stream_priority_indicator = (data_p[pos]&0x20)>>5; + field_p->_PCR_flag = (data_p[pos]&0x10)>>4; + field_p->_OPCR_flag = (data_p[pos]&0x08)>>3; + field_p->_splicing_point_flag = (data_p[pos]&0x04)>>2; + field_p->_transport_private_data_flag = (data_p[pos]&0x02)>>1; + field_p->_adaptation_field_extension_flag = (data_p[pos]&0x01); + pos++; + + if( field_p->_PCR_flag == 1 ) { // PCR info + //program_clock_reference_base 33 uimsbf + //reserved 6 bslbf + //program_clock_reference_extension 9 uimsbf + pos += 6; + } + if( field_p->_OPCR_flag == 1 ) { + //original_program_clock_reference_base 33 uimsbf + //reserved 6 bslbf + //original_program_clock_reference_extension 9 uimsbf + pos += 6; + } + if( field_p->_splicing_point_flag == 1 ) { + //splice_countdown 8 tcimsbf + pos++; + } + if( field_p->_transport_private_data_flag == 1 ) { + //transport_private_data_length 8 uimsbf + field_p->_transport_private_data_length = data_p[pos]; + pos++; + memcpy(field_p->_private_data_byte, data_p + pos, field_p->_transport_private_data_length); + } + if( field_p->_adaptation_field_extension_flag == 1 ) { + //adaptation_field_extension_length 8 uimsbf + field_p->_adaptation_field_extension_length = data_p[pos]; + pos++; + //ltw_flag 1 bslbf + field_p->_ltw_flag = (data_p[pos]&0x80)>>7; + //piecewise_rate_flag 1 bslbf + field_p->_piecewise_rate_flag = (data_p[pos]&0x40)>>6; + //seamless_splice_flag 1 bslbf + field_p->_seamless_splice_flag = (data_p[pos]&0x20)>>5; + //reserved 5 bslbf + pos++; + if (field_p->_ltw_flag == 1) { + //ltw_valid_flag 1 bslbf + //ltw_offset 15 uimsbf + pos += 2; + } + if (field_p->_piecewise_rate_flag == 1) { + //reserved 2 bslbf + //piecewise_rate 22 uimsbf + pos += 3; + } + if (field_p->_seamless_splice_flag == 1) { + //splice_type 4 bslbf + //DTS_next_AU[32..30] 3 bslbf + //marker_bit 1 bslbf + //DTS_next_AU[29..15] 15 bslbf + //marker_bit 1 bslbf + //DTS_next_AU[14..0] 15 bslbf + //marker_bit 1 bslbf + pos += 5; + } + } + } + npos += sizeof(field_p->_adaptation_field_length) + field_p->_adaptation_field_length; + } + + if(ts_header_info._adaptation_field_control == 1 + || ts_header_info._adaptation_field_control == 3 ) { + // data_byte with 
placeholder + // payload parser + if(ts_header_info._PID == 0x00){ + // PAT // program association table + if(ts_header_info._payload_unit_start_indicator) { + pos++; + } + _pat._table_id = data_p[pos]; + pos++; + _pat._section_syntax_indicator = (data_p[pos]>>7)&0x01; + // skip 3 bits of 1 zero and 2 reserved + _pat._section_length = ((data_p[pos]<<8)|data_p[pos+1])&0x0FFF; + pos += 2; + _pat._transport_stream_id = (data_p[pos]<<8)|data_p[pos+1]; + pos += 2; + // reserved 2 bits + _pat._version_number = (data_p[pos]&0x3E)>>1; + _pat._current_next_indicator = data_p[pos]&0x01; + pos++; + _pat._section_number = data_p[pos]; + pos++; + _pat._last_section_number = data_p[pos]; + // assert(_pat._table_id == 0x00); + // assert((188 - npos) > (_pat._section_length+3)); // PAT = section_length + 3 + pos++; + _pat._pid_vec.clear(); + for (;pos+4 <= _pat._section_length-5-4+9 + npos;) { // 4:CRC, 5:follow section_length item rpos + 4(following unit length) section_length + 9(above field and unit_start_first_byte ) + PID_INFO pid_info; + //program_number 16 uimsbf + pid_info._program_number = data_p[pos]<<8|data_p[pos+1]; + pos += 2; +// reserved 3 bslbf + + if (pid_info._program_number == 0) { +// // network_PID 13 uimsbf + pid_info._network_id = (data_p[pos]<<8|data_p[pos+1])&0x1FFF; + //printf("#### network id:%d.\r\n", pid_info._network_id); + pos += 2; + } + else { +// // program_map_PID 13 uimsbf + pid_info._pid = (data_p[pos]<<8|data_p[pos+1])&0x1FFF; + //printf("#### pmt id:%d.\r\n", pid_info._pid); + pos += 2; + } + _pat._pid_vec.push_back(pid_info); + // network_PID and program_map_PID save to list + } +// CRC_32 use pat to calc crc32, eq + pos += 4; + }else if(ts_header_info._PID == 0x01){ + // CAT // conditional access table + }else if(ts_header_info._PID == 0x02){ + //TSDT // transport stream description table + }else if(ts_header_info._PID == 0x03){ + //IPMP // IPMP control information table + // 0x0004-0x000F Reserved + // 0x0010-0x1FFE May be assigned as network_PID, Program_map_PID, elementary_PID, or for other purposes + }else if(ts_header_info._PID == 0x11){ + // SDT // https://en.wikipedia.org/wiki/Service_Description_Table / https://en.wikipedia.org/wiki/MPEG_transport_stream + }else if(is_pmt(ts_header_info._PID)) { + if(ts_header_info._payload_unit_start_indicator) + pos++; + _pmt._table_id = data_p[pos]; + pos++; + _pmt._section_syntax_indicator = (data_p[pos]>>7)&0x01; + // skip 3 bits of 1 zero and 2 reserved + _pmt._section_length = ((data_p[pos]<<8)|data_p[pos+1])&0x0FFF; + pos += 2; + _pmt._program_number = (data_p[pos]<<8)|data_p[pos+1]; + pos += 2; + // reserved 2 bits + _pmt._version_number = (data_p[pos]&0x3E)>>1; + _pmt._current_next_indicator = data_p[pos]&0x01; + pos++; + _pmt._section_number = data_p[pos]; + pos++; + _pmt._last_section_number = data_p[pos]; + pos++; + // skip 3 bits for reserved 3 bslbf + _pmt._PCR_PID = ((data_p[pos]<<8)|data_p[pos+1])&0x1FFF; //PCR_PID 13 uimsbf + pos += 2; + + //reserved 4 bslbf + _pmt._program_info_length = ((data_p[pos]<<8)|data_p[pos+1])&0x0FFF;//program_info_length 12 uimsbf + pos += 2; + // assert(_pmt._table_id==0x02); // 0x02, // TS_program_map_section + memcpy(_pmt._dscr, data_p+pos, _pmt._program_info_length); +// for (i = 0; i < N; i++) { +// descriptor() +// } + pos += _pmt._program_info_length; + _pmt._stream_pid_vec.clear(); + _pmt._pid2steamtype.clear(); + + for (; pos + 5 <= _pmt._section_length + 4 - 4 + npos; ) { // pos(above field length) i+5(following unit length) section_length +3(PMT begin three 
bytes)+1(payload_unit_start_indicator) -4(crc32) + STREAM_PID_INFO pid_info; + pid_info._stream_type = data_p[pos];//stream_type 8 uimsbf 0x1B AVC video stream as defined in ITU-T Rec. H.264 | ISO/IEC 14496-10 Video + pos++; + //reserved 3 bslbf + pid_info._elementary_PID = ((data_p[pos]<<8)|data_p[pos+1])&0x1FFF; //elementary_PID 13 uimsbf + pos += 2; + //reserved 4 bslbf + pid_info._ES_info_length = ((data_p[pos]<<8)|data_p[pos+1])&0x0FFF; //ES_info_length 12 uimsbf + pos += 2; + if( pos + pid_info._ES_info_length > _pmt._section_length + 4 - 4 + npos ) + break; + int32_t absES_info_length = pos + pid_info._ES_info_length; + for (; pos< absES_info_length; ) { + //descriptor() + int32_t descriptor_tag = data_p[pos]; + (void)descriptor_tag; + pos++; + int32_t descriptor_length = data_p[pos]; + pos++; + memcpy(pid_info._dscr, data_p + pos, descriptor_length); + pos += descriptor_length; + } + // save program_number(stream num) elementary_PID(PES PID) stream_type(stream codec) + //printf("pmt pid:%d, streamtype:%d, pos:%d\r\n", pid_info._elementary_PID, pid_info._stream_type, pos); + _pmt._stream_pid_vec.push_back(pid_info); + _pmt._pid2steamtype.insert(std::make_pair((unsigned short)pid_info._elementary_PID, pid_info._stream_type)); + } + pos += 4;//CRC_32 + }else if(ts_header_info._PID == 0x0042){ + // USER + }else if(ts_header_info._PID == 0x1FFF){ + // Null packet + }else{//pes packet or pure data packet + //bool isFound = false; + //printf("%d,",_pmt._stream_pid_vec.size()); + for (size_t i = 0; i < _pmt._stream_pid_vec.size(); i++) { + if(ts_header_info._PID == _pmt._stream_pid_vec[i]._elementary_PID){ + //if(ts_header_info._PID == 225||ts_header_info._PID == 192){ + //isFound = true; + if(ts_header_info._payload_unit_start_indicator){ + uint8_t* ret_data_p = nullptr; + size_t ret_size = 0; + uint64_t dts = 0; + uint64_t pts = 0; + + //callback last media data in data buffer + on_callback(callback, _last_pid, _last_dts, _last_pts); + int32_t peslen=0; + int32_t ret = pes_parse(data_p+npos, npos, &ret_data_p, ret_size, dts, pts,&peslen); + // assert(ret <= 188); + if (ret > 188) { + return -1; + } + + _last_pts = pts; + _last_dts = (dts == 0) ? 
pts : dts; + if ((ret_data_p != nullptr) && (ret_size > 0)) { + insert_into_databuf(ret_data_p, ret_size, ts_header_info._PID); + if((peslen+npos)<=188) on_callback(callback, _last_pid, _last_dts, _last_pts); + + } + }else{ + insert_into_databuf(data_p + npos, 188-npos,ts_header_info._PID); + } + } + } + + } + } + + return 0; +} +int32_t YangTsdemux::decode(SRT_DATA_MSG_PTR data_ptr, ts_media_data_callback_I* callback) +{ + int32_t ret = -1; + std::string path; + + if (!data_ptr || (data_ptr->data_len() < 188) || (data_ptr->data_len()%188 != 0)) + { + return -1; + } + + uint32_t count = data_ptr->data_len()/188; + for (uint32_t index = 0; index < count; index++) + { + uint8_t* data = data_ptr->get_data() + 188*index; + if (data[0] != 0x47) { + continue; + } + ret = decode_unit(data, callback); + if (ret < 0) + { + break; + } + } + return ret; +} + +void YangTsdemux::insert_into_databuf(uint8_t* data_p, size_t data_size, unsigned short pid) { + _last_pid = pid; + _data_total += data_size; + int32_t key=pid; + _data_buffer_map.find(key); + std::map>::iterator iter1=_data_buffer_map.find(key); + if(iter1 ==_data_buffer_map.end()){ + _data_buffer_map.insert(pair>(key,vector())); + std::map>::iterator iter=_data_buffer_map.find(key); + iter->second.push_back(std::make_shared(data_p, data_size)); + }else{ + iter1->second.push_back(std::make_shared(data_p, data_size)); + } + +} + +void YangTsdemux::on_callback(ts_media_data_callback_I* callback, unsigned short pid, + uint64_t dts, uint64_t pts) { + + int32_t stream_type = pid;//0xe1;//iter->second; + std::map>::iterator iter=_data_buffer_map.find(stream_type); + vector* _data_buffer_vec=NULL; + if(iter ==_data_buffer_map.end()){ + _data_buffer_map.insert(pair>(stream_type,vector())); + return; + } + _data_buffer_vec=&iter->second; + if(_data_buffer_vec->size()==0){ + _data_buffer_vec=NULL; + return; + } + int32_t dataLen=0; + size_t index =0; + for ( index = 0; index < _data_buffer_vec->size(); index++) { + dataLen+=_data_buffer_vec->at(index)->data_len(); + } + if(dataLen>0){ + auto total_data_ptr = std::make_shared(dataLen); + size_t pos = 0; + for ( index = 0; index < _data_buffer_vec->size(); index++) { + memcpy(total_data_ptr->get_data() + pos, + _data_buffer_vec->at(index)->get_data(), + _data_buffer_vec->at(index)->data_len()); + pos += _data_buffer_vec->at(index)->data_len(); + } + _data_buffer_vec->clear(); + _data_buffer_vec=NULL; + callback->on_data_callback(callback->context,total_data_ptr, stream_type, dts, pts); + }else{ + _data_buffer_vec->clear(); + _data_buffer_vec=NULL; + } + + return; +} + +bool YangTsdemux::is_pmt(unsigned short pid) { + //printf("%hd,",pid); + for (size_t index = 0; index < _pat._pid_vec.size(); index++) { + if (_pat._pid_vec[index]._program_number != 0) { + if (_pat._pid_vec[index]._pid == pid) { + return true; + } + } + } + return false; +} + + +int32_t YangTsdemux::pes_parse(uint8_t* p, size_t npos, + uint8_t** ret_pp, size_t& ret_size, + uint64_t& dts, uint64_t& pts,int32_t *pesLen) { + int32_t pos = 0; + // int32_t packet_start_code_prefix = (p[pos]<<16)|(p[pos+1]<<8)|p[pos+2]; //packet_start_code_prefix 24 bslbf + pos += 3; + int32_t stream_id = p[pos]; //stream_id 8 uimsbf + pos++; + + int32_t PES_packet_length = ((unsigned int)p[pos]<<8)|p[pos+1]; //PES_packet_length 16 uimsbf + + *pesLen=PES_packet_length; + (void)PES_packet_length; + + pos += 2; + // packet_start_code_prefix, npos, PES_packet_length, stream_id); + // assert(0x00000001 == packet_start_code_prefix); + if (stream_id != 
188//program_stream_map 1011 1100 + && stream_id != 190//padding_stream 1011 1110 + && stream_id != 191//private_stream_2 1011 1111 + && stream_id != 240//ECM 1111 0000 + && stream_id != 241//EMM 1111 0001 + && stream_id != 255//program_stream_directory 1111 1111 + && stream_id != 242//DSMCC_stream 1111 0010 + && stream_id != 248//ITU-T Rec. H.222.1 type E stream 1111 1000 + ) + { + // assert(0x80 == p[pos]); + //skip 2bits//'10' 2 bslbf + int32_t PES_scrambling_control = (p[pos]&0x30)>>4; //PES_scrambling_control 2 bslbf + (void)PES_scrambling_control; + int32_t PES_priority = (p[pos]&0x08)>>3; //PES_priority 1 bslbf + (void)PES_priority; + int32_t data_alignment_indicator = (p[pos]&0x04)>>2;//data_alignment_indicator 1 bslbf + (void)data_alignment_indicator; + int32_t copyright = (p[pos]&0x02)>>1; //copyright 1 bslbf + (void)copyright; + int32_t original_or_copy = (p[pos]&0x01);//original_or_copy 1 bslbf + (void)original_or_copy; + pos++; + int32_t PTS_DTS_flags = (p[pos]&0xC0)>>6; //PTS_DTS_flags 2 bslbf + int32_t ESCR_flag = (p[pos]&0x20)>>5; // ESCR_flag 1 bslbf + int32_t ES_rate_flag = (p[pos]&0x10)>>4;//ES_rate_flag 1 bslbf + int32_t DSM_trick_mode_flag = (p[pos]&0x08)>>3;//DSM_trick_mode_flag 1 bslbf + int32_t additional_copy_info_flag = (p[pos]&0x04)>>2; //additional_copy_info_flag 1 bslbf + int32_t PES_CRC_flag = (p[pos]&0x02)>>1; //PES_CRC_flag 1 bslbf + int32_t PES_extension_flag = (p[pos]&0x01);//PES_extension_flag 1 bslbf + pos++; + int32_t PES_header_data_length = p[pos]; //PES_header_data_length 8 uimsbf + (void)PES_header_data_length; + pos++; + + if (PTS_DTS_flags == 2) { + // skip 4 bits '0010' 4 bslbf + // PTS [32..30] 3 bslbf + // marker_bit 1 bslbf + // PTS [29..15] 15 bslbf + // marker_bit 1 bslbf + // PTS [14..0] 15 bslbf + // marker_bit 1 bslbf + pts = (((p[pos]>>1)&0x07) << 30) | (p[pos+1]<<22) | (((p[pos+2]>>1)&0x7F)<<15) | (p[pos+3]<<7) | ((p[pos+4]>>1)&0x7F); + pos += 5; + } + if (PTS_DTS_flags == 3) { + // '0011' 4 bslbf + // PTS [32..30] 3 bslbf + // marker_bit 1 bslbf + //PTS [29..15] 15 bslbf + //marker_bit 1 bslbf + // PTS [14..0] 15 bslbf + // marker_bit 1 bslbf + pts = (((p[pos]>>1)&0x07) << 30) | (p[pos+1]<<22) | (((p[pos+2]>>1)&0x7F)<<15) | (p[pos+3]<<7) | ((p[pos+4]>>1)&0x7F); + pos += 5; + // '0001' 4 bslbf + // DTS [32..30] 3 bslbf + // marker_bit 1 bslbf + // DTS [29..15] 15 bslbf + // marker_bit 1 bslbf + // DTS [14..0] 15 bslbf + // marker_bit 1 bslbf + dts = (((p[pos]>>1)&0x07) << 30) | (p[pos+1]<<22) | (((p[pos+2]>>1)&0x7F)<<15) | (p[pos+3]<<7) | ((p[pos+4]>>1)&0x7F); + pos += 5; + } + if (ESCR_flag == 1) { + // reserved 2 bslbf + // ESCR_base[32..30] 3 bslbf + // marker_bit 1 bslbf + // ESCR_base[29..15] 15 bslbf + // marker_bit 1 bslbf + // ESCR_base[14..0] 15 bslbf + // marker_bit 1 bslbf + // ESCR_extension 9 uimsbf + // marker_bit 1 bslbf + uint64_t ESCR_base = ((((uint64_t)p[pos] >> 3) & 0x07) << 30) | (((uint64_t)p[pos] & 0x03) << 28) | ((uint64_t)p[pos + 1] << 20) | ((((uint64_t)p[pos + 2] >> 3) & 0x1F) << 15) | (((uint64_t)p[pos + 2] & 0x3) << 13) | ((uint64_t)p[pos + 3] << 5) | ((p[pos + 4] >> 3) & 0x1F); + int32_t ESCR_extension = ((p[pos + 4] & 0x03) << 7) | ((p[pos + 5] >> 1) & 0x7F); + (void)ESCR_base; + (void)ESCR_extension; + pos += 6; + } + if (ES_rate_flag == 1) { + // marker_bit 1 bslbf + // ES_rate 22 uimsbf + // marker_bit 1 bslbf + int32_t ES_rate = (p[pos]&0x7F)<<15 | (p[pos+1])<<7 | ((p[pos+2]>>1)&0x7F); + (void)ES_rate; + pos += 3; + } + if (DSM_trick_mode_flag == 1) { // ignore + int32_t trick_mode_control =
(p[pos]&0xE0)>>5;//trick_mode_control 3 uimsbf + if ( trick_mode_control == 0/*fast_forward*/ ) { + // field_id 2 bslbf + // intra_slice_refresh 1 bslbf + // frequency_truncation 2 bslbf + } + else if ( trick_mode_control == 1/*slow_motion*/ ) { + //rep_cntrl 5 uimsbf + } + else if ( trick_mode_control == 2/*freeze_frame*/ ) { + // field_id 2 uimsbf + // reserved 3 bslbf + } + else if ( trick_mode_control == 3/*fast_reverse*/ ) { + // field_id 2 bslbf + // intra_slice_refresh 1 bslbf + // frequency_truncation 2 bslbf + }else if ( trick_mode_control == 4/*slow_reverse*/ ) { + // rep_cntrl 5 uimsbf + } + else{ + //reserved 5 bslbf + } + pos++; + } + if ( additional_copy_info_flag == 1) { // ignore + // marker_bit 1 bslbf + // additional_copy_info 7 bslbf + pos++; + } + if ( PES_CRC_flag == 1) { // ignore + // previous_PES_packet_CRC 16 bslbf + pos += 2; + } + if ( PES_extension_flag == 1) { // ignore + int32_t PES_private_data_flag = (p[pos]&0x80)>>7;// PES_private_data_flag 1 bslbf + int32_t pack_header_field_flag = (p[pos]&0x40)>>6;// pack_header_field_flag 1 bslbf + int32_t program_packet_sequence_counter_flag = (p[pos]&0x20)>>5;// program_packet_sequence_counter_flag 1 bslbf + int32_t P_STD_buffer_flag = (p[pos]&0x10)>>4; // P-STD_buffer_flag 1 bslbf + // reserved 3 bslbf + int32_t PES_extension_flag_2 = (p[pos]&0x01);// PES_extension_flag_2 1 bslbf + pos++; + + if ( PES_private_data_flag == 1) { + // PES_private_data 128 bslbf + pos += 16; + } + if (pack_header_field_flag == 1) { + // pack_field_length 8 uimsbf + // pack_header() + } + if (program_packet_sequence_counter_flag == 1) { + // marker_bit 1 bslbf + // program_packet_sequence_counter 7 uimsbf + // marker_bit 1 bslbf + // MPEG1_MPEG2_identifier 1 bslbf + // original_stuff_length 6 uimsbf + pos += 2; + } + if ( P_STD_buffer_flag == 1) { + // '01' 2 bslbf + // P-STD_buffer_scale 1 bslbf + // P-STD_buffer_size 13 uimsbf + pos += 2; + } + if ( PES_extension_flag_2 == 1) { + // marker_bit 1 bslbf + int32_t PES_extension_field_length = (p[pos]&0x7F);// PES_extension_field_length 7 uimsbf + pos++; + for (int32_t i = 0; i < PES_extension_field_length; i++) { + // reserved 8 bslbf + pos++; + } + } + } + + + *ret_pp = p+pos; + ret_size = 188-(npos+pos); + // ret_size, p[pos], p[pos+1], p[pos+2], p[pos+3], p[pos+4], p[pos+5], + // dts, dts/90, pts, pts/90); + } + else if ( stream_id == 188//program_stream_map 1011 1100 BC + || stream_id == 191//private_stream_2 1011 1111 BF + || stream_id == 240//ECM 1111 0000 F0 + || stream_id == 241//EMM 1111 0001 F1 + || stream_id == 255//program_stream_directory 1111 1111 FF + || stream_id == 242//DSMCC_stream 1111 0010 F2 + || stream_id == 248//ITU-T Rec. 
H.222.1 type E stream 1111 1000 F8 + ) { + + *ret_pp = p+pos; + ret_size = 188-(npos+pos); + + } + else if ( stream_id == 190//padding_stream 1011 1110 + ) { + *ret_pp = p+pos; + ret_size = 188-(npos+pos); + } + + return pos; +} diff --git a/libmetartc3/src/yangsrt/common.cpp b/libmetartc3/src/yangsrt/common.cpp new file mode 100755 index 00000000..c6b3e813 --- /dev/null +++ b/libmetartc3/src/yangsrt/common.cpp @@ -0,0 +1,65 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include + + #include + +void write_pcr(YangTsBuffer *sb, uint64_t pcr) { + sb->write_1byte((char)(pcr >> 25)); + sb->write_1byte((char)(pcr >> 17)); + sb->write_1byte((char)(pcr >> 9)); + sb->write_1byte((char)(pcr >> 1)); + sb->write_1byte((char)(pcr << 7 | 0x7e)); + sb->write_1byte(0); +} + +void write_pts(YangTsBuffer *sb, uint32_t fb, uint64_t pts) { + uint32_t val; + + val = fb << 4 | (((pts >> 30) & 0x07) << 1) | 1; + sb->write_1byte((char)val); + + val = (((pts >> 15) & 0x7fff) << 1) | 1; + sb->write_2bytes((int16_t)val); + + val = (((pts) & 0x7fff) << 1) | 1; + sb->write_2bytes((int16_t)val); +} + +uint64_t read_pts(YangTsBuffer *sb) { + uint64_t pts = 0; + uint32_t val = 0; + val = sb->read_1byte(); + //cast before shifting so PTS bit 32 is not truncated in 32-bit arithmetic + pts |= (uint64_t)((val >> 1) & 0x07) << 30; + + val = sb->read_2bytes(); + pts |= ((val >> 1) & 0x7fff) << 15; + + val = sb->read_2bytes(); + pts |= ((val >> 1) & 0x7fff); + + return pts; +} + +uint64_t read_pcr(YangTsBuffer *sb) { + uint64_t pcr = 0; + uint64_t val = sb->read_1byte(); + pcr |= (val << 25) & 0x1FE000000; + + val = sb->read_1byte(); + pcr |= (val << 17) & 0x1FE0000; + + val = sb->read_1byte(); + pcr |= (val << 9) & 0x1FE00; + + val = sb->read_1byte(); + pcr |= (val << 1) & 0x1FE; + + val = sb->read_1byte(); + pcr |= ((val >> 7) & 0x01); + + sb->read_1byte(); + + return pcr; +} diff --git a/libmetartc3/src/yangsrt/crc.cpp b/libmetartc3/src/yangsrt/crc.cpp new file mode 100755 index 00000000..0f8d7fb7 --- /dev/null +++ b/libmetartc3/src/yangsrt/crc.cpp @@ -0,0 +1,16 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include + +// @see http://www.stmc.edu.hk/~vincent/ffmpeg_0.4.9-pre1/libavformat/mpegtsenc.c +uint32_t crc32(const uint8_t *data, int32_t len) +{ + int32_t i; + uint32_t crc = 0xffffffff; + + //table-driven MPEG-2 CRC-32; loop reconstructed, lookup table name assumed to match the referenced mpegtsenc.c + for (i = 0; i < len; i++) + crc = (crc << 8) ^ crc_table[((crc >> 24) ^ *data++) & 0xff]; + + return crc; +} diff --git a/libmetartc3/src/yangsrt/srt_data.cpp b/libmetartc3/src/yangsrt/srt_data.cpp new file mode 100755 index 00000000..2ecb2c63 --- /dev/null +++ b/libmetartc3/src/yangsrt/srt_data.cpp @@ -0,0 +1,48 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +SRT_DATA_MSG::SRT_DATA_MSG():_msg_type(SRT_MSG_DATA_TYPE) + ,_len(0) + ,_data_p(nullptr) + //,_key_path(path) +{ + +} + + +SRT_DATA_MSG::SRT_DATA_MSG(uint32_t len):_msg_type(SRT_MSG_DATA_TYPE) + ,_len(len) + //,_key_path(path) +{ + _data_p = new uint8_t[len]; + memset(_data_p, 0, len); +} + +SRT_DATA_MSG::SRT_DATA_MSG(uint8_t* data_p, uint32_t len):_msg_type(SRT_MSG_DATA_TYPE) + ,_len(len) + +{ + _data_p = new uint8_t[len]; + memcpy(_data_p, data_p, len); +} + +SRT_DATA_MSG::~SRT_DATA_MSG() { + if (_data_p && (_len > 0)) { + delete[] _data_p; + } +} + +uint32_t SRT_DATA_MSG::msg_type() { + return _msg_type; +} + + +uint32_t SRT_DATA_MSG::data_len() { + return _len; +} + +uint8_t* SRT_DATA_MSG::get_data() { + return _data_p; +} diff --git a/libmetartc3/src/yangstream/YangStreamHandle.cpp b/libmetartc3/src/yangstream/YangStreamHandle.cpp new file mode 100755 index 00000000..2e88e1e5 --- /dev/null +++ b/libmetartc3/src/yangstream/YangStreamHandle.cpp @@ -0,0 +1,30 @@ +// +//
Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include + +#include +#include + + +void yang_create_streamHandle(int32_t transType,YangStreamHandle* streamHandle,int32_t puid,YangStreamConfig* streamconfig,YangAVInfo* avinfo,YangContextStream* stream,YangReceiveCallback* callback) { + if(streamHandle==NULL) return; + yang_create_stream(transType,streamHandle,puid,streamconfig,avinfo,stream,callback); + if(transType==Yang_Srt){ + yang_create_stream_srt(streamHandle); + } +} + +void yang_destroy_streamHandle(YangStreamHandle* streamHandle) { + if(streamHandle==NULL) return; + if(streamHandle->context->transtype==Yang_Srt){ + yang_destroy_stream_srt(streamHandle); + } + yang_destroy_stream(streamHandle); +} + + + + diff --git a/libmetartc3/src/yangstream/YangStreamManager.cpp b/libmetartc3/src/yangstream/YangStreamManager.cpp new file mode 100755 index 00000000..2ed7aa34 --- /dev/null +++ b/libmetartc3/src/yangstream/YangStreamManager.cpp @@ -0,0 +1,123 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// + +#include +#include +YangStreamManager::YangStreamManager() { + m_playBuffers=NULL; + m_playBuffer=NULL; + m_sendPli=NULL; + m_mediaConfig_dec=NULL; + m_mediaConfig_render=NULL; + m_rtcMsg=NULL; + m_rtcMsgMap=NULL; + + m_audioClock=0; + m_videoClock=0; +} + +YangStreamManager::~YangStreamManager() { + yang_delete(m_playBuffer); + if(m_playBuffers){ + for(size_t i=0;isize();i++){ + yang_delete(m_playBuffers->at(i)); + } + m_playBuffers->clear(); + } + m_sendPli=NULL; + m_mediaConfig_dec=NULL; + m_mediaConfig_render=NULL; + if(m_rtcMsgMap){ + std::map::iterator it; + for (it = m_rtcMsgMap->begin(); it != m_rtcMsgMap->end(); ++it) + { + yang_free(it->second ); + } + m_rtcMsgMap->clear(); + yang_delete(m_rtcMsgMap); + } +} +int32_t YangStreamManager::getAudioClock(){ + return m_audioClock; +} + +void YangStreamManager::sendRequest(int32_t puid, YangRtcMessageType msg) { + if(puid>0&&m_rtcMsgMap){ + std::map::iterator iter=m_rtcMsgMap->find(puid); + if(iter!=m_rtcMsgMap->end()){ + yang_trace("\nuser(%d)..send pli....",puid); + iter->second->notify(iter->second->context,puid, msg); + } + }else if(m_rtcMsg) { + yang_trace("\n..send pli...."); + m_rtcMsg->notify(m_rtcMsg->context,puid, msg); + } +} + + int32_t YangStreamManager::getVideoClock(){ + return m_videoClock; + } +void YangStreamManager::setSendRequestCallback(YangSendRequestCallback* pli){ + m_sendPli=pli; +} + void YangStreamManager::setDecoderMediaConfigCallback(YangMediaConfigCallback* dec){ + m_mediaConfig_dec=dec; + } + void YangStreamManager::setRenderMediaConfigCallback(YangMediaConfigCallback* render){ + m_mediaConfig_render=render; + } +void YangStreamManager::setMediaConfig(int32_t puid,YangAudioParam* audio,YangVideoParam* video){ + if(m_mediaConfig_dec) m_mediaConfig_dec->setMediaConfig(puid,audio,video); + if(m_mediaConfig_render) m_mediaConfig_render->setMediaConfig(puid,audio,video); + size_t i=0; + if(audio){ + if(m_playBuffer) m_playBuffer->setAudioClock(audio->audioClock); + if(m_playBuffers){ + for(i=0;isize();i++){ + m_playBuffers->at(i)->setAudioClock(audio->audioClock); + } + } + m_audioClock=audio->audioClock; + } + if(video){ + if(m_playBuffer) m_playBuffer->setVideoClock(video->videoClock); + if(m_playBuffers){ + for(i=0;isize();i++){ + m_playBuffers->at(i)->setVideoClock(video->videoClock); + } + } + m_videoClock=video->videoClock; + } +} + void YangStreamManager::sendRequest(int32_t puid,uint32_t ssrc,YangRequestType req){ + if(m_sendPli) m_sendPli->sendRequest(puid,ssrc,req); + } +int 
YangStreamManager::getIndex(int puid){ + if(m_playBuffers){ + for(size_t i=0;isize();i++){ + if(m_playBuffers->at(i)->m_uid==puid) return i; + } + } + return -1; +} + +YangSynBuffer* YangStreamManager::getSynBuffer(int puid){ + int ind=getIndex(puid); + if(ind==-1) return NULL; + return m_playBuffers->at(ind); +} + +void YangStreamManager::setRtcMessageNotify(int puid,YangRtcMessageNotify *rtcmsg) { + + if(puid>0){ + if(m_rtcMsgMap==NULL) m_rtcMsgMap=new std::map(); + std::map::iterator iter=m_rtcMsgMap->find(puid); + if(iter==m_rtcMsgMap->end()){ + (*m_rtcMsgMap)[puid]=rtcmsg; + } + }else{ + m_rtcMsg=rtcmsg; + + } +} diff --git a/libmetartc3/src/yangstream/YangStreamSrt.cpp b/libmetartc3/src/yangstream/YangStreamSrt.cpp new file mode 100755 index 00000000..cc7cbabf --- /dev/null +++ b/libmetartc3/src/yangstream/YangStreamSrt.cpp @@ -0,0 +1,249 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include "YangStreamSrt.h" +#include "yangsrt/YangSrtBase.h" +#include "yangsrt/YangTsMuxer.h" +#include "yangsrt/YangTsdemux.h" +#include + +#include "yangutil/sys/YangLog.h" +#define YangSrtUnitSize 1316 + +typedef struct{ + int32_t bufLen; + int32_t bufReceiveLen; + int32_t bufRemainLen; + char *buffer; + YangSrtBase *srt; + ts_media_data_callback_I callback; + YangTsdemux demux; + YangTsMuxer ts; + + YangFrame audioFrame; + YangFrame videoFrame; +}YangStreamSrt; + + + +int32_t yang_stream_srt_isconnected(YangStreamContext* context){ + if(context==NULL||context->context==NULL) return 1; + YangStreamSrt* srt=(YangStreamSrt*)context->context; + if(srt->srt) + return srt->srt->getSrtSocketStatus()==SRTS_CONNECTED; + else + return 0; +} + + +int32_t yang_stream_srt_getConnectState(YangStreamContext* context){ + if(context==NULL||context->context==NULL) return 1; + YangStreamSrt* srt=(YangStreamSrt*)context->context; + int32_t ret=srt->srt->getSrtSocketStatus(); + if(ret==SRTS_CONNECTED) + return Yang_Ok; + else + return Yang_SRTS_SocketBase+ret; + //return ERROR_SOCKET; +} +int32_t yang_stream_srt_reconnect(YangStreamContext* context) { + if(context==NULL||context->context==NULL) return 1; + YangStreamSrt* srt=(YangStreamSrt*)context->context; + srt->bufLen=0; + char url[1024]={0}; + sprintf(url,"%s.sls.com/%s/%s",context->streamconfig.streamOptType?"uplive":"live",context->streamconfig.app,context->streamconfig.stream); + int32_t ret=srt->srt->initConnect(url); + if(ret) return ret; + return srt->srt->connectServer(); + +} + + + +int32_t yang_stream_srt_connectServer(YangStreamContext* context){ + if(context==NULL||context->context==NULL) return 1; + + if(yang_stream_srt_isconnected(context)) return Yang_Ok; + YangStreamSrt* srt=(YangStreamSrt*)context->context; + if(!srt->buffer) srt->buffer=new char[Yang_Srt_CacheSize]; + if(srt->srt==NULL) srt->srt=new YangSrtBase(); + if(!srt->srt->m_contextt){ + srt->bufLen=0; + //char s[512]={0}; + sprintf(context->streamconfig.url,"%s.sls.com/%s/%s",context->streamconfig.streamOptType==Yang_Stream_Publish?"uplive":"live",context->streamconfig.app,context->streamconfig.stream); + //srt->conf.url=s; + srt->srt->init((char*)context->streamconfig.serverIp,context->streamconfig.serverPort); + } + int32_t ret=srt->srt->initConnect((char*)context->streamconfig.url); + if(ret) return ret; + context->netState=srt->srt->connectServer(); + return context->netState; + + +} + +int32_t yang_stream_srt_disConnectServer(YangStreamContext* context){ + if(context==NULL||context->context==NULL) return 1; + YangStreamSrt* srt=(YangStreamSrt*)context->context; + 
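// closeSrt() tears down the SRT socket; netState is reset below so a later connectServer() starts a fresh handshake. +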
srt->srt->closeSrt(); + context->netState=1; + return Yang_Ok; +} + + +int32_t yang_stream_srt_publishAudioData(YangStreamContext* context,YangStreamCapture* audioFrame) { + if(context==NULL||context->context==NULL) return 1; + YangStreamSrt* srt=(YangStreamSrt*)context->context; + vector sb ; + srt->ts.encode(audioFrame->getAudioData(audioFrame->context), audioFrame->getAudioLen(audioFrame->context), + 1, audioFrame->getAudioTimestamp(audioFrame->context), audioFrame->getAudioType(audioFrame->context) == 0 ? TS_AAC : TS_OPUS, &sb); + int32_t ret=Yang_Ok; + for (size_t i = 0; i < sb.size(); i++) { + memcpy(srt->buffer+srt->bufLen,sb.at(i).data(),188); + srt->bufLen+=188; + if(srt->bufLen==YangSrtUnitSize){ + ret=srt->srt->publish((char*) srt->buffer, YangSrtUnitSize); + srt->bufLen=0; + } + + if (ret){ + yang_error("publish audio error..%d",ret); + context->netState=ret; + return ret; + } + + } + return Yang_Ok; +} + + +int32_t yang_stream_srt_publishVideoData(YangStreamContext* context,YangStreamCapture* videoFrame) { + if(context==NULL||context->context==NULL) return 1; + YangStreamSrt* srt=(YangStreamSrt*)context->context; + int32_t ret=Yang_Ok; + vector sb; + srt->ts.encode(videoFrame->getVideoData(videoFrame->context), videoFrame->getVideoLen(videoFrame->context), + videoFrame->getVideoFrametype(videoFrame->context), videoFrame->getVideoTimestamp(videoFrame->context), TS_H264, &sb); + for (size_t i = 0; i < sb.size(); i++) { + memcpy(srt->buffer+srt->bufLen,sb.at(i).data(),188); + srt->bufLen+=188; + if(srt->bufLen==YangSrtUnitSize){ + ret=srt->srt->publish((char*) srt->buffer, YangSrtUnitSize); + srt->bufLen=0; + } + if (ret) { + context->netState=ret; + return ret; + } + } + return Yang_Ok; +} + + +void yang_stream_srt_on_data_callback(void* pcontext,SRT_DATA_MSG_PTR data_ptr, + uint32_t media_type, uint64_t dts, uint64_t pts) { + if(pcontext==NULL) return; + YangStreamContext* context=(YangStreamContext*)pcontext; + YangStreamSrt* srt=(YangStreamSrt*)context->context; + uint8_t *temp = data_ptr->get_data(); + int32_t len = data_ptr->data_len(); + + if ((media_type == Yang_H264_PID || media_type == Yang_H265_PID) + && context->videoStream) { + //int64_t t_timestamp = dts; + srt->videoFrame.pts=dts; + srt->videoFrame.uid=context->uid; + srt->videoFrame.payload=temp; + srt->videoFrame.nb=len; + if (context->data) context->data->receiveVideo(context->data->context,&srt->videoFrame); + } else if (media_type == Yang_AAC_PID && context->audioStream) { + srt->audioFrame.uid=context->uid; + srt->audioFrame.nb=len; + srt->audioFrame.payload=temp; + if (context->data) context->data->receiveAudio(context->data->context,&srt->audioFrame); + } else if (media_type == Yang_OPUS_PID && context->audioStream) { + srt->audioFrame.uid=context->uid; + srt->audioFrame.nb=len; + srt->audioFrame.payload=temp; + if (context->data) context->data->receiveAudio(context->data->context,&srt->audioFrame); + } + temp = NULL; +} + +int32_t yang_stream_srt_receiveData(YangStreamContext* context,int32_t *plen) { + if(context==NULL||context->context==NULL) return 1; + YangStreamSrt* srt=(YangStreamSrt*)context->context; + + if (!srt->srt) + return Yang_Ok; + srt->bufReceiveLen = 0; + if (srt->srt->receive(srt->buffer + srt->bufRemainLen, &srt->bufReceiveLen)) + return ERROR_SRT_PushFailure; + srt->bufLen = srt->bufReceiveLen + srt->bufRemainLen; + if (srt->bufLen < YangSrtUnitSize) { + srt->bufRemainLen = srt->bufLen; + return Yang_Ok; + } + srt->bufRemainLen = srt->bufLen % YangSrtUnitSize; + *plen = srt->bufLen; +
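// Hand the demuxer only whole 1316-byte SRT units (7 x 188-byte TS packets); the sub-unit tail is carried over to the next receive. E.g. bufLen==3000 demuxes 2632 bytes (two units) and keeps 368 for the next round. +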
auto input_ptr = std::make_shared<SRT_DATA_MSG>((uint8_t*) srt->buffer, + srt->bufLen - srt->bufRemainLen); + srt->demux.decode(input_ptr, &srt->callback); + if (srt->bufRemainLen > 0) + memcpy(srt->buffer, srt->buffer + (srt->bufLen - srt->bufRemainLen), + srt->bufRemainLen); + return Yang_Ok; +} + + +int32_t yang_stream_srt_sendPmt(YangStreamContext* context){ + if(context==NULL||context->context==NULL) return 1; + YangStreamSrt* srt=(YangStreamSrt*)context->context; + vector sb; + srt->ts.encodePmtWithoutData(&sb); + int32_t ret=Yang_Ok; + for (size_t i = 0; i < sb.size(); i++) { + ret=srt->srt->publish((char*) sb.at(i).data(), 188); + if (ret){ + yang_error("publish pmt error..%d",ret); + return ret; + } + + } + sb.clear(); + return Yang_Ok; +} + +void yang_create_stream_srt(YangStreamHandle* handle) { + if(handle==NULL||handle->context==NULL) return; + handle->context->context=calloc(sizeof(YangStreamSrt),1); + YangStreamSrt* srt=(YangStreamSrt*)handle->context->context; + srt->callback.context=handle->context; + srt->callback.on_data_callback=yang_stream_srt_on_data_callback; + srt->bufLen = 0; + srt->buffer = NULL; + srt->bufReceiveLen = 0, srt->bufRemainLen = 0; + srt->srt = NULL; + memset(&srt->audioFrame,0,sizeof(YangFrame)); + memset(&srt->videoFrame,0,sizeof(YangFrame)); + + handle->connectServer = yang_stream_srt_connectServer; + handle->disConnectServer = yang_stream_srt_disConnectServer; + handle->getConnectState = yang_stream_srt_getConnectState; + + handle->isconnected = yang_stream_srt_isconnected; + handle->publishAudioData = yang_stream_srt_publishAudioData; + handle->publishVideoData = yang_stream_srt_publishVideoData; + handle->receiveData = yang_stream_srt_receiveData; + handle->reconnect = yang_stream_srt_reconnect; +} + +void yang_destroy_stream_srt(YangStreamHandle* handle) { + if(handle==NULL||handle->context==NULL) return; + YangStreamSrt* srt=(YangStreamSrt*)handle->context->context; + if (srt->buffer) + delete[] srt->buffer; + srt->buffer = NULL; +} + + diff --git a/libmetartc3/src/yangstream/YangStreamSrt.h b/libmetartc3/src/yangstream/YangStreamSrt.h new file mode 100755 index 00000000..6bc75959 --- /dev/null +++ b/libmetartc3/src/yangstream/YangStreamSrt.h @@ -0,0 +1,11 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#ifndef YANGSTREAM_SRC_YANGSTREAMSRT_H_ +#define YANGSTREAM_SRC_YANGSTREAMSRT_H_ +#include + +void yang_create_stream_srt(YangStreamHandle* handle); +void yang_destroy_stream_srt(YangStreamHandle* handle); + +#endif /* YANGSTREAM_SRC_YANGSTREAMSRT_H_ */ diff --git a/libmetartc3/src/yangstream/YangSynBuffer.cpp b/libmetartc3/src/yangstream/YangSynBuffer.cpp new file mode 100755 index 00000000..3917c0fd --- /dev/null +++ b/libmetartc3/src/yangstream/YangSynBuffer.cpp @@ -0,0 +1,271 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#include +#include + + +YangSynBuffer::YangSynBuffer() { + m_transtype = Yang_Webrtc; + if (m_transtype == Yang_Webrtc) { + m_videoClock = 90000; + m_audioClock = 48000; + } else if (m_transtype == Yang_Rtmp) { + m_videoClock = 1000; + m_audioClock = 1000; + } + + m_videoCacheTime=Yang_Video_Cache_time; + + resetAudioClock(); + resetVideoClock(); + + + m_baseClock = 0; + + m_paused = 0; + m_uid = 0; + m_width = 0; + m_height = 0; + + m_synType = YANG_SYNC_AUDIO_MASTER; + m_maxAudioMinus = Yang_Max_Audio_Intervaltime; + m_maxVideoMinus = Yang_Max_Video_Intervaltime; + m_videoBuffer = NULL; + m_audioBuffer = NULL; +} + +YangSynBuffer::~YangSynBuffer() { + m_videoBuffer = NULL; + m_audioBuffer = NULL;
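+ // The play buffers are owned by whoever called setInVideoBuffer()/setInAudioBuffer(); only the references are dropped here.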
+} +void YangSynBuffer::resetVideoClock(){ + m_videoBase = 0; + m_videoTime = 0; + m_videoMinus = 0; + m_pre_videoTime = 0; + + m_video_startClock=0; + m_videoNegativeCount=0; + m_videoTimeoutCount=0; + + m_isFirstVideo=false; + m_video_time_state=0; + +} +void YangSynBuffer::resetAudioClock(){ + m_audioBase = 0; + m_audioTime = 0; + m_audioMinus = 0; + m_pre_audioTime = 0; + m_lostAudioCount=0; + m_audio_startClock = 0; + m_audioNegativeCount=0; + m_audioTimeoutCount=0; + m_audioDelay=0; + m_isFirstAudio=false; +} +void YangSynBuffer::setAudioClock(int paudioclock){ + if(paudioclock<=0) return; + m_audioClock = paudioclock; +} +void YangSynBuffer::setVideoClock(int pvideoclock){ + if(pvideoclock<=0) return; + m_videoClock = pvideoclock; + +} + +void YangSynBuffer::setVideoCacheTime(int pctime){ + m_videoCacheTime=pctime; +} +void YangSynBuffer::setInVideoBuffer(YangVideoBuffer *pbuf) { + m_videoBuffer = pbuf; +} +void YangSynBuffer::setInAudioBuffer(YangAudioPlayBuffer *pbuf) { + m_audioBuffer = pbuf; +} +void YangSynBuffer::setTranstype(int transtype) { + m_transtype = transtype; + + if (m_transtype == Yang_Webrtc) { + m_videoClock = 90000; + m_audioClock = 48000; + } else if (m_transtype == Yang_Rtmp) { + m_videoClock = 1000; + m_audioClock = 1000; + } +} +uint8_t* YangSynBuffer::getVideoRef(YangFrame *pframe) { + if (!m_videoBuffer || !m_videoBuffer->size()) + return NULL; + uint8_t *tmp = NULL; + int err=0; + + + if ((err=playVideoFrame(m_videoBuffer->getCurVideoFrame()))==Yang_Ok) { + + tmp = m_videoBuffer->getVideoRef(pframe); + m_width = m_videoBuffer->m_width; + m_height = m_videoBuffer->m_height; + return tmp; + }else if(err==-1){ + tmp = m_videoBuffer->getVideoRef(pframe); + m_width = m_videoBuffer->m_width; + m_height = m_videoBuffer->m_height; + return NULL; + } + + return tmp; +} + +uint8_t* YangSynBuffer::getAudioRef(YangFrame *audioFrame) { + if (!m_audioBuffer || !m_audioBuffer->size()) + return NULL; + + //return m_audioBuffer->getAudios(audioFrame); + + int err=0; + if ((err=playAudioFrame(m_audioBuffer->getNextTimestamp()))==Yang_Ok){ + + return m_audioBuffer->getAudios(audioFrame); + } + if(err==-1){ + m_audioBuffer->getAudios(audioFrame); + + return getAudioRef(audioFrame); + } + + return NULL; + +} + +int32_t YangSynBuffer::getAudioSize() { + if (m_audioBuffer) + return m_audioBuffer->size(); + return 0; +} +int32_t YangSynBuffer::getVideoSize() { + if (m_videoBuffer) + return m_videoBuffer->size(); + return 0; +} + +int YangSynBuffer::playAudioFrame(int64_t pts) { + + if (m_audioBase == 0) { + if(!m_isFirstAudio){ + //clear cache + yang_reindex(m_audioBuffer); + m_isFirstAudio=true; + return false; + } + updateAudioBaseTimestamp(pts); + } + + if(m_transtype == Yang_Webrtc){ + //get relative time + m_audioTime = (pts - m_audioBase) * 1000 / m_audioClock; + }else{ + m_audioTime = (pts - m_audioBase); + } + + m_audioMinus = m_audioTime +Yang_Audio_Cache_time- (yang_get_milli_tick() - m_audio_startClock); + + m_pre_audioTime = m_audioTime; + + if(m_audioMinus<0) { + m_audioNegativeCount++; + if(m_audioNegativeCount>10){ + updateAudioBaseTimestamp(pts); + m_audioNegativeCount=0; + } + return -1; + } + if(m_audioMinus <= m_maxAudioMinus) { + if(m_audioTime>Yang_Audio_Base_Update_Interval) { + //int delay=m_videoTime- (yang_get_milli_tick() - m_video_startClock); + + if(m_lostAudioCount>5){ + YangFrame frame; + memset(&frame,0,sizeof(YangFrame)); + + m_audioBuffer->getAudios(&frame); + m_audioBuffer->getAudios(&frame); + m_lostAudioCount=0; + } + 
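// More than two queued frames means playback is falling behind; count the overruns so the drop logic above can discard two frames once it repeats. +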
if(m_audioBuffer->size()>2) m_lostAudioCount++; + updateAudioBaseTimestamp(pts); + + } + return Yang_Ok; + } + m_audioTimeoutCount++; + if(m_audioTimeoutCount>10){ + m_audioTimeoutCount=0; + updateAudioBaseTimestamp(pts); + } + return 1; +} + +int YangSynBuffer::playVideoFrame(YangFrame* frame) { + // if(!m_audio_startClock) return false; + if(m_videoBase==0) { + if(!m_isFirstVideo){ + //clear cache + yang_reindex(m_videoBuffer); + m_isFirstVideo=true; + return false; + } + + updateVideoBaseTimestamp(frame->pts); + } + if(!m_video_time_state&&frame->frametype==YANG_Frametype_I){ + + updateVideoBaseTimestamp(frame->pts); + m_video_time_state=1; + } + + if (m_transtype == Yang_Webrtc) + m_videoTime = (frame->pts - m_videoBase) * 1000 / m_videoClock; + else + m_videoTime = frame->pts - m_videoBase; + + m_videoMinus = m_videoTime+m_videoCacheTime - (yang_get_milli_tick() - m_video_startClock); + + if(m_videoMinus<0) { + m_videoNegativeCount++; + if(m_videoNegativeCount>6){ + updateVideoBaseTimestamp(frame->pts); + m_videoNegativeCount=0; + } + return -1; + } + if(m_videoMinus <= m_maxVideoMinus) { + if(frame->frametype==YANG_Frametype_I) updateVideoBaseTimestamp(frame->pts); + return Yang_Ok; + } + m_videoTimeoutCount++; + if(m_videoTimeoutCount>6){ + m_videoTimeoutCount=0; + updateVideoBaseTimestamp(frame->pts); + } + + return 1; + +} + +void YangSynBuffer::updateVideoBaseTimestamp(int64_t pts){ + m_videoBase = pts; + m_video_startClock=yang_get_milli_tick(); +} +void YangSynBuffer::updateAudioBaseTimestamp(int64_t pts){ + m_audioBase=pts; + m_audio_startClock = yang_get_milli_tick(); +} +void YangSynBuffer::setClock() { + m_audio_startClock = yang_get_milli_tick(); +} + diff --git a/libmetartc3/src/yangutil/YangAvinfo.cpp b/libmetartc3/src/yangutil/YangAvinfo.cpp new file mode 100755 index 00000000..f3ce3784 --- /dev/null +++ b/libmetartc3/src/yangutil/YangAvinfo.cpp @@ -0,0 +1,104 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#include +#include +#include +void yang_context_setMediaConfig(void* context,int32_t puid,YangAudioParam* audio,YangVideoParam* video){ + if(context==NULL) return; + YangStreamManager* streams=(YangStreamManager*)context; + streams->setMediaConfig(puid, audio, video); + +} +void yang_context_setRtcMessageNotify(void* context,int puid,YangRtcMessageNotify *rtcmsg){ + if(context==NULL) return; + YangStreamManager* streams=(YangStreamManager*)context; + streams->setRtcMessageNotify(puid, rtcmsg); +} +void yang_context_sendRequest(void* context,int32_t puid,uint32_t ssrc,YangRequestType req){ + if(context==NULL) return; + YangStreamManager* streams=(YangStreamManager*)context; + streams->sendRequest(puid, ssrc, req); +} + +class YangInitContext { +public: + YangInitContext() { + } + + ~YangInitContext() { + } + + +}; + +YangContext::YangContext() { + stream.context=&streams; + stream.setMediaConfig=yang_context_setMediaConfig; + stream.setRtcMessageNotify=yang_context_setRtcMessageNotify; + stream.sendRequest=yang_context_sendRequest; + init(); + //m_certificate=NULL; +} + +YangContext::~YangContext() { + yang_closeLogFile(); + //yang_delete(m_certificate); +} +void YangContext::init(char *filename) { + YangIni ini; + ini.init(filename); + ini.initAudio(&avinfo.audio); + ini.initVideo(&avinfo.video); + ini.initSys(&avinfo.sys); + ini.initEnc(&avinfo.enc); + ini.initRtc(&avinfo.rtc); + initExt(&ini); +} + +void YangContext::init() { + yang_init_avinfo(&avinfo); + initExt(); +} + + +void YangContext::initExt() { + +} +void 
YangContext::initExt(void *filename) { + +} + +YangBufferManager::YangBufferManager() { + m_curindex = 0; + m_size = 0; + m_unitsize = 0; + m_cache = NULL; +} +YangBufferManager::YangBufferManager(int32_t num, int32_t unitsize) { + m_curindex = 0; + m_size = 0; + m_unitsize = 0; + m_cache = NULL; + init(num, unitsize); +} +YangBufferManager::~YangBufferManager() { + yang_deleteA(m_cache); +} + +void YangBufferManager::init(int32_t num, int32_t unitsize) { + m_size = num; + m_unitsize = unitsize; + if (m_cache == NULL) + m_cache = new uint8_t[m_unitsize * m_size]; +} +uint8_t* YangBufferManager::getBuffer() { + if (!m_cache) + return NULL; + if (m_curindex >= m_size) + m_curindex = 0; + return m_cache + m_unitsize * m_curindex++; +} diff --git a/libmetartc3/src/yangutil/YangIniImpl.cpp b/libmetartc3/src/yangutil/YangIniImpl.cpp new file mode 100755 index 00000000..3355c9f2 --- /dev/null +++ b/libmetartc3/src/yangutil/YangIniImpl.cpp @@ -0,0 +1,263 @@ +// +// Copyright (c) 2019-2022 yanggaofeng +// +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER +#include +#endif +#define SECTION_MAX_LEN 256 +#define STRVALUE_MAX_LEN 256 +#define LINE_CONTENT_MAX_LEN 256 +#define FILEPATH_MAX 80 + + +YangIni::YangIni() { + m_file = NULL; +} + +YangIni::~YangIni() { + yang_free(m_file); +} +void YangIni::init(const char *p_filename) { + if(!p_filename) return; + char file1[300]; + memset(file1, 0, 300); + char file_path_getcwd[255]; + memset(file_path_getcwd, 0, 255); +#ifdef _MSC_VER + if(_getcwd(file_path_getcwd, 255)){ +#else + if(getcwd(file_path_getcwd, 255)){ +#endif + sprintf(file1, "%s/%s", file_path_getcwd, p_filename); + int32_t len = strlen(file1) + 1; + m_file = (char*) malloc(len); + memset(m_file, 0, len); + strcpy(m_file, file1); + } + +} + +void yangTrim(char *ps, int32_t len) { + for (int32_t i = 0; i < len; i++) + if (*(ps + i) == 0x0a || *(ps + i) == 0x0d) + *(ps + i) = 0x00; +} +int32_t YangIni::IniReadValue(const char *section, const char *key, char *val) { + FILE *fp; + int32_t i = 0; + int32_t lineContentLen = 0; + int32_t position = 0; + int32_t ret = 1; + char lineContent[LINE_CONTENT_MAX_LEN]; + bool bFoundSection = false; + bool bFoundKey = false; + fp = fopen(m_file, "r"); + if (fp == NULL) { + printf("%s: Open file %s failed.\n", __FILE__, m_file); + return -1; + } + int32_t stLen=0; + while (feof(fp) == 0) { + memset(lineContent, 0, LINE_CONTENT_MAX_LEN); + if(!fgets(lineContent, LINE_CONTENT_MAX_LEN, fp)) continue; + if ((lineContent[0] == ';') || (lineContent[0] == '\0') || (lineContent[0] == '\r') || (lineContent[0] == '\n')) { + continue; + } + + //check section + if (strncmp(lineContent, section, strlen(section)) == 0) { + bFoundSection = true; + //printf("Found section = %s\n", lineContent); + while (feof(fp) == 0) { + memset(lineContent, 0, LINE_CONTENT_MAX_LEN); + if(!fgets(lineContent, LINE_CONTENT_MAX_LEN, fp)) continue; + //check key + if (strncmp(lineContent, key, strlen(key)) == 0) { + bFoundKey = true; + lineContentLen = strlen(lineContent); + //find value + for (i = strlen(key); i < lineContentLen; i++) { + if (lineContent[i] == '=') { + position = i + 1; + break; + } + } + if (i >= lineContentLen) break; + + stLen=strlen((char*)(lineContent + position)); + strncpy(val, lineContent + position,stLen); + lineContentLen = strlen(val); + for (i = 0; i < lineContentLen; i++) { + if ((lineContent[i] == '\0') || (lineContent[i] == '\r') + || (lineContent[i] == '\n')) { + val[i] = '\0'; + break; + } + } + } else if (lineContent[0] == '[') {
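+ // A new "[section]" header means the requested key does not exist in this section; stop scanning.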
+ break; + } + } + break; + } + } + if (!bFoundSection) { + ret = -1; + } + else if (!bFoundKey) { + ret = -1; + } + fclose(fp); + yangTrim(val, strlen(val)); + return ret; +} + +int32_t YangIni::readStringValue(const char *section, const char *key,char *val, const char *p_defaultStr) { + char sect[SECTION_MAX_LEN]; + + if (section == NULL || key == NULL || val == NULL || m_file == NULL) { + printf("%s: input parameter(s) is NULL!\n", __func__); + strcpy(val, p_defaultStr); + return -1; + } + + memset(sect, 0, SECTION_MAX_LEN); + sprintf(sect, "[%s]", section); + int32_t ret = IniReadValue(sect, key, val); + if (ret == -1) + strcpy(val, p_defaultStr); + return ret; +} + +int32_t YangIni::readStringValue1(const char *section, const char *key, char *val) { + char sect[SECTION_MAX_LEN]; + if (section == NULL || key == NULL || val == NULL || m_file == NULL) { + printf("%s: input parameter(s) is NULL!\n", __func__); + return -1; + } + + memset(sect, 0, SECTION_MAX_LEN); + sprintf(sect, "[%s]", section); + return IniReadValue(sect, key, val); +} + +int32_t YangIni::readIntValue(const char *section, const char *key, int32_t p_defaultInt) { + char strValue[STRVALUE_MAX_LEN]; + memset(strValue, '\0', STRVALUE_MAX_LEN); + if (readStringValue1(section, key, strValue) != 1) { + return p_defaultInt; + } + return (atoi(strValue)); +} + + +void YangIni::initVideo(YangVideoInfo* video){ + memset(video,0,sizeof(YangVideoInfo)); + video->width = readIntValue("video", "width", 1280); + video->height = readIntValue("video", "height", 720); + video->outWidth = readIntValue("video", "outWidth", 1280); + video->outHeight = readIntValue("video", "outHeight", 720); + video->rate = readIntValue("video", "rate", 2048); + video->frame = readIntValue("video", "frame", 10); + + video->bitDepth = readIntValue("video", "bitDepth", 8); + + video->videoCacheNum = readIntValue("video", "videoCacheNum", 50); + video->evideoCacheNum = readIntValue("video", "evideoCacheNum", 50); + video->videoPlayCacheNum = readIntValue("video", "videoPlayCacheNum", 5); + + video->videoCaptureFormat = (YangYuvType)readIntValue("video", "videoCaptureFormat", YangYuy2); + video->videoEncoderFormat = (YangYuvType)readIntValue("video", "videoEncoderFormat", YangI420); + video->videoDecoderFormat = (YangYuvType)readIntValue("video", "videoDecoderFormat", YangI420); + + video->videoEncoderType = readIntValue("video", "videoEncoderType", 0); + video->videoDecoderType = readIntValue("video", "videoDecoderType", 0); + video->videoEncHwType = readIntValue("video", "videoEncHwType", 0); + video->videoDecHwType = readIntValue("video", "videoDecHwType", 0); + video->vIndex = readIntValue("video", "vIndex", 0); + + +} + void YangIni::initAudio(YangAudioInfo* audio){ + memset(audio,0,sizeof(YangAudioInfo)); + audio->sample=readIntValue("audio", "sample", 44100); + audio->frameSize=readIntValue("audio", "frameSize", 1024); + audio->channel=readIntValue("audio", "channel", 2); + audio->bitrate = readIntValue("audio", "bitrate", 128); + audio->usingMono=readIntValue("audio", "usingMono", 0); + audio->hasAec = readIntValue("audio", "hasAec", 0); + audio->echoPath = readIntValue("audio", "echoPath", 10); + audio->aecBufferFrames=readIntValue("audio", "aecBufferFrames", 0); + audio->hasAudioHeader = readIntValue("audio", "hasAudioHeader", 0); + audio->audioEncoderType = readIntValue("audio", "audioEncoderType", 0); + audio->audioDecoderType = readIntValue("audio", "audioDecoderType", 0); + audio->audioPlayType = readIntValue("audio", "audioPlayType", 0); + 
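// The backing ini file uses plain "[section]" / "key=value" lines, e.g. a "[audio]" section with "sample=48000"; keys that are absent fall back to the defaults passed to readIntValue(). +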
diff --git a/libmetartc3/src/yangutil/YangLoadLib.cpp b/libmetartc3/src/yangutil/YangLoadLib.cpp
new file mode 100755
index 00000000..
--- /dev/null
+++ b/libmetartc3/src/yangutil/YangLoadLib.cpp
@@ -0,0 +1,102 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include <yangutil/sys/YangLoadLib.h>
+#include <yangutil/sys/YangLog.h>
+#include <yangutil/sys/YangFile.h>
+extern "C"{
+#include <yangutil/YangErrorCode.h>
+}
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <yangutil/yangtype.h>
+
+
+#ifdef _WIN32
+#include <windows.h>
+#include <direct.h>
+#ifdef _MSC_VER
+#include <io.h>
+#endif
+#else
+#include <dlfcn.h>
+#endif
+
+
+
+YangLoadLib::YangLoadLib() {
+    m_handle = NULL;
+}
+YangLoadLib::~YangLoadLib() {
+    unloadObject();
+}
+void* YangLoadLib::loadSysObject(const char *sofile)
+{
+#ifdef _WIN32
+    m_handle = LoadLibraryA(sofile);
+#else
+    m_handle = dlopen(sofile, RTLD_NOW | RTLD_LOCAL);
+#endif
+    if (m_handle == 0) {
+        yang_error("Failed loading %s: %s", sofile, (char*) dlerror());
+    }
+    return (m_handle);
+}
+#define LENTH 200
+void* YangLoadLib::loadObject(const char *sofile)
+{
+    char file1[LENTH + 50];
+    char file_path_getcwd[LENTH];
+    memset(file1, 0, sizeof(file1));
+    memset(file_path_getcwd, 0, sizeof(file_path_getcwd));
+    if (yang_getLibpath(file_path_getcwd) != Yang_Ok) {
+        yang_error("Failed loading shared obj %s: %s, getcwd error!", sofile, (char*) dlerror());
+        return NULL;
+    }
+
+#ifdef _WIN32
+    snprintf(file1, sizeof(file1), "%s/%s.dll", file_path_getcwd, sofile);
+    m_handle = LoadLibraryA(file1);
+#else
+    snprintf(file1, sizeof(file1), "%s/%s.so", file_path_getcwd, sofile);
+    m_handle = dlopen(file1, RTLD_NOW | RTLD_LOCAL);
+#endif
+
+    if (m_handle == 0) {
+        yang_error("Failed loading shared obj %s: %s", sofile, (char*) dlerror());
+    }
+    return (m_handle);
+}
+#ifdef _WIN32
+char* YangLoadLib::dlerror() {
+    return (char*) "loadlib error";
+}
+#endif
+
+void* YangLoadLib::loadFunction(const char *name)
+{
+#ifdef _WIN32
+    void *symbol = (void*) GetProcAddress(m_handle, name);
+#else
+    void *symbol = dlsym(m_handle, name);
+#endif
+    if (symbol == NULL) {
+        yang_error("Failed loading function %s: %s", name, (const char*) dlerror());
+    }
+    return (symbol);
+}
+
+void YangLoadLib::unloadObject()
+{
+    if (m_handle) {
+#ifdef _WIN32
+        FreeLibrary(m_handle);
+#else
+        dlclose(m_handle);
+#endif
+        m_handle = NULL;
+    }
+}
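A load/resolve round trip with the wrapper above; "libfoo" and "foo_create" are placeholder names for illustration only:

#include <yangutil/sys/YangLoadLib.h>

typedef void* (*foo_create_fn)(void);  // hypothetical factory signature

void load_example() {
    YangLoadLib lib;
    // loadObject() resolves <lib dir>/libfoo.so on Linux and <lib dir>/libfoo.dll on Windows
    if (lib.loadObject("libfoo") == NULL) return;
    foo_create_fn create = (foo_create_fn) lib.loadFunction("foo_create");
    if (create) create();
    lib.unloadObject();  // also runs from the destructor
}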
diff --git a/libmetartc3/src/yangutil/YangString.cpp b/libmetartc3/src/yangutil/YangString.cpp
new file mode 100755
index 00000000..74d5688d
--- /dev/null
+++ b/libmetartc3/src/yangutil/YangString.cpp
@@ -0,0 +1,131 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include <yangutil/sys/YangString.h>
+#include <yangutil/sys/YangMath.h>
+#include <yangutil/buffer/YangBuffer.h>
+#include <string.h>
+#include <stdio.h>
+
+#ifdef _WIN32
+#include <stdlib.h>
+#define srandom srand
+#define random rand
+#endif
+#include <inttypes.h>
+#ifdef _MSC_VER
+#include <windows.h>
+#endif
+
+using namespace std;
+
+void yang_write_string(YangBuffer *buf, string value)
+{
+    if (buf == NULL) return;
+    memcpy(buf->head, value.data(), value.length());
+    buf->head += value.length();
+}
+
+string yang_read_string(YangBuffer *buf, int32_t len)
+{
+    std::string value;
+    value.append(buf->head, len);
+    buf->head += len;
+    return value;
+}
+
+void skip_first_spaces(std::string &str)
+{
+    while (!str.empty() && str[0] == ' ') {
+        str.erase(0, 1);
+    }
+}
+
+vector<string> yang_split_first(string s, char ch) {
+    vector<string> ret;
+    for (size_t i = 0; i < s.length(); i++) {
+        if (s[i] == ch) {
+            //cut only at the first separator
+            ret.push_back(s.substr(0, i));
+            ret.push_back(s.substr(i + 1, s.length() - 1));
+            return ret;
+        }
+    }
+    return ret;
+}
+
+vector<string> yang_split(string s, char ch) {
+    size_t start = 0;
+    int32_t len = 0;
+    vector<string> ret;
+    for (size_t i = 0; i < s.length(); i++) {
+        if (s[i] == ch) {
+            ret.push_back(s.substr(start, len));
+            start = i + 1;
+            len = 0;
+        } else {
+            len++;
+        }
+    }
+    if (start < s.length())
+        ret.push_back(s.substr(start, len));
+    return ret;
+}
+
+std::vector<std::string> yang_splits(const std::string &str, const std::string &delim)
+{
+    std::vector<std::string> ret;
+    size_t pre_pos = 0;
+    std::string tmp;
+    size_t pos = 0;
+    do {
+        pos = str.find(delim, pre_pos);
+        tmp = str.substr(pre_pos, pos - pre_pos);
+        ret.push_back(tmp);
+        pre_pos = pos + delim.size();
+    } while (pos != std::string::npos);
+    return ret;
+}
+
+std::string yang_int2str(int64_t value) {
+    char tmp[22];
+    snprintf(tmp, 22, "%" PRId64, value);
+    return string(tmp);
+}
+
+std::string yang_random_str(int32_t len) {
+    static string random_table = "01234567890123456789012345678901234567890123456789abcdefghijklmnopqrstuvwxyz";
+    string ret;
+    ret.reserve(len);
+    for (int32_t i = 0; i < len; ++i) {
+        ret.append(1, random_table[yang_random() % random_table.size()]);
+    }
+    return ret;
+}
+
+void yang_replace(std::string &strBig, const std::string &strsrc, const std::string &strdst)
+{
+    std::string::size_type pos = 0;
+    std::string::size_type srclen = strsrc.size();
+    std::string::size_type dstlen = strdst.size();
+
+    while ((pos = strBig.find(strsrc, pos)) != std::string::npos) {
+        strBig.replace(pos, srclen, strdst);
+        pos += dstlen;
+    }
+}
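The three split helpers above differ in edge cases that are easy to miss; a sketch of the behavior as implemented:

#include <string>
#include <vector>
// declarations come from yangutil/sys/YangString.h

void split_example() {
    std::vector<std::string> a = yang_split_first("k=v=w", '=');  // {"k", "v=w"}: cuts at the first '=' only
    std::vector<std::string> b = yang_split("a,b,", ',');         // {"a", "b"}: trailing empty field dropped
    std::vector<std::string> c = yang_splits("a,b,", ",");        // {"a", "b", ""}: trailing empty field kept
}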
diff --git a/libmetartc3/src/yangutil/YangSysMessageHandle.cpp b/libmetartc3/src/yangutil/YangSysMessageHandle.cpp
new file mode 100755
index 00000000..d7b11035
--- /dev/null
+++ b/libmetartc3/src/yangutil/YangSysMessageHandle.cpp
@@ -0,0 +1,119 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include <yangutil/sys/YangSysMessageHandle.h>
+#include <yangutil/sys/YangSysMessageI.h>
+#include <yangutil/yang_unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "yangutil/sys/YangLog.h"
+
+void yang_post_message(int32_t st, int32_t uid, YangSysMessageI *mhandle, void *user) {
+    if (YangSysMessageHandle::m_instance)
+        YangSysMessageHandle::m_instance->putMessage(mhandle, st, uid, 0, user);
+}
+void yang_post_state_message(int32_t st, int32_t uid, int32_t handleState, YangSysMessageI *mhandle) {
+    if (YangSysMessageHandle::m_instance)
+        YangSysMessageHandle::m_instance->putMessage(mhandle, st, uid,
+                handleState);
+}
+void yang_post_userMessage(int32_t st, int32_t uid, YangSysMessageI *mhandle, void *user) {
+    if (YangSysMessageHandle::m_instance)
+        //forward the caller's user pointer
+        YangSysMessageHandle::m_instance->putMessage(mhandle, st, uid, 0, user);
+}
+YangSysMessageHandle::YangSysMessageHandle() {
+    m_loop = 0;
+    m_isStart = 0;
+    pthread_mutex_init(&m_lock, NULL);
+    pthread_cond_init(&m_cond_mess, NULL);
+    m_waitState = 0;
+    m_receive = NULL;
+    if (m_instance == NULL)
+        m_instance = this;
+}
+
+YangSysMessageHandle::~YangSysMessageHandle() {
+    if (m_isStart) {
+        stop();
+        while (m_isStart) {
+            yang_usleep(1000);
+        }
+    }
+    m_instance = NULL;
+    pthread_mutex_destroy(&m_lock);
+    pthread_cond_destroy(&m_cond_mess);
+}
+YangSysMessageHandle* YangSysMessageHandle::m_instance = NULL;
+void YangSysMessageHandle::run() {
+    m_isStart = 1;
+    startLoop();
+    m_isStart = 0;
+}
+void YangSysMessageHandle::stop() {
+    stopLoop();
+}
+void YangSysMessageHandle::stopLoop() {
+    m_loop = 0;
+    pthread_mutex_lock(&m_lock);
+    if (m_waitState) pthread_cond_signal(&m_cond_mess);
+    pthread_mutex_unlock(&m_lock);
+}
+
+void YangSysMessageHandle::putMessage(YangSysMessageI *handle, int32_t pst,
+        int32_t puid, int32_t handleState, void *user) {
+    if (!m_loop) return;
+
+    YangSysMessage *mes = new YangSysMessage();
+    mes->uid = puid;
+    mes->messageId = pst;
+    mes->handleState = handleState;
+    mes->handle = handle;
+    mes->user = user;
+
+    //queue under the lock: the consumer thread erases from the same vector
+    pthread_mutex_lock(&m_lock);
+    m_sysMessages.push_back(mes);
+    if (m_waitState) pthread_cond_signal(&m_cond_mess);
+    pthread_mutex_unlock(&m_lock);
+}
+
+void YangSysMessageHandle::startLoop() {
+    m_loop = 1;
+    pthread_mutex_lock(&m_lock);
+
+    while (m_loop) {
+        m_waitState = 1;
+        pthread_cond_wait(&m_cond_mess, &m_lock);
+        m_waitState = 0;
+
+        while (m_sysMessages.size() > 0) {
+            YangSysMessage *mes = m_sysMessages.front();
+            m_sysMessages.erase(m_sysMessages.begin());
+            //handle outside the lock so a handler may post new messages
+            pthread_mutex_unlock(&m_lock);
+            handleMessage(mes);
+            mes->handle = NULL;
+            delete mes;
+            pthread_mutex_lock(&m_lock);
+        }
+    }
+    pthread_mutex_unlock(&m_lock);
+}
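Concrete handlers subclass YangSysMessageHandle and implement handleMessage; posting goes through the singleton set in the constructor. A sketch (hypothetical subclass; assumes the class runs as a YangThread, which its run() implementation suggests):

#include <yangutil/sys/YangSysMessageHandle.h>

class MyMessageHandle : public YangSysMessageHandle {
public:
    void handleMessage(YangSysMessage *msg) {
        // dispatch on msg->messageId / msg->uid; the loop deletes msg afterwards
    }
};

void message_example() {
    MyMessageHandle handle;
    handle.start();                          // spawns run() -> startLoop()
    yang_post_message(100, 0, NULL, NULL);   // wakes the loop with messageId 100
    handle.stop();
}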
diff --git a/libmetartc3/src/yangutil/YangThread.cpp b/libmetartc3/src/yangutil/YangThread.cpp
new file mode 100755
index 00000000..282509f4
--- /dev/null
+++ b/libmetartc3/src/yangutil/YangThread.cpp
@@ -0,0 +1,66 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+
+#include "yangutil/sys/YangThread.h"
+#include "yangutil/sys/YangLog.h"
+
+YangThread::YangThread() {
+#ifndef _MSC_VER
+    m_thread = 0;
+#endif
+}
+YangThread::~YangThread() {
+}
+
+int32_t YangThread::start()
+{
+    if (pthread_create(&m_thread, 0, &YangThread::go, this)) {
+        yang_error("YangThread::start could not start thread");
+        return -1;
+    }
+    return 0;
+}
+
+void* YangThread::go(void *obj)
+{
+    reinterpret_cast<YangThread*>(obj)->run();
+    return NULL;
+}
+
+void* YangThread::join()
+{
+    void *ret = NULL;
+    pthread_join(m_thread, &ret);
+    return ret;
+}
+
+pthread_t YangThread::getThread()
+{
+    return m_thread;
+}
+
+int32_t YangThread::detach()
+{
+    return pthread_detach(m_thread);
+}
+
+int32_t YangThread::equals(YangThread *t)
+{
+    return pthread_equal(m_thread, t->getThread());
+}
+
+void YangThread::exitThread(void *value_ptr)
+{
+    pthread_exit(value_ptr);
+}
+
+int32_t YangThread::cancel()
+{
+    return pthread_cancel(m_thread);
+}
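A minimal worker on top of the wrapper above (a sketch; assumes run() is the virtual hook, as go() implies):

#include "yangutil/sys/YangThread.h"
#include <stdio.h>

class Worker : public YangThread {
protected:
    void run() { printf("worker running\n"); }
};

void thread_example() {
    Worker w;
    if (w.start() == 0)  // pthread_create() under the hood
        w.join();        // reap the thread before w leaves scope
}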
diff --git a/libmetartc3/src/yangutil/YangTimer.cpp b/libmetartc3/src/yangutil/YangTimer.cpp
new file mode 100755
index 00000000..c492cf81
--- /dev/null
+++ b/libmetartc3/src/yangutil/YangTimer.cpp
@@ -0,0 +1,221 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include <yangutil/sys/YangTimer.h>
+#include <yangutil/sys/YangLog.h>
+#include <yangutil/yang_unistd.h>
+#ifdef _WIN32
+
+#else
+#include <unistd.h>
+#include <sys/time.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <time.h>
+
+#if !Yang_Using_Phtread
+#ifdef _WIN32
+#pragma comment(lib,"Winmm.lib")
+void CALLBACK YangTimer::TimeEvent(PVOID user, BOOLEAN TimerOrWaitFired2)
+{
+    YangTimer *timer = (YangTimer*) user;
+    if (timer->m_task) timer->m_task->doTask(timer->m_taskId);
+    return;
+}
+
+void YangTimer::startWindowsEventTime(int pwaitTime, DWORD_PTR duser)
+{
+    m_hTimerQueue = CreateTimerQueue();
+    if (m_hTimerQueue != NULL) {
+        if (!CreateTimerQueueTimer(&m_hTimerQueueTimer, m_hTimerQueue, TimeEvent, this, 0, pwaitTime, WT_EXECUTEDEFAULT)) {
+            m_hTimerQueue = NULL;
+            m_hTimerQueueTimer = NULL;
+        }
+    }
+    return;
+}
+#else
+#include <sys/timerfd.h>
+#include <sys/epoll.h>
+#include <fcntl.h>
+
+#endif
+
+#endif
+
+YangTimer::YangTimer() {
+    m_loop = 0;
+    m_isStart = 0;
+    m_waitState = 0;
+    m_waitTime = 100;
+#if Yang_Using_Phtread
+    pthread_mutex_init(&m_lock, NULL);
+    pthread_cond_init(&m_cond_mess, NULL);
+#else
+#ifdef _WIN32
+    m_hTimerQueue = NULL;
+    m_hTimerQueueTimer = NULL;
+    m_winEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
+#else
+    m_timerfd = timerfd_create(CLOCK_REALTIME, 0);
+    m_efd = -1;
+#endif
+
+#endif
+    m_task = NULL;
+    m_taskId = 0;
+}
+
+YangTimer::~YangTimer() {
+    if (m_isStart) {
+        stop();
+        while (m_isStart)
+            yang_usleep(1000);
+    }
+    m_task = NULL;
+#if Yang_Using_Phtread
+    pthread_mutex_destroy(&m_lock);
+    pthread_cond_destroy(&m_cond_mess);
+#endif
+}
+void YangTimer::setTaskId(int32_t ptaskId) {
+    m_taskId = ptaskId;
+}
+void YangTimer::setTask(YangTimerTask *ptask) {
+    m_task = ptask;
+}
+void YangTimer::setTimelen(int32_t ptimelen) {
+    m_waitTime = ptimelen;
+}
+void YangTimer::run() {
+    m_isStart = 1;
+    startLoop();
+    m_isStart = 0;
+}
+void YangTimer::stop() {
+    stopLoop();
+}
+void YangTimer::stopLoop() {
+    m_loop = 0;
+#if Yang_Using_Phtread
+    if (m_waitState) {
+        pthread_mutex_lock(&m_lock);
+        pthread_cond_signal(&m_cond_mess);
+        pthread_mutex_unlock(&m_lock);
+    }
+#else
+#ifdef _WIN32
+    if (m_hTimerQueueTimer != NULL)
+        DeleteTimerQueueTimer(m_hTimerQueue, m_hTimerQueueTimer, INVALID_HANDLE_VALUE);
+    if (m_hTimerQueue != NULL)
+        DeleteTimerQueueEx(m_hTimerQueue, INVALID_HANDLE_VALUE);
+
+    m_hTimerQueueTimer = NULL;
+    m_hTimerQueue = NULL;
+    SetEvent(m_winEvent);
+#else
+    if (m_isStart) {
+        struct epoll_event tev;
+        tev.events = EPOLLIN | EPOLLET;
+        tev.data.fd = m_timerfd;
+        epoll_ctl(m_efd, EPOLL_CTL_DEL, m_timerfd, &tev);
+        close(m_efd);
+        m_efd = -1;
+    }
+    close(m_timerfd);
+    m_timerfd = -1;
+
+#endif
+#endif
+}
+
+void YangTimer::startLoop() {
+    m_loop = 1;
+#if Yang_Using_Phtread
+    struct timespec outtime;
+    struct timeval now;
+    pthread_mutex_lock(&m_lock);
+    while (m_loop) {
+        gettimeofday(&now, NULL);
+
+        long nsec = now.tv_usec * 1000 + (m_waitTime % 1000) * 1000000;
+        outtime.tv_sec = now.tv_sec + nsec / 1000000000 + m_waitTime / 1000;
+        outtime.tv_nsec = nsec % 1000000000;
+
+        m_waitState = 1;
+        pthread_cond_timedwait(&m_cond_mess, &m_lock, &outtime);
+        m_waitState = 0;
+        if (m_task) m_task->doTask(m_taskId);
+    }
+    pthread_mutex_unlock(&m_lock);
+#else
+#ifdef _WIN32
+    startWindowsEventTime(m_waitTime, (DWORD_PTR) this);
+    if (WaitForSingleObject(m_winEvent, INFINITE) != WAIT_OBJECT_0) {
+        yang_error("YangTimer WaitForSingleObject fail");
+    }
+
+    CloseHandle(m_winEvent);
+    m_winEvent = NULL;
+#else
+    struct itimerspec itimer;
+    itimer.it_value.tv_sec = m_waitTime / 1000;
+    itimer.it_value.tv_nsec = (m_waitTime % 1000) * 1000 * 1000;
+    itimer.it_interval.tv_sec = m_waitTime / 1000;
+    itimer.it_interval.tv_nsec = (m_waitTime % 1000) * 1000 * 1000;
+    //it_value is relative here, so flags must be 0: TFD_TIMER_ABSTIME would
+    //treat it as an absolute CLOCK_REALTIME instant
+    int ret = timerfd_settime(m_timerfd, 0, &itimer, NULL);
+    if (ret == -1) {
+        yang_error("timerfd_settime");
+    }
+
+    int opts;
+    opts = fcntl(m_timerfd, F_GETFL);
+    if (opts < 0) {
+        yang_error("fcntl(sock,GETFL)");
+        exit(1);
+    }
+    opts = opts | O_NONBLOCK;
+    if (fcntl(m_timerfd, F_SETFL, opts) < 0) {
+        yang_error("fcntl(sock,SETFL,opts)");
+        exit(1);
+    }
+    m_efd = epoll_create(1);
+    struct epoll_event tev;
+    tev.events = EPOLLIN | EPOLLET;
+    tev.data.fd = m_timerfd;
+    epoll_ctl(m_efd, EPOLL_CTL_ADD, m_timerfd, &tev);
+    struct epoll_event ev[1];
+    while (m_loop) {
+        //block up to one period instead of busy-polling with a zero timeout
+        int nev = epoll_wait(m_efd, ev, 1, m_waitTime);
+        if (nev > 0 && (ev[0].events & EPOLLIN)) {
+            uint64_t res;
+            if (read(m_timerfd, &res, sizeof(res)) > 0 && m_task)
+                m_task->doTask(m_taskId);
+        }
+    }
+
+#endif
+#endif
+}
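Driving a periodic callback with the timer above (a sketch; assumes YangTimerTask exposes the doTask(int32_t) hook the loop invokes, and that YangTimer runs as a thread via run()):

#include <yangutil/sys/YangTimer.h>
#include <stdio.h>

class TickTask : public YangTimerTask {
public:
    void doTask(int32_t taskId) { printf("tick %d\n", taskId); }
};

void timer_example() {
    TickTask task;
    YangTimer timer;
    timer.setTask(&task);
    timer.setTaskId(1);
    timer.setTimelen(100);  // fire roughly every 100 ms
    timer.start();          // run() enters startLoop() on its own thread
    // ...
    timer.stop();
}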
diff --git a/libmetartc3/src/yangutil/YangWindowsMouse.cpp b/libmetartc3/src/yangutil/YangWindowsMouse.cpp
new file mode 100755
index 00000000..a413aca7
--- /dev/null
+++ b/libmetartc3/src/yangutil/YangWindowsMouse.cpp
@@ -0,0 +1,158 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include <yangutil/sys/YangWindowsMouse.h>
+#ifdef _WIN32
+
+YangWindowsMouse::YangWindowsMouse()
+{
+}
+
+YangWindowsMouse::~YangWindowsMouse()
+{
+}
+
+void YangWindowsMouse::moveTo(int x, int y)
+{
+    p.x = x;
+    p.y = y;
+    SetCursorPos(x, y);
+}
+
+void YangWindowsMouse::relativeMove(int cx, int cy)
+{
+    GetCursorPos(&p);
+    p.x += cx;
+    p.y += cy;
+    SetCursorPos(p.x, p.y);
+}
+
+void YangWindowsMouse::setPos()
+{
+    GetCursorPos(&p);
+}
+
+void YangWindowsMouse::restorePos()
+{
+    SetCursorPos(p.x, p.y);
+}
+
+void YangWindowsMouse::lockMouse()
+{
+    POINT pt;
+    RECT rt;
+
+    GetCursorPos(&pt);
+    rt.left = rt.right = pt.x;
+    rt.top = rt.bottom = pt.y;
+    rt.right++;
+    rt.bottom++;
+    ClipCursor(&rt);
+}
+
+void YangWindowsMouse::unlockMouse()
+{
+    ClipCursor(NULL);
+}
+
+void YangWindowsMouse::leftBClick()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_LEFTDOWN | MOUSEEVENTF_LEFTUP, p.x, p.y, 0, 0);
+}
+
+void YangWindowsMouse::leftbDClick()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_LEFTDOWN | MOUSEEVENTF_LEFTUP, p.x, p.y, 0, 0);
+    mouse_event(MOUSEEVENTF_LEFTDOWN | MOUSEEVENTF_LEFTUP, p.x, p.y, 0, 0);
+}
+
+void YangWindowsMouse::leftBDown()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_LEFTDOWN, p.x, p.y, 0, 0);
+}
+
+void YangWindowsMouse::leftBUp()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_LEFTUP, p.x, p.y, 0, 0);
+}
+
+//middle
+void YangWindowsMouse::middleBClick()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_MIDDLEDOWN | MOUSEEVENTF_MIDDLEUP, p.x, p.y, 0, 0);
+}
+
+void YangWindowsMouse::middleBDbClick()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_MIDDLEDOWN | MOUSEEVENTF_MIDDLEUP, p.x, p.y, 0, 0);
+    mouse_event(MOUSEEVENTF_MIDDLEDOWN | MOUSEEVENTF_MIDDLEUP, p.x, p.y, 0, 0);
+}
+
+void YangWindowsMouse::middleBDown()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_MIDDLEDOWN, p.x, p.y, 0, 0);
+}
+
+void YangWindowsMouse::middleBUp()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_MIDDLEUP, p.x, p.y, 0, 0);
+}
+
+void YangWindowsMouse::middleBRoll(int x, int y, int ch)
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_WHEEL, x, y, ch, 0);
+}
+
+void YangWindowsMouse::rightBClick()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_RIGHTDOWN | MOUSEEVENTF_RIGHTUP, p.x, p.y, 0, 0);
+}
+
+void YangWindowsMouse::rightBDbClick()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_RIGHTDOWN | MOUSEEVENTF_RIGHTUP, p.x, p.y, 0, 0);
+    mouse_event(MOUSEEVENTF_RIGHTDOWN | MOUSEEVENTF_RIGHTUP, p.x, p.y, 0, 0);
+}
+
+void YangWindowsMouse::rightBDown()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_RIGHTDOWN, p.x, p.y, 0, 0);
+}
+
+void YangWindowsMouse::rightBUp()
+{
+    setPos();
+    mouse_event(MOUSEEVENTF_RIGHTUP, p.x, p.y, 0, 0);
+}
+
+#endif
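The mouse helper is a thin wrapper over the legacy mouse_event API; for example, a click at absolute coordinates (Windows-only sketch):

#ifdef _WIN32
#include <yangutil/sys/YangWindowsMouse.h>

void click_at(int x, int y) {
    YangWindowsMouse mouse;
    mouse.moveTo(x, y);   // SetCursorPos
    mouse.leftBClick();   // LEFTDOWN + LEFTUP at the current position
}
#endif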
diff --git a/libmetartc3/src/yangutil/buffer/YangAudioBuffer.cpp b/libmetartc3/src/yangutil/buffer/YangAudioBuffer.cpp
new file mode 100755
index 00000000..8d114342
--- /dev/null
+++ b/libmetartc3/src/yangutil/buffer/YangAudioBuffer.cpp
@@ -0,0 +1,55 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "yangutil/buffer/YangAudioBuffer.h"
+
+#include <string.h>
+#include <stdio.h>
+
+YangAudioBuffer::YangAudioBuffer(int32_t pcacheNum)
+{
+    resetIndex();
+    m_cache_num = pcacheNum;
+    m_bufLen = 0;
+}
+
+void YangAudioBuffer::reset() {
+    resetIndex();
+}
+
+YangAudioBuffer::~YangAudioBuffer(void)
+{
+}
+
+void YangAudioBuffer::putAudio(YangFrame *pframe)
+{
+    if (m_bufLen == 0) {
+        //the first frame fixes the slot size for the whole ring
+        m_bufLen = pframe->nb;
+        initFrames(m_cache_num, pframe->nb);
+    }
+    putFrame(pframe);
+}
+
+int32_t YangAudioBuffer::getAudio(YangFrame *pframe)
+{
+    if (size() > 0) {
+        getFrame(pframe);
+        return 0;
+    } else
+        return 1;
+}
+
+uint8_t* YangAudioBuffer::getAudioRef(YangFrame *pframe)
+{
+    if (size() > 0) {
+        return getFrameRef(pframe);
+    } else {
+        return NULL;
+    }
+}
diff --git a/libmetartc3/src/yangutil/buffer/YangAudioEncoderBuffer.cpp b/libmetartc3/src/yangutil/buffer/YangAudioEncoderBuffer.cpp
new file mode 100755
index 00000000..98803444
--- /dev/null
+++ b/libmetartc3/src/yangutil/buffer/YangAudioEncoderBuffer.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "yangutil/buffer/YangAudioEncoderBuffer.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+YangAudioEncoderBuffer::YangAudioEncoderBuffer(int32_t paudioCacheNum)
+{
+    resetIndex();
+    m_cache_num = paudioCacheNum;
+    initFrames(m_cache_num, 1024);
+}
+
+YangAudioEncoderBuffer::~YangAudioEncoderBuffer()
+{
+}
+
+void YangAudioEncoderBuffer::reset() {
+    resetIndex();
+}
+
+void YangAudioEncoderBuffer::putAudio(YangFrame *audioFrame) {
+    putPlayAudio(audioFrame);
+}
+
+void YangAudioEncoderBuffer::getAudio(YangFrame *audioFrame) {
+    getPlayAudio(audioFrame);
+}
+uint8_t* YangAudioEncoderBuffer::getAudioRef(YangFrame *frame) {
+    return getFrameRef(frame);
+}
+void YangAudioEncoderBuffer::putPlayAudio(YangFrame *audioFrame) {
+    putFrame(audioFrame);
+}
+
+void YangAudioEncoderBuffer::getPlayAudio(YangFrame *audioFrame) {
+    getFrame(audioFrame);
+}
diff --git a/libmetartc3/src/yangutil/buffer/YangAudioPlayBuffer.cpp b/libmetartc3/src/yangutil/buffer/YangAudioPlayBuffer.cpp
new file mode 100755
index 00000000..f86f2c2d
--- /dev/null
+++ b/libmetartc3/src/yangutil/buffer/YangAudioPlayBuffer.cpp
@@ -0,0 +1,56 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "yangutil/buffer/YangAudioPlayBuffer.h"
+
+#include <string.h>
+#include <stdio.h>
+
+YangAudioPlayBuffer::YangAudioPlayBuffer() {
+    resetIndex();
+    m_cache_num = 50;
+    m_bufLen = 0;
+}
+
+void YangAudioPlayBuffer::reset() {
+    resetIndex();
+}
+
+YangAudioPlayBuffer::~YangAudioPlayBuffer(void) {
+}
+
+void YangAudioPlayBuffer::putAudio(YangFrame *pframe) {
+    if (m_bufLen == 0) {
+        m_bufLen = pframe->nb;
+        initFrames(m_cache_num, pframe->nb);
+    }
+    putFrame(pframe);
+}
+
+void YangAudioPlayBuffer::getAudio(YangFrame *pframe) {
+    getFrame(pframe);
+}
+
+uint8_t* YangAudioPlayBuffer::getAudios(YangFrame *pframe) {
+    if (size() < 1)
+        return NULL;
+    return getFrameRef(pframe);
+}
+int32_t YangAudioPlayBuffer::getFrameTimestamp(int64_t *ptimestamp) {
+    YangFrame *f = getCurFrameRef();
+    if (f) *ptimestamp = f->pts;
+    return Yang_Ok;
+}
+int64_t YangAudioPlayBuffer::getNextTimestamp() {
+    return getNextFrameTimestamp();
+}
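All three audio buffers above share the same YangFrame put/get pattern, and the first putAudio fixes the slot size for the ring. A round-trip sketch (buffer sizes are illustrative; the copy-into-caller-payload semantics of getFrame is an assumption from yang_frame_copy_buffer's usage):

#include <yangutil/buffer/YangAudioPlayBuffer.h>
#include <string.h>
#include <stdint.h>

void audio_buffer_example() {
    YangAudioPlayBuffer buf;            // 50-slot ring by default
    uint8_t pcm[4096] = {0};
    YangFrame frame;
    memset(&frame, 0, sizeof(frame));
    frame.payload = pcm;
    frame.nb = 4096;                    // first put sizes every slot in the ring
    buf.putAudio(&frame);

    uint8_t out[4096];
    YangFrame got;
    memset(&got, 0, sizeof(got));
    got.payload = out;                  // getAudio() copies into the caller's payload
    buf.getAudio(&got);
}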
diff --git a/libmetartc3/src/yangutil/buffer/YangMediaBuffer.cpp b/libmetartc3/src/yangutil/buffer/YangMediaBuffer.cpp
new file mode 100755
index 00000000..1d97ee03
--- /dev/null
+++ b/libmetartc3/src/yangutil/buffer/YangMediaBuffer.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include <yangutil/buffer/YangMediaBuffer.h>
+#include <yangutil/yangavtype.h>
+#include <yangutil/yangtype.h>
+#include <string.h>
+#define Yang_MediaBuffer_Minus 5000
+YangMediaBuffer::YangMediaBuffer() {
+    resetIndex();
+    m_cache_num = 5;
+    m_mediaType = 1;
+    m_uid = -1;
+    m_frames = NULL;
+    m_bufferManager = NULL;
+    pthread_mutex_init(&m_lock, NULL);
+}
+
+YangMediaBuffer::~YangMediaBuffer() {
+    if (m_frames) {
+        for (int32_t i = 0; i < m_cache_num; i++) {
+            yang_delete(m_frames[i]);
+        }
+        yang_deleteA(m_frames);
+    }
+    yang_delete(m_bufferManager);
+    pthread_mutex_destroy(&m_lock);
+}
+
+void YangMediaBuffer::resetIndex() {
+    m_putIndex = 0;
+    m_getIndex = 0;
+    m_nextIndex = 0;
+    m_ret = 0;
+    m_size = 0;
+}
+int32_t YangMediaBuffer::size() {
+    return m_size;
+}
+
+void YangMediaBuffer::initFrames(int32_t pnum, int32_t unitsize) {
+    if (m_bufferManager == NULL) m_bufferManager = new YangBufferManager(pnum, unitsize);
+    if (m_frames == NULL) {
+        m_frames = new YangFrame*[pnum];
+        for (int32_t i = 0; i < pnum; i++) {
+            m_frames[i] = new YangFrame();
+            memset(m_frames[i], 0, sizeof(YangFrame));
+            m_frames[i]->payload = m_bufferManager->getBuffer();
+        }
+    }
+}
+void YangMediaBuffer::putFrame(YangFrame *pframe) {
+    //drops silently when the ring is full
+    if (!pframe || size() >= m_cache_num) return;
+
+    pthread_mutex_lock(&m_lock);
+    yang_frame_copy_buffer(pframe, m_frames[m_putIndex++]);
+    if (m_putIndex >= m_cache_num) m_putIndex = 0;
+    m_size++;
+    pthread_mutex_unlock(&m_lock);
+}
+void YangMediaBuffer::getFrame(YangFrame *pframe) {
+    if (!pframe || !size()) return;
+
+    pthread_mutex_lock(&m_lock);
+    yang_frame_copy_buffer(m_frames[m_getIndex++], pframe);
+    if (m_getIndex >= m_cache_num) m_getIndex = 0;
+    m_size--;
+    pthread_mutex_unlock(&m_lock);
+}
+uint8_t* YangMediaBuffer::getFrameRef(YangFrame *pframe) {
+    if (!size() || !pframe) return NULL;
+    pthread_mutex_lock(&m_lock);
+    yang_frame_copy_nobuffer(m_frames[m_getIndex], pframe);
+    uint8_t *p = m_frames[m_getIndex]->payload;
+    m_getIndex++;
+    if (m_getIndex >= m_cache_num) m_getIndex = 0;
+    m_size--;
+    pthread_mutex_unlock(&m_lock);
+    return p;
+}
+YangFrame* YangMediaBuffer::getCurFrameRef() {
+    if (!size()) return NULL;
+    m_nextIndex = m_getIndex;
+    if (m_nextIndex >= m_cache_num) m_nextIndex = 0;
+    return m_frames[m_nextIndex];
+}
+
+int64_t YangMediaBuffer::getNextFrameTimestamp() {
+    if (!size()) return 0;
+    m_nextIndex = m_getIndex;
+    if (m_nextIndex >= m_cache_num) m_nextIndex = 0;
+    return m_frames[m_nextIndex]->pts;
+}
diff --git a/libmetartc3/src/yangutil/buffer/YangVideoBuffer.cpp b/libmetartc3/src/yangutil/buffer/YangVideoBuffer.cpp
new file mode 100755
index 00000000..7172f820
--- /dev/null
+++ b/libmetartc3/src/yangutil/buffer/YangVideoBuffer.cpp
@@ -0,0 +1,78 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include <yangutil/buffer/YangVideoBuffer.h>
+
+#include <yangutil/yangavtype.h>
+
+YangVideoBuffer::~YangVideoBuffer(void)
+{
+}
+
+YangVideoBuffer::YangVideoBuffer(int32_t pBitDepthLen)
+{
+    reset();
+    m_bitDepthLen = pBitDepthLen;
+    m_frames = 0;
+    m_cache_num = 30;
+    m_width = 0;
+    m_height = 0;
+    m_length = 0;
+}
+
+YangVideoBuffer::YangVideoBuffer(int32_t pwid, int32_t phei, YangYuvType ptype, int32_t pBitDepthLen) {
+    reset();
+    m_bitDepthLen = pBitDepthLen;
+    init(pwid, phei, ptype);
+}
+void YangVideoBuffer::init(int32_t pwid, int32_t phei, YangYuvType ptype) {
+    reset();
+    m_width = pwid;
+    m_height = phei;
+    //default is planar 4:2:0: width*height*3/2 samples per frame
+    m_length = m_width * m_height * 3 * m_bitDepthLen / 2;
+    if (ptype == YangYuy2) m_length = m_width * m_height * 2 * m_bitDepthLen;
+    if (ptype == YangRgb) m_length = m_width * m_height * 3 * m_bitDepthLen;
+    if (ptype == YangArgb || ptype == YangBgra) m_length = m_width * m_height * 4 * m_bitDepthLen;
+    m_cache_num = 30;
+    initFrames(m_cache_num, m_length);
+
+    m_frames = 0;
+}
+
+void YangVideoBuffer::reset() {
+    resetIndex();
+}
+
+void YangVideoBuffer::putVideo(YangFrame *pframe) {
+    putFrame(pframe);
+}
+
+void YangVideoBuffer::getVideo(YangFrame *pframe) {
+    getFrame(pframe);
+}
+uint8_t* YangVideoBuffer::getVideoRef(YangFrame *pframe) {
+    return getFrameRef(pframe);
+}
+
+int64_t YangVideoBuffer::getTimestamp(int64_t *ptimestamp) {
+    YangFrame *f = getCurFrameRef();
+    if (f) *ptimestamp = f->pts;
+    return 0;
+}
+
+int64_t YangVideoBuffer::getNextTimestamp() {
+    return getNextFrameTimestamp();
+}
+YangFrame* YangVideoBuffer::getCurVideoFrame() {
+    return getCurFrameRef();
+}
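Two properties of the ring above are worth a worked example. Sizing: init derives the slot length from the pixel format, so 1280x720 I420 at 8-bit depth (pBitDepthLen == 1) needs 1280*720*3/2 = 1382400 bytes per slot, while YUY2 needs 1280*720*2 = 1843200. Overflow: putFrame returns silently once the ring is full, so a stalled consumer drops the newest frames, not the oldest. A sketch:

#include <yangutil/buffer/YangVideoBuffer.h>

void video_buffer_example() {
    YangVideoBuffer buf(1);          // 1 byte per sample (8-bit)
    buf.init(1280, 720, YangI420);   // 30 slots of 1382400 bytes each
    // putVideo()/getVideoRef() then move YangFrame payloads through the
    // ring with the same copy semantics as the audio buffers above.
}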
diff --git a/libmetartc3/src/yangutil/buffer/YangVideoDecoderBuffer.cpp b/libmetartc3/src/yangutil/buffer/YangVideoDecoderBuffer.cpp
new file mode 100755
index 00000000..bf23757d
--- /dev/null
+++ b/libmetartc3/src/yangutil/buffer/YangVideoDecoderBuffer.cpp
@@ -0,0 +1,29 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "yangutil/buffer/YangVideoDecoderBuffer.h"
+
+#include <yangutil/yangavtype.h>
+#include <string.h>
+
+YangVideoDecoderBuffer::YangVideoDecoderBuffer(int32_t num) {
+    resetIndex();
+    m_cache_num = num;
+    initFrames(m_cache_num, YANG_VIDEO_ENCODE_BUFFER_LEN);
+}
+
+YangVideoDecoderBuffer::~YangVideoDecoderBuffer(void) {
+}
+
+void YangVideoDecoderBuffer::getEVideo(YangFrame *pframe) {
+    getFrame(pframe);
+}
+
+void YangVideoDecoderBuffer::putEVideo(YangFrame *pframe) {
+    putFrame(pframe);
+}
diff --git a/libmetartc3/src/yangutil/buffer/YangVideoEncoderBuffer.cpp b/libmetartc3/src/yangutil/buffer/YangVideoEncoderBuffer.cpp
new file mode 100755
index 00000000..505d4cac
--- /dev/null
+++ b/libmetartc3/src/yangutil/buffer/YangVideoEncoderBuffer.cpp
@@ -0,0 +1,34 @@
+//
+// Copyright (c) 2019-2022 yanggaofeng
+//
+#include "yangutil/buffer/YangVideoEncoderBuffer.h"
+
+#include <yangutil/yangavtype.h>
+#include <string.h>
+
+YangVideoEncoderBuffer::YangVideoEncoderBuffer(int32_t pcachenum)
+{
+    resetIndex();
+    m_cache_num = pcachenum;
+    initFrames(m_cache_num, YANG_VIDEO_ENCODE_BUFFER_LEN);
+}
+
+YangVideoEncoderBuffer::~YangVideoEncoderBuffer(void)
+{
+}
+
+void YangVideoEncoderBuffer::getEVideo(YangFrame *pframe) {
+    getFrame(pframe);
+}
+uint8_t* YangVideoEncoderBuffer::getEVideoRef(YangFrame *frame) {
+    return getFrameRef(frame);
+}
+void YangVideoEncoderBuffer::putEVideo(YangFrame *pframe) {
+    putFrame(pframe);
+}
diff --git a/libmetartc3/utils.cmake b/libmetartc3/utils.cmake
new file mode 100755
index 00000000..5073d585
--- /dev/null
+++ b/libmetartc3/utils.cmake
@@ -0,0 +1,15 @@
+# Exclude entries of a file list by regular expression. This reimplements
+# list(FILTER ...), which CMake 3.5 does not provide.
+macro(list_excluding srcs)
+    foreach(regex ${ARGN})
+        unset(del_list)
+        foreach(row ${${srcs}})
+            string(REGEX MATCH ${regex} tmp1 ${row})
+            if(tmp1)
+                list(APPEND del_list ${row})
+            endif()
+        endforeach()
+        foreach(row ${del_list})
+            list(REMOVE_ITEM ${srcs} ${row})
+        endforeach()
+    endforeach()
+endmacro()
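A usage sketch for the macro above, written in CMake since that is the language it targets (the patterns are hypothetical); it edits the named list in place:

aux_source_directory(./src DIR_SRCS)
# drop test sources and any Windows-only subtree from the build
list_excluding(DIR_SRCS ".*_test\\.cpp" ".*/win/.*")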
diff --git a/libmetartccore3/CMakeLists.txt b/libmetartccore3/CMakeLists.txt
new file mode 100755
index 00000000..78d20874
--- /dev/null
+++ b/libmetartccore3/CMakeLists.txt
@@ -0,0 +1,32 @@
+cmake_minimum_required(VERSION 2.8)
+project(libmetartccore3)
+
+add_definitions(-D__STDC_FORMAT_MACROS)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu11 -ffunction-sections -fdata-sections")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++11 -ffunction-sections -fdata-sections")
+set(HOME_BASE "../")
+
+# Header directories
+include_directories(${HOME_BASE}/include)
+include_directories(${HOME_BASE}/thirdparty/include)
+include_directories(${HOME_BASE}/thirdparty/user_include)
+include_directories(${HOME_BASE}/thirdparty/user_include/ffmpeg)
+include_directories(${HOME_BASE}/libmetartccore3/src)
+
+
+# Collect the source files in these directories
+
+aux_source_directory(./src/yangutil/sys DIR_SRCS)
+aux_source_directory(./src/yangavutil DIR_SRCS)
+aux_source_directory(./src/yangcsrs DIR_SRCS)
+aux_source_directory(./src/yangsdp DIR_SRCS)
+aux_source_directory(./src/yangstream DIR_SRCS)
+aux_source_directory(./src/yangrtmp DIR_SRCS)
+aux_source_directory(./src/yangrtp DIR_SRCS)
+aux_source_directory(./src/yangwebrtc DIR_SRCS)
+include(utils.cmake)
+# Exclude files that should not take part in the build
+
+
+add_library(metartccore3 ${DIR_SRCS})
diff --git a/libmetartccore3/metartccore3.pro b/libmetartccore3/metartccore3.pro
new file mode 100755
index 00000000..8a5aa37a
--- /dev/null
+++ b/libmetartccore3/metartccore3.pro
@@ -0,0 +1,216 @@
+CONFIG -= qt
+
+TEMPLATE = lib
+CONFIG += staticlib
+
+CONFIG += c++14
+
+# The following define makes your compiler emit warnings if you use
+# any Qt feature that has been marked deprecated (the exact warnings
+# depend on your compiler). Please consult the documentation of the
+# deprecated API in order to know how to port your code away from it.
+DEFINES += QT_DEPRECATED_WARNINGS
+DEFINES += __STDC_FORMAT_MACROS
+HOME_BASE=../
+INCLUDEPATH += $$HOME_BASE/include
+INCLUDEPATH += $$HOME_BASE/thirdparty/include
+INCLUDEPATH += $$HOME_BASE/thirdparty/user_include
+INCLUDEPATH += $$HOME_BASE/thirdparty/user_include/ffmpeg
+INCLUDEPATH += $$HOME_BASE/libmetartccore3/src
+
+unix{
+    CONFIG(debug, debug|release) {
+        DESTDIR += $$HOME_BASE/bin/lib_debug
+    }else{
+        DESTDIR += $$HOME_BASE/bin/lib_release
+    }
+}
+win32{
+    DEFINES += _AMD64_
+    INCLUDEPATH += $$HOME_BASE\thirdparty\include\win
+    CONFIG(debug, debug|release) {
+        DESTDIR += $$HOME_BASE\bin\lib_win_debug
+    }else{
+        DESTDIR += $$HOME_BASE\bin\lib_win_release
+    }
+
+    msvc{
+        QMAKE_CFLAGS += /utf-8
+        QMAKE_CXXFLAGS += /utf-8
+        # QMAKE_CXXFLAGS += /source-charset:utf-8 /execution-charset:utf-8
+        DEFINES +=HAVE_STRUCT_TIMESPEC
+        DEFINES +=WIN32_LEAN_AND_MEAN
+        INCLUDEPATH += $$HOME_BASE\thirdparty\include\win\include
+    }
+
+}
+# You can also make your code fail to compile if it uses deprecated APIs.
+# In order to do so, uncomment the following line.
+# You can also select to disable deprecated APIs only up to a certain version of Qt.
+#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000 # disables all the APIs deprecated before Qt 6.0.0 + +SOURCES += \ + src/yangavutil/YangAudioMix.c \ + src/yangavutil/YangAudioUtil.c \ + src/yangavutil/YangConvert.c \ + src/yangavutil/YangMeta.c \ + src/yangavutil/YangNalu.c \ + src/yangavutil/YangPreProcess.c \ + src/yangavutil/YangResample.c \ + src/yangavutil/YangRtcAec.c \ + src/yangavutil/YangYuvUtil.c \ + src/yangcsrs/YangSrsConnection.c \ + src/yangcsrs/YangSrsRtcHandle.c \ + src/yangcsrs/YangSrsSdp.c \ + src/yangrtmp/YangRtmp.c \ + src/yangrtp/YangPublishNackBuffer.c \ + src/yangrtp/YangReceiveNackBuffer.c \ + src/yangrtp/YangRtcp.c \ + src/yangrtp/YangRtcpApp.c \ + src/yangrtp/YangRtcpCommon.c \ + src/yangrtp/YangRtcpCompound.c \ + src/yangrtp/YangRtcpNack.c \ + src/yangrtp/YangRtcpPli.c \ + src/yangrtp/YangRtcpPsfbCommon.c \ + src/yangrtp/YangRtcpRR.c \ + src/yangrtp/YangRtcpRpsi.c \ + src/yangrtp/YangRtcpSR.c \ + src/yangrtp/YangRtcpSli.c \ + src/yangrtp/YangRtcpTWCC.c \ + src/yangrtp/YangRtcpXr.c \ + src/yangrtp/YangRtp.c \ + src/yangrtp/YangRtpBuffer.c \ + src/yangrtp/YangRtpFUAPayload.c \ + src/yangrtp/YangRtpFUAPayload2.c \ + src/yangrtp/YangRtpHeader.c \ + src/yangrtp/YangRtpPacket.c \ + src/yangrtp/YangRtpRawPayload.c \ + src/yangrtp/YangRtpRecvNack.c \ + src/yangrtp/YangRtpSTAPPayload.c \ + src/yangsdp/YangAudioPayload.c \ + src/yangsdp/YangCodecPayload.c \ + src/yangsdp/YangMediaDesc.c \ + src/yangsdp/YangMediaPayloadType.c \ + src/yangsdp/YangRedPayload.c \ + src/yangsdp/YangRtcSdp.c \ + src/yangsdp/YangRtxPayloadDes.c \ + src/yangsdp/YangSSRCInfo.c \ + src/yangsdp/YangSdp.c \ + src/yangsdp/YangSdpHandle.c \ + src/yangstream/YangStream.c \ + src/yangstream/YangStreamCapture.c \ + src/yangstream/YangStreamRtc.c \ + src/yangstream/YangStreamRtmp.c \ + src/yangutil/sys/YangAmf.c \ + src/yangutil/sys/YangAvtype.c \ + src/yangutil/sys/YangBuffer.c \ + src/yangutil/sys/YangCLog.c \ + src/yangutil/sys/YangCString.c \ + src/yangutil/sys/YangCTimer.c \ + src/yangutil/sys/YangEndian.c \ + src/yangutil/sys/YangFile.c \ + src/yangutil/sys/YangHttpSocket.c \ + src/yangutil/sys/YangLibHandle.c \ + src/yangutil/sys/YangMath.c \ + src/yangutil/sys/YangSRtp.c \ + src/yangutil/sys/YangSocket.c \ + src/yangutil/sys/YangSsl.c \ + src/yangutil/sys/YangSsrc.c \ + src/yangutil/sys/YangTime.c \ + src/yangutil/sys/YangUrl.c \ + src/yangutil/sys/YangVector.c \ + src/yangutil/sys/YangWebsocket.c \ + src/yangwebrtc/YangAec.c \ + src/yangwebrtc/YangH264RecvTrack.c \ + src/yangwebrtc/YangH264RtpEncode.c \ + src/yangwebrtc/YangH265RecvTrack.c \ + src/yangwebrtc/YangH265RtpEncode.c \ + src/yangwebrtc/YangMetaConnection.c \ + src/yangwebrtc/YangPeerConnection.c \ + src/yangwebrtc/YangRecvTrack.c \ + src/yangwebrtc/YangRtcAudioRecvTrack.c \ + src/yangwebrtc/YangRtcConnection.c \ + src/yangwebrtc/YangRtcContext.c \ + src/yangwebrtc/YangRtcDtls.c \ + src/yangwebrtc/YangRtcPlayStream.c \ + src/yangwebrtc/YangRtcPublishStream.c \ + src/yangwebrtc/YangRtcSession.c \ + src/yangwebrtc/YangRtcStun.c \ + src/yangwebrtc/YangStreamHandle.c \ + src/yangwebrtc/YangUdpHandle.c \ + src/yangwebrtc/YangVideoRecvTrack.c + + +HEADERS += \ + src/yangcsrs/YangSrsConnection.h \ + src/yangcsrs/YangSrsSdp.h \ + src/yangrtmp/YangRtmp.h \ + src/yangrtmp/YangRtmp2.h \ + src/yangrtp/YangPublishNackBuffer.h \ + src/yangrtp/YangReceiveNackBuffer.h \ + src/yangrtp/YangRtcp.h \ + src/yangrtp/YangRtcpApp.h \ + src/yangrtp/YangRtcpCommon.h \ + src/yangrtp/YangRtcpCompound.h \ + src/yangrtp/YangRtcpNack.h \ + src/yangrtp/YangRtcpPli.h 
\ + src/yangrtp/YangRtcpPsfbCommon.h \ + src/yangrtp/YangRtcpRR.h \ + src/yangrtp/YangRtcpRpsi.h \ + src/yangrtp/YangRtcpSR.h \ + src/yangrtp/YangRtcpSli.h \ + src/yangrtp/YangRtcpTWCC.h \ + src/yangrtp/YangRtcpXr.h \ + src/yangrtp/YangRtp.h \ + src/yangrtp/YangRtpBuffer.h \ + src/yangrtp/YangRtpConstant.h \ + src/yangrtp/YangRtpFUAPayload.h \ + src/yangrtp/YangRtpFUAPayload2.h \ + src/yangrtp/YangRtpHeader.h \ + src/yangrtp/YangRtpPacket.h \ + src/yangrtp/YangRtpRawPayload.h \ + src/yangrtp/YangRtpRecvNack.h \ + src/yangrtp/YangRtpSTAPPayload.h \ + src/yangsdp/YangAudioPayload.h \ + src/yangsdp/YangCodecPayload.h \ + src/yangsdp/YangMediaDesc.h \ + src/yangsdp/YangMediaPayloadType.h \ + src/yangsdp/YangRedPayload.h \ + src/yangsdp/YangRtcSdp.h \ + src/yangsdp/YangRtxPayloadDes.h \ + src/yangsdp/YangSSRCInfo.h \ + src/yangsdp/YangSdp.h \ + src/yangsdp/YangSdpHandle.h \ + src/yangsdp/YangSdpType.h \ + src/yangstream/YangStreamRtc.h \ + src/yangstream/YangStreamRtmp.h \ + src/yangwebrtc/YangH264RecvTrack.h \ + src/yangwebrtc/YangH264RtpEncode.h \ + src/yangwebrtc/YangH265RecvTrack.h \ + src/yangwebrtc/YangH265RtpEncode.h \ + src/yangwebrtc/YangRecvTrack.h \ + src/yangwebrtc/YangRtcAudioRecvTrack.h \ + src/yangwebrtc/YangRtcConnection.h \ + src/yangwebrtc/YangRtcContext.h \ + src/yangwebrtc/YangRtcContextH.h \ + src/yangwebrtc/YangRtcDtls.h \ + src/yangwebrtc/YangRtcDtlsH.h \ + src/yangwebrtc/YangRtcEncodeCommon.h \ + src/yangwebrtc/YangRtcPlayStream.h \ + src/yangwebrtc/YangRtcPublishStream.h \ + src/yangwebrtc/YangRtcSession.h \ + src/yangwebrtc/YangRtcSessionH.h \ + src/yangwebrtc/YangRtcStream.h \ + src/yangwebrtc/YangRtcStun.h \ + src/yangwebrtc/YangStreamHandle.h \ + src/yangwebrtc/YangUdpHandle.h \ + src/yangwebrtc/YangUdpHandleH.h \ + src/yangwebrtc/YangVideoRecvTrack.h + + +# Default rules for deployment. 
+unix { + target.path = $$[QT_INSTALL_PLUGINS]/generic +} +!isEmpty(target.path): INSTALLS += target diff --git a/libmetartccore3/src/ffmpeg/Makefile b/libmetartccore3/src/ffmpeg/Makefile new file mode 100755 index 00000000..a87ce375 --- /dev/null +++ b/libmetartccore3/src/ffmpeg/Makefile @@ -0,0 +1,676 @@ +NAME = avformat +DESC = FFmpeg container format library + +HEADERS = avformat.h \ + avio.h \ + version.h \ + +OBJS = allformats.o \ + avio.o \ + aviobuf.o \ + cutils.o \ + webrtc_demuxer.o \ + webrtc_muxer.o \ + webrtc_proto.o \ + dump.o \ + format.o \ + id3v1.o \ + id3v2.o \ + metadata.o \ + mux.o \ + options.o \ + os_support.o \ + qtpalette.o \ + protocols.o \ + riff.o \ + sdp.o \ + url.o \ + utils.o \ + +OBJS-$(HAVE_LIBC_MSVCRT) += file_open.o + +# subsystems +OBJS-$(CONFIG_ISO_MEDIA) += isom.o +OBJS-$(CONFIG_NETWORK) += network.o +OBJS-$(CONFIG_RIFFDEC) += riffdec.o +OBJS-$(CONFIG_RIFFENC) += riffenc.o +OBJS-$(CONFIG_RTPDEC) += rdt.o \ + rtp.o \ + rtpdec.o \ + rtpdec_ac3.o \ + rtpdec_amr.o \ + rtpdec_asf.o \ + rtpdec_dv.o \ + rtpdec_g726.o \ + rtpdec_h261.o \ + rtpdec_h263.o \ + rtpdec_h263_rfc2190.o \ + rtpdec_h264.o \ + rtpdec_hevc.o \ + rtpdec_ilbc.o \ + rtpdec_jpeg.o \ + rtpdec_latm.o \ + rtpdec_mpa_robust.o \ + rtpdec_mpeg12.o \ + rtpdec_mpeg4.o \ + rtpdec_mpegts.o \ + rtpdec_qcelp.o \ + rtpdec_qdm2.o \ + rtpdec_qt.o \ + rtpdec_rfc4175.o \ + rtpdec_svq3.o \ + rtpdec_vc2hq.o \ + rtpdec_vp8.o \ + rtpdec_vp9.o \ + rtpdec_xiph.o +OBJS-$(CONFIG_RTPENC_CHAIN) += rtpenc_chain.o rtp.o +OBJS-$(CONFIG_SHARED) += log2_tab.o golomb_tab.o +OBJS-$(CONFIG_SRTP) += srtp.o + +# muxers/demuxers +OBJS-$(CONFIG_A64_MUXER) += a64.o rawenc.o +OBJS-$(CONFIG_AA_DEMUXER) += aadec.o +OBJS-$(CONFIG_AAC_DEMUXER) += aacdec.o apetag.o img2.o rawdec.o +OBJS-$(CONFIG_AC3_DEMUXER) += ac3dec.o rawdec.o +OBJS-$(CONFIG_AC3_MUXER) += rawenc.o +OBJS-$(CONFIG_ACM_DEMUXER) += acm.o rawdec.o +OBJS-$(CONFIG_ACT_DEMUXER) += act.o +OBJS-$(CONFIG_ADF_DEMUXER) += bintext.o sauce.o +OBJS-$(CONFIG_ADP_DEMUXER) += adp.o +OBJS-$(CONFIG_ADS_DEMUXER) += ads.o +OBJS-$(CONFIG_ADTS_MUXER) += adtsenc.o apetag.o img2.o \ + id3v2enc.o +OBJS-$(CONFIG_ADX_DEMUXER) += adxdec.o +OBJS-$(CONFIG_ADX_MUXER) += rawenc.o +OBJS-$(CONFIG_AEA_DEMUXER) += aea.o pcm.o +OBJS-$(CONFIG_AFC_DEMUXER) += afc.o +OBJS-$(CONFIG_AIFF_DEMUXER) += aiffdec.o pcm.o isom.o \ + mov_chan.o replaygain.o +OBJS-$(CONFIG_AIFF_MUXER) += aiffenc.o id3v2enc.o +OBJS-$(CONFIG_AIX_DEMUXER) += aixdec.o +OBJS-$(CONFIG_ALP_DEMUXER) += alp.o +OBJS-$(CONFIG_AMR_DEMUXER) += amr.o +OBJS-$(CONFIG_AMR_MUXER) += amr.o rawenc.o +OBJS-$(CONFIG_AMRNB_DEMUXER) += amr.o +OBJS-$(CONFIG_AMRWB_DEMUXER) += amr.o +OBJS-$(CONFIG_ANM_DEMUXER) += anm.o +OBJS-$(CONFIG_APC_DEMUXER) += apc.o +OBJS-$(CONFIG_APE_DEMUXER) += ape.o apetag.o img2.o +OBJS-$(CONFIG_APM_DEMUXER) += apm.o riffdec.o +OBJS-$(CONFIG_APNG_DEMUXER) += apngdec.o +OBJS-$(CONFIG_APNG_MUXER) += apngenc.o +OBJS-$(CONFIG_APTX_DEMUXER) += aptxdec.o rawdec.o +OBJS-$(CONFIG_APTX_MUXER) += rawenc.o +OBJS-$(CONFIG_APTX_HD_DEMUXER) += aptxdec.o rawdec.o +OBJS-$(CONFIG_APTX_HD_MUXER) += rawenc.o +OBJS-$(CONFIG_AQTITLE_DEMUXER) += aqtitledec.o subtitles.o +OBJS-$(CONFIG_ARGO_ASF_DEMUXER) += argo_asf.o +OBJS-$(CONFIG_ASF_DEMUXER) += asfdec_f.o asf.o asfcrypt.o \ + avlanguage.o +OBJS-$(CONFIG_ASF_O_DEMUXER) += asfdec_o.o asf.o asfcrypt.o \ + avlanguage.o +OBJS-$(CONFIG_ASF_MUXER) += asfenc.o asf.o avlanguage.o +OBJS-$(CONFIG_ASS_DEMUXER) += assdec.o subtitles.o +OBJS-$(CONFIG_ASS_MUXER) += assenc.o +OBJS-$(CONFIG_AST_DEMUXER) += ast.o astdec.o 
+OBJS-$(CONFIG_AST_MUXER) += ast.o astenc.o +OBJS-$(CONFIG_AU_DEMUXER) += au.o pcm.o +OBJS-$(CONFIG_AU_MUXER) += au.o rawenc.o +OBJS-$(CONFIG_AVI_DEMUXER) += avidec.o +OBJS-$(CONFIG_AVI_MUXER) += avienc.o mpegtsenc.o avlanguage.o rawutils.o +OBJS-$(CONFIG_AVM2_MUXER) += swfenc.o swf.o +OBJS-$(CONFIG_AVR_DEMUXER) += avr.o pcm.o +OBJS-$(CONFIG_AVS_DEMUXER) += avs.o voc_packet.o vocdec.o voc.o +OBJS-$(CONFIG_AVS2_DEMUXER) += davs2.o rawdec.o +OBJS-$(CONFIG_AVS2_MUXER) += rawenc.o +OBJS-$(CONFIG_BETHSOFTVID_DEMUXER) += bethsoftvid.o +OBJS-$(CONFIG_BFI_DEMUXER) += bfi.o +OBJS-$(CONFIG_BINK_DEMUXER) += bink.o +OBJS-$(CONFIG_BINTEXT_DEMUXER) += bintext.o sauce.o +OBJS-$(CONFIG_BIT_DEMUXER) += bit.o +OBJS-$(CONFIG_BIT_MUXER) += bit.o +OBJS-$(CONFIG_BMV_DEMUXER) += bmv.o +OBJS-$(CONFIG_BOA_DEMUXER) += boadec.o +OBJS-$(CONFIG_BFSTM_DEMUXER) += brstm.o +OBJS-$(CONFIG_BRSTM_DEMUXER) += brstm.o +OBJS-$(CONFIG_C93_DEMUXER) += c93.o voc_packet.o vocdec.o voc.o +OBJS-$(CONFIG_CAF_DEMUXER) += cafdec.o caf.o mov_chan.o mov_esds.o +OBJS-$(CONFIG_CAF_MUXER) += cafenc.o caf.o riff.o +OBJS-$(CONFIG_CAVSVIDEO_DEMUXER) += cavsvideodec.o rawdec.o +OBJS-$(CONFIG_CAVSVIDEO_MUXER) += rawenc.o +OBJS-$(CONFIG_CDG_DEMUXER) += cdg.o +OBJS-$(CONFIG_CDXL_DEMUXER) += cdxl.o +OBJS-$(CONFIG_CINE_DEMUXER) += cinedec.o +OBJS-$(CONFIG_CODEC2_DEMUXER) += codec2.o rawdec.o pcm.o +OBJS-$(CONFIG_CODEC2_MUXER) += codec2.o rawenc.o +OBJS-$(CONFIG_CODEC2RAW_DEMUXER) += codec2.o rawdec.o pcm.o +OBJS-$(CONFIG_CODEC2RAW_MUXER) += rawenc.o +OBJS-$(CONFIG_CONCAT_DEMUXER) += concatdec.o +OBJS-$(CONFIG_CRC_MUXER) += crcenc.o +OBJS-$(CONFIG_DATA_DEMUXER) += rawdec.o +OBJS-$(CONFIG_DATA_MUXER) += rawenc.o +OBJS-$(CONFIG_DASH_MUXER) += dash.o dashenc.o hlsplaylist.o +OBJS-$(CONFIG_DASH_DEMUXER) += dash.o dashdec.o +OBJS-$(CONFIG_DAUD_DEMUXER) += dauddec.o +OBJS-$(CONFIG_DAUD_MUXER) += daudenc.o +OBJS-$(CONFIG_DCSTR_DEMUXER) += dcstr.o +OBJS-$(CONFIG_DERF_DEMUXER) += derf.o pcm.o +OBJS-$(CONFIG_DFA_DEMUXER) += dfa.o +OBJS-$(CONFIG_DHAV_DEMUXER) += dhav.o +OBJS-$(CONFIG_DIRAC_DEMUXER) += diracdec.o rawdec.o +OBJS-$(CONFIG_DIRAC_MUXER) += rawenc.o +OBJS-$(CONFIG_DNXHD_DEMUXER) += dnxhddec.o rawdec.o +OBJS-$(CONFIG_DNXHD_MUXER) += rawenc.o +OBJS-$(CONFIG_DSF_DEMUXER) += dsfdec.o +OBJS-$(CONFIG_DSICIN_DEMUXER) += dsicin.o +OBJS-$(CONFIG_DSS_DEMUXER) += dss.o +OBJS-$(CONFIG_DTSHD_DEMUXER) += dtshddec.o +OBJS-$(CONFIG_DTS_DEMUXER) += dtsdec.o rawdec.o +OBJS-$(CONFIG_DTS_MUXER) += rawenc.o +OBJS-$(CONFIG_DV_DEMUXER) += dv.o +OBJS-$(CONFIG_DV_MUXER) += dvenc.o +OBJS-$(CONFIG_DVBSUB_DEMUXER) += dvbsub.o rawdec.o +OBJS-$(CONFIG_DVBTXT_DEMUXER) += dvbtxt.o rawdec.o +OBJS-$(CONFIG_DXA_DEMUXER) += dxa.o +OBJS-$(CONFIG_EA_CDATA_DEMUXER) += eacdata.o +OBJS-$(CONFIG_EA_DEMUXER) += electronicarts.o +OBJS-$(CONFIG_EAC3_DEMUXER) += ac3dec.o rawdec.o +OBJS-$(CONFIG_EAC3_MUXER) += rawenc.o +OBJS-$(CONFIG_EPAF_DEMUXER) += epafdec.o pcm.o +OBJS-$(CONFIG_FFMETADATA_DEMUXER) += ffmetadec.o +OBJS-$(CONFIG_FFMETADATA_MUXER) += ffmetaenc.o +OBJS-$(CONFIG_FIFO_MUXER) += fifo.o +OBJS-$(CONFIG_FIFO_TEST_MUXER) += fifo_test.o +OBJS-$(CONFIG_FILMSTRIP_DEMUXER) += filmstripdec.o +OBJS-$(CONFIG_FILMSTRIP_MUXER) += filmstripenc.o rawenc.o +OBJS-$(CONFIG_FITS_DEMUXER) += fitsdec.o +OBJS-$(CONFIG_FITS_MUXER) += fitsenc.o +OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o rawdec.o \ + flac_picture.o \ + oggparsevorbis.o \ + replaygain.o \ + vorbiscomment.o +OBJS-$(CONFIG_FLAC_MUXER) += flacenc.o flacenc_header.o \ + vorbiscomment.o +OBJS-$(CONFIG_FLIC_DEMUXER) += flic.o 
+OBJS-$(CONFIG_FLV_DEMUXER) += flvdec.o +OBJS-$(CONFIG_LIVE_FLV_DEMUXER) += flvdec.o +OBJS-$(CONFIG_FLV_MUXER) += flvenc.o avc.o +OBJS-$(CONFIG_FOURXM_DEMUXER) += 4xm.o +OBJS-$(CONFIG_FRAMECRC_MUXER) += framecrcenc.o framehash.o +OBJS-$(CONFIG_FRAMEHASH_MUXER) += hashenc.o framehash.o +OBJS-$(CONFIG_FRAMEMD5_MUXER) += hashenc.o framehash.o +OBJS-$(CONFIG_FRM_DEMUXER) += frmdec.o +OBJS-$(CONFIG_FSB_DEMUXER) += fsb.o +OBJS-$(CONFIG_FWSE_DEMUXER) += fwse.o pcm.o +OBJS-$(CONFIG_GIF_MUXER) += gif.o +OBJS-$(CONFIG_GIF_DEMUXER) += gifdec.o +OBJS-$(CONFIG_GSM_DEMUXER) += gsmdec.o +OBJS-$(CONFIG_GSM_MUXER) += rawenc.o +OBJS-$(CONFIG_GXF_DEMUXER) += gxf.o +OBJS-$(CONFIG_GXF_MUXER) += gxfenc.o +OBJS-$(CONFIG_G722_DEMUXER) += g722.o rawdec.o +OBJS-$(CONFIG_G722_MUXER) += rawenc.o +OBJS-$(CONFIG_G723_1_DEMUXER) += g723_1.o +OBJS-$(CONFIG_G723_1_MUXER) += rawenc.o +OBJS-$(CONFIG_G726_DEMUXER) += g726.o +OBJS-$(CONFIG_G726_MUXER) += rawenc.o +OBJS-$(CONFIG_G726LE_DEMUXER) += g726.o +OBJS-$(CONFIG_G726LE_MUXER) += rawenc.o +OBJS-$(CONFIG_G729_DEMUXER) += g729dec.o +OBJS-$(CONFIG_GDV_DEMUXER) += gdv.o +OBJS-$(CONFIG_GENH_DEMUXER) += genh.o +OBJS-$(CONFIG_H261_DEMUXER) += h261dec.o rawdec.o +OBJS-$(CONFIG_H261_MUXER) += rawenc.o +OBJS-$(CONFIG_H263_DEMUXER) += h263dec.o rawdec.o +OBJS-$(CONFIG_H263_MUXER) += rawenc.o +OBJS-$(CONFIG_H264_DEMUXER) += h264dec.o rawdec.o +OBJS-$(CONFIG_H264_MUXER) += rawenc.o +OBJS-$(CONFIG_HASH_MUXER) += hashenc.o +OBJS-$(CONFIG_HCA_DEMUXER) += hca.o +OBJS-$(CONFIG_HCOM_DEMUXER) += hcom.o pcm.o +OBJS-$(CONFIG_HDS_MUXER) += hdsenc.o +OBJS-$(CONFIG_HEVC_DEMUXER) += hevcdec.o rawdec.o +OBJS-$(CONFIG_HEVC_MUXER) += rawenc.o +OBJS-$(CONFIG_HLS_DEMUXER) += hls.o +OBJS-$(CONFIG_HLS_MUXER) += hlsenc.o hlsplaylist.o +OBJS-$(CONFIG_HNM_DEMUXER) += hnm.o +OBJS-$(CONFIG_ICO_DEMUXER) += icodec.o +OBJS-$(CONFIG_ICO_MUXER) += icoenc.o +OBJS-$(CONFIG_IDCIN_DEMUXER) += idcin.o +OBJS-$(CONFIG_IDF_DEMUXER) += bintext.o sauce.o +OBJS-$(CONFIG_IFF_DEMUXER) += iff.o +OBJS-$(CONFIG_IFV_DEMUXER) += ifv.o +OBJS-$(CONFIG_ILBC_DEMUXER) += ilbc.o +OBJS-$(CONFIG_ILBC_MUXER) += ilbc.o rawenc.o +OBJS-$(CONFIG_IMAGE2_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE2_MUXER) += img2enc.o img2.o +OBJS-$(CONFIG_IMAGE2PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE2PIPE_MUXER) += img2enc.o img2.o +OBJS-$(CONFIG_IMAGE2_ALIAS_PIX_DEMUXER) += img2_alias_pix.o +OBJS-$(CONFIG_IMAGE2_BRENDER_PIX_DEMUXER) += img2_brender_pix.o +OBJS-$(CONFIG_IMAGE_BMP_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_DDS_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_DPX_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_EXR_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_GIF_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_J2K_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_JPEG_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_PAM_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_PBM_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_PCX_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_PGM_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_PICTOR_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_PNG_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_PPM_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_PSD_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_QDRAW_PIPE_DEMUXER) += img2dec.o img2.o 
+OBJS-$(CONFIG_IMAGE_SGI_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_SVG_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_TIFF_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_WEBP_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_XPM_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_IMAGE_XWD_PIPE_DEMUXER) += img2dec.o img2.o +OBJS-$(CONFIG_INGENIENT_DEMUXER) += ingenientdec.o rawdec.o +OBJS-$(CONFIG_IPMOVIE_DEMUXER) += ipmovie.o +OBJS-$(CONFIG_IRCAM_DEMUXER) += ircamdec.o ircam.o pcm.o +OBJS-$(CONFIG_IRCAM_MUXER) += ircamenc.o ircam.o rawenc.o +OBJS-$(CONFIG_ISS_DEMUXER) += iss.o +OBJS-$(CONFIG_IV8_DEMUXER) += iv8.o +OBJS-$(CONFIG_IVF_DEMUXER) += ivfdec.o +OBJS-$(CONFIG_IVF_MUXER) += ivfenc.o +OBJS-$(CONFIG_IVR_DEMUXER) += rmdec.o rm.o rmsipr.o +OBJS-$(CONFIG_JACOSUB_DEMUXER) += jacosubdec.o subtitles.o +OBJS-$(CONFIG_JACOSUB_MUXER) += jacosubenc.o rawenc.o +OBJS-$(CONFIG_JV_DEMUXER) += jvdec.o +OBJS-$(CONFIG_KUX_DEMUXER) += flvdec.o +OBJS-$(CONFIG_KVAG_DEMUXER) += kvag.o +OBJS-$(CONFIG_KVAG_MUXER) += kvag.o rawenc.o +OBJS-$(CONFIG_LATM_MUXER) += latmenc.o rawenc.o +OBJS-$(CONFIG_LMLM4_DEMUXER) += lmlm4.o +OBJS-$(CONFIG_LOAS_DEMUXER) += loasdec.o rawdec.o +OBJS-$(CONFIG_LRC_DEMUXER) += lrcdec.o lrc.o subtitles.o +OBJS-$(CONFIG_LRC_MUXER) += lrcenc.o lrc.o +OBJS-$(CONFIG_LVF_DEMUXER) += lvfdec.o +OBJS-$(CONFIG_LXF_DEMUXER) += lxfdec.o +OBJS-$(CONFIG_M4V_DEMUXER) += m4vdec.o rawdec.o +OBJS-$(CONFIG_M4V_MUXER) += rawenc.o +OBJS-$(CONFIG_MATROSKA_DEMUXER) += matroskadec.o matroska.o \ + rmsipr.o flac_picture.o \ + oggparsevorbis.o vorbiscomment.o \ + replaygain.o +OBJS-$(CONFIG_MATROSKA_MUXER) += matroskaenc.o matroska.o \ + av1.o avc.o hevc.o \ + flacenc_header.o avlanguage.o \ + vorbiscomment.o wv.o +OBJS-$(CONFIG_MD5_MUXER) += hashenc.o +OBJS-$(CONFIG_MGSTS_DEMUXER) += mgsts.o +OBJS-$(CONFIG_MICRODVD_DEMUXER) += microdvddec.o subtitles.o +OBJS-$(CONFIG_MICRODVD_MUXER) += microdvdenc.o +OBJS-$(CONFIG_MJPEG_2000_DEMUXER) += rawdec.o mj2kdec.o +OBJS-$(CONFIG_MJPEG_DEMUXER) += rawdec.o +OBJS-$(CONFIG_MJPEG_MUXER) += rawenc.o +OBJS-$(CONFIG_MLP_DEMUXER) += rawdec.o mlpdec.o +OBJS-$(CONFIG_MLP_MUXER) += rawenc.o +OBJS-$(CONFIG_MLV_DEMUXER) += mlvdec.o riffdec.o +OBJS-$(CONFIG_MM_DEMUXER) += mm.o +OBJS-$(CONFIG_MMF_DEMUXER) += mmf.o +OBJS-$(CONFIG_MMF_MUXER) += mmf.o rawenc.o +OBJS-$(CONFIG_MOV_DEMUXER) += mov.o mov_chan.o mov_esds.o replaygain.o +OBJS-$(CONFIG_MOV_MUXER) += movenc.o av1.o avc.o hevc.o vpcc.o \ + movenchint.o mov_chan.o rtp.o \ + movenccenc.o rawutils.o +OBJS-$(CONFIG_MP2_MUXER) += rawenc.o +OBJS-$(CONFIG_MP3_DEMUXER) += mp3dec.o replaygain.o +OBJS-$(CONFIG_MP3_MUXER) += mp3enc.o rawenc.o id3v2enc.o +OBJS-$(CONFIG_MPC_DEMUXER) += mpc.o apetag.o img2.o +OBJS-$(CONFIG_MPC8_DEMUXER) += mpc8.o apetag.o img2.o +OBJS-$(CONFIG_MPEG1SYSTEM_MUXER) += mpegenc.o +OBJS-$(CONFIG_MPEG1VCD_MUXER) += mpegenc.o +OBJS-$(CONFIG_MPEG1VIDEO_MUXER) += rawenc.o +OBJS-$(CONFIG_MPEG2DVD_MUXER) += mpegenc.o +OBJS-$(CONFIG_MPEG2SVCD_MUXER) += mpegenc.o +OBJS-$(CONFIG_MPEG2VIDEO_MUXER) += rawenc.o +OBJS-$(CONFIG_MPEG2VOB_MUXER) += mpegenc.o +OBJS-$(CONFIG_MPEGPS_DEMUXER) += mpeg.o +OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpegts.o +OBJS-$(CONFIG_MPEGTS_MUXER) += mpegtsenc.o +OBJS-$(CONFIG_MPEGVIDEO_DEMUXER) += mpegvideodec.o rawdec.o +OBJS-$(CONFIG_MPJPEG_DEMUXER) += mpjpegdec.o +OBJS-$(CONFIG_MPJPEG_MUXER) += mpjpeg.o +OBJS-$(CONFIG_MPL2_DEMUXER) += mpl2dec.o subtitles.o +OBJS-$(CONFIG_MSF_DEMUXER) += msf.o 
+OBJS-$(CONFIG_MPSUB_DEMUXER) += mpsubdec.o subtitles.o +OBJS-$(CONFIG_MSNWC_TCP_DEMUXER) += msnwc_tcp.o +OBJS-$(CONFIG_MTAF_DEMUXER) += mtaf.o +OBJS-$(CONFIG_MTV_DEMUXER) += mtv.o +OBJS-$(CONFIG_MUSX_DEMUXER) += musx.o +OBJS-$(CONFIG_MV_DEMUXER) += mvdec.o +OBJS-$(CONFIG_MVI_DEMUXER) += mvi.o +OBJS-$(CONFIG_MXF_DEMUXER) += mxfdec.o mxf.o +OBJS-$(CONFIG_MXF_MUXER) += mxfenc.o mxf.o avc.o +OBJS-$(CONFIG_MXG_DEMUXER) += mxg.o +OBJS-$(CONFIG_NC_DEMUXER) += ncdec.o +OBJS-$(CONFIG_NISTSPHERE_DEMUXER) += nistspheredec.o pcm.o +OBJS-$(CONFIG_NSP_DEMUXER) += nspdec.o pcm.o +OBJS-$(CONFIG_NSV_DEMUXER) += nsvdec.o +OBJS-$(CONFIG_NULL_MUXER) += nullenc.o +OBJS-$(CONFIG_NUT_DEMUXER) += nutdec.o nut.o isom.o +OBJS-$(CONFIG_NUT_MUXER) += nutenc.o nut.o +OBJS-$(CONFIG_NUV_DEMUXER) += nuv.o +OBJS-$(CONFIG_AV1_DEMUXER) += av1dec.o +OBJS-$(CONFIG_OGG_DEMUXER) += oggdec.o \ + oggparsecelt.o \ + oggparsedirac.o \ + oggparseflac.o \ + oggparseogm.o \ + oggparseopus.o \ + oggparseskeleton.o \ + oggparsespeex.o \ + oggparsetheora.o \ + oggparsevorbis.o \ + oggparsevp8.o \ + replaygain.o \ + vorbiscomment.o \ + flac_picture.o +OBJS-$(CONFIG_OGA_MUXER) += oggenc.o \ + vorbiscomment.o +OBJS-$(CONFIG_OGG_MUXER) += oggenc.o \ + vorbiscomment.o +OBJS-$(CONFIG_OGV_MUXER) += oggenc.o \ + vorbiscomment.o +OBJS-$(CONFIG_OMA_DEMUXER) += omadec.o pcm.o oma.o +OBJS-$(CONFIG_OMA_MUXER) += omaenc.o rawenc.o oma.o id3v2enc.o +OBJS-$(CONFIG_OPUS_MUXER) += oggenc.o \ + vorbiscomment.o +OBJS-$(CONFIG_PAF_DEMUXER) += paf.o +OBJS-$(CONFIG_PCM_ALAW_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_ALAW_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_F32BE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_F32BE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_F32LE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_F32LE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_F64BE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_F64BE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_F64LE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_F64LE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_MULAW_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_MULAW_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_S16BE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_S16BE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_S16LE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_S16LE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_S24BE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_S24BE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_S24LE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_S24LE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_S32BE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_S32BE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_S32LE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_S32LE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_S8_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_S8_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_U16BE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_U16BE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_U16LE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_U16LE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_U24BE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_U24BE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_U24LE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_U24LE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_U32BE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_U32BE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_U32LE_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_U32LE_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_U8_DEMUXER) += pcmdec.o pcm.o 
+OBJS-$(CONFIG_PCM_U8_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_VIDC_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_VIDC_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PJS_DEMUXER) += pjsdec.o subtitles.o +OBJS-$(CONFIG_PMP_DEMUXER) += pmpdec.o +OBJS-$(CONFIG_PP_BNK_DEMUXER) += pp_bnk.o +OBJS-$(CONFIG_PVA_DEMUXER) += pva.o +OBJS-$(CONFIG_PVF_DEMUXER) += pvfdec.o pcm.o +OBJS-$(CONFIG_QCP_DEMUXER) += qcp.o +OBJS-$(CONFIG_R3D_DEMUXER) += r3d.o +OBJS-$(CONFIG_RAWVIDEO_DEMUXER) += rawvideodec.o +OBJS-$(CONFIG_RAWVIDEO_MUXER) += rawenc.o +OBJS-$(CONFIG_REALTEXT_DEMUXER) += realtextdec.o subtitles.o +OBJS-$(CONFIG_REDSPARK_DEMUXER) += redspark.o +OBJS-$(CONFIG_RL2_DEMUXER) += rl2.o +OBJS-$(CONFIG_RM_DEMUXER) += rmdec.o rm.o rmsipr.o +OBJS-$(CONFIG_RM_MUXER) += rmenc.o rm.o +OBJS-$(CONFIG_ROQ_DEMUXER) += idroqdec.o +OBJS-$(CONFIG_ROQ_MUXER) += idroqenc.o rawenc.o +OBJS-$(CONFIG_RSD_DEMUXER) += rsd.o +OBJS-$(CONFIG_RPL_DEMUXER) += rpl.o +OBJS-$(CONFIG_RSO_DEMUXER) += rsodec.o rso.o pcm.o +OBJS-$(CONFIG_RSO_MUXER) += rsoenc.o rso.o rawenc.o +OBJS-$(CONFIG_RTP_MPEGTS_MUXER) += rtpenc_mpegts.o +OBJS-$(CONFIG_RTP_MUXER) += rtp.o \ + rtpenc_aac.o \ + rtpenc_latm.o \ + rtpenc_amr.o \ + rtpenc_h261.o \ + rtpenc_h263.o \ + rtpenc_h263_rfc2190.o \ + rtpenc_h264_hevc.o \ + rtpenc_jpeg.o \ + rtpenc_mpv.o \ + rtpenc.o \ + rtpenc_vc2hq.o \ + rtpenc_vp8.o \ + rtpenc_vp9.o \ + rtpenc_xiph.o \ + avc.o hevc.o +OBJS-$(CONFIG_RTSP_DEMUXER) += rtsp.o rtspdec.o httpauth.o \ + urldecode.o +OBJS-$(CONFIG_RTSP_MUXER) += rtsp.o rtspenc.o httpauth.o \ + urldecode.o +OBJS-$(CONFIG_S337M_DEMUXER) += s337m.o spdif.o +OBJS-$(CONFIG_SAMI_DEMUXER) += samidec.o subtitles.o +OBJS-$(CONFIG_SAP_DEMUXER) += sapdec.o +OBJS-$(CONFIG_SAP_MUXER) += sapenc.o +OBJS-$(CONFIG_SBC_DEMUXER) += sbcdec.o rawdec.o +OBJS-$(CONFIG_SBC_MUXER) += rawenc.o +OBJS-$(CONFIG_SBG_DEMUXER) += sbgdec.o +OBJS-$(CONFIG_SCC_DEMUXER) += sccdec.o subtitles.o +OBJS-$(CONFIG_SCC_MUXER) += sccenc.o subtitles.o +OBJS-$(CONFIG_SDP_DEMUXER) += rtsp.o +OBJS-$(CONFIG_SDR2_DEMUXER) += sdr2.o +OBJS-$(CONFIG_SDS_DEMUXER) += sdsdec.o +OBJS-$(CONFIG_SDX_DEMUXER) += sdxdec.o pcm.o +OBJS-$(CONFIG_SEGAFILM_DEMUXER) += segafilm.o +OBJS-$(CONFIG_SEGAFILM_MUXER) += segafilmenc.o +OBJS-$(CONFIG_SEGMENT_MUXER) += segment.o +OBJS-$(CONFIG_SER_DEMUXER) += serdec.o +OBJS-$(CONFIG_SHORTEN_DEMUXER) += shortendec.o rawdec.o +OBJS-$(CONFIG_SIFF_DEMUXER) += siff.o +OBJS-$(CONFIG_SINGLEJPEG_MUXER) += rawenc.o +OBJS-$(CONFIG_SLN_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_SMACKER_DEMUXER) += smacker.o +OBJS-$(CONFIG_SMJPEG_DEMUXER) += smjpegdec.o smjpeg.o +OBJS-$(CONFIG_SMJPEG_MUXER) += smjpegenc.o smjpeg.o +OBJS-$(CONFIG_SMOOTHSTREAMING_MUXER) += smoothstreamingenc.o +OBJS-$(CONFIG_SMUSH_DEMUXER) += smush.o +OBJS-$(CONFIG_SOL_DEMUXER) += sol.o pcm.o +OBJS-$(CONFIG_SOX_DEMUXER) += soxdec.o pcm.o +OBJS-$(CONFIG_SOX_MUXER) += soxenc.o rawenc.o +OBJS-$(CONFIG_SPDIF_DEMUXER) += spdif.o spdifdec.o +OBJS-$(CONFIG_SPDIF_MUXER) += spdif.o spdifenc.o +OBJS-$(CONFIG_SPEEX_MUXER) += oggenc.o \ + vorbiscomment.o +OBJS-$(CONFIG_SRT_DEMUXER) += srtdec.o subtitles.o +OBJS-$(CONFIG_SRT_MUXER) += srtenc.o +OBJS-$(CONFIG_STL_DEMUXER) += stldec.o subtitles.o +OBJS-$(CONFIG_STR_DEMUXER) += psxstr.o +OBJS-$(CONFIG_STREAMHASH_MUXER) += hashenc.o +OBJS-$(CONFIG_STREAM_SEGMENT_MUXER) += segment.o +OBJS-$(CONFIG_SUBVIEWER1_DEMUXER) += subviewer1dec.o subtitles.o +OBJS-$(CONFIG_SUBVIEWER_DEMUXER) += subviewerdec.o subtitles.o +OBJS-$(CONFIG_SUP_DEMUXER) += supdec.o +OBJS-$(CONFIG_SUP_MUXER) += supenc.o 
+OBJS-$(CONFIG_SVAG_DEMUXER) += svag.o +OBJS-$(CONFIG_SWF_DEMUXER) += swfdec.o swf.o +OBJS-$(CONFIG_SWF_MUXER) += swfenc.o swf.o +OBJS-$(CONFIG_TAK_DEMUXER) += takdec.o apetag.o img2.o rawdec.o +OBJS-$(CONFIG_TEDCAPTIONS_DEMUXER) += tedcaptionsdec.o subtitles.o +OBJS-$(CONFIG_TEE_MUXER) += tee.o tee_common.o +OBJS-$(CONFIG_THP_DEMUXER) += thp.o +OBJS-$(CONFIG_THREEDOSTR_DEMUXER) += 3dostr.o +OBJS-$(CONFIG_TIERTEXSEQ_DEMUXER) += tiertexseq.o +OBJS-$(CONFIG_MKVTIMESTAMP_V2_MUXER) += mkvtimestamp_v2.o +OBJS-$(CONFIG_TMV_DEMUXER) += tmv.o +OBJS-$(CONFIG_TRUEHD_DEMUXER) += rawdec.o mlpdec.o +OBJS-$(CONFIG_TRUEHD_MUXER) += rawenc.o +OBJS-$(CONFIG_TTA_DEMUXER) += tta.o apetag.o img2.o +OBJS-$(CONFIG_TTA_MUXER) += ttaenc.o apetag.o img2.o +OBJS-$(CONFIG_TTY_DEMUXER) += tty.o sauce.o +OBJS-$(CONFIG_TY_DEMUXER) += ty.o +OBJS-$(CONFIG_TXD_DEMUXER) += txd.o +OBJS-$(CONFIG_UNCODEDFRAMECRC_MUXER) += uncodedframecrcenc.o framehash.o +OBJS-$(CONFIG_V210_DEMUXER) += v210.o +OBJS-$(CONFIG_V210X_DEMUXER) += v210.o +OBJS-$(CONFIG_VAG_DEMUXER) += vag.o +OBJS-$(CONFIG_VC1_DEMUXER) += rawdec.o vc1dec.o +OBJS-$(CONFIG_VC1_MUXER) += rawenc.o +OBJS-$(CONFIG_VC1T_DEMUXER) += vc1test.o +OBJS-$(CONFIG_VC1T_MUXER) += vc1testenc.o +OBJS-$(CONFIG_VIVIDAS_DEMUXER) += vividas.o +OBJS-$(CONFIG_VIVO_DEMUXER) += vivo.o +OBJS-$(CONFIG_VMD_DEMUXER) += sierravmd.o +OBJS-$(CONFIG_VOBSUB_DEMUXER) += subtitles.o # mpeg demuxer is in the dependencies +OBJS-$(CONFIG_VOC_DEMUXER) += vocdec.o voc_packet.o voc.o +OBJS-$(CONFIG_VOC_MUXER) += vocenc.o voc.o +OBJS-$(CONFIG_VPK_DEMUXER) += vpk.o +OBJS-$(CONFIG_VPLAYER_DEMUXER) += vplayerdec.o subtitles.o +OBJS-$(CONFIG_VQF_DEMUXER) += vqf.o +OBJS-$(CONFIG_W64_DEMUXER) += wavdec.o w64.o pcm.o +OBJS-$(CONFIG_W64_MUXER) += wavenc.o w64.o +OBJS-$(CONFIG_WAV_DEMUXER) += wavdec.o pcm.o +OBJS-$(CONFIG_WAV_MUXER) += wavenc.o +OBJS-$(CONFIG_WC3_DEMUXER) += wc3movie.o +OBJS-$(CONFIG_WEBM_MUXER) += matroskaenc.o matroska.o \ + av1.o avc.o hevc.o \ + flacenc_header.o avlanguage.o \ + wv.o vorbiscomment.o +OBJS-$(CONFIG_WEBM_DASH_MANIFEST_MUXER) += webmdashenc.o +OBJS-$(CONFIG_WEBM_CHUNK_MUXER) += webm_chunk.o +OBJS-$(CONFIG_WEBP_MUXER) += webpenc.o +OBJS-$(CONFIG_WEBVTT_DEMUXER) += webvttdec.o subtitles.o +OBJS-$(CONFIG_WEBVTT_MUXER) += webvttenc.o +OBJS-$(CONFIG_WSAUD_DEMUXER) += westwood_aud.o +OBJS-$(CONFIG_WSD_DEMUXER) += wsddec.o rawdec.o +OBJS-$(CONFIG_WSVQA_DEMUXER) += westwood_vqa.o +OBJS-$(CONFIG_WTV_DEMUXER) += wtvdec.o wtv_common.o \ + asf.o +OBJS-$(CONFIG_WTV_MUXER) += wtvenc.o wtv_common.o \ + asf.o +OBJS-$(CONFIG_WV_DEMUXER) += wvdec.o wv.o apetag.o img2.o +OBJS-$(CONFIG_WVE_DEMUXER) += wvedec.o pcm.o +OBJS-$(CONFIG_WV_MUXER) += wvenc.o wv.o apetag.o img2.o +OBJS-$(CONFIG_XA_DEMUXER) += xa.o +OBJS-$(CONFIG_XBIN_DEMUXER) += bintext.o sauce.o +OBJS-$(CONFIG_XMV_DEMUXER) += xmv.o +OBJS-$(CONFIG_XVAG_DEMUXER) += xvag.o +OBJS-$(CONFIG_XWMA_DEMUXER) += xwma.o +OBJS-$(CONFIG_YOP_DEMUXER) += yop.o +OBJS-$(CONFIG_YUV4MPEGPIPE_DEMUXER) += yuv4mpegdec.o +OBJS-$(CONFIG_YUV4MPEGPIPE_MUXER) += yuv4mpegenc.o + +# external library muxers/demuxers +OBJS-$(CONFIG_AVISYNTH_DEMUXER) += avisynth.o +OBJS-$(CONFIG_CHROMAPRINT_MUXER) += chromaprint.o +OBJS-$(CONFIG_LIBGME_DEMUXER) += libgme.o +OBJS-$(CONFIG_LIBMODPLUG_DEMUXER) += libmodplug.o +OBJS-$(CONFIG_LIBOPENMPT_DEMUXER) += libopenmpt.o +OBJS-$(CONFIG_VAPOURSYNTH_DEMUXER) += vapoursynth.o + +# protocols I/O +OBJS-$(CONFIG_ASYNC_PROTOCOL) += async.o +OBJS-$(CONFIG_APPLEHTTP_PROTOCOL) += hlsproto.o +OBJS-$(CONFIG_BLURAY_PROTOCOL) += bluray.o 
+OBJS-$(CONFIG_CACHE_PROTOCOL) += cache.o +OBJS-$(CONFIG_CONCAT_PROTOCOL) += concat.o +OBJS-$(CONFIG_CRYPTO_PROTOCOL) += crypto.o +OBJS-$(CONFIG_DATA_PROTOCOL) += data_uri.o +OBJS-$(CONFIG_FFRTMPCRYPT_PROTOCOL) += rtmpcrypt.o rtmpdigest.o rtmpdh.o +OBJS-$(CONFIG_FFRTMPHTTP_PROTOCOL) += rtmphttp.o +OBJS-$(CONFIG_FILE_PROTOCOL) += file.o +OBJS-$(CONFIG_FTP_PROTOCOL) += ftp.o urldecode.o +OBJS-$(CONFIG_GOPHER_PROTOCOL) += gopher.o +OBJS-$(CONFIG_HLS_PROTOCOL) += hlsproto.o +OBJS-$(CONFIG_HTTP_PROTOCOL) += http.o httpauth.o urldecode.o +OBJS-$(CONFIG_HTTPPROXY_PROTOCOL) += http.o httpauth.o urldecode.o +OBJS-$(CONFIG_HTTPS_PROTOCOL) += http.o httpauth.o urldecode.o +OBJS-$(CONFIG_ICECAST_PROTOCOL) += icecast.o +OBJS-$(CONFIG_MD5_PROTOCOL) += md5proto.o +OBJS-$(CONFIG_MMSH_PROTOCOL) += mmsh.o mms.o asf.o +OBJS-$(CONFIG_MMST_PROTOCOL) += mmst.o mms.o asf.o +OBJS-$(CONFIG_PIPE_PROTOCOL) += file.o +OBJS-$(CONFIG_PROMPEG_PROTOCOL) += prompeg.o +OBJS-$(CONFIG_RTMP_PROTOCOL) += rtmpproto.o rtmpdigest.o rtmppkt.o +OBJS-$(CONFIG_RTMPE_PROTOCOL) += rtmpproto.o rtmpdigest.o rtmppkt.o +OBJS-$(CONFIG_RTMPS_PROTOCOL) += rtmpproto.o rtmpdigest.o rtmppkt.o +OBJS-$(CONFIG_RTMPT_PROTOCOL) += rtmpproto.o rtmpdigest.o rtmppkt.o +OBJS-$(CONFIG_RTMPTE_PROTOCOL) += rtmpproto.o rtmpdigest.o rtmppkt.o +OBJS-$(CONFIG_RTMPTS_PROTOCOL) += rtmpproto.o rtmpdigest.o rtmppkt.o +OBJS-$(CONFIG_RTP_PROTOCOL) += rtpproto.o ip.o +OBJS-$(CONFIG_SCTP_PROTOCOL) += sctp.o +OBJS-$(CONFIG_SRTP_PROTOCOL) += srtpproto.o srtp.o +OBJS-$(CONFIG_SUBFILE_PROTOCOL) += subfile.o +OBJS-$(CONFIG_TEE_PROTOCOL) += teeproto.o tee_common.o +OBJS-$(CONFIG_TCP_PROTOCOL) += tcp.o +TLS-OBJS-$(CONFIG_GNUTLS) += tls_gnutls.o +TLS-OBJS-$(CONFIG_LIBTLS) += tls_libtls.o +TLS-OBJS-$(CONFIG_MBEDTLS) += tls_mbedtls.o +TLS-OBJS-$(CONFIG_OPENSSL) += tls_openssl.o +TLS-OBJS-$(CONFIG_SECURETRANSPORT) += tls_securetransport.o +TLS-OBJS-$(CONFIG_SCHANNEL) += tls_schannel.o +OBJS-$(CONFIG_TLS_PROTOCOL) += tls.o $(TLS-OBJS-yes) +OBJS-$(CONFIG_UDP_PROTOCOL) += udp.o ip.o +OBJS-$(CONFIG_UDPLITE_PROTOCOL) += udp.o ip.o +OBJS-$(CONFIG_UNIX_PROTOCOL) += unix.o + +# external library protocols +OBJS-$(CONFIG_LIBAMQP_PROTOCOL) += libamqp.o +OBJS-$(CONFIG_LIBRTMP_PROTOCOL) += librtmp.o +OBJS-$(CONFIG_LIBRTMPE_PROTOCOL) += librtmp.o +OBJS-$(CONFIG_LIBRTMPS_PROTOCOL) += librtmp.o +OBJS-$(CONFIG_LIBRTMPT_PROTOCOL) += librtmp.o +OBJS-$(CONFIG_LIBRTMPTE_PROTOCOL) += librtmp.o +OBJS-$(CONFIG_LIBSMBCLIENT_PROTOCOL) += libsmbclient.o +OBJS-$(CONFIG_LIBSRT_PROTOCOL) += libsrt.o +OBJS-$(CONFIG_LIBSSH_PROTOCOL) += libssh.o +OBJS-$(CONFIG_LIBZMQ_PROTOCOL) += libzmq.o + +# libavdevice dependencies +OBJS-$(CONFIG_IEC61883_INDEV) += dv.o + +# Windows resource file +SLIBOBJS-$(HAVE_GNU_WINDRES) += avformatres.o + +SKIPHEADERS-$(CONFIG_FFRTMPCRYPT_PROTOCOL) += rtmpdh.h +SKIPHEADERS-$(CONFIG_NETWORK) += network.h rtsp.h + +TESTPROGS = seek \ + url \ +# async \ + +FIFO-MUXER-TESTPROGS-$(CONFIG_NETWORK) += fifo_muxer +TESTPROGS-$(CONFIG_FIFO_MUXER) += $(FIFO-MUXER-TESTPROGS-yes) +TESTPROGS-$(CONFIG_FFRTMPCRYPT_PROTOCOL) += rtmpdh +TESTPROGS-$(CONFIG_MOV_MUXER) += movenc +TESTPROGS-$(CONFIG_NETWORK) += noproxy +TESTPROGS-$(CONFIG_SRTP) += srtp + +TOOLS = aviocat \ + ismindex \ + pktdumper \ + probetest \ + seek_print \ + sidxindex \ + venc_data_dump diff --git a/libmetartccore3/src/ffmpeg/YangMetaConnection.h b/libmetartccore3/src/ffmpeg/YangMetaConnection.h new file mode 100755 index 00000000..ee993111 --- /dev/null +++ b/libmetartccore3/src/ffmpeg/YangMetaConnection.h @@ -0,0 +1,104 @@ +// +// 
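Each OBJS-$(CONFIG_*_DEMUXER) entry in the Makefile above only names object files to link when the matching configure switch is enabled; the demuxer itself is an AVInputFormat object that the listed .o file must define, and which allformats.c and demuxer_list.c (later in this patch) declare and register. A minimal sketch of such an object follows — every name in it is invented for illustration, and the read_probe signature follows the FFmpeg 4.x headers this patch vendors (older releases take a non-const AVProbeData):

#include "libavformat/avformat.h"

/* Illustrative only: returning 0 from read_probe means "not my format";
 * a real demuxer scores p->buf and returns up to AVPROBE_SCORE_MAX. */
static int example_read_probe(const AVProbeData *p)
{
    return 0;
}

static int example_read_header(AVFormatContext *s)
{
    return 0; /* a real demuxer creates streams with avformat_new_stream() */
}

static int example_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return AVERROR_EOF; /* this sketch never produces data */
}

AVInputFormat ff_example_demuxer = {
    .name        = "example",
    .long_name   = "hypothetical example demuxer",
    .read_probe  = example_read_probe,
    .read_header = example_read_header,
    .read_packet = example_read_packet,
};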
Copyright (c) 2019-2022 yanggaofeng
+//
+
+#ifndef INCLUDE_YANGWEBRTC_YANGMETACONNECTION_H_
+#define INCLUDE_YANGWEBRTC_YANGMETACONNECTION_H_
+#include <stdint.h>
+#define yang_free(a) {if( (a)) {free((a)); (a) = NULL;}}
+#define YANG_Frametype_Spspps 9
+#define YANG_Frametype_I 1
+#define YANG_Frametype_P 0
+typedef enum {
+    Yang_Stream_Play, Yang_Stream_Publish, Yang_Stream_Both
+}YangStreamOptType;
+typedef enum YangAudioCodec{
+    Yang_AED_AAC,
+    Yang_AED_MP3,
+    Yang_AED_SPEEX,
+    Yang_AED_OPUS
+}YangAudioCodec;
+typedef enum YangVideoCodec{
+    Yang_VED_264,
+    Yang_VED_265,
+    Yang_VED_AV1,
+    Yang_VED_VP9
+
+}YangVideoCodec;
+
+typedef struct {
+    enum YangAudioCodec encode;
+    int32_t sample;
+    int32_t channel;
+    int32_t audioClock;
+}YangAudioParam;
+
+typedef struct {
+    enum YangVideoCodec encode;
+    int32_t videoClock;
+
+}YangVideoParam;
+typedef struct {
+    char serverIp[30];
+    char localIp[20];
+    char app[20];
+    char stream[20];
+    char url[50];
+    int32_t localPort;
+    int32_t serverPort;
+    int32_t uid;
+    YangStreamOptType streamOptType;
+}YangStreamConfig;
+typedef struct{
+    int32_t mediaType;
+    int32_t uid;
+    int32_t frametype;
+    int32_t nb;
+    int64_t pts;
+    int64_t dts;
+    uint8_t* payload;
+}YangFrame;
+
+typedef enum YangRequestType {
+    Yang_Req_Sendkeyframe, Yang_Req_Connected, Yang_Req_Disconnected
+}YangRequestType;
+
+typedef enum YangRtcMessageType{
+    YangRTC_Decoder_Input
+}YangRtcMessageType;
+typedef struct{
+    void *context;
+    void (*init)(void* context,int32_t sample,int32_t channel,int32_t echopath);
+    void (*closeAec)(void* context);
+
+    void (*echoCapture)(void* context,short *rec, short *out);
+    void (*preprocessRun)(void* context,short *pcm);
+    void (*echoStateReset)(void* context);
+    void (*echoPlayback)(void* context,short *play);
+    void (*echoCancellation)(void* context,const short *rec, const short *play,
+            short *out);
+}YangAec;
+typedef struct{
+    void (*receiveAudio)(YangFrame *audioFrame, void *user);
+    void (*receiveVideo)(YangFrame *videoFrame, void *user);
+    void (*sendRequest)(int32_t puid, uint32_t ssrc, YangRequestType req,void* user);
+    void (*setPlayMediaConfig)( YangAudioParam *remote_audio,YangVideoParam *remote_video,void* user);
+} YangMetaRtcCallback;
+typedef struct {
+    void* context;
+    void (*init)(void* context,YangMetaRtcCallback* callback,void* user);
+    int32_t (*initParam)(void* context,char* url,YangStreamOptType opt);
+    void (*parseHeader)(YangVideoCodec codec,uint8_t *buf, uint8_t *src, int32_t *hLen);
+    int32_t (*connectServer)(void* context);
+    int32_t (*disconnectServer)(void* context);
+    void (*setExtradata)(void* context,YangVideoCodec codec,uint8_t *extradata,int32_t extradata_size);
+    int32_t (*publishAudio)(void* context,YangFrame* audioFrame);
+    int32_t (*publishVideo)(void* context,YangFrame* videoFrame);
+    int32_t (*getState)(void* context);
+    int32_t (*recvvideoNotify)(void* context, YangRtcMessageType mess);
+}YangMetaConnection;
+void yang_init_metaConnection(YangMetaConnection* metaconn);
+void yang_destroy_metaConnection(YangMetaConnection* metaconn);
+void yang_init_aec(YangAec* aec);
+void yang_destroy_aec(YangAec* aec);
+#endif /* INCLUDE_YANGWEBRTC_YANGMETACONNECTION_H_ */
diff --git a/libmetartccore3/src/ffmpeg/allformats.c b/libmetartccore3/src/ffmpeg/allformats.c
new file mode 100755
index 00000000..559e917e
--- /dev/null
+++ b/libmetartccore3/src/ffmpeg/allformats.c
@@ -0,0 +1,654 @@
+/*
+ * Register all the formats and protocols
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
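The YangMetaConnection and YangAec types above are plain function-pointer tables: yang_init_metaConnection() is expected to fill the table and allocate the opaque context that every call threads through. A usage sketch — the callback body and URL are placeholders, and the 0-means-success return convention is an assumption, not something the header specifies:

#include <stdio.h>
#include "YangMetaConnection.h"

/* Hypothetical sink for received video frames. */
static void on_video(YangFrame *videoFrame, void *user)
{
    printf("video frame: pts=%lld, %d bytes\n",
           (long long)videoFrame->pts, videoFrame->nb);
}

int main(void)
{
    YangMetaConnection conn = {0};
    YangMetaRtcCallback cb = {0};
    cb.receiveVideo = on_video;

    yang_init_metaConnection(&conn);
    conn.init(conn.context, &cb, NULL);

    if (conn.initParam(conn.context, (char *)"webrtc://host/live/stream", Yang_Stream_Play) == 0 &&
        conn.connectServer(conn.context) == 0) {
        /* ... publish or play until done ... */
        conn.disconnectServer(conn.context);
    }
    yang_destroy_metaConnection(&conn);
    return 0;
}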
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/thread.h" +#include "libavformat/internal.h" +#include "avformat.h" +#include "rtp.h" +#include "rdt.h" +#include "url.h" +#include "version.h" + +/* (de)muxers */ +extern AVOutputFormat ff_a64_muxer; +extern AVInputFormat ff_aa_demuxer; +extern AVInputFormat ff_aac_demuxer; +extern AVInputFormat ff_ac3_demuxer; +extern AVOutputFormat ff_ac3_muxer; +extern AVInputFormat ff_acm_demuxer; +extern AVInputFormat ff_act_demuxer; +extern AVInputFormat ff_adf_demuxer; +extern AVInputFormat ff_adp_demuxer; +extern AVInputFormat ff_ads_demuxer; +extern AVOutputFormat ff_adts_muxer; +extern AVInputFormat ff_adx_demuxer; +extern AVOutputFormat ff_adx_muxer; +extern AVInputFormat ff_aea_demuxer; +extern AVInputFormat ff_afc_demuxer; +extern AVInputFormat ff_aiff_demuxer; +extern AVOutputFormat ff_aiff_muxer; +extern AVInputFormat ff_aix_demuxer; +extern AVInputFormat ff_alp_demuxer; +extern AVInputFormat ff_amr_demuxer; +extern AVOutputFormat ff_amr_muxer; +extern AVInputFormat ff_amrnb_demuxer; +extern AVInputFormat ff_amrwb_demuxer; +extern AVInputFormat ff_anm_demuxer; +extern AVInputFormat ff_apc_demuxer; +extern AVInputFormat ff_ape_demuxer; +extern AVInputFormat ff_apm_demuxer; +extern AVInputFormat ff_apng_demuxer; +extern AVOutputFormat ff_apng_muxer; +extern AVInputFormat ff_aptx_demuxer; +extern AVOutputFormat ff_aptx_muxer; +extern AVInputFormat ff_aptx_hd_demuxer; +extern AVOutputFormat ff_aptx_hd_muxer; +extern AVInputFormat ff_aqtitle_demuxer; +extern AVInputFormat ff_argo_asf_demuxer; +extern AVInputFormat ff_asf_demuxer; +extern AVOutputFormat ff_asf_muxer; +extern AVInputFormat ff_asf_o_demuxer; +extern AVInputFormat ff_ass_demuxer; +extern AVOutputFormat ff_ass_muxer; +extern AVInputFormat ff_ast_demuxer; +extern AVOutputFormat ff_ast_muxer; +extern AVOutputFormat ff_asf_stream_muxer; +extern AVInputFormat ff_au_demuxer; +extern AVOutputFormat ff_au_muxer; +extern AVInputFormat ff_av1_demuxer; +extern AVInputFormat ff_avi_demuxer; +extern AVOutputFormat ff_avi_muxer; +extern AVInputFormat ff_avisynth_demuxer; +extern AVOutputFormat ff_avm2_muxer; +extern AVInputFormat ff_avr_demuxer; +extern AVInputFormat ff_avs_demuxer; +extern AVInputFormat ff_avs2_demuxer; +extern AVOutputFormat ff_avs2_muxer; +extern AVInputFormat ff_bethsoftvid_demuxer; +extern AVInputFormat ff_bfi_demuxer; +extern AVInputFormat ff_bintext_demuxer; +extern AVInputFormat ff_bink_demuxer; +extern AVInputFormat ff_bit_demuxer; +extern AVOutputFormat ff_bit_muxer; +extern AVInputFormat ff_bmv_demuxer; +extern AVInputFormat ff_bfstm_demuxer; +extern AVInputFormat ff_brstm_demuxer; +extern AVInputFormat ff_boa_demuxer; +extern AVInputFormat ff_c93_demuxer; +extern AVInputFormat ff_caf_demuxer; +extern AVOutputFormat ff_caf_muxer; +extern AVInputFormat 
ff_cavsvideo_demuxer; +extern AVOutputFormat ff_cavsvideo_muxer; +extern AVInputFormat ff_cdg_demuxer; +extern AVInputFormat ff_cdxl_demuxer; +extern AVInputFormat ff_cine_demuxer; +extern AVInputFormat ff_codec2_demuxer; +extern AVOutputFormat ff_codec2_muxer; +extern AVInputFormat ff_codec2raw_demuxer; +extern AVOutputFormat ff_codec2raw_muxer; +extern AVInputFormat ff_concat_demuxer; +extern AVOutputFormat ff_crc_muxer; +extern AVInputFormat ff_dash_demuxer; +extern AVOutputFormat ff_dash_muxer; +extern AVInputFormat ff_data_demuxer; +extern AVOutputFormat ff_data_muxer; +extern AVInputFormat ff_daud_demuxer; +extern AVOutputFormat ff_daud_muxer; +extern AVInputFormat ff_dcstr_demuxer; +extern AVInputFormat ff_derf_demuxer; +extern AVInputFormat ff_dfa_demuxer; +extern AVInputFormat ff_dhav_demuxer; +extern AVInputFormat ff_dirac_demuxer; +extern AVOutputFormat ff_dirac_muxer; +extern AVInputFormat ff_dnxhd_demuxer; +extern AVOutputFormat ff_dnxhd_muxer; +extern AVInputFormat ff_dsf_demuxer; +extern AVInputFormat ff_dsicin_demuxer; +extern AVInputFormat ff_dss_demuxer; +extern AVInputFormat ff_dts_demuxer; +extern AVOutputFormat ff_dts_muxer; +extern AVInputFormat ff_dtshd_demuxer; +extern AVInputFormat ff_dv_demuxer; +extern AVOutputFormat ff_dv_muxer; +extern AVInputFormat ff_dvbsub_demuxer; +extern AVInputFormat ff_dvbtxt_demuxer; +extern AVInputFormat ff_dxa_demuxer; +extern AVInputFormat ff_ea_demuxer; +extern AVInputFormat ff_ea_cdata_demuxer; +extern AVInputFormat ff_eac3_demuxer; +extern AVOutputFormat ff_eac3_muxer; +extern AVInputFormat ff_epaf_demuxer; +extern AVOutputFormat ff_f4v_muxer; +extern AVInputFormat ff_ffmetadata_demuxer; +extern AVOutputFormat ff_ffmetadata_muxer; +extern AVOutputFormat ff_fifo_muxer; +extern AVOutputFormat ff_fifo_test_muxer; +extern AVInputFormat ff_filmstrip_demuxer; +extern AVOutputFormat ff_filmstrip_muxer; +extern AVInputFormat ff_fits_demuxer; +extern AVOutputFormat ff_fits_muxer; +extern AVInputFormat ff_flac_demuxer; +extern AVOutputFormat ff_flac_muxer; +extern AVInputFormat ff_flic_demuxer; +extern AVInputFormat ff_flv_demuxer; +extern AVOutputFormat ff_flv_muxer; +extern AVInputFormat ff_live_flv_demuxer; +extern AVInputFormat ff_fourxm_demuxer; +extern AVOutputFormat ff_framecrc_muxer; +extern AVOutputFormat ff_framehash_muxer; +extern AVOutputFormat ff_framemd5_muxer; +extern AVInputFormat ff_frm_demuxer; +extern AVInputFormat ff_fsb_demuxer; +extern AVInputFormat ff_fwse_demuxer; +extern AVInputFormat ff_g722_demuxer; +extern AVOutputFormat ff_g722_muxer; +extern AVInputFormat ff_g723_1_demuxer; +extern AVOutputFormat ff_g723_1_muxer; +extern AVInputFormat ff_g726_demuxer; +extern AVOutputFormat ff_g726_muxer; +extern AVInputFormat ff_g726le_demuxer; +extern AVOutputFormat ff_g726le_muxer; +extern AVInputFormat ff_g729_demuxer; +extern AVInputFormat ff_gdv_demuxer; +extern AVInputFormat ff_genh_demuxer; +extern AVInputFormat ff_gif_demuxer; +extern AVOutputFormat ff_gif_muxer; +extern AVInputFormat ff_gsm_demuxer; +extern AVOutputFormat ff_gsm_muxer; +extern AVInputFormat ff_gxf_demuxer; +extern AVOutputFormat ff_gxf_muxer; +extern AVInputFormat ff_h261_demuxer; +extern AVOutputFormat ff_h261_muxer; +extern AVInputFormat ff_h263_demuxer; +extern AVOutputFormat ff_h263_muxer; +extern AVInputFormat ff_h264_demuxer; +extern AVOutputFormat ff_h264_muxer; +extern AVOutputFormat ff_hash_muxer; +extern AVInputFormat ff_hca_demuxer; +extern AVInputFormat ff_hcom_demuxer; +extern AVOutputFormat ff_hds_muxer; +extern AVInputFormat 
ff_hevc_demuxer; +extern AVOutputFormat ff_hevc_muxer; +extern AVInputFormat ff_hls_demuxer; +extern AVOutputFormat ff_hls_muxer; +extern AVInputFormat ff_hnm_demuxer; +extern AVInputFormat ff_ico_demuxer; +extern AVOutputFormat ff_ico_muxer; +extern AVInputFormat ff_idcin_demuxer; +extern AVInputFormat ff_idf_demuxer; +extern AVInputFormat ff_iff_demuxer; +extern AVInputFormat ff_ifv_demuxer; +extern AVInputFormat ff_ilbc_demuxer; +extern AVOutputFormat ff_ilbc_muxer; +extern AVInputFormat ff_image2_demuxer; +extern AVOutputFormat ff_image2_muxer; +extern AVInputFormat ff_image2pipe_demuxer; +extern AVOutputFormat ff_image2pipe_muxer; +extern AVInputFormat ff_image2_alias_pix_demuxer; +extern AVInputFormat ff_image2_brender_pix_demuxer; +extern AVInputFormat ff_ingenient_demuxer; +extern AVInputFormat ff_ipmovie_demuxer; +extern AVOutputFormat ff_ipod_muxer; +extern AVInputFormat ff_ircam_demuxer; +extern AVOutputFormat ff_ircam_muxer; +extern AVOutputFormat ff_ismv_muxer; +extern AVInputFormat ff_iss_demuxer; +extern AVInputFormat ff_iv8_demuxer; +extern AVInputFormat ff_ivf_demuxer; +extern AVOutputFormat ff_ivf_muxer; +extern AVInputFormat ff_ivr_demuxer; +extern AVInputFormat ff_jacosub_demuxer; +extern AVOutputFormat ff_jacosub_muxer; +extern AVInputFormat ff_jv_demuxer; +extern AVInputFormat ff_kux_demuxer; +extern AVInputFormat ff_kvag_demuxer; +extern AVOutputFormat ff_kvag_muxer; +extern AVOutputFormat ff_latm_muxer; +extern AVInputFormat ff_lmlm4_demuxer; +extern AVInputFormat ff_loas_demuxer; +extern AVInputFormat ff_lrc_demuxer; +extern AVOutputFormat ff_lrc_muxer; +extern AVInputFormat ff_lvf_demuxer; +extern AVInputFormat ff_lxf_demuxer; +extern AVInputFormat ff_m4v_demuxer; +extern AVOutputFormat ff_m4v_muxer; +extern AVOutputFormat ff_md5_muxer; +extern AVInputFormat ff_matroska_demuxer; +extern AVOutputFormat ff_matroska_muxer; +extern AVOutputFormat ff_matroska_audio_muxer; +extern AVInputFormat ff_mgsts_demuxer; +extern AVInputFormat ff_microdvd_demuxer; +extern AVOutputFormat ff_microdvd_muxer; +extern AVInputFormat ff_mjpeg_demuxer; +extern AVOutputFormat ff_mjpeg_muxer; +extern AVInputFormat ff_mjpeg_2000_demuxer; +extern AVInputFormat ff_mlp_demuxer; +extern AVOutputFormat ff_mlp_muxer; +extern AVInputFormat ff_mlv_demuxer; +extern AVInputFormat ff_mm_demuxer; +extern AVInputFormat ff_mmf_demuxer; +extern AVOutputFormat ff_mmf_muxer; +extern AVInputFormat ff_mov_demuxer; +extern AVOutputFormat ff_mov_muxer; +extern AVOutputFormat ff_mp2_muxer; +extern AVInputFormat ff_mp3_demuxer; +extern AVOutputFormat ff_mp3_muxer; +extern AVOutputFormat ff_mp4_muxer; +extern AVInputFormat ff_mpc_demuxer; +extern AVInputFormat ff_mpc8_demuxer; +extern AVOutputFormat ff_mpeg1system_muxer; +extern AVOutputFormat ff_mpeg1vcd_muxer; +extern AVOutputFormat ff_mpeg1video_muxer; +extern AVOutputFormat ff_mpeg2dvd_muxer; +extern AVOutputFormat ff_mpeg2svcd_muxer; +extern AVOutputFormat ff_mpeg2video_muxer; +extern AVOutputFormat ff_mpeg2vob_muxer; +extern AVInputFormat ff_mpegps_demuxer; +extern AVInputFormat ff_mpegts_demuxer; +extern AVOutputFormat ff_mpegts_muxer; +extern AVInputFormat ff_mpegtsraw_demuxer; +extern AVInputFormat ff_mpegvideo_demuxer; +extern AVInputFormat ff_mpjpeg_demuxer; +extern AVOutputFormat ff_mpjpeg_muxer; +extern AVInputFormat ff_mpl2_demuxer; +extern AVInputFormat ff_mpsub_demuxer; +extern AVInputFormat ff_msf_demuxer; +extern AVInputFormat ff_msnwc_tcp_demuxer; +extern AVInputFormat ff_mtaf_demuxer; +extern AVInputFormat ff_mtv_demuxer; +extern AVInputFormat 
ff_musx_demuxer; +extern AVInputFormat ff_mv_demuxer; +extern AVInputFormat ff_mvi_demuxer; +extern AVInputFormat ff_mxf_demuxer; +extern AVOutputFormat ff_mxf_muxer; +extern AVOutputFormat ff_mxf_d10_muxer; +extern AVOutputFormat ff_mxf_opatom_muxer; +extern AVInputFormat ff_mxg_demuxer; +extern AVInputFormat ff_nc_demuxer; +extern AVInputFormat ff_nistsphere_demuxer; +extern AVInputFormat ff_nsp_demuxer; +extern AVInputFormat ff_nsv_demuxer; +extern AVOutputFormat ff_null_muxer; +extern AVInputFormat ff_nut_demuxer; +extern AVOutputFormat ff_nut_muxer; +extern AVInputFormat ff_nuv_demuxer; +extern AVOutputFormat ff_oga_muxer; +extern AVInputFormat ff_ogg_demuxer; +extern AVOutputFormat ff_ogg_muxer; +extern AVOutputFormat ff_ogv_muxer; +extern AVInputFormat ff_oma_demuxer; +extern AVOutputFormat ff_oma_muxer; +extern AVOutputFormat ff_opus_muxer; +extern AVInputFormat ff_paf_demuxer; +extern AVInputFormat ff_pcm_alaw_demuxer; +extern AVOutputFormat ff_pcm_alaw_muxer; +extern AVInputFormat ff_pcm_mulaw_demuxer; +extern AVOutputFormat ff_pcm_mulaw_muxer; +extern AVInputFormat ff_pcm_vidc_demuxer; +extern AVOutputFormat ff_pcm_vidc_muxer; +extern AVInputFormat ff_pcm_f64be_demuxer; +extern AVOutputFormat ff_pcm_f64be_muxer; +extern AVInputFormat ff_pcm_f64le_demuxer; +extern AVOutputFormat ff_pcm_f64le_muxer; +extern AVInputFormat ff_pcm_f32be_demuxer; +extern AVOutputFormat ff_pcm_f32be_muxer; +extern AVInputFormat ff_pcm_f32le_demuxer; +extern AVOutputFormat ff_pcm_f32le_muxer; +extern AVInputFormat ff_pcm_s32be_demuxer; +extern AVOutputFormat ff_pcm_s32be_muxer; +extern AVInputFormat ff_pcm_s32le_demuxer; +extern AVOutputFormat ff_pcm_s32le_muxer; +extern AVInputFormat ff_pcm_s24be_demuxer; +extern AVOutputFormat ff_pcm_s24be_muxer; +extern AVInputFormat ff_pcm_s24le_demuxer; +extern AVOutputFormat ff_pcm_s24le_muxer; +extern AVInputFormat ff_pcm_s16be_demuxer; +extern AVOutputFormat ff_pcm_s16be_muxer; +extern AVInputFormat ff_pcm_s16le_demuxer; +extern AVOutputFormat ff_pcm_s16le_muxer; +extern AVInputFormat ff_pcm_s8_demuxer; +extern AVOutputFormat ff_pcm_s8_muxer; +extern AVInputFormat ff_pcm_u32be_demuxer; +extern AVOutputFormat ff_pcm_u32be_muxer; +extern AVInputFormat ff_pcm_u32le_demuxer; +extern AVOutputFormat ff_pcm_u32le_muxer; +extern AVInputFormat ff_pcm_u24be_demuxer; +extern AVOutputFormat ff_pcm_u24be_muxer; +extern AVInputFormat ff_pcm_u24le_demuxer; +extern AVOutputFormat ff_pcm_u24le_muxer; +extern AVInputFormat ff_pcm_u16be_demuxer; +extern AVOutputFormat ff_pcm_u16be_muxer; +extern AVInputFormat ff_pcm_u16le_demuxer; +extern AVOutputFormat ff_pcm_u16le_muxer; +extern AVInputFormat ff_pcm_u8_demuxer; +extern AVOutputFormat ff_pcm_u8_muxer; +extern AVInputFormat ff_pjs_demuxer; +extern AVInputFormat ff_pmp_demuxer; +extern AVInputFormat ff_pp_bnk_demuxer; +extern AVOutputFormat ff_psp_muxer; +extern AVInputFormat ff_pva_demuxer; +extern AVInputFormat ff_pvf_demuxer; +extern AVInputFormat ff_qcp_demuxer; +extern AVInputFormat ff_r3d_demuxer; +extern AVInputFormat ff_rawvideo_demuxer; +extern AVOutputFormat ff_rawvideo_muxer; +extern AVInputFormat ff_realtext_demuxer; +extern AVInputFormat ff_redspark_demuxer; +extern AVInputFormat ff_rl2_demuxer; +extern AVInputFormat ff_rm_demuxer; +extern AVOutputFormat ff_rm_muxer; +extern AVInputFormat ff_roq_demuxer; +extern AVOutputFormat ff_roq_muxer; +extern AVInputFormat ff_rpl_demuxer; +extern AVInputFormat ff_rsd_demuxer; +extern AVInputFormat ff_rso_demuxer; +extern AVOutputFormat ff_rso_muxer; +extern AVInputFormat 
ff_rtp_demuxer; +extern AVOutputFormat ff_rtp_muxer; +extern AVOutputFormat ff_rtp_mpegts_muxer; +extern AVInputFormat ff_rtsp_demuxer; +extern AVOutputFormat ff_rtsp_muxer; +extern AVInputFormat ff_s337m_demuxer; +extern AVInputFormat ff_sami_demuxer; +extern AVInputFormat ff_sap_demuxer; +extern AVOutputFormat ff_sap_muxer; +extern AVInputFormat ff_sbc_demuxer; +extern AVOutputFormat ff_sbc_muxer; +extern AVInputFormat ff_sbg_demuxer; +extern AVInputFormat ff_scc_demuxer; +extern AVOutputFormat ff_scc_muxer; +extern AVInputFormat ff_sdp_demuxer; +extern AVInputFormat ff_sdr2_demuxer; +extern AVInputFormat ff_sds_demuxer; +extern AVInputFormat ff_sdx_demuxer; +extern AVInputFormat ff_segafilm_demuxer; +extern AVOutputFormat ff_segafilm_muxer; +extern AVOutputFormat ff_segment_muxer; +extern AVOutputFormat ff_stream_segment_muxer; +extern AVInputFormat ff_ser_demuxer; +extern AVInputFormat ff_shorten_demuxer; +extern AVInputFormat ff_siff_demuxer; +extern AVOutputFormat ff_singlejpeg_muxer; +extern AVInputFormat ff_sln_demuxer; +extern AVInputFormat ff_smacker_demuxer; +extern AVInputFormat ff_smjpeg_demuxer; +extern AVOutputFormat ff_smjpeg_muxer; +extern AVOutputFormat ff_smoothstreaming_muxer; +extern AVInputFormat ff_smush_demuxer; +extern AVInputFormat ff_sol_demuxer; +extern AVInputFormat ff_sox_demuxer; +extern AVOutputFormat ff_sox_muxer; +extern AVOutputFormat ff_spx_muxer; +extern AVInputFormat ff_spdif_demuxer; +extern AVOutputFormat ff_spdif_muxer; +extern AVInputFormat ff_srt_demuxer; +extern AVOutputFormat ff_srt_muxer; +extern AVInputFormat ff_str_demuxer; +extern AVInputFormat ff_stl_demuxer; +extern AVOutputFormat ff_streamhash_muxer; +extern AVInputFormat ff_subviewer1_demuxer; +extern AVInputFormat ff_subviewer_demuxer; +extern AVInputFormat ff_sup_demuxer; +extern AVOutputFormat ff_sup_muxer; +extern AVInputFormat ff_svag_demuxer; +extern AVInputFormat ff_swf_demuxer; +extern AVOutputFormat ff_swf_muxer; +extern AVInputFormat ff_tak_demuxer; +extern AVOutputFormat ff_tee_muxer; +extern AVInputFormat ff_tedcaptions_demuxer; +extern AVOutputFormat ff_tg2_muxer; +extern AVOutputFormat ff_tgp_muxer; +extern AVInputFormat ff_thp_demuxer; +extern AVInputFormat ff_threedostr_demuxer; +extern AVInputFormat ff_tiertexseq_demuxer; +extern AVOutputFormat ff_mkvtimestamp_v2_muxer; +extern AVInputFormat ff_tmv_demuxer; +extern AVInputFormat ff_truehd_demuxer; +extern AVOutputFormat ff_truehd_muxer; +extern AVInputFormat ff_tta_demuxer; +extern AVOutputFormat ff_tta_muxer; +extern AVInputFormat ff_txd_demuxer; +extern AVInputFormat ff_tty_demuxer; +extern AVInputFormat ff_ty_demuxer; +extern AVOutputFormat ff_uncodedframecrc_muxer; +extern AVInputFormat ff_v210_demuxer; +extern AVInputFormat ff_v210x_demuxer; +extern AVInputFormat ff_vag_demuxer; +extern AVInputFormat ff_vc1_demuxer; +extern AVOutputFormat ff_vc1_muxer; +extern AVInputFormat ff_vc1t_demuxer; +extern AVOutputFormat ff_vc1t_muxer; +extern AVInputFormat ff_vividas_demuxer; +extern AVInputFormat ff_vivo_demuxer; +extern AVInputFormat ff_vmd_demuxer; +extern AVInputFormat ff_vobsub_demuxer; +extern AVInputFormat ff_voc_demuxer; +extern AVOutputFormat ff_voc_muxer; +extern AVInputFormat ff_vpk_demuxer; +extern AVInputFormat ff_vplayer_demuxer; +extern AVInputFormat ff_vqf_demuxer; +extern AVInputFormat ff_w64_demuxer; +extern AVOutputFormat ff_w64_muxer; +extern AVInputFormat ff_wav_demuxer; +extern AVOutputFormat ff_wav_muxer; +extern AVInputFormat ff_wc3_demuxer; +extern AVOutputFormat ff_webm_muxer; +extern 
AVInputFormat ff_webm_dash_manifest_demuxer;
+extern AVOutputFormat ff_webm_dash_manifest_muxer;
+extern AVOutputFormat ff_webm_chunk_muxer;
+extern AVOutputFormat ff_webp_muxer;
+extern AVInputFormat ff_webvtt_demuxer;
+extern AVOutputFormat ff_webvtt_muxer;
+extern AVInputFormat ff_wsaud_demuxer;
+extern AVInputFormat ff_wsd_demuxer;
+extern AVInputFormat ff_wsvqa_demuxer;
+extern AVInputFormat ff_wtv_demuxer;
+extern AVOutputFormat ff_wtv_muxer;
+extern AVInputFormat ff_wve_demuxer;
+extern AVInputFormat ff_wv_demuxer;
+extern AVOutputFormat ff_wv_muxer;
+extern AVInputFormat ff_xa_demuxer;
+extern AVInputFormat ff_xbin_demuxer;
+extern AVInputFormat ff_xmv_demuxer;
+extern AVInputFormat ff_xvag_demuxer;
+extern AVInputFormat ff_xwma_demuxer;
+extern AVInputFormat ff_yop_demuxer;
+extern AVInputFormat ff_yuv4mpegpipe_demuxer;
+extern AVOutputFormat ff_yuv4mpegpipe_muxer;
+/* image demuxers */
+extern AVInputFormat ff_image_bmp_pipe_demuxer;
+extern AVInputFormat ff_image_dds_pipe_demuxer;
+extern AVInputFormat ff_image_dpx_pipe_demuxer;
+extern AVInputFormat ff_image_exr_pipe_demuxer;
+extern AVInputFormat ff_image_gif_pipe_demuxer;
+extern AVInputFormat ff_image_j2k_pipe_demuxer;
+extern AVInputFormat ff_image_jpeg_pipe_demuxer;
+extern AVInputFormat ff_image_jpegls_pipe_demuxer;
+extern AVInputFormat ff_image_pam_pipe_demuxer;
+extern AVInputFormat ff_image_pbm_pipe_demuxer;
+extern AVInputFormat ff_image_pcx_pipe_demuxer;
+extern AVInputFormat ff_image_pgmyuv_pipe_demuxer;
+extern AVInputFormat ff_image_pgm_pipe_demuxer;
+extern AVInputFormat ff_image_pictor_pipe_demuxer;
+extern AVInputFormat ff_image_png_pipe_demuxer;
+extern AVInputFormat ff_image_ppm_pipe_demuxer;
+extern AVInputFormat ff_image_psd_pipe_demuxer;
+extern AVInputFormat ff_image_qdraw_pipe_demuxer;
+extern AVInputFormat ff_image_sgi_pipe_demuxer;
+extern AVInputFormat ff_image_svg_pipe_demuxer;
+extern AVInputFormat ff_image_sunrast_pipe_demuxer;
+extern AVInputFormat ff_image_tiff_pipe_demuxer;
+extern AVInputFormat ff_image_webp_pipe_demuxer;
+extern AVInputFormat ff_image_xpm_pipe_demuxer;
+extern AVInputFormat ff_image_xwd_pipe_demuxer;
+
+/* external libraries */
+extern AVOutputFormat ff_chromaprint_muxer;
+extern AVOutputFormat ff_webrtc_muxer;
+extern AVInputFormat ff_libgme_demuxer;
+extern AVInputFormat ff_libmodplug_demuxer;
+extern AVInputFormat ff_libopenmpt_demuxer;
+extern AVInputFormat ff_vapoursynth_demuxer;
+extern AVInputFormat ff_webrtc_demuxer;
+
+#include "libavformat/muxer_list.c"
+#include "libavformat/demuxer_list.c"
+
+static const AVInputFormat * const *indev_list = NULL;
+static const AVOutputFormat * const *outdev_list = NULL;
+
+const AVOutputFormat *av_muxer_iterate(void **opaque)
+{
+    static const uintptr_t size = sizeof(muxer_list)/sizeof(muxer_list[0]) - 1;
+    uintptr_t i = (uintptr_t)*opaque;
+    const AVOutputFormat *f = NULL;
+
+    if (i < size) {
+        f = muxer_list[i];
+    } else if (outdev_list) {
+        f = outdev_list[i - size];
+    }
+
+    if (f)
+        *opaque = (void*)(i + 1);
+    return f;
+}
+
+const AVInputFormat *av_demuxer_iterate(void **opaque)
+{
+    static const uintptr_t size = sizeof(demuxer_list)/sizeof(demuxer_list[0]) - 1;
+    uintptr_t i = (uintptr_t)*opaque;
+    const AVInputFormat *f = NULL;
+
+    if (i < size) {
+        f = demuxer_list[i];
+    } else if (indev_list) {
+        f = indev_list[i - size];
+    }
+
+    if (f)
+        *opaque = (void*)(i + 1);
+    return f;
+}
+
+static AVMutex avpriv_register_devices_mutex = AV_MUTEX_INITIALIZER;
+
+#if FF_API_NEXT
+FF_DISABLE_DEPRECATION_WARNINGS
+static AVOnce
av_format_next_init = AV_ONCE_INIT; + +static void av_format_init_next(void) +{ + AVOutputFormat *prevout = NULL, *out; + AVInputFormat *previn = NULL, *in; + + ff_mutex_lock(&avpriv_register_devices_mutex); + + for (int i = 0; (out = (AVOutputFormat*)muxer_list[i]); i++) { + if (prevout) + prevout->next = out; + prevout = out; + } + + if (outdev_list) { + for (int i = 0; (out = (AVOutputFormat*)outdev_list[i]); i++) { + if (prevout) + prevout->next = out; + prevout = out; + } + } + + for (int i = 0; (in = (AVInputFormat*)demuxer_list[i]); i++) { + if (previn) + previn->next = in; + previn = in; + } + + if (indev_list) { + for (int i = 0; (in = (AVInputFormat*)indev_list[i]); i++) { + if (previn) + previn->next = in; + previn = in; + } + } + + ff_mutex_unlock(&avpriv_register_devices_mutex); +} + +AVInputFormat *av_iformat_next(const AVInputFormat *f) +{ + ff_thread_once(&av_format_next_init, av_format_init_next); + + if (f) +#if FF_API_AVIOFORMAT + return f->next; +#else + return (AVInputFormat *) f->next; +#endif + else { + void *opaque = NULL; + return (AVInputFormat *)av_demuxer_iterate(&opaque); + } +} + +AVOutputFormat *av_oformat_next(const AVOutputFormat *f) +{ + ff_thread_once(&av_format_next_init, av_format_init_next); + + if (f) +#if FF_API_AVIOFORMAT + return f->next; +#else + return (AVOutputFormat *) f->next; +#endif + else { + void *opaque = NULL; + return (AVOutputFormat *)av_muxer_iterate(&opaque); + } +} + +void av_register_all(void) +{ + ff_thread_once(&av_format_next_init, av_format_init_next); +} + +void av_register_input_format(AVInputFormat *format) +{ + ff_thread_once(&av_format_next_init, av_format_init_next); +} + +void av_register_output_format(AVOutputFormat *format) +{ + ff_thread_once(&av_format_next_init, av_format_init_next); +} +FF_ENABLE_DEPRECATION_WARNINGS +#endif + +void avpriv_register_devices(const AVOutputFormat * const o[], const AVInputFormat * const i[]) +{ + ff_mutex_lock(&avpriv_register_devices_mutex); + outdev_list = o; + indev_list = i; + ff_mutex_unlock(&avpriv_register_devices_mutex); +#if FF_API_NEXT + av_format_init_next(); +#endif +} diff --git a/libmetartccore3/src/ffmpeg/demuxer_list.c b/libmetartccore3/src/ffmpeg/demuxer_list.c new file mode 100755 index 00000000..b086ec1a --- /dev/null +++ b/libmetartccore3/src/ffmpeg/demuxer_list.c @@ -0,0 +1,307 @@ +static const AVInputFormat * const demuxer_list[] = { + &ff_aa_demuxer, + &ff_aac_demuxer, + &ff_ac3_demuxer, + &ff_acm_demuxer, + &ff_act_demuxer, + &ff_adf_demuxer, + &ff_adp_demuxer, + &ff_ads_demuxer, + &ff_adx_demuxer, + &ff_aea_demuxer, + &ff_afc_demuxer, + &ff_aiff_demuxer, + &ff_aix_demuxer, + &ff_alp_demuxer, + &ff_amr_demuxer, + &ff_amrnb_demuxer, + &ff_amrwb_demuxer, + &ff_anm_demuxer, + &ff_apc_demuxer, + &ff_ape_demuxer, + &ff_apm_demuxer, + &ff_apng_demuxer, + &ff_aptx_demuxer, + &ff_aptx_hd_demuxer, + &ff_aqtitle_demuxer, + &ff_argo_asf_demuxer, + &ff_asf_demuxer, + &ff_asf_o_demuxer, + &ff_ass_demuxer, + &ff_ast_demuxer, + &ff_au_demuxer, + &ff_av1_demuxer, + &ff_avi_demuxer, + &ff_avr_demuxer, + &ff_avs_demuxer, + &ff_avs2_demuxer, + &ff_bethsoftvid_demuxer, + &ff_bfi_demuxer, + &ff_bintext_demuxer, + &ff_bink_demuxer, + &ff_bit_demuxer, + &ff_bmv_demuxer, + &ff_bfstm_demuxer, + &ff_brstm_demuxer, + &ff_boa_demuxer, + &ff_c93_demuxer, + &ff_caf_demuxer, + &ff_cavsvideo_demuxer, + &ff_cdg_demuxer, + &ff_cdxl_demuxer, + &ff_cine_demuxer, + &ff_codec2_demuxer, + &ff_codec2raw_demuxer, + &ff_concat_demuxer, + &ff_data_demuxer, + &ff_daud_demuxer, + &ff_dcstr_demuxer, + 
&ff_derf_demuxer, + &ff_dfa_demuxer, + &ff_dhav_demuxer, + &ff_dirac_demuxer, + &ff_dnxhd_demuxer, + &ff_dsf_demuxer, + &ff_dsicin_demuxer, + &ff_dss_demuxer, + &ff_dts_demuxer, + &ff_dtshd_demuxer, + &ff_dv_demuxer, + &ff_dvbsub_demuxer, + &ff_dvbtxt_demuxer, + &ff_dxa_demuxer, + &ff_ea_demuxer, + &ff_ea_cdata_demuxer, + &ff_eac3_demuxer, + &ff_epaf_demuxer, + &ff_ffmetadata_demuxer, + &ff_filmstrip_demuxer, + &ff_fits_demuxer, + &ff_flac_demuxer, + &ff_flic_demuxer, + &ff_flv_demuxer, + &ff_live_flv_demuxer, + &ff_fourxm_demuxer, + &ff_frm_demuxer, + &ff_fsb_demuxer, + &ff_fwse_demuxer, + &ff_g722_demuxer, + &ff_g723_1_demuxer, + &ff_g726_demuxer, + &ff_g726le_demuxer, + &ff_g729_demuxer, + &ff_gdv_demuxer, + &ff_genh_demuxer, + &ff_gif_demuxer, + &ff_gsm_demuxer, + &ff_gxf_demuxer, + &ff_h261_demuxer, + &ff_h263_demuxer, + &ff_h264_demuxer, + &ff_hca_demuxer, + &ff_hcom_demuxer, + &ff_hevc_demuxer, + &ff_hls_demuxer, + &ff_hnm_demuxer, + &ff_ico_demuxer, + &ff_idcin_demuxer, + &ff_idf_demuxer, + &ff_iff_demuxer, + &ff_ifv_demuxer, + &ff_ilbc_demuxer, + &ff_image2_demuxer, + &ff_image2pipe_demuxer, + &ff_image2_alias_pix_demuxer, + &ff_image2_brender_pix_demuxer, + &ff_ingenient_demuxer, + &ff_ipmovie_demuxer, + &ff_ircam_demuxer, + &ff_iss_demuxer, + &ff_iv8_demuxer, + &ff_ivf_demuxer, + &ff_ivr_demuxer, + &ff_jacosub_demuxer, + &ff_jv_demuxer, + &ff_kux_demuxer, + &ff_kvag_demuxer, + &ff_lmlm4_demuxer, + &ff_loas_demuxer, + &ff_lrc_demuxer, + &ff_lvf_demuxer, + &ff_lxf_demuxer, + &ff_m4v_demuxer, + &ff_matroska_demuxer, + &ff_mgsts_demuxer, + &ff_microdvd_demuxer, + &ff_mjpeg_demuxer, + &ff_mjpeg_2000_demuxer, + &ff_mlp_demuxer, + &ff_mlv_demuxer, + &ff_mm_demuxer, + &ff_mmf_demuxer, + &ff_mov_demuxer, + &ff_mp3_demuxer, + &ff_mpc_demuxer, + &ff_mpc8_demuxer, + &ff_mpegps_demuxer, + &ff_mpegts_demuxer, + &ff_mpegtsraw_demuxer, + &ff_mpegvideo_demuxer, + &ff_mpjpeg_demuxer, + &ff_mpl2_demuxer, + &ff_mpsub_demuxer, + &ff_msf_demuxer, + &ff_msnwc_tcp_demuxer, + &ff_mtaf_demuxer, + &ff_mtv_demuxer, + &ff_musx_demuxer, + &ff_mv_demuxer, + &ff_mvi_demuxer, + &ff_mxf_demuxer, + &ff_mxg_demuxer, + &ff_nc_demuxer, + &ff_nistsphere_demuxer, + &ff_nsp_demuxer, + &ff_nsv_demuxer, + &ff_nut_demuxer, + &ff_nuv_demuxer, + &ff_ogg_demuxer, + &ff_oma_demuxer, + &ff_paf_demuxer, + &ff_pcm_alaw_demuxer, + &ff_pcm_mulaw_demuxer, + &ff_pcm_vidc_demuxer, + &ff_pcm_f64be_demuxer, + &ff_pcm_f64le_demuxer, + &ff_pcm_f32be_demuxer, + &ff_pcm_f32le_demuxer, + &ff_pcm_s32be_demuxer, + &ff_pcm_s32le_demuxer, + &ff_pcm_s24be_demuxer, + &ff_pcm_s24le_demuxer, + &ff_pcm_s16be_demuxer, + &ff_pcm_s16le_demuxer, + &ff_pcm_s8_demuxer, + &ff_pcm_u32be_demuxer, + &ff_pcm_u32le_demuxer, + &ff_pcm_u24be_demuxer, + &ff_pcm_u24le_demuxer, + &ff_pcm_u16be_demuxer, + &ff_pcm_u16le_demuxer, + &ff_pcm_u8_demuxer, + &ff_pjs_demuxer, + &ff_pmp_demuxer, + &ff_pp_bnk_demuxer, + &ff_pva_demuxer, + &ff_pvf_demuxer, + &ff_qcp_demuxer, + &ff_r3d_demuxer, + &ff_rawvideo_demuxer, + &ff_realtext_demuxer, + &ff_redspark_demuxer, + &ff_rl2_demuxer, + &ff_rm_demuxer, + &ff_roq_demuxer, + &ff_rpl_demuxer, + &ff_rsd_demuxer, + &ff_rso_demuxer, + &ff_rtp_demuxer, + &ff_rtsp_demuxer, + &ff_s337m_demuxer, + &ff_sami_demuxer, + &ff_sap_demuxer, + &ff_sbc_demuxer, + &ff_sbg_demuxer, + &ff_scc_demuxer, + &ff_sdp_demuxer, + &ff_sdr2_demuxer, + &ff_sds_demuxer, + &ff_sdx_demuxer, + &ff_segafilm_demuxer, + &ff_ser_demuxer, + &ff_shorten_demuxer, + &ff_siff_demuxer, + &ff_sln_demuxer, + &ff_smacker_demuxer, + &ff_smjpeg_demuxer, + &ff_smush_demuxer, + 
&ff_sol_demuxer, + &ff_sox_demuxer, + &ff_spdif_demuxer, + &ff_srt_demuxer, + &ff_str_demuxer, + &ff_stl_demuxer, + &ff_subviewer1_demuxer, + &ff_subviewer_demuxer, + &ff_sup_demuxer, + &ff_svag_demuxer, + &ff_swf_demuxer, + &ff_tak_demuxer, + &ff_tedcaptions_demuxer, + &ff_thp_demuxer, + &ff_threedostr_demuxer, + &ff_tiertexseq_demuxer, + &ff_tmv_demuxer, + &ff_truehd_demuxer, + &ff_tta_demuxer, + &ff_txd_demuxer, + &ff_tty_demuxer, + &ff_ty_demuxer, + &ff_v210_demuxer, + &ff_v210x_demuxer, + &ff_vag_demuxer, + &ff_vc1_demuxer, + &ff_vc1t_demuxer, + &ff_vividas_demuxer, + &ff_vivo_demuxer, + &ff_vmd_demuxer, + &ff_vobsub_demuxer, + &ff_voc_demuxer, + &ff_vpk_demuxer, + &ff_vplayer_demuxer, + &ff_vqf_demuxer, + &ff_w64_demuxer, + &ff_wav_demuxer, + &ff_wc3_demuxer, + &ff_webm_dash_manifest_demuxer, + &ff_webvtt_demuxer, + &ff_wsaud_demuxer, + &ff_wsd_demuxer, + &ff_wsvqa_demuxer, + &ff_wtv_demuxer, + &ff_wve_demuxer, + &ff_wv_demuxer, + &ff_xa_demuxer, + &ff_xbin_demuxer, + &ff_xmv_demuxer, + &ff_xvag_demuxer, + &ff_xwma_demuxer, + &ff_yop_demuxer, + &ff_yuv4mpegpipe_demuxer, + &ff_image_bmp_pipe_demuxer, + &ff_image_dds_pipe_demuxer, + &ff_image_dpx_pipe_demuxer, + &ff_image_exr_pipe_demuxer, + &ff_image_gif_pipe_demuxer, + &ff_image_j2k_pipe_demuxer, + &ff_image_jpeg_pipe_demuxer, + &ff_image_jpegls_pipe_demuxer, + &ff_image_pam_pipe_demuxer, + &ff_image_pbm_pipe_demuxer, + &ff_image_pcx_pipe_demuxer, + &ff_image_pgmyuv_pipe_demuxer, + &ff_image_pgm_pipe_demuxer, + &ff_image_pictor_pipe_demuxer, + &ff_image_png_pipe_demuxer, + &ff_image_ppm_pipe_demuxer, + &ff_image_psd_pipe_demuxer, + &ff_image_qdraw_pipe_demuxer, + &ff_image_sgi_pipe_demuxer, + &ff_image_svg_pipe_demuxer, + &ff_image_sunrast_pipe_demuxer, + &ff_image_tiff_pipe_demuxer, + &ff_image_webp_pipe_demuxer, + &ff_image_xpm_pipe_demuxer, + &ff_image_xwd_pipe_demuxer, + &ff_webrtc_demuxer, + NULL }; diff --git a/libmetartccore3/src/ffmpeg/ffmpeg.c b/libmetartccore3/src/ffmpeg/ffmpeg.c new file mode 100755 index 00000000..d7798e00 --- /dev/null +++ b/libmetartccore3/src/ffmpeg/ffmpeg.c @@ -0,0 +1,5093 @@ +/* + * Copyright (c) 2000-2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
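The NULL-terminated demuxer_list above is the table that av_demuxer_iterate() in allformats.c walks, chaining on to registered input devices once the static list is exhausted (muxer_list.c plays the same role for av_muxer_iterate()). A minimal consumer of that public API, assuming a program linked against this libavformat build:

#include <stdio.h>
#include <libavformat/avformat.h>

int main(void)
{
    void *opaque = NULL; /* iteration cursor, owned by the caller */
    const AVInputFormat *f;

    while ((f = av_demuxer_iterate(&opaque)))
        printf("%s\n", f->name);
    return 0;
}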
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * multimedia converter based on the FFmpeg libraries
+ */
+
+#include "config.h"
+#include <ctype.h>
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#if HAVE_IO_H
+#include <io.h>
+#endif
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "libavformat/avformat.h"
+#include "libavdevice/avdevice.h"
+#include "libswresample/swresample.h"
+#include "libavutil/opt.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/fifo.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/internal.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/dict.h"
+#include "libavutil/display.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/avstring.h"
+#include "libavutil/libm.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/timestamp.h"
+#include "libavutil/bprint.h"
+#include "libavutil/time.h"
+#include "libavutil/thread.h"
+#include "libavutil/threadmessage.h"
+#include "libavcodec/mathops.h"
+#include "libavformat/os_support.h"
+
+# include "libavfilter/avfilter.h"
+# include "libavfilter/buffersrc.h"
+# include "libavfilter/buffersink.h"
+
+#if HAVE_SYS_RESOURCE_H
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#elif HAVE_GETPROCESSTIMES
+#include <windows.h>
+#endif
+#if HAVE_GETPROCESSMEMORYINFO
+#include <windows.h>
+#include <psapi.h>
+#endif
+#if HAVE_SETCONSOLECTRLHANDLER
+#include <windows.h>
+#endif
+
+
+#if HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+
+#if HAVE_TERMIOS_H
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <termios.h>
+#elif HAVE_KBHIT
+#include <conio.h>
+#endif
+
+#include <time.h>
+
+#include "ffmpeg.h"
+#include "cmdutils.h"
+
+#include "libavutil/avassert.h"
+
+const char program_name[] = "ffmpeg";
+const int program_birth_year = 2000;
+
+static FILE *vstats_file;
+
+const char *const forced_keyframes_const_names[] = {
+    "n",
+    "n_forced",
+    "prev_forced_n",
+    "prev_forced_t",
+    "t",
+    NULL
+};
+
+typedef struct BenchmarkTimeStamps {
+    int64_t real_usec;
+    int64_t user_usec;
+    int64_t sys_usec;
+} BenchmarkTimeStamps;
+
+static void do_video_stats(OutputStream *ost, int frame_size);
+static BenchmarkTimeStamps get_benchmark_time_stamps(void);
+static int64_t getmaxrss(void);
+static int ifilter_has_all_input_formats(FilterGraph *fg);
+
+static int run_as_daemon = 0;
+static int nb_frames_dup = 0;
+static unsigned dup_warning = 1000;
+static int nb_frames_drop = 0;
+static int64_t decode_error_stat[2];
+
+static int want_sdp = 1;
+
+static BenchmarkTimeStamps current_time;
+AVIOContext *progress_avio = NULL;
+
+static uint8_t *subtitle_out;
+
+InputStream **input_streams = NULL;
+int nb_input_streams = 0;
+InputFile **input_files = NULL;
+int nb_input_files = 0;
+
+OutputStream **output_streams = NULL;
+int nb_output_streams = 0;
+OutputFile **output_files = NULL;
+int nb_output_files = 0;
+
+FilterGraph **filtergraphs;
+int nb_filtergraphs;
+
+#if HAVE_TERMIOS_H
+
+/* init terminal so that we can grab keys */
+static struct termios oldtty;
+static int restore_tty;
+#endif
+
+#if HAVE_THREADS
+static void free_input_threads(void);
+#endif
+
+/* sub2video hack:
+   Convert subtitles to video with alpha to insert them in filter graphs.
+   This is a temporary solution until libavfilter gets real subtitles support.
+ */ + +static int sub2video_get_blank_frame(InputStream *ist) +{ + int ret; + AVFrame *frame = ist->sub2video.frame; + + av_frame_unref(frame); + ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w; + ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h; + ist->sub2video.frame->format = AV_PIX_FMT_RGB32; + if ((ret = av_frame_get_buffer(frame, 0)) < 0) + return ret; + memset(frame->data[0], 0, frame->height * frame->linesize[0]); + return 0; +} + +static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, + AVSubtitleRect *r) +{ + uint32_t *pal, *dst2; + uint8_t *src, *src2; + int x, y; + + if (r->type != SUBTITLE_BITMAP) { + av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n"); + return; + } + if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) { + av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n", + r->x, r->y, r->w, r->h, w, h + ); + return; + } + + dst += r->y * dst_linesize + r->x * 4; + src = r->data[0]; + pal = (uint32_t *)r->data[1]; + for (y = 0; y < r->h; y++) { + dst2 = (uint32_t *)dst; + src2 = src; + for (x = 0; x < r->w; x++) + *(dst2++) = pal[*(src2++)]; + dst += dst_linesize; + src += r->linesize[0]; + } +} + +static void sub2video_push_ref(InputStream *ist, int64_t pts) +{ + AVFrame *frame = ist->sub2video.frame; + int i; + int ret; + + av_assert1(frame->data[0]); + ist->sub2video.last_pts = frame->pts = pts; + for (i = 0; i < ist->nb_filters; i++) { + ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame, + AV_BUFFERSRC_FLAG_KEEP_REF | + AV_BUFFERSRC_FLAG_PUSH); + if (ret != AVERROR_EOF && ret < 0) + av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n", + av_err2str(ret)); + } +} + +void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub) +{ + AVFrame *frame = ist->sub2video.frame; + int8_t *dst; + int dst_linesize; + int num_rects, i; + int64_t pts, end_pts; + + if (!frame) + return; + if (sub) { + pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL, + AV_TIME_BASE_Q, ist->st->time_base); + end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL, + AV_TIME_BASE_Q, ist->st->time_base); + num_rects = sub->num_rects; + } else { + /* If we are initializing the system, utilize current heartbeat + PTS as the start time, and show until the following subpicture + is received. Otherwise, utilize the previous subpicture's end time + as the fall-back value. */ + pts = ist->sub2video.initialize ? + heartbeat_pts : ist->sub2video.end_pts; + end_pts = INT64_MAX; + num_rects = 0; + } + if (sub2video_get_blank_frame(ist) < 0) { + av_log(ist->dec_ctx, AV_LOG_ERROR, + "Impossible to get a blank canvas.\n"); + return; + } + dst = frame->data [0]; + dst_linesize = frame->linesize[0]; + for (i = 0; i < num_rects; i++) + sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]); + sub2video_push_ref(ist, pts); + ist->sub2video.end_pts = end_pts; + ist->sub2video.initialize = 0; +} + +static void sub2video_heartbeat(InputStream *ist, int64_t pts) +{ + InputFile *infile = input_files[ist->file_index]; + int i, j, nb_reqs; + int64_t pts2; + + /* When a frame is read from a file, examine all sub2video streams in + the same file and send the sub2video frame again. Otherwise, decoded + video frames could be accumulating in the filter graph while a filter + (possibly overlay) is desperately waiting for a subtitle frame. 
*/ + for (i = 0; i < infile->nb_streams; i++) { + InputStream *ist2 = input_streams[infile->ist_index + i]; + if (!ist2->sub2video.frame) + continue; + /* subtitles seem to be usually muxed ahead of other streams; + if not, subtracting a larger time here is necessary */ + pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1; + /* do not send the heartbeat frame if the subtitle is already ahead */ + if (pts2 <= ist2->sub2video.last_pts) + continue; + if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize) + /* if we have hit the end of the current displayed subpicture, + or if we need to initialize the system, update the + overlayed subpicture and its start/end times */ + sub2video_update(ist2, pts2 + 1, NULL); + for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++) + nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter); + if (nb_reqs) + sub2video_push_ref(ist2, pts2); + } +} + +static void sub2video_flush(InputStream *ist) +{ + int i; + int ret; + + if (ist->sub2video.end_pts < INT64_MAX) + sub2video_update(ist, INT64_MAX, NULL); + for (i = 0; i < ist->nb_filters; i++) { + ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL); + if (ret != AVERROR_EOF && ret < 0) + av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n"); + } +} + +/* end of sub2video hack */ + +static void term_exit_sigsafe(void) +{ +#if HAVE_TERMIOS_H + if(restore_tty) + tcsetattr (0, TCSANOW, &oldtty); +#endif +} + +void term_exit(void) +{ + av_log(NULL, AV_LOG_QUIET, "%s", ""); + term_exit_sigsafe(); +} + +static volatile int received_sigterm = 0; +static volatile int received_nb_signals = 0; +static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0); +static volatile int ffmpeg_exited = 0; +static int main_return_code = 0; + +static void +sigterm_handler(int sig) +{ + int ret; + received_sigterm = sig; + received_nb_signals++; + term_exit_sigsafe(); + if(received_nb_signals > 3) { + ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n", + strlen("Received > 3 system signals, hard exiting\n")); + if (ret < 0) { /* Do nothing */ }; + exit(123); + } +} + +#if HAVE_SETCONSOLECTRLHANDLER +static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) +{ + av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType); + + switch (fdwCtrlType) + { + case CTRL_C_EVENT: + case CTRL_BREAK_EVENT: + sigterm_handler(SIGINT); + return TRUE; + + case CTRL_CLOSE_EVENT: + case CTRL_LOGOFF_EVENT: + case CTRL_SHUTDOWN_EVENT: + sigterm_handler(SIGTERM); + /* Basically, with these 3 events, when we return from this method the + process is hard terminated, so stall as long as we need to + to try and let the main thread(s) clean up and gracefully terminate + (we have at most 5 seconds, but should be done far before that). */ + while (!ffmpeg_exited) { + Sleep(0); + } + return TRUE; + + default: + av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType); + return FALSE; + } +} +#endif + +void term_init(void) +{ +#if HAVE_TERMIOS_H + if (!run_as_daemon && stdin_interaction) { + struct termios tty; + if (tcgetattr (0, &tty) == 0) { + oldtty = tty; + restore_tty = 1; + + tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP + |INLCR|IGNCR|ICRNL|IXON); + tty.c_oflag |= OPOST; + tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN); + tty.c_cflag &= ~(CSIZE|PARENB); + tty.c_cflag |= CS8; + tty.c_cc[VMIN] = 1; + tty.c_cc[VTIME] = 0; + + tcsetattr (0, TCSANOW, &tty); + } + signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). 
*/ + } +#endif + + signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */ + signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */ +#ifdef SIGXCPU + signal(SIGXCPU, sigterm_handler); +#endif +#ifdef SIGPIPE + signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */ +#endif +#if HAVE_SETCONSOLECTRLHANDLER + SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE); +#endif +} + +/* read a key without blocking */ +static int read_key(void) +{ + unsigned char ch; +#if HAVE_TERMIOS_H + int n = 1; + struct timeval tv; + fd_set rfds; + + FD_ZERO(&rfds); + FD_SET(0, &rfds); + tv.tv_sec = 0; + tv.tv_usec = 0; + n = select(1, &rfds, NULL, NULL, &tv); + if (n > 0) { + n = read(0, &ch, 1); + if (n == 1) + return ch; + + return n; + } +#elif HAVE_KBHIT +# if HAVE_PEEKNAMEDPIPE + static int is_pipe; + static HANDLE input_handle; + DWORD dw, nchars; + if(!input_handle){ + input_handle = GetStdHandle(STD_INPUT_HANDLE); + is_pipe = !GetConsoleMode(input_handle, &dw); + } + + if (is_pipe) { + /* When running under a GUI, you will end here. */ + if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) { + // input pipe may have been closed by the program that ran ffmpeg + return -1; + } + //Read it + if(nchars != 0) { + read(0, &ch, 1); + return ch; + }else{ + return -1; + } + } +# endif + if(kbhit()) + return(getch()); +#endif + return -1; +} + +static int decode_interrupt_cb(void *ctx) +{ + return received_nb_signals > atomic_load(&transcode_init_done); +} + +const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL }; + +static void ffmpeg_cleanup(int ret) +{ + int i, j; + + if (do_benchmark) { + int maxrss = getmaxrss() / 1024; + av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss); + } + + for (i = 0; i < nb_filtergraphs; i++) { + FilterGraph *fg = filtergraphs[i]; + avfilter_graph_free(&fg->graph); + for (j = 0; j < fg->nb_inputs; j++) { + InputFilter *ifilter = fg->inputs[j]; + struct InputStream *ist = ifilter->ist; + + while (av_fifo_size(ifilter->frame_queue)) { + AVFrame *frame; + av_fifo_generic_read(ifilter->frame_queue, &frame, + sizeof(frame), NULL); + av_frame_free(&frame); + } + av_fifo_freep(&ifilter->frame_queue); + if (ist->sub2video.sub_queue) { + while (av_fifo_size(ist->sub2video.sub_queue)) { + AVSubtitle sub; + av_fifo_generic_read(ist->sub2video.sub_queue, + &sub, sizeof(sub), NULL); + avsubtitle_free(&sub); + } + av_fifo_freep(&ist->sub2video.sub_queue); + } + av_buffer_unref(&ifilter->hw_frames_ctx); + av_freep(&ifilter->name); + av_freep(&fg->inputs[j]); + } + av_freep(&fg->inputs); + for (j = 0; j < fg->nb_outputs; j++) { + OutputFilter *ofilter = fg->outputs[j]; + + avfilter_inout_free(&ofilter->out_tmp); + av_freep(&ofilter->name); + av_freep(&ofilter->formats); + av_freep(&ofilter->channel_layouts); + av_freep(&ofilter->sample_rates); + av_freep(&fg->outputs[j]); + } + av_freep(&fg->outputs); + av_freep(&fg->graph_desc); + + av_freep(&filtergraphs[i]); + } + av_freep(&filtergraphs); + + av_freep(&subtitle_out); + + /* close files */ + for (i = 0; i < nb_output_files; i++) { + OutputFile *of = output_files[i]; + AVFormatContext *s; + if (!of) + continue; + s = of->ctx; + if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE)) + avio_closep(&s->pb); + avformat_free_context(s); + av_dict_free(&of->opts); + + av_freep(&output_files[i]); + } + for (i = 0; i < nb_output_streams; i++) { + OutputStream *ost = output_streams[i]; + + if (!ost) + continue; + + av_bsf_free(&ost->bsf_ctx); + + av_frame_free(&ost->filtered_frame); + av_frame_free(&ost->last_frame); 
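ffmpeg_cleanup() keeps repeating one idiom for AVFifoBuffer queues whose elements own resources (frame_queue and sub_queue above, muxing_queue below): pop each element out by value, release it, then free the FIFO itself. Factored out for clarity — this helper does not exist in the patch, it only restates the inline loops using the same libavutil calls:

#include "libavutil/fifo.h"
#include "libavutil/frame.h"

/* Drain a FIFO of AVFrame* elements, freeing the frames and the FIFO. */
static void drain_frame_fifo(AVFifoBuffer **fifo)
{
    while (*fifo && av_fifo_size(*fifo)) {
        AVFrame *frame;
        av_fifo_generic_read(*fifo, &frame, sizeof(frame), NULL);
        av_frame_free(&frame);
    }
    av_fifo_freep(fifo);
}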
+ av_dict_free(&ost->encoder_opts); + + av_freep(&ost->forced_keyframes); + av_expr_free(ost->forced_keyframes_pexpr); + av_freep(&ost->avfilter); + av_freep(&ost->logfile_prefix); + + av_freep(&ost->audio_channels_map); + ost->audio_channels_mapped = 0; + + av_dict_free(&ost->sws_dict); + av_dict_free(&ost->swr_opts); + + avcodec_free_context(&ost->enc_ctx); + avcodec_parameters_free(&ost->ref_par); + + if (ost->muxing_queue) { + while (av_fifo_size(ost->muxing_queue)) { + AVPacket pkt; + av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL); + av_packet_unref(&pkt); + } + av_fifo_freep(&ost->muxing_queue); + } + + av_freep(&output_streams[i]); + } +#if HAVE_THREADS + free_input_threads(); +#endif + for (i = 0; i < nb_input_files; i++) { + avformat_close_input(&input_files[i]->ctx); + av_freep(&input_files[i]); + } + for (i = 0; i < nb_input_streams; i++) { + InputStream *ist = input_streams[i]; + + av_frame_free(&ist->decoded_frame); + av_frame_free(&ist->filter_frame); + av_dict_free(&ist->decoder_opts); + avsubtitle_free(&ist->prev_sub.subtitle); + av_frame_free(&ist->sub2video.frame); + av_freep(&ist->filters); + av_freep(&ist->hwaccel_device); + av_freep(&ist->dts_buffer); + + avcodec_free_context(&ist->dec_ctx); + + av_freep(&input_streams[i]); + } + + if (vstats_file) { + if (fclose(vstats_file)) + av_log(NULL, AV_LOG_ERROR, + "Error closing vstats file, loss of information possible: %s\n", + av_err2str(AVERROR(errno))); + } + av_freep(&vstats_filename); + + av_freep(&input_streams); + av_freep(&input_files); + av_freep(&output_streams); + av_freep(&output_files); + + uninit_opts(); + + avformat_network_deinit(); + + if (received_sigterm) { + av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n", + (int) received_sigterm); + } else if (ret && atomic_load(&transcode_init_done)) { + av_log(NULL, AV_LOG_INFO, "Conversion failed!\n"); + } + term_exit(); + ffmpeg_exited = 1; +} + +void remove_avoptions(AVDictionary **a, AVDictionary *b) +{ + AVDictionaryEntry *t = NULL; + + while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) { + av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE); + } +} + +void assert_avoptions(AVDictionary *m) +{ + AVDictionaryEntry *t; + if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) { + av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key); + exit_program(1); + } +} + +static void abort_codec_experimental(AVCodec *c, int encoder) +{ + exit_program(1); +} + +static void update_benchmark(const char *fmt, ...) +{ + if (do_benchmark_all) { + BenchmarkTimeStamps t = get_benchmark_time_stamps(); + va_list va; + char buf[1024]; + + if (fmt) { + va_start(va, fmt); + vsnprintf(buf, sizeof(buf), fmt, va); + va_end(va); + av_log(NULL, AV_LOG_INFO, + "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n", + t.user_usec - current_time.user_usec, + t.sys_usec - current_time.sys_usec, + t.real_usec - current_time.real_usec, buf); + } + current_time = t; + } +} + +static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others) +{ + int i; + for (i = 0; i < nb_output_streams; i++) { + OutputStream *ost2 = output_streams[i]; + ost2->finished |= ost == ost2 ? this_stream : others; + } +} + +static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue) +{ + AVFormatContext *s = of->ctx; + AVStream *st = ost->st; + int ret; + + /* + * Audio encoders may split the packets -- #frames in != #packets out. 
+     * But there is no reordering, so we can limit the number of output packets
+     * by simply dropping them here.
+     * Counting encoded video frames needs to be done separately because of
+     * reordering, see do_video_out().
+     * Do not count the packet when unqueued because it has been counted when queued.
+     */
+    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
+        if (ost->frame_number >= ost->max_frames) {
+            av_packet_unref(pkt);
+            return;
+        }
+        ost->frame_number++;
+    }
+
+    if (!of->header_written) {
+        AVPacket tmp_pkt = {0};
+        /* the muxer is not initialized yet, buffer the packet */
+        if (!av_fifo_space(ost->muxing_queue)) {
+            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
+                                 ost->max_muxing_queue_size);
+            if (new_size <= av_fifo_size(ost->muxing_queue)) {
+                av_log(NULL, AV_LOG_ERROR,
+                       "Too many packets buffered for output stream %d:%d.\n",
+                       ost->file_index, ost->st->index);
+                exit_program(1);
+            }
+            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
+            if (ret < 0)
+                exit_program(1);
+        }
+        ret = av_packet_make_refcounted(pkt);
+        if (ret < 0)
+            exit_program(1);
+        av_packet_move_ref(&tmp_pkt, pkt);
+        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
+        return;
+    }
+
+    if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
+        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
+        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
+
+    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
+        int i;
+        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
+                                              NULL);
+        ost->quality = sd ? AV_RL32(sd) : -1;
+        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
+
+        for (i = 0; i < FF_ARRAY_ELEMS(ost->error); i++) {
+            if (sd && i < sd[5])
+                ost->error[i] = AV_RL64(sd + 8 + 8*i);
+            else
+                ost->error[i] = -1;
+        }
+
+        if (ost->frame_rate.num && ost->is_cfr) {
+            if (pkt->duration > 0)
+                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
+            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
+                                         ost->mux_timebase);
+        }
+    }
+
+    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
+
+    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
+        if (pkt->dts != AV_NOPTS_VALUE &&
+            pkt->pts != AV_NOPTS_VALUE &&
+            pkt->dts > pkt->pts) {
+            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
+                   pkt->dts, pkt->pts,
+                   ost->file_index, ost->st->index);
+            pkt->pts =
+            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
+                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
+                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
+        }
+        if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
+            pkt->dts != AV_NOPTS_VALUE &&
+            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
+            ost->last_mux_dts != AV_NOPTS_VALUE) {
+            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
+            if (pkt->dts < max) {
+                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? 
AV_LOG_WARNING : AV_LOG_DEBUG; + if (exit_on_error) + loglevel = AV_LOG_ERROR; + av_log(s, loglevel, "Non-monotonous DTS in output stream " + "%d:%d; previous: %"PRId64", current: %"PRId64"; ", + ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts); + if (exit_on_error) { + av_log(NULL, AV_LOG_FATAL, "aborting.\n"); + exit_program(1); + } + av_log(s, loglevel, "changing to %"PRId64". This may result " + "in incorrect timestamps in the output file.\n", + max); + if (pkt->pts >= pkt->dts) + pkt->pts = FFMAX(pkt->pts, max); + pkt->dts = max; + } + } + } + ost->last_mux_dts = pkt->dts; + + ost->data_size += pkt->size; + ost->packets_written++; + + pkt->stream_index = ost->index; + + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "muxer <- type:%s " + "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n", + av_get_media_type_string(ost->enc_ctx->codec_type), + av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base), + av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base), + pkt->size + ); + } + + ret = av_interleaved_write_frame(s, pkt); + if (ret < 0) { + print_error("av_interleaved_write_frame()", ret); + main_return_code = 1; + close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED); + } + av_packet_unref(pkt); +} + +static void close_output_stream(OutputStream *ost) +{ + OutputFile *of = output_files[ost->file_index]; + + ost->finished |= ENCODER_FINISHED; + if (of->shortest) { + int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q); + of->recording_time = FFMIN(of->recording_time, end); + } +} + +/* + * Send a single packet to the output, applying any bitstream filters + * associated with the output stream. This may result in any number + * of packets actually being written, depending on what bitstream + * filters are applied. The supplied packet is consumed and will be + * blank (as if newly-allocated) when this function returns. + * + * If eof is set, instead indicate EOF to all bitstream filters and + * therefore flush any delayed packets to the output. A blank packet + * must be supplied in this case. + */ +static void output_packet(OutputFile *of, AVPacket *pkt, + OutputStream *ost, int eof) +{ + int ret = 0; + + /* apply the output bitstream filters */ + if (ost->bsf_ctx) { + ret = av_bsf_send_packet(ost->bsf_ctx, eof ? 
NULL : pkt); + if (ret < 0) + goto finish; + while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0) + write_packet(of, pkt, ost, 0); + if (ret == AVERROR(EAGAIN)) + ret = 0; + } else if (!eof) + write_packet(of, pkt, ost, 0); + +finish: + if (ret < 0 && ret != AVERROR_EOF) { + av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output " + "packet for stream #%d:%d.\n", ost->file_index, ost->index); + if(exit_on_error) + exit_program(1); + } +} + +static int check_recording_time(OutputStream *ost) +{ + OutputFile *of = output_files[ost->file_index]; + + if (of->recording_time != INT64_MAX && + av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time, + AV_TIME_BASE_Q) >= 0) { + close_output_stream(ost); + return 0; + } + return 1; +} + +static void do_audio_out(OutputFile *of, OutputStream *ost, + AVFrame *frame) +{ + AVCodecContext *enc = ost->enc_ctx; + AVPacket pkt; + int ret; + + av_init_packet(&pkt); + pkt.data = NULL; + pkt.size = 0; + + if (!check_recording_time(ost)) + return; + + if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0) + frame->pts = ost->sync_opts; + ost->sync_opts = frame->pts + frame->nb_samples; + ost->samples_encoded += frame->nb_samples; + ost->frames_encoded++; + + av_assert0(pkt.size || !pkt.data); + update_benchmark(NULL); + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "encoder <- type:audio " + "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n", + av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base), + enc->time_base.num, enc->time_base.den); + } + + ret = avcodec_send_frame(enc, frame); + if (ret < 0) + goto error; + + while (1) { + ret = avcodec_receive_packet(enc, &pkt); + if (ret == AVERROR(EAGAIN)) + break; + if (ret < 0) + goto error; + + update_benchmark("encode_audio %d.%d", ost->file_index, ost->index); + + av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase); + + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "encoder -> type:audio " + "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n", + av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base), + av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base)); + } + + output_packet(of, &pkt, ost, 0); + } + + return; +error: + av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n"); + exit_program(1); +} + +static void do_subtitle_out(OutputFile *of, + OutputStream *ost, + AVSubtitle *sub) +{ + int subtitle_out_max_size = 1024 * 1024; + int subtitle_out_size, nb, i; + AVCodecContext *enc; + AVPacket pkt; + int64_t pts; + + if (sub->pts == AV_NOPTS_VALUE) { + av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n"); + if (exit_on_error) + exit_program(1); + return; + } + + enc = ost->enc_ctx; + + if (!subtitle_out) { + subtitle_out = av_malloc(subtitle_out_max_size); + if (!subtitle_out) { + av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n"); + exit_program(1); + } + } + + /* Note: DVB subtitle need one packet to draw them and one other + packet to clear them */ + /* XXX: signal it in the codec context ? 
*/ + if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) + nb = 2; + else + nb = 1; + + /* shift timestamp to honor -ss and make check_recording_time() work with -t */ + pts = sub->pts; + if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE) + pts -= output_files[ost->file_index]->start_time; + for (i = 0; i < nb; i++) { + unsigned save_num_rects = sub->num_rects; + + ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base); + if (!check_recording_time(ost)) + return; + + sub->pts = pts; + // start_display_time is required to be 0 + sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q); + sub->end_display_time -= sub->start_display_time; + sub->start_display_time = 0; + if (i == 1) + sub->num_rects = 0; + + ost->frames_encoded++; + + subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out, + subtitle_out_max_size, sub); + if (i == 1) + sub->num_rects = save_num_rects; + if (subtitle_out_size < 0) { + av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n"); + exit_program(1); + } + + av_init_packet(&pkt); + pkt.data = subtitle_out; + pkt.size = subtitle_out_size; + pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase); + pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); + if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) { + /* XXX: the pts correction is handled here. Maybe handling + it in the codec would be better */ + if (i == 0) + pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); + else + pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); + } + pkt.dts = pkt.pts; + output_packet(of, &pkt, ost, 0); + } +} + +static void do_video_out(OutputFile *of, + OutputStream *ost, + AVFrame *next_picture, + double sync_ipts) +{ + int ret, format_video_sync; + AVPacket pkt; + AVCodecContext *enc = ost->enc_ctx; + AVCodecParameters *mux_par = ost->st->codecpar; + AVRational frame_rate; + int nb_frames, nb0_frames, i; + double delta, delta0; + double duration = 0; + int frame_size = 0; + InputStream *ist = NULL; + AVFilterContext *filter = ost->filter->filter; + + if (ost->source_index >= 0) + ist = input_streams[ost->source_index]; + + frame_rate = av_buffersink_get_frame_rate(filter); + if (frame_rate.num > 0 && frame_rate.den > 0) + duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base)); + + if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num) + duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base))); + + if (!ost->filters_script && + !ost->filters && + (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) && + next_picture && + ist && + lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) { + duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)); + } + + if (!next_picture) { + //end, flushing + nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0], + ost->last_nb0_frames[1], + ost->last_nb0_frames[2]); + } else { + delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output. 
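+        // delta, computed next, is the same drift measured at the end of the
+        // frame (delta0 + duration); the VSYNC_* switch below turns the pair
+        // into nb_frames/nb0_frames.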
+ delta = delta0 + duration; + + /* by default, we output a single frame */ + nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR) + nb_frames = 1; + + format_video_sync = video_sync_method; + if (format_video_sync == VSYNC_AUTO) { + if(!strcmp(of->ctx->oformat->name, "avi")) { + format_video_sync = VSYNC_VFR; + } else + format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR; + if ( ist + && format_video_sync == VSYNC_CFR + && input_files[ist->file_index]->ctx->nb_streams == 1 + && input_files[ist->file_index]->input_ts_offset == 0) { + format_video_sync = VSYNC_VSCFR; + } + if (format_video_sync == VSYNC_CFR && copy_ts) { + format_video_sync = VSYNC_VSCFR; + } + } + ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR); + + if (delta0 < 0 && + delta > 0 && + format_video_sync != VSYNC_PASSTHROUGH && + format_video_sync != VSYNC_DROP) { + if (delta0 < -0.6) { + av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0); + } else + av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0); + sync_ipts = ost->sync_opts; + duration += delta0; + delta0 = 0; + } + + switch (format_video_sync) { + case VSYNC_VSCFR: + if (ost->frame_number == 0 && delta0 >= 0.5) { + av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0)); + delta = duration; + delta0 = 0; + ost->sync_opts = llrint(sync_ipts); + } + case VSYNC_CFR: + // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c + if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) { + nb_frames = 0; + } else if (delta < -1.1) + nb_frames = 0; + else if (delta > 1.1) { + nb_frames = lrintf(delta); + if (delta0 > 1.1) + nb0_frames = llrintf(delta0 - 0.6); + } + break; + case VSYNC_VFR: + if (delta <= -0.6) + nb_frames = 0; + else if (delta > 0.6) + ost->sync_opts = llrint(sync_ipts); + break; + case VSYNC_DROP: + case VSYNC_PASSTHROUGH: + ost->sync_opts = llrint(sync_ipts); + break; + default: + av_assert0(0); + } + } + + nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number); + nb0_frames = FFMIN(nb0_frames, nb_frames); + + memmove(ost->last_nb0_frames + 1, + ost->last_nb0_frames, + sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1)); + ost->last_nb0_frames[0] = nb0_frames; + + if (nb0_frames == 0 && ost->last_dropped) { + nb_frames_drop++; + av_log(NULL, AV_LOG_VERBOSE, + "*** dropping frame %d from stream %d at ts %"PRId64"\n", + ost->frame_number, ost->st->index, ost->last_frame->pts); + } + if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) { + if (nb_frames > dts_error_threshold * 30) { + av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1); + nb_frames_drop++; + return; + } + nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames); + av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1); + if (nb_frames_dup > dup_warning) { + av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning); + dup_warning *= 10; + } + } + ost->last_dropped = nb_frames == nb0_frames && next_picture; + + /* duplicates frame if needed */ + for (i = 0; i < nb_frames; i++) { + AVFrame *in_picture; + int forced_keyframe = 0; + double pts_time; + av_init_packet(&pkt); + pkt.data = NULL; + pkt.size = 0; + + if (i < 
nb0_frames && ost->last_frame) { + in_picture = ost->last_frame; + } else + in_picture = next_picture; + + if (!in_picture) + return; + + in_picture->pts = ost->sync_opts; + + if (!check_recording_time(ost)) + return; + + if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) && + ost->top_field_first >= 0) + in_picture->top_field_first = !!ost->top_field_first; + + if (in_picture->interlaced_frame) { + if (enc->codec->id == AV_CODEC_ID_MJPEG) + mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB; + else + mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT; + } else + mux_par->field_order = AV_FIELD_PROGRESSIVE; + + in_picture->quality = enc->global_quality; + in_picture->pict_type = 0; + + if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE && + in_picture->pts != AV_NOPTS_VALUE) + ost->forced_kf_ref_pts = in_picture->pts; + + pts_time = in_picture->pts != AV_NOPTS_VALUE ? + (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN; + if (ost->forced_kf_index < ost->forced_kf_count && + in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) { + ost->forced_kf_index++; + forced_keyframe = 1; + } else if (ost->forced_keyframes_pexpr) { + double res; + ost->forced_keyframes_expr_const_values[FKF_T] = pts_time; + res = av_expr_eval(ost->forced_keyframes_pexpr, + ost->forced_keyframes_expr_const_values, NULL); + ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n", + ost->forced_keyframes_expr_const_values[FKF_N], + ost->forced_keyframes_expr_const_values[FKF_N_FORCED], + ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N], + ost->forced_keyframes_expr_const_values[FKF_T], + ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T], + res); + if (res) { + forced_keyframe = 1; + ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = + ost->forced_keyframes_expr_const_values[FKF_N]; + ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = + ost->forced_keyframes_expr_const_values[FKF_T]; + ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1; + } + + ost->forced_keyframes_expr_const_values[FKF_N] += 1; + } else if ( ost->forced_keyframes + && !strncmp(ost->forced_keyframes, "source", 6) + && in_picture->key_frame==1 + && !i) { + forced_keyframe = 1; + } + + if (forced_keyframe) { + in_picture->pict_type = AV_PICTURE_TYPE_I; + av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time); + } + + update_benchmark(NULL); + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "encoder <- type:video " + "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n", + av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base), + enc->time_base.num, enc->time_base.den); + } + + ost->frames_encoded++; + + ret = avcodec_send_frame(enc, in_picture); + if (ret < 0) + goto error; + // Make sure Closed Captions will not be duplicated + av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC); + + while (1) { + ret = avcodec_receive_packet(enc, &pkt); + update_benchmark("encode_video %d.%d", ost->file_index, ost->index); + if (ret == AVERROR(EAGAIN)) + break; + if (ret < 0) + goto error; + + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "encoder -> type:video " + "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n", + av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base), + av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base)); + } + + if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY)) + 
pkt.pts = ost->sync_opts; + + av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase); + + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "encoder -> type:video " + "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n", + av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase), + av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase)); + } + + frame_size = pkt.size; + output_packet(of, &pkt, ost, 0); + + /* if two pass, output log */ + if (ost->logfile && enc->stats_out) { + fprintf(ost->logfile, "%s", enc->stats_out); + } + } + ost->sync_opts++; + /* + * For video, number of frames in == number of packets out. + * But there may be reordering, so we can't throw away frames on encoder + * flush, we need to limit them here, before they go into encoder. + */ + ost->frame_number++; + + if (vstats_filename && frame_size) + do_video_stats(ost, frame_size); + } + + if (!ost->last_frame) + ost->last_frame = av_frame_alloc(); + av_frame_unref(ost->last_frame); + if (next_picture && ost->last_frame) + av_frame_ref(ost->last_frame, next_picture); + else + av_frame_free(&ost->last_frame); + + return; +error: + av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n"); + exit_program(1); +} + +static double psnr(double d) +{ + return -10.0 * log10(d); +} + +static void do_video_stats(OutputStream *ost, int frame_size) +{ + AVCodecContext *enc; + int frame_number; + double ti1, bitrate, avg_bitrate; + + /* this is executed just the first time do_video_stats is called */ + if (!vstats_file) { + vstats_file = fopen(vstats_filename, "w"); + if (!vstats_file) { + perror("fopen"); + exit_program(1); + } + } + + enc = ost->enc_ctx; + if (enc->codec_type == AVMEDIA_TYPE_VIDEO) { + frame_number = ost->st->nb_frames; + if (vstats_version <= 1) { + fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, + ost->quality / (float)FF_QP2LAMBDA); + } else { + fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number, + ost->quality / (float)FF_QP2LAMBDA); + } + + if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR)) + fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0))); + + fprintf(vstats_file,"f_size= %6d ", frame_size); + /* compute pts value */ + ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base); + if (ti1 < 0.01) + ti1 = 0.01; + + bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0; + avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0; + fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ", + (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate); + fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type)); + } +} + +static int init_output_stream(OutputStream *ost, char *error, int error_len); +static int yang_init_output_stream(OutputFile *oc,OutputStream *ost, char *error, int error_len); + +static void finish_output_stream(OutputStream *ost) +{ + OutputFile *of = output_files[ost->file_index]; + int i; + + ost->finished = ENCODER_FINISHED | MUXER_FINISHED; + + if (of->shortest) { + for (i = 0; i < of->ctx->nb_streams; i++) + output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED; + } +} + +/** + * Get and encode new output from any of the filtergraphs, without causing + * activity. 
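+ * "Without causing activity" means the sinks are read with
+ * AV_BUFFERSINK_FLAG_NO_REQUEST: only frames the graph has already
+ * produced are consumed, nothing new is requested upstream.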
+ *
+ * @return 0 for success, <0 for severe errors
+ */
+static int reap_filters(int flush)
+{
+    AVFrame *filtered_frame = NULL;
+    int i;
+
+    /* Reap all buffers present in the buffer sinks */
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+        OutputFile *of = output_files[ost->file_index];
+        AVFilterContext *filter;
+        AVCodecContext *enc = ost->enc_ctx;
+        int ret = 0;
+
+        if (!ost->filter || !ost->filter->graph->graph)
+            continue;
+        filter = ost->filter->filter;
+
+        if (!ost->initialized) {
+            char error[1024] = "";
+
+            ret = yang_init_output_stream(of,ost, error, sizeof(error));
+            if (ret < 0) {
+                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
+                       ost->file_index, ost->index, error);
+                exit_program(1);
+            }
+        }
+
+        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
+            return AVERROR(ENOMEM);
+        }
+        filtered_frame = ost->filtered_frame;
+
+        while (1) {
+            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
+            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
+                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
+            if (ret < 0) {
+                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
+                    av_log(NULL, AV_LOG_WARNING,
+                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
+                } else if (flush && ret == AVERROR_EOF) {
+                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
+                        do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
+                }
+                break;
+            }
+            if (ost->finished) {
+                av_frame_unref(filtered_frame);
+                continue;
+            }
+            if (filtered_frame->pts != AV_NOPTS_VALUE) {
+                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
+                AVRational filter_tb = av_buffersink_get_time_base(filter);
+                AVRational tb = enc->time_base;
+                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
+
+                tb.den <<= extra_bits;
+                float_pts =
+                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
+                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
+                float_pts /= 1 << extra_bits;
+                // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
+                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
+
+                filtered_frame->pts =
+                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
+                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
+            }
+
+            switch (av_buffersink_get_type(filter)) {
+            case AVMEDIA_TYPE_VIDEO:
+                if (!ost->frame_aspect_ratio.num)
+                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
+
+                if (debug_ts) {
+                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
+                           av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
+                           float_pts,
+                           enc->time_base.num, enc->time_base.den);
+                }
+
+                do_video_out(of, ost, filtered_frame, float_pts);
+                break;
+            case AVMEDIA_TYPE_AUDIO:
+                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
+                    enc->channels != filtered_frame->channels) {
+                    av_log(NULL, AV_LOG_ERROR,
+                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
+                    break;
+                }
+                do_audio_out(of, ost, filtered_frame);
+                break;
+            default:
+                // TODO support subtitle filters
+                av_assert0(0);
+            }
+
+            av_frame_unref(filtered_frame);
+        }
+    }
+
+    return 0;
+}
+
+static void print_final_stats(int64_t total_size)
+{
+    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
+    uint64_t subtitle_size = 0;
+    uint64_t data_size = 0;
+    float percent = -1.0;
+    int i, j;
+    int 
pass1_used = 1; + + for (i = 0; i < nb_output_streams; i++) { + OutputStream *ost = output_streams[i]; + switch (ost->enc_ctx->codec_type) { + case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break; + case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break; + case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break; + default: other_size += ost->data_size; break; + } + extra_size += ost->enc_ctx->extradata_size; + data_size += ost->data_size; + if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) + != AV_CODEC_FLAG_PASS1) + pass1_used = 0; + } + + if (data_size && total_size>0 && total_size >= data_size) + percent = 100.0 * (total_size - data_size) / data_size; + + av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ", + video_size / 1024.0, + audio_size / 1024.0, + subtitle_size / 1024.0, + other_size / 1024.0, + extra_size / 1024.0); + if (percent >= 0.0) + av_log(NULL, AV_LOG_INFO, "%f%%", percent); + else + av_log(NULL, AV_LOG_INFO, "unknown"); + av_log(NULL, AV_LOG_INFO, "\n"); + + /* print verbose per-stream stats */ + for (i = 0; i < nb_input_files; i++) { + InputFile *f = input_files[i]; + uint64_t total_packets = 0, total_size = 0; + + av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n", + i, f->ctx->url); + + for (j = 0; j < f->nb_streams; j++) { + InputStream *ist = input_streams[f->ist_index + j]; + enum AVMediaType type = ist->dec_ctx->codec_type; + + total_size += ist->data_size; + total_packets += ist->nb_packets; + + av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ", + i, j, media_type_string(type)); + av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ", + ist->nb_packets, ist->data_size); + + if (ist->decoding_needed) { + av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded", + ist->frames_decoded); + if (type == AVMEDIA_TYPE_AUDIO) + av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded); + av_log(NULL, AV_LOG_VERBOSE, "; "); + } + + av_log(NULL, AV_LOG_VERBOSE, "\n"); + } + + av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n", + total_packets, total_size); + } + + for (i = 0; i < nb_output_files; i++) { + OutputFile *of = output_files[i]; + uint64_t total_packets = 0, total_size = 0; + + av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n", + i, of->ctx->url); + + for (j = 0; j < of->ctx->nb_streams; j++) { + OutputStream *ost = output_streams[of->ost_index + j]; + enum AVMediaType type = ost->enc_ctx->codec_type; + + total_size += ost->data_size; + total_packets += ost->packets_written; + + av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ", + i, j, media_type_string(type)); + if (ost->encoding_needed) { + av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded", + ost->frames_encoded); + if (type == AVMEDIA_TYPE_AUDIO) + av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded); + av_log(NULL, AV_LOG_VERBOSE, "; "); + } + + av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ", + ost->packets_written, ost->data_size); + + av_log(NULL, AV_LOG_VERBOSE, "\n"); + } + + av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n", + total_packets, total_size); + } + if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){ + av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded "); + if (pass1_used) { + av_log(NULL, AV_LOG_WARNING, "\n"); + } else { + 
av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n"); + } + } +} + +static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time) +{ + AVBPrint buf, buf_script; + OutputStream *ost; + AVFormatContext *oc; + int64_t total_size; + AVCodecContext *enc; + int frame_number, vid, i; + double bitrate; + double speed; + int64_t pts = INT64_MIN + 1; + static int64_t last_time = -1; + static int qp_histogram[52]; + int hours, mins, secs, us; + const char *hours_sign; + int ret; + float t; + + if (!print_stats && !is_last_report && !progress_avio) + return; + + if (!is_last_report) { + if (last_time == -1) { + last_time = cur_time; + return; + } + if ((cur_time - last_time) < 500000) + return; + last_time = cur_time; + } + + t = (cur_time-timer_start) / 1000000.0; + + + oc = output_files[0]->ctx; + + total_size = avio_size(oc->pb); + if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too + total_size = avio_tell(oc->pb); + + vid = 0; + av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC); + av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC); + for (i = 0; i < nb_output_streams; i++) { + float q = -1; + ost = output_streams[i]; + enc = ost->enc_ctx; + if (!ost->stream_copy) + q = ost->quality / (float) FF_QP2LAMBDA; + + if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { + av_bprintf(&buf, "q=%2.1f ", q); + av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n", + ost->file_index, ost->index, q); + } + if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { + float fps; + + frame_number = ost->frame_number; + fps = t > 1 ? frame_number / t : 0; + av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ", + frame_number, fps < 9.95, fps, q); + av_bprintf(&buf_script, "frame=%d\n", frame_number); + av_bprintf(&buf_script, "fps=%.2f\n", fps); + av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n", + ost->file_index, ost->index, q); + if (is_last_report) + av_bprintf(&buf, "L"); + if (qp_hist) { + int j; + int qp = lrintf(q); + if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram)) + qp_histogram[qp]++; + for (j = 0; j < 32; j++) + av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1)); + } + + if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) { + int j; + double error, error_sum = 0; + double scale, scale_sum = 0; + double p; + char type[3] = { 'Y','U','V' }; + av_bprintf(&buf, "PSNR="); + for (j = 0; j < 3; j++) { + if (is_last_report) { + error = enc->error[j]; + scale = enc->width * enc->height * 255.0 * 255.0 * frame_number; + } else { + error = ost->error[j]; + scale = enc->width * enc->height * 255.0 * 255.0; + } + if (j) + scale /= 4; + error_sum += error; + scale_sum += scale; + p = psnr(error / scale); + av_bprintf(&buf, "%c:%2.2f ", type[j], p); + av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n", + ost->file_index, ost->index, type[j] | 32, p); + } + p = psnr(error_sum / scale_sum); + av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum)); + av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n", + ost->file_index, ost->index, p); + } + vid = 1; + } + /* compute min output value */ + if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) + pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st), + ost->st->time_base, AV_TIME_BASE_Q)); + if (is_last_report) + nb_frames_drop += ost->last_dropped; + } + + secs = FFABS(pts) / AV_TIME_BASE; + us = FFABS(pts) % AV_TIME_BASE; + mins = secs / 60; + secs %= 60; + hours = mins / 60; + mins %= 60; + hours_sign = (pts < 0) ? 
"-" : ""; + + bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1; + speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1; + + if (total_size < 0) av_bprintf(&buf, "size=N/A time="); + else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0); + if (pts == AV_NOPTS_VALUE) { + av_bprintf(&buf, "N/A "); + } else { + av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ", + hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE); + } + + if (bitrate < 0) { + av_bprintf(&buf, "bitrate=N/A"); + av_bprintf(&buf_script, "bitrate=N/A\n"); + }else{ + av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate); + av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate); + } + + if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n"); + else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size); + if (pts == AV_NOPTS_VALUE) { + av_bprintf(&buf_script, "out_time_us=N/A\n"); + av_bprintf(&buf_script, "out_time_ms=N/A\n"); + av_bprintf(&buf_script, "out_time=N/A\n"); + } else { + av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts); + av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts); + av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n", + hours_sign, hours, mins, secs, us); + } + + if (nb_frames_dup || nb_frames_drop) + av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop); + av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup); + av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop); + + if (speed < 0) { + av_bprintf(&buf, " speed=N/A"); + av_bprintf(&buf_script, "speed=N/A\n"); + } else { + av_bprintf(&buf, " speed=%4.3gx", speed); + av_bprintf(&buf_script, "speed=%4.3gx\n", speed); + } + + if (print_stats || is_last_report) { + const char end = is_last_report ? '\n' : '\r'; + if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) { + fprintf(stderr, "%s %c", buf.str, end); + } else + av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end); + + fflush(stderr); + } + av_bprint_finalize(&buf, NULL); + + if (progress_avio) { + av_bprintf(&buf_script, "progress=%s\n", + is_last_report ? "end" : "continue"); + avio_write(progress_avio, buf_script.str, + FFMIN(buf_script.len, buf_script.size - 1)); + avio_flush(progress_avio); + av_bprint_finalize(&buf_script, NULL); + if (is_last_report) { + if ((ret = avio_closep(&progress_avio)) < 0) + av_log(NULL, AV_LOG_ERROR, + "Error closing progress log, loss of information possible: %s\n", av_err2str(ret)); + } + } + + if (is_last_report) + print_final_stats(total_size); +} + +static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par) +{ + // We never got any input. Set a fake format, which will + // come from libavformat. + ifilter->format = par->format; + ifilter->sample_rate = par->sample_rate; + ifilter->channels = par->channels; + ifilter->channel_layout = par->channel_layout; + ifilter->width = par->width; + ifilter->height = par->height; + ifilter->sample_aspect_ratio = par->sample_aspect_ratio; +} + +static void flush_encoders(void) +{ + int i, ret; + + for (i = 0; i < nb_output_streams; i++) { + OutputStream *ost = output_streams[i]; + AVCodecContext *enc = ost->enc_ctx; + OutputFile *of = output_files[ost->file_index]; + + if (!ost->encoding_needed) + continue; + + // Try to enable encoding with no input frames. + // Maybe we should just let encoding fail instead. 
+ if (!ost->initialized) { + FilterGraph *fg = ost->filter->graph; + char error[1024] = ""; + + av_log(NULL, AV_LOG_WARNING, + "Finishing stream %d:%d without any data written to it.\n", + ost->file_index, ost->st->index); + + if (ost->filter && !fg->graph) { + int x; + for (x = 0; x < fg->nb_inputs; x++) { + InputFilter *ifilter = fg->inputs[x]; + if (ifilter->format < 0) + ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar); + } + + if (!ifilter_has_all_input_formats(fg)) + continue; + + ret = configure_filtergraph(fg); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n"); + exit_program(1); + } + + finish_output_stream(ost); + } + + ret = init_output_stream(ost, error, sizeof(error)); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", + ost->file_index, ost->index, error); + exit_program(1); + } + } + + if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO) + continue; + + for (;;) { + const char *desc = NULL; + AVPacket pkt; + int pkt_size; + + switch (enc->codec_type) { + case AVMEDIA_TYPE_AUDIO: + desc = "audio"; + break; + case AVMEDIA_TYPE_VIDEO: + desc = "video"; + break; + default: + av_assert0(0); + } + + av_init_packet(&pkt); + pkt.data = NULL; + pkt.size = 0; + + update_benchmark(NULL); + + while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) { + ret = avcodec_send_frame(enc, NULL); + if (ret < 0) { + av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n", + desc, + av_err2str(ret)); + exit_program(1); + } + } + + update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index); + if (ret < 0 && ret != AVERROR_EOF) { + av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n", + desc, + av_err2str(ret)); + exit_program(1); + } + if (ost->logfile && enc->stats_out) { + fprintf(ost->logfile, "%s", enc->stats_out); + } + if (ret == AVERROR_EOF) { + output_packet(of, &pkt, ost, 1); + break; + } + if (ost->finished & MUXER_FINISHED) { + av_packet_unref(&pkt); + continue; + } + av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase); + pkt_size = pkt.size; + output_packet(of, &pkt, ost, 0); + if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) { + do_video_stats(ost, pkt_size); + } + } + } +} + +/* + * Check whether a packet from ist should be written into ost at this time + */ +static int check_output_constraints(InputStream *ist, OutputStream *ost) +{ + OutputFile *of = output_files[ost->file_index]; + int ist_index = input_files[ist->file_index]->ist_index + ist->st->index; + + if (ost->source_index != ist_index) + return 0; + + if (ost->finished) + return 0; + + if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time) + return 0; + + return 1; +} + +static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt) +{ + OutputFile *of = output_files[ost->file_index]; + InputFile *f = input_files [ist->file_index]; + int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time; + int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase); + AVPacket opkt; + + // EOF: flush output bitstream filters. 
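+    // output_packet() expects a blank packet when eof is set; it forwards
+    // the EOF to the bitstream filters and drains whatever they still hold.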
+ if (!pkt) { + av_init_packet(&opkt); + opkt.data = NULL; + opkt.size = 0; + output_packet(of, &opkt, ost, 1); + return; + } + + if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && + !ost->copy_initial_nonkeyframes) + return; + + if (!ost->frame_number && !ost->copy_prior_start) { + int64_t comp_start = start_time; + if (copy_ts && f->start_time != AV_NOPTS_VALUE) + comp_start = FFMAX(start_time, f->start_time + f->ts_offset); + if (pkt->pts == AV_NOPTS_VALUE ? + ist->pts < comp_start : + pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base)) + return; + } + + if (of->recording_time != INT64_MAX && + ist->pts >= of->recording_time + start_time) { + close_output_stream(ost); + return; + } + + if (f->recording_time != INT64_MAX) { + start_time = f->ctx->start_time; + if (f->start_time != AV_NOPTS_VALUE && copy_ts) + start_time += f->start_time; + if (ist->pts >= f->recording_time + start_time) { + close_output_stream(ost); + return; + } + } + + /* force the input stream PTS */ + if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) + ost->sync_opts++; + + if (av_packet_ref(&opkt, pkt) < 0) + exit_program(1); + + if (pkt->pts != AV_NOPTS_VALUE) + opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time; + + if (pkt->dts == AV_NOPTS_VALUE) { + opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase); + } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { + int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size); + if(!duration) + duration = ist->dec_ctx->frame_size; + opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts, + (AVRational){1, ist->dec_ctx->sample_rate}, duration, + &ist->filter_in_rescale_delta_last, ost->mux_timebase); + /* dts will be set immediately afterwards to what pts is now */ + opkt.pts = opkt.dts - ost_tb_start_time; + } else + opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); + opkt.dts -= ost_tb_start_time; + + opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase); + + output_packet(of, &opkt, ost, 0); +} + +int guess_input_channel_layout(InputStream *ist) +{ + AVCodecContext *dec = ist->dec_ctx; + + if (!dec->channel_layout) { + char layout_name[256]; + + if (dec->channels > ist->guess_layout_max) + return 0; + dec->channel_layout = av_get_default_channel_layout(dec->channels); + if (!dec->channel_layout) + return 0; + av_get_channel_layout_string(layout_name, sizeof(layout_name), + dec->channels, dec->channel_layout); + av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream " + "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name); + } + return 1; +} + +static void check_decode_result(InputStream *ist, int *got_output, int ret) +{ + if (*got_output || ret<0) + decode_error_stat[ret<0] ++; + + if (ret < 0 && exit_on_error) + exit_program(1); + + if (*got_output && ist) { + if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) { + av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING, + "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index); + if (exit_on_error) + exit_program(1); + } + } +} + +// Filters can be configured only if the formats of all inputs are known. 
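+// Only audio and video inputs are checked: they are the types with a
+// negotiable format, so format < 0 on any of them means "not yet known".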
+static int ifilter_has_all_input_formats(FilterGraph *fg) +{ + int i; + for (i = 0; i < fg->nb_inputs; i++) { + if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO || + fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO)) + return 0; + } + return 1; +} + +static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame) +{ + FilterGraph *fg = ifilter->graph; + int need_reinit, ret, i; + + /* determine if the parameters for this input changed */ + need_reinit = ifilter->format != frame->format; + + switch (ifilter->ist->st->codecpar->codec_type) { + case AVMEDIA_TYPE_AUDIO: + need_reinit |= ifilter->sample_rate != frame->sample_rate || + ifilter->channels != frame->channels || + ifilter->channel_layout != frame->channel_layout; + break; + case AVMEDIA_TYPE_VIDEO: + need_reinit |= ifilter->width != frame->width || + ifilter->height != frame->height; + break; + } + + if (!ifilter->ist->reinit_filters && fg->graph) + need_reinit = 0; + + if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx || + (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data)) + need_reinit = 1; + + if (need_reinit) { + ret = ifilter_parameters_from_frame(ifilter, frame); + if (ret < 0) + return ret; + } + + /* (re)init the graph if possible, otherwise buffer the frame and return */ + if (need_reinit || !fg->graph) { + for (i = 0; i < fg->nb_inputs; i++) { + if (!ifilter_has_all_input_formats(fg)) { + AVFrame *tmp = av_frame_clone(frame); + if (!tmp) + return AVERROR(ENOMEM); + av_frame_unref(frame); + + if (!av_fifo_space(ifilter->frame_queue)) { + ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue)); + if (ret < 0) { + av_frame_free(&tmp); + return ret; + } + } + av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL); + return 0; + } + } + + ret = reap_filters(1); + if (ret < 0 && ret != AVERROR_EOF) { + av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret)); + return ret; + } + + ret = configure_filtergraph(fg); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n"); + return ret; + } + } + + ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH); + if (ret < 0) { + if (ret != AVERROR_EOF) + av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret)); + return ret; + } + + return 0; +} + +static int ifilter_send_eof(InputFilter *ifilter, int64_t pts) +{ + int ret; + + ifilter->eof = 1; + + if (ifilter->filter) { + ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH); + if (ret < 0) + return ret; + } else { + // the filtergraph was never configured + if (ifilter->format < 0) + ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar); + if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) { + av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index); + return AVERROR_INVALIDDATA; + } + } + + return 0; +} + +// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2. +// There is the following difference: if you got a frame, you must call +// it again with pkt=NULL. 
pkt==NULL is treated differently from pkt->size==0 +// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet) +static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt) +{ + int ret; + + *got_frame = 0; + + if (pkt) { + ret = avcodec_send_packet(avctx, pkt); + // In particular, we don't expect AVERROR(EAGAIN), because we read all + // decoded frames with avcodec_receive_frame() until done. + if (ret < 0 && ret != AVERROR_EOF) + return ret; + } + + ret = avcodec_receive_frame(avctx, frame); + if (ret < 0 && ret != AVERROR(EAGAIN)) + return ret; + if (ret >= 0) + *got_frame = 1; + + return 0; +} + +static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame) +{ + int i, ret; + AVFrame *f; + + av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */ + for (i = 0; i < ist->nb_filters; i++) { + if (i < ist->nb_filters - 1) { + f = ist->filter_frame; + ret = av_frame_ref(f, decoded_frame); + if (ret < 0) + break; + } else + f = decoded_frame; + ret = ifilter_send_frame(ist->filters[i], f); + if (ret == AVERROR_EOF) + ret = 0; /* ignore */ + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, + "Failed to inject frame into filter network: %s\n", av_err2str(ret)); + break; + } + } + return ret; +} + +static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, + int *decode_failed) +{ + AVFrame *decoded_frame; + AVCodecContext *avctx = ist->dec_ctx; + int ret, err = 0; + AVRational decoded_frame_tb; + + if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc())) + return AVERROR(ENOMEM); + if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc())) + return AVERROR(ENOMEM); + decoded_frame = ist->decoded_frame; + + update_benchmark(NULL); + ret = decode(avctx, decoded_frame, got_output, pkt); + update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index); + if (ret < 0) + *decode_failed = 1; + + if (ret >= 0 && avctx->sample_rate <= 0) { + av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate); + ret = AVERROR_INVALIDDATA; + } + + if (ret != AVERROR_EOF) + check_decode_result(ist, got_output, ret); + + if (!*got_output || ret < 0) + return ret; + + ist->samples_decoded += decoded_frame->nb_samples; + ist->frames_decoded++; + + /* increment next_dts to use for the case where the input stream does not + have timestamps or there are multiple frames in the packet */ + ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) / + avctx->sample_rate; + ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) / + avctx->sample_rate; + + if (decoded_frame->pts != AV_NOPTS_VALUE) { + decoded_frame_tb = ist->st->time_base; + } else if (pkt && pkt->pts != AV_NOPTS_VALUE) { + decoded_frame->pts = pkt->pts; + decoded_frame_tb = ist->st->time_base; + }else { + decoded_frame->pts = ist->dts; + decoded_frame_tb = AV_TIME_BASE_Q; + } + if (decoded_frame->pts != AV_NOPTS_VALUE) + decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts, + (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last, + (AVRational){1, avctx->sample_rate}); + ist->nb_samples = decoded_frame->nb_samples; + err = send_frame_to_filters(ist, decoded_frame); + + av_frame_unref(ist->filter_frame); + av_frame_unref(decoded_frame); + return err < 0 ? 
err : ret; +} + +static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, + int *decode_failed) +{ + AVFrame *decoded_frame; + int i, ret = 0, err = 0; + int64_t best_effort_timestamp; + int64_t dts = AV_NOPTS_VALUE; + AVPacket avpkt; + + // With fate-indeo3-2, we're getting 0-sized packets before EOF for some + // reason. This seems like a semi-critical bug. Don't trigger EOF, and + // skip the packet. + if (!eof && pkt && pkt->size == 0) + return 0; + + if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc())) + return AVERROR(ENOMEM); + if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc())) + return AVERROR(ENOMEM); + decoded_frame = ist->decoded_frame; + if (ist->dts != AV_NOPTS_VALUE) + dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base); + if (pkt) { + avpkt = *pkt; + avpkt.dts = dts; // ffmpeg.c probably shouldn't do this + } + + // The old code used to set dts on the drain packet, which does not work + // with the new API anymore. + if (eof) { + void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0])); + if (!new) + return AVERROR(ENOMEM); + ist->dts_buffer = new; + ist->dts_buffer[ist->nb_dts_buffer++] = dts; + } + + update_benchmark(NULL); + ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL); + update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index); + if (ret < 0) + *decode_failed = 1; + + // The following line may be required in some cases where there is no parser + // or the parser does not has_b_frames correctly + if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) { + if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) { + ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames; + } else + av_log(ist->dec_ctx, AV_LOG_WARNING, + "video_delay is larger in decoder than demuxer %d > %d.\n" + "If you want to help, upload a sample " + "of this file to https://streams.videolan.org/upload/ " + "and contact the ffmpeg-devel mailing list. 
(ffmpeg-devel@ffmpeg.org)\n", + ist->dec_ctx->has_b_frames, + ist->st->codecpar->video_delay); + } + + if (ret != AVERROR_EOF) + check_decode_result(ist, got_output, ret); + + if (*got_output && ret >= 0) { + if (ist->dec_ctx->width != decoded_frame->width || + ist->dec_ctx->height != decoded_frame->height || + ist->dec_ctx->pix_fmt != decoded_frame->format) { + av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n", + decoded_frame->width, + decoded_frame->height, + decoded_frame->format, + ist->dec_ctx->width, + ist->dec_ctx->height, + ist->dec_ctx->pix_fmt); + } + } + + if (!*got_output || ret < 0) + return ret; + + if(ist->top_field_first>=0) + decoded_frame->top_field_first = ist->top_field_first; + + ist->frames_decoded++; + + if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) { + err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame); + if (err < 0) + goto fail; + } + ist->hwaccel_retrieved_pix_fmt = decoded_frame->format; + + best_effort_timestamp= decoded_frame->best_effort_timestamp; + *duration_pts = decoded_frame->pkt_duration; + + if (ist->framerate.num) + best_effort_timestamp = ist->cfr_next_pts++; + + if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) { + best_effort_timestamp = ist->dts_buffer[0]; + + for (i = 0; i < ist->nb_dts_buffer - 1; i++) + ist->dts_buffer[i] = ist->dts_buffer[i + 1]; + ist->nb_dts_buffer--; + } + + if(best_effort_timestamp != AV_NOPTS_VALUE) { + int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q); + + if (ts != AV_NOPTS_VALUE) + ist->next_pts = ist->pts = ts; + } + + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video " + "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n", + ist->st->index, av_ts2str(decoded_frame->pts), + av_ts2timestr(decoded_frame->pts, &ist->st->time_base), + best_effort_timestamp, + av_ts2timestr(best_effort_timestamp, &ist->st->time_base), + decoded_frame->key_frame, decoded_frame->pict_type, + ist->st->time_base.num, ist->st->time_base.den); + } + + if (ist->st->sample_aspect_ratio.num) + decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio; + + err = send_frame_to_filters(ist, decoded_frame); + +fail: + av_frame_unref(ist->filter_frame); + av_frame_unref(decoded_frame); + return err < 0 ? err : ret; +} + +static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, + int *decode_failed) +{ + AVSubtitle subtitle; + int free_sub = 1; + int i, ret = avcodec_decode_subtitle2(ist->dec_ctx, + &subtitle, got_output, pkt); + + check_decode_result(NULL, got_output, ret); + + if (ret < 0 || !*got_output) { + *decode_failed = 1; + if (!pkt->size) + sub2video_flush(ist); + return ret; + } + + if (ist->fix_sub_duration) { + int end = 1; + if (ist->prev_sub.got_output) { + end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts, + 1000, AV_TIME_BASE); + if (end < ist->prev_sub.subtitle.end_display_time) { + av_log(ist->dec_ctx, AV_LOG_DEBUG, + "Subtitle duration reduced from %"PRId32" to %d%s\n", + ist->prev_sub.subtitle.end_display_time, end, + end <= 0 ? 
", dropping it" : "");
+                ist->prev_sub.subtitle.end_display_time = end;
+            }
+        }
+        FFSWAP(int, *got_output, ist->prev_sub.got_output);
+        FFSWAP(int, ret, ist->prev_sub.ret);
+        FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
+        if (end <= 0)
+            goto out;
+    }
+
+    if (!*got_output)
+        return ret;
+
+    if (ist->sub2video.frame) {
+        sub2video_update(ist, INT64_MIN, &subtitle);
+    } else if (ist->nb_filters) {
+        if (!ist->sub2video.sub_queue)
+            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
+        if (!ist->sub2video.sub_queue)
+            exit_program(1);
+        if (!av_fifo_space(ist->sub2video.sub_queue)) {
+            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
+            if (ret < 0)
+                exit_program(1);
+        }
+        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
+        free_sub = 0;
+    }
+
+    if (!subtitle.num_rects)
+        goto out;
+
+    ist->frames_decoded++;
+
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+
+        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
+            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
+            continue;
+
+        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
+    }
+
+out:
+    if (free_sub)
+        avsubtitle_free(&subtitle);
+    return ret;
+}
+
+static int send_filter_eof(InputStream *ist)
+{
+    int i, ret;
+    /* TODO keep pts also in stream time base to avoid converting back */
+    int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
+                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
+
+    for (i = 0; i < ist->nb_filters; i++) {
+        ret = ifilter_send_eof(ist->filters[i], pts);
+        if (ret < 0)
+            return ret;
+    }
+    return 0;
+}
+
+/* pkt = NULL means EOF (needed to flush decoder buffers) */
+static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
+{
+    int ret = 0, i;
+    int repeating = 0;
+    int eof_reached = 0;
+
+    AVPacket avpkt;
+    if (!ist->saw_first_ts) {
+        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
+        ist->pts = 0;
+        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
+            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
+            ist->pts = ist->dts; // unused but better to set it to a value that's not totally wrong
+        }
+        ist->saw_first_ts = 1;
+    }
+
+    if (ist->next_dts == AV_NOPTS_VALUE)
+        ist->next_dts = ist->dts;
+    if (ist->next_pts == AV_NOPTS_VALUE)
+        ist->next_pts = ist->pts;
+
+    if (!pkt) {
+        /* EOF handling */
+        av_init_packet(&avpkt);
+        avpkt.data = NULL;
+        avpkt.size = 0;
+    } else {
+        avpkt = *pkt;
+    }
+
+    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
+        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
+        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
+            ist->next_pts = ist->pts = ist->dts;
+    }
+
+    // while we have more to decode or while the decoder did output something on EOF
+    while (ist->decoding_needed) {
+        int64_t duration_dts = 0;
+        int64_t duration_pts = 0;
+        int got_output = 0;
+        int decode_failed = 0;
+
+        ist->pts = ist->next_pts;
+        ist->dts = ist->next_dts;
+
+        switch (ist->dec_ctx->codec_type) {
+        case AVMEDIA_TYPE_AUDIO:
+            ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
+                                &decode_failed);
+            break;
+        case AVMEDIA_TYPE_VIDEO:
+            ret = decode_video (ist, repeating ? 
NULL : &avpkt, &got_output, &duration_pts, !pkt, + &decode_failed); + if (!repeating || !pkt || got_output) { + if (pkt && pkt->duration) { + duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); + } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) { + int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame; + duration_dts = ((int64_t)AV_TIME_BASE * + ist->dec_ctx->framerate.den * ticks) / + ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame; + } + + if(ist->dts != AV_NOPTS_VALUE && duration_dts) { + ist->next_dts += duration_dts; + }else + ist->next_dts = AV_NOPTS_VALUE; + } + + if (got_output) { + if (duration_pts > 0) { + ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q); + } else { + ist->next_pts += duration_dts; + } + } + break; + case AVMEDIA_TYPE_SUBTITLE: + if (repeating) + break; + ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed); + if (!pkt && ret >= 0) + ret = AVERROR_EOF; + break; + default: + return -1; + } + + if (ret == AVERROR_EOF) { + eof_reached = 1; + break; + } + + if (ret < 0) { + if (decode_failed) { + av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n", + ist->file_index, ist->st->index, av_err2str(ret)); + } else { + av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded " + "data for stream #%d:%d\n", ist->file_index, ist->st->index); + } + if (!decode_failed || exit_on_error) + exit_program(1); + break; + } + + if (got_output) + ist->got_output = 1; + + if (!got_output) + break; + + // During draining, we might get multiple output frames in this loop. + // ffmpeg.c does not drain the filter chain on configuration changes, + // which means if we send multiple frames at once to the filters, and + // one of those frames changes configuration, the buffered frames will + // be lost. This can upset certain FATE tests. + // Decode only 1 frame per call on EOF to appease these FATE tests. + // The ideal solution would be to rewrite decoding to use the new + // decoding API in a better way. + if (!pkt) + break; + + repeating = 1; + } + + /* after flushing, send an EOF on all the filter inputs attached to the stream */ + /* except when looping we need to flush but not to send an EOF */ + if (!pkt && ist->decoding_needed && eof_reached && !no_eof) { + int ret = send_filter_eof(ist); + if (ret < 0) { + av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n"); + exit_program(1); + } + } + + /* handle stream copy */ + if (!ist->decoding_needed && pkt) { + ist->dts = ist->next_dts; + switch (ist->dec_ctx->codec_type) { + case AVMEDIA_TYPE_AUDIO: + av_assert1(pkt->duration >= 0); + if (ist->dec_ctx->sample_rate) { + ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) / + ist->dec_ctx->sample_rate; + } else { + ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); + } + break; + case AVMEDIA_TYPE_VIDEO: + if (ist->framerate.num) { + // TODO: Remove work-around for c99-to-c89 issue 7 + AVRational time_base_q = AV_TIME_BASE_Q; + int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate)); + ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q); + } else if (pkt->duration) { + ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); + } else if(ist->dec_ctx->framerate.num != 0) { + int ticks= av_stream_get_parser(ist->st) ? 
av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame; + ist->next_dts += ((int64_t)AV_TIME_BASE * + ist->dec_ctx->framerate.den * ticks) / + ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame; + } + break; + } + ist->pts = ist->dts; + ist->next_pts = ist->next_dts; + } + for (i = 0; i < nb_output_streams; i++) { + OutputStream *ost = output_streams[i]; + + if (!check_output_constraints(ist, ost) || ost->encoding_needed) + continue; + + do_streamcopy(ist, ost, pkt); + } + + return !eof_reached; +} + +static void print_sdp(void) +{ + char sdp[16384]; + int i; + int j; + AVIOContext *sdp_pb; + AVFormatContext **avc; + + for (i = 0; i < nb_output_files; i++) { + if (!output_files[i]->header_written) + return; + } + + avc = av_malloc_array(nb_output_files, sizeof(*avc)); + if (!avc) + exit_program(1); + for (i = 0, j = 0; i < nb_output_files; i++) { + if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) { + avc[j] = output_files[i]->ctx; + j++; + } + } + + if (!j) + goto fail; + + av_sdp_create(avc, j, sdp, sizeof(sdp)); + + if (!sdp_filename) { + printf("SDP:\n%s\n", sdp); + fflush(stdout); + } else { + if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) { + av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename); + } else { + avio_print(sdp_pb, sdp); + avio_closep(&sdp_pb); + av_freep(&sdp_filename); + } + } + +fail: + av_freep(&avc); +} + +static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts) +{ + InputStream *ist = s->opaque; + const enum AVPixelFormat *p; + int ret; + + for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) { + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p); + const AVCodecHWConfig *config = NULL; + int i; + + if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) + break; + + if (ist->hwaccel_id == HWACCEL_GENERIC || + ist->hwaccel_id == HWACCEL_AUTO) { + for (i = 0;; i++) { + config = avcodec_get_hw_config(s->codec, i); + if (!config) + break; + if (!(config->methods & + AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)) + continue; + if (config->pix_fmt == *p) + break; + } + } + if (config) { + if (config->device_type != ist->hwaccel_device_type) { + // Different hwaccel offered, ignore. + continue; + } + + ret = hwaccel_decode_init(s); + if (ret < 0) { + if (ist->hwaccel_id == HWACCEL_GENERIC) { + av_log(NULL, AV_LOG_FATAL, + "%s hwaccel requested for input stream #%d:%d, " + "but cannot be initialized.\n", + av_hwdevice_get_type_name(config->device_type), + ist->file_index, ist->st->index); + return AV_PIX_FMT_NONE; + } + continue; + } + } else { + const HWAccel *hwaccel = NULL; + int i; + for (i = 0; hwaccels[i].name; i++) { + if (hwaccels[i].pix_fmt == *p) { + hwaccel = &hwaccels[i]; + break; + } + } + if (!hwaccel) { + // No hwaccel supporting this pixfmt. + continue; + } + if (hwaccel->id != ist->hwaccel_id) { + // Does not match requested hwaccel. 
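+                // (Note: reaching this branch means the generic
+                // AV_CODEC_HW_CONFIG probe above found no hw-device config
+                // for this format, so the legacy hwaccels[] table is matched
+                // by pix_fmt instead; an entry whose id differs from the one
+                // the user requested is skipped.)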
+ continue; + } + + ret = hwaccel->init(s); + if (ret < 0) { + av_log(NULL, AV_LOG_FATAL, + "%s hwaccel requested for input stream #%d:%d, " + "but cannot be initialized.\n", hwaccel->name, + ist->file_index, ist->st->index); + return AV_PIX_FMT_NONE; + } + } + + if (ist->hw_frames_ctx) { + s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx); + if (!s->hw_frames_ctx) + return AV_PIX_FMT_NONE; + } + + ist->hwaccel_pix_fmt = *p; + break; + } + + return *p; +} + +static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags) +{ + InputStream *ist = s->opaque; + + if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt) + return ist->hwaccel_get_buffer(s, frame, flags); + + return avcodec_default_get_buffer2(s, frame, flags); +} + +static int init_input_stream(int ist_index, char *error, int error_len) +{ + int ret; + InputStream *ist = input_streams[ist_index]; + + if (ist->decoding_needed) { + AVCodec *codec = ist->dec; + if (!codec) { + snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d", + avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index); + return AVERROR(EINVAL); + } + + ist->dec_ctx->opaque = ist; + ist->dec_ctx->get_format = get_format; + ist->dec_ctx->get_buffer2 = get_buffer; + ist->dec_ctx->thread_safe_callbacks = 1; + + av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0); + if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE && + (ist->decoding_needed & DECODING_FOR_OST)) { + av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE); + if (ist->decoding_needed & DECODING_FOR_FILTER) + av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n"); + } + + av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE); + + /* Useful for subtitles retiming by lavf (FIXME), skipping samples in + * audio, and video decoders such as cuvid or mediacodec */ + ist->dec_ctx->pkt_timebase = ist->st->time_base; + + if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0)) + av_dict_set(&ist->decoder_opts, "threads", "auto", 0); + /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. 
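+         (With frame threading the decoder delays output until its thread
+         pipeline fills, so a single-packet cover-art stream would not be
+         decoded until flush; a single decoding thread is forced below
+         instead.)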
*/ + if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC) + av_dict_set(&ist->decoder_opts, "threads", "1", 0); + + ret = hw_device_setup_for_decode(ist); + if (ret < 0) { + snprintf(error, error_len, "Device setup failed for " + "decoder on input stream #%d:%d : %s", + ist->file_index, ist->st->index, av_err2str(ret)); + return ret; + } + + if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) { + if (ret == AVERROR_EXPERIMENTAL) + abort_codec_experimental(codec, 0); + + snprintf(error, error_len, + "Error while opening decoder for input stream " + "#%d:%d : %s", + ist->file_index, ist->st->index, av_err2str(ret)); + return ret; + } + assert_avoptions(ist->decoder_opts); + } + + ist->next_pts = AV_NOPTS_VALUE; + ist->next_dts = AV_NOPTS_VALUE; + + return 0; +} + +static InputStream *get_input_stream(OutputStream *ost) +{ + if (ost->source_index >= 0) + return input_streams[ost->source_index]; + return NULL; +} + +static int compare_int64(const void *a, const void *b) +{ + return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b); +} + +/* open the muxer when all the streams are initialized */ +static int check_init_output_file(OutputFile *of, int file_index) +{ + int ret, i; + + for (i = 0; i < of->ctx->nb_streams; i++) { + OutputStream *ost = output_streams[of->ost_index + i]; + if (!ost->initialized) + return 0; + } + + of->ctx->interrupt_callback = int_cb; + + ret = avformat_write_header(of->ctx, &of->opts); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, + "Could not write header for output file #%d " + "(incorrect codec parameters ?): %s\n", + file_index, av_err2str(ret)); + return ret; + } + //assert_avoptions(of->opts); + of->header_written = 1; + + av_dump_format(of->ctx, file_index, of->ctx->url, 1); + + if (sdp_filename || want_sdp) + print_sdp(); + + /* flush the muxing queues */ + for (i = 0; i < of->ctx->nb_streams; i++) { + OutputStream *ost = output_streams[of->ost_index + i]; + + /* try to improve muxing time_base (only possible if nothing has been written yet) */ + if (!av_fifo_size(ost->muxing_queue)) + ost->mux_timebase = ost->st->time_base; + + while (av_fifo_size(ost->muxing_queue)) { + AVPacket pkt; + av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL); + write_packet(of, &pkt, ost, 1); + } + } + + return 0; +} + +static int init_output_bsfs(OutputStream *ost) +{ + AVBSFContext *ctx = ost->bsf_ctx; + int ret; + + if (!ctx) + return 0; + + ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar); + if (ret < 0) + return ret; + + ctx->time_base_in = ost->st->time_base; + + ret = av_bsf_init(ctx); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n", + ctx->filter->name); + return ret; + } + + ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out); + if (ret < 0) + return ret; + ost->st->time_base = ctx->time_base_out; + + return 0; +} + +static int init_output_stream_streamcopy(OutputStream *ost) +{ + OutputFile *of = output_files[ost->file_index]; + InputStream *ist = get_input_stream(ost); + AVCodecParameters *par_dst = ost->st->codecpar; + AVCodecParameters *par_src = ost->ref_par; + AVRational sar; + int i, ret; + uint32_t codec_tag = par_dst->codec_tag; + + av_assert0(ist && !ost->filter); + + ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar); + if (ret >= 0) + ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts); + if (ret < 0) { + av_log(NULL, AV_LOG_FATAL, + "Error setting up codec context options.\n"); + return ret; + } + + ret = 
avcodec_parameters_from_context(par_src, ost->enc_ctx); + if (ret < 0) { + av_log(NULL, AV_LOG_FATAL, + "Error getting reference codec parameters.\n"); + return ret; + } + + if (!codec_tag) { + unsigned int codec_tag_tmp; + if (!of->ctx->oformat->codec_tag || + av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id || + !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp)) + codec_tag = par_src->codec_tag; + } + + ret = avcodec_parameters_copy(par_dst, par_src); + if (ret < 0) + return ret; + + par_dst->codec_tag = codec_tag; + + if (!ost->frame_rate.num) + ost->frame_rate = ist->framerate; + ost->st->avg_frame_rate = ost->frame_rate; + + ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb); + if (ret < 0) + return ret; + + // copy timebase while removing common factors + if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) + ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1}); + + // copy estimated duration as a hint to the muxer + if (ost->st->duration <= 0 && ist->st->duration > 0) + ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base); + + // copy disposition + ost->st->disposition = ist->st->disposition; + + if (ist->st->nb_side_data) { + for (i = 0; i < ist->st->nb_side_data; i++) { + const AVPacketSideData *sd_src = &ist->st->side_data[i]; + uint8_t *dst_data; + + dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size); + if (!dst_data) + return AVERROR(ENOMEM); + memcpy(dst_data, sd_src->data, sd_src->size); + } + } + + if (ost->rotate_overridden) { + uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX, + sizeof(int32_t) * 9); + if (sd) + av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value); + } + + switch (par_dst->codec_type) { + case AVMEDIA_TYPE_AUDIO: + if (audio_volume != 256) { + av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n"); + exit_program(1); + } + if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3) + par_dst->block_align= 0; + if(par_dst->codec_id == AV_CODEC_ID_AC3) + par_dst->block_align= 0; + break; + case AVMEDIA_TYPE_VIDEO: + if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option + sar = + av_mul_q(ost->frame_aspect_ratio, + (AVRational){ par_dst->height, par_dst->width }); + av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio " + "with stream copy may produce invalid files\n"); + } + else if (ist->st->sample_aspect_ratio.num) + sar = ist->st->sample_aspect_ratio; + else + sar = par_src->sample_aspect_ratio; + ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar; + ost->st->avg_frame_rate = ist->st->avg_frame_rate; + ost->st->r_frame_rate = ist->st->r_frame_rate; + break; + } + + ost->mux_timebase = ist->st->time_base; + + return 0; +} + +static void set_encoder_id(OutputFile *of, OutputStream *ost) +{ + AVDictionaryEntry *e; + + uint8_t *encoder_string; + int encoder_string_len; + int format_flags = 0; + int codec_flags = ost->enc_ctx->flags; + + if (av_dict_get(ost->st->metadata, "encoder", NULL, 0)) + return; + + e = av_dict_get(of->opts, "fflags", NULL, 0); + if (e) { + const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0); + if (!o) + return; + av_opt_eval_flags(of->ctx, o, e->value, &format_flags); + } + e = av_dict_get(ost->encoder_opts, "flags", 
NULL, 0); + if (e) { + const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0); + if (!o) + return; + av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags); + } + + encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2; + encoder_string = av_mallocz(encoder_string_len); + if (!encoder_string) + exit_program(1); + + if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT)) + av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len); + else + av_strlcpy(encoder_string, "Lavc ", encoder_string_len); + av_strlcat(encoder_string, ost->enc->name, encoder_string_len); + av_dict_set(&ost->st->metadata, "encoder", encoder_string, + AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE); +} + +static void parse_forced_key_frames(char *kf, OutputStream *ost, + AVCodecContext *avctx) +{ + char *p; + int n = 1, i, size, index = 0; + int64_t t, *pts; + + for (p = kf; *p; p++) + if (*p == ',') + n++; + size = n; + pts = av_malloc_array(size, sizeof(*pts)); + if (!pts) { + av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n"); + exit_program(1); + } + + p = kf; + for (i = 0; i < n; i++) { + char *next = strchr(p, ','); + + if (next) + *next++ = 0; + + if (!memcmp(p, "chapters", 8)) { + + AVFormatContext *avf = output_files[ost->file_index]->ctx; + int j; + + if (avf->nb_chapters > INT_MAX - size || + !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1, + sizeof(*pts)))) { + av_log(NULL, AV_LOG_FATAL, + "Could not allocate forced key frames array.\n"); + exit_program(1); + } + t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0; + t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base); + + for (j = 0; j < avf->nb_chapters; j++) { + AVChapter *c = avf->chapters[j]; + av_assert1(index < size); + pts[index++] = av_rescale_q(c->start, c->time_base, + avctx->time_base) + t; + } + + } else { + + t = parse_time_or_die("force_key_frames", p, 1); + av_assert1(index < size); + pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base); + + } + + p = next; + } + + av_assert0(index == size); + qsort(pts, size, sizeof(*pts), compare_int64); + ost->forced_kf_count = size; + ost->forced_kf_pts = pts; +} + +static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base) +{ + InputStream *ist = get_input_stream(ost); + AVCodecContext *enc_ctx = ost->enc_ctx; + AVFormatContext *oc; + + if (ost->enc_timebase.num > 0) { + enc_ctx->time_base = ost->enc_timebase; + return; + } + + if (ost->enc_timebase.num < 0) { + if (ist) { + enc_ctx->time_base = ist->st->time_base; + return; + } + + oc = output_files[ost->file_index]->ctx; + av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n"); + } + + enc_ctx->time_base = default_time_base; +} + +static int init_output_stream_encode(OutputStream *ost) +{ + InputStream *ist = get_input_stream(ost); + AVCodecContext *enc_ctx = ost->enc_ctx; + AVCodecContext *dec_ctx = NULL; + AVFormatContext *oc = output_files[ost->file_index]->ctx; + int j, ret; + + set_encoder_id(output_files[ost->file_index], ost); + + // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other + // hand, the legacy API makes demuxers set "rotate" metadata entries, + // which have to be filtered out to prevent leaking them to output files. 
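+    // (av_dict_set() with a NULL value deletes the entry; rotation still
+    // reaches the output as AV_PKT_DATA_DISPLAYMATRIX side data, which is
+    // copied over from the input stream later during output-stream init.)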
+ av_dict_set(&ost->st->metadata, "rotate", NULL, 0); + + if (ist) { + ost->st->disposition = ist->st->disposition; + + dec_ctx = ist->dec_ctx; + + enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location; + } else { + for (j = 0; j < oc->nb_streams; j++) { + AVStream *st = oc->streams[j]; + if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type) + break; + } + if (j == oc->nb_streams) + if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || + ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) + ost->st->disposition = AV_DISPOSITION_DEFAULT; + } + + if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { + if (!ost->frame_rate.num) + ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter); + if (ist && !ost->frame_rate.num) + ost->frame_rate = ist->framerate; + if (ist && !ost->frame_rate.num) + ost->frame_rate = ist->st->r_frame_rate; + if (ist && !ost->frame_rate.num) { + ost->frame_rate = (AVRational){25, 1}; + av_log(NULL, AV_LOG_WARNING, + "No information " + "about the input framerate is available. Falling " + "back to a default value of 25fps for output stream #%d:%d. Use the -r option " + "if you want a different framerate.\n", + ost->file_index, ost->index); + } + + if (ost->enc->supported_framerates && !ost->force_fps) { + int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); + ost->frame_rate = ost->enc->supported_framerates[idx]; + } + // reduce frame rate for mpeg4 to be within the spec limits + if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) { + av_reduce(&ost->frame_rate.num, &ost->frame_rate.den, + ost->frame_rate.num, ost->frame_rate.den, 65535); + } + } + + switch (enc_ctx->codec_type) { + case AVMEDIA_TYPE_AUDIO: + enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter); + if (dec_ctx) + enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample, + av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3); + enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter); + enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter); + enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter); + + init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate)); + break; + + case AVMEDIA_TYPE_VIDEO: + init_encoder_time_base(ost, av_inv_q(ost->frame_rate)); + + if (!(enc_ctx->time_base.num && enc_ctx->time_base.den)) + enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter); + if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH + && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){ + av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n" + "Please consider specifying a lower framerate, a different muxer or -vsync 2\n"); + } + + enc_ctx->width = av_buffersink_get_w(ost->filter->filter); + enc_ctx->height = av_buffersink_get_h(ost->filter->filter); + enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio = + ost->frame_aspect_ratio.num ? 
// overridden by the -aspect cli option + av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) : + av_buffersink_get_sample_aspect_ratio(ost->filter->filter); + + enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter); + if (dec_ctx) + enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample, + av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth); + + enc_ctx->framerate = ost->frame_rate; + + ost->st->avg_frame_rate = ost->frame_rate; + + if (!dec_ctx || + enc_ctx->width != dec_ctx->width || + enc_ctx->height != dec_ctx->height || + enc_ctx->pix_fmt != dec_ctx->pix_fmt) { + enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample; + } + + if (ost->top_field_first == 0) { + enc_ctx->field_order = AV_FIELD_BB; + } else if (ost->top_field_first == 1) { + enc_ctx->field_order = AV_FIELD_TT; + } + + if (ost->forced_keyframes) { + if (!strncmp(ost->forced_keyframes, "expr:", 5)) { + ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5, + forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, + "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5); + return ret; + } + ost->forced_keyframes_expr_const_values[FKF_N] = 0; + ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0; + ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN; + ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN; + + // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes', + // parse it only for static kf timings + } else if(strncmp(ost->forced_keyframes, "source", 6)) { + parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx); + } + } + break; + case AVMEDIA_TYPE_SUBTITLE: + enc_ctx->time_base = AV_TIME_BASE_Q; + if (!enc_ctx->width) { + enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width; + enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height; + } + break; + case AVMEDIA_TYPE_DATA: + break; + default: + abort(); + break; + } + + ost->mux_timebase = enc_ctx->time_base; + + return 0; +} + +static int yang_init_output_stream(OutputFile *oc,OutputStream *ost, char *error, int error_len) +{ + int ret = 0; + + if (ost->encoding_needed) { + AVCodec *codec = ost->enc; + AVCodecContext *dec = NULL; + InputStream *ist; + + ret = init_output_stream_encode(ost); + if (ret < 0) + return ret; + + if ((ist = get_input_stream(ost))) + dec = ist->dec_ctx; + if (dec && dec->subtitle_header) { + /* ASS code assumes this buffer is null terminated so add extra byte. 
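+            (av_mallocz() zero-fills the allocation, so the extra byte stays
+            0 and serves as the terminator; only subtitle_header_size bytes
+            are memcpy'd in.)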
*/ + ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1); + if (!ost->enc_ctx->subtitle_header) + return AVERROR(ENOMEM); + memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); + ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size; + } + if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0)) + av_dict_set(&ost->encoder_opts, "threads", "auto", 0); + if (ost->enc->type == AVMEDIA_TYPE_AUDIO && + !codec->defaults && + !av_dict_get(ost->encoder_opts, "b", NULL, 0) && + !av_dict_get(ost->encoder_opts, "ab", NULL, 0)) + av_dict_set(&ost->encoder_opts, "b", "128000", 0); + + ret = hw_device_setup_for_encode(ost); + if (ret < 0) { + snprintf(error, error_len, "Device setup failed for " + "encoder on output stream #%d:%d : %s", + ost->file_index, ost->index, av_err2str(ret)); + return ret; + } + + if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) { + int input_props = 0, output_props = 0; + AVCodecDescriptor const *input_descriptor = + avcodec_descriptor_get(dec->codec_id); + AVCodecDescriptor const *output_descriptor = + avcodec_descriptor_get(ost->enc_ctx->codec_id); + if (input_descriptor) + input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB); + if (output_descriptor) + output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB); + if (input_props && output_props && input_props != output_props) { + snprintf(error, error_len, + "Subtitle encoding currently only possible from text to text " + "or bitmap to bitmap"); + return AVERROR_INVALIDDATA; + } + } + + if(ost->enc_ctx->codec_id==AV_CODEC_ID_H264&&memcmp(oc->ctx->filename,"webrtc",6)==0){ + ost->enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P; + av_dict_set(&ost->encoder_opts, "preset", "superfast", 0); + av_dict_set(&ost->encoder_opts, "tune", "zerolatency", 0); + av_dict_set(&ost->encoder_opts, "profile", "baseline", 0); + + ost->enc_ctx->profile = FF_PROFILE_H264_CONSTRAINED_BASELINE;//:FF_PROFILE_HEVC_MAIN; + ost->enc_ctx->level=31; + ost->enc_ctx->max_b_frames=0; + ost->enc_ctx->has_b_frames=0; + } + + if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) { + if (ret == AVERROR_EXPERIMENTAL) + abort_codec_experimental(codec, 1); + snprintf(error, error_len, + "Error while opening encoder for output stream #%d:%d - " + "maybe incorrect parameters such as bit_rate, rate, width or height", + ost->file_index, ost->index); + return ret; + } + if (ost->enc->type == AVMEDIA_TYPE_AUDIO && + !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) + av_buffersink_set_frame_size(ost->filter->filter, + ost->enc_ctx->frame_size); + assert_avoptions(ost->encoder_opts); + if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 && + ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */) + av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." + " It takes bits/s as argument, not kbits/s\n"); + + ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx); + if (ret < 0) { + av_log(NULL, AV_LOG_FATAL, + "Error initializing the output stream codec context.\n"); + exit_program(1); + } + /* + * FIXME: ost->st->codec should't be needed here anymore. 
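+     * (avcodec_copy_context() below mirrors enc_ctx into the deprecated
+     * public AVStream.codec field for code that still reads it.)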
+ */ + ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx); + if (ret < 0) + return ret; + + if (ost->enc_ctx->nb_coded_side_data) { + int i; + + for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) { + const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i]; + uint8_t *dst_data; + + dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size); + if (!dst_data) + return AVERROR(ENOMEM); + memcpy(dst_data, sd_src->data, sd_src->size); + } + } + + /* + * Add global input side data. For now this is naive, and copies it + * from the input stream's global side data. All side data should + * really be funneled over AVFrame and libavfilter, then added back to + * packet side data, and then potentially using the first packet for + * global side data. + */ + if (ist) { + int i; + for (i = 0; i < ist->st->nb_side_data; i++) { + AVPacketSideData *sd = &ist->st->side_data[i]; + if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) { + uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size); + if (!dst) + return AVERROR(ENOMEM); + memcpy(dst, sd->data, sd->size); + if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX) + av_display_rotation_set((uint32_t *)dst, 0); + } + } + } + + // copy timebase while removing common factors + if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) + ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1}); + + // copy estimated duration as a hint to the muxer + if (ost->st->duration <= 0 && ist && ist->st->duration > 0) + ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base); + + ost->st->codec->codec= ost->enc_ctx->codec; + } else if (ost->stream_copy) { + ret = init_output_stream_streamcopy(ost); + if (ret < 0) + return ret; + } + + // parse user provided disposition, and update stream values + if (ost->disposition) { + static const AVOption opts[] = { + { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" }, + { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" }, + { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" }, + { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" }, + { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" }, + { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" }, + { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" }, + { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" }, + { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" }, + { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" }, + { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" }, + { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" }, + { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" }, + { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" }, + { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" }, + { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" }, + { 
NULL }, + }; + static const AVClass class = { + .class_name = "", + .item_name = av_default_item_name, + .option = opts, + .version = LIBAVUTIL_VERSION_INT, + }; + const AVClass *pclass = &class; + + ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition); + if (ret < 0) + return ret; + } + + /* initialize bitstream filters for the output stream + * needs to be done here, because the codec id for streamcopy is not + * known until now */ + ret = init_output_bsfs(ost); + if (ret < 0) + return ret; + + ost->initialized = 1; + + ret = check_init_output_file(output_files[ost->file_index], ost->file_index); + if (ret < 0) + return ret; + + return ret; +} +static int init_output_stream(OutputStream *ost, char *error, int error_len) +{ + int ret = 0; + + if (ost->encoding_needed) { + AVCodec *codec = ost->enc; + AVCodecContext *dec = NULL; + InputStream *ist; + + ret = init_output_stream_encode(ost); + if (ret < 0) + return ret; + + if ((ist = get_input_stream(ost))) + dec = ist->dec_ctx; + if (dec && dec->subtitle_header) { + /* ASS code assumes this buffer is null terminated so add extra byte. */ + ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1); + if (!ost->enc_ctx->subtitle_header) + return AVERROR(ENOMEM); + memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); + ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size; + } + if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0)) + av_dict_set(&ost->encoder_opts, "threads", "auto", 0); + if (ost->enc->type == AVMEDIA_TYPE_AUDIO && + !codec->defaults && + !av_dict_get(ost->encoder_opts, "b", NULL, 0) && + !av_dict_get(ost->encoder_opts, "ab", NULL, 0)) + av_dict_set(&ost->encoder_opts, "b", "128000", 0); + + ret = hw_device_setup_for_encode(ost); + if (ret < 0) { + snprintf(error, error_len, "Device setup failed for " + "encoder on output stream #%d:%d : %s", + ost->file_index, ost->index, av_err2str(ret)); + return ret; + } + + if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) { + int input_props = 0, output_props = 0; + AVCodecDescriptor const *input_descriptor = + avcodec_descriptor_get(dec->codec_id); + AVCodecDescriptor const *output_descriptor = + avcodec_descriptor_get(ost->enc_ctx->codec_id); + if (input_descriptor) + input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB); + if (output_descriptor) + output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB); + if (input_props && output_props && input_props != output_props) { + snprintf(error, error_len, + "Subtitle encoding currently only possible from text to text " + "or bitmap to bitmap"); + return AVERROR_INVALIDDATA; + } + } + + if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) { + if (ret == AVERROR_EXPERIMENTAL) + abort_codec_experimental(codec, 1); + snprintf(error, error_len, + "Error while opening encoder for output stream #%d:%d - " + "maybe incorrect parameters such as bit_rate, rate, width or height", + ost->file_index, ost->index); + return ret; + } + if (ost->enc->type == AVMEDIA_TYPE_AUDIO && + !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) + av_buffersink_set_frame_size(ost->filter->filter, + ost->enc_ctx->frame_size); + assert_avoptions(ost->encoder_opts); + if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 && + ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */) + 
av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." + " It takes bits/s as argument, not kbits/s\n"); + + ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx); + if (ret < 0) { + av_log(NULL, AV_LOG_FATAL, + "Error initializing the output stream codec context.\n"); + exit_program(1); + } + /* + * FIXME: ost->st->codec should't be needed here anymore. + */ + ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx); + if (ret < 0) + return ret; + + if (ost->enc_ctx->nb_coded_side_data) { + int i; + + for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) { + const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i]; + uint8_t *dst_data; + + dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size); + if (!dst_data) + return AVERROR(ENOMEM); + memcpy(dst_data, sd_src->data, sd_src->size); + } + } + + /* + * Add global input side data. For now this is naive, and copies it + * from the input stream's global side data. All side data should + * really be funneled over AVFrame and libavfilter, then added back to + * packet side data, and then potentially using the first packet for + * global side data. + */ + if (ist) { + int i; + for (i = 0; i < ist->st->nb_side_data; i++) { + AVPacketSideData *sd = &ist->st->side_data[i]; + if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) { + uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size); + if (!dst) + return AVERROR(ENOMEM); + memcpy(dst, sd->data, sd->size); + if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX) + av_display_rotation_set((uint32_t *)dst, 0); + } + } + } + + // copy timebase while removing common factors + if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) + ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1}); + + // copy estimated duration as a hint to the muxer + if (ost->st->duration <= 0 && ist && ist->st->duration > 0) + ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base); + + ost->st->codec->codec= ost->enc_ctx->codec; + } else if (ost->stream_copy) { + ret = init_output_stream_streamcopy(ost); + if (ret < 0) + return ret; + } + + // parse user provided disposition, and update stream values + if (ost->disposition) { + static const AVOption opts[] = { + { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" }, + { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" }, + { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" }, + { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" }, + { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" }, + { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" }, + { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" }, + { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" }, + { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" }, + { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" }, + { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" }, + { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" }, + { "captions" , NULL, 0, 
AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" }, + { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" }, + { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" }, + { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" }, + { NULL }, + }; + static const AVClass class = { + .class_name = "", + .item_name = av_default_item_name, + .option = opts, + .version = LIBAVUTIL_VERSION_INT, + }; + const AVClass *pclass = &class; + + ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition); + if (ret < 0) + return ret; + } + + /* initialize bitstream filters for the output stream + * needs to be done here, because the codec id for streamcopy is not + * known until now */ + ret = init_output_bsfs(ost); + if (ret < 0) + return ret; + + ost->initialized = 1; + + ret = check_init_output_file(output_files[ost->file_index], ost->file_index); + if (ret < 0) + return ret; + + return ret; +} +static void report_new_stream(int input_index, AVPacket *pkt) +{ + InputFile *file = input_files[input_index]; + AVStream *st = file->ctx->streams[pkt->stream_index]; + + if (pkt->stream_index < file->nb_streams_warn) + return; + av_log(file->ctx, AV_LOG_WARNING, + "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n", + av_get_media_type_string(st->codecpar->codec_type), + input_index, pkt->stream_index, + pkt->pos, av_ts2timestr(pkt->dts, &st->time_base)); + file->nb_streams_warn = pkt->stream_index + 1; +} + +static int transcode_init(void) +{ + int ret = 0, i, j, k; + AVFormatContext *oc; + OutputStream *ost; + InputStream *ist; + char error[1024] = {0}; + + for (i = 0; i < nb_filtergraphs; i++) { + FilterGraph *fg = filtergraphs[i]; + for (j = 0; j < fg->nb_outputs; j++) { + OutputFilter *ofilter = fg->outputs[j]; + if (!ofilter->ost || ofilter->ost->source_index >= 0) + continue; + if (fg->nb_inputs != 1) + continue; + for (k = nb_input_streams-1; k >= 0 ; k--) + if (fg->inputs[0]->ist == input_streams[k]) + break; + ofilter->ost->source_index = k; + } + } + + /* init framerate emulation */ + for (i = 0; i < nb_input_files; i++) { + InputFile *ifile = input_files[i]; + if (ifile->rate_emu) + for (j = 0; j < ifile->nb_streams; j++) + input_streams[j + ifile->ist_index]->start = av_gettime_relative(); + } + + /* init input streams */ + for (i = 0; i < nb_input_streams; i++) + if ((ret = init_input_stream(i, error, sizeof(error))) < 0) { + for (i = 0; i < nb_output_streams; i++) { + ost = output_streams[i]; + avcodec_close(ost->enc_ctx); + } + goto dump_format; + } + + /* open each encoder */ + for (i = 0; i < nb_output_streams; i++) { + // skip streams fed from filtergraphs until we have a frame for them + if (output_streams[i]->filter) + continue; + + ret = init_output_stream(output_streams[i], error, sizeof(error)); + if (ret < 0) + goto dump_format; + } + + /* discard unused programs */ + for (i = 0; i < nb_input_files; i++) { + InputFile *ifile = input_files[i]; + for (j = 0; j < ifile->ctx->nb_programs; j++) { + AVProgram *p = ifile->ctx->programs[j]; + int discard = AVDISCARD_ALL; + + for (k = 0; k < p->nb_stream_indexes; k++) + if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) { + discard = AVDISCARD_DEFAULT; + break; + } + p->discard = discard; + } + } + + /* write headers for files with no streams */ + for (i = 0; i < nb_output_files; i++) { + oc = output_files[i]->ctx; + if (oc->oformat->flags & 
AVFMT_NOSTREAMS && oc->nb_streams == 0) { + ret = check_init_output_file(output_files[i], i); + if (ret < 0) + goto dump_format; + } + } + + dump_format: + /* dump the stream mapping */ + av_log(NULL, AV_LOG_INFO, "Stream mapping:\n"); + for (i = 0; i < nb_input_streams; i++) { + ist = input_streams[i]; + + for (j = 0; j < ist->nb_filters; j++) { + if (!filtergraph_is_simple(ist->filters[j]->graph)) { + av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s", + ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?", + ist->filters[j]->name); + if (nb_filtergraphs > 1) + av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index); + av_log(NULL, AV_LOG_INFO, "\n"); + } + } + } + + for (i = 0; i < nb_output_streams; i++) { + ost = output_streams[i]; + + if (ost->attachment_filename) { + /* an attached file */ + av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n", + ost->attachment_filename, ost->file_index, ost->index); + continue; + } + + if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) { + /* output from a complex graph */ + av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name); + if (nb_filtergraphs > 1) + av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index); + + av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index, + ost->index, ost->enc ? ost->enc->name : "?"); + continue; + } + + av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d", + input_streams[ost->source_index]->file_index, + input_streams[ost->source_index]->st->index, + ost->file_index, + ost->index); + if (ost->sync_ist != input_streams[ost->source_index]) + av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]", + ost->sync_ist->file_index, + ost->sync_ist->st->index); + if (ost->stream_copy) + av_log(NULL, AV_LOG_INFO, " (copy)"); + else { + const AVCodec *in_codec = input_streams[ost->source_index]->dec; + const AVCodec *out_codec = ost->enc; + const char *decoder_name = "?"; + const char *in_codec_name = "?"; + const char *encoder_name = "?"; + const char *out_codec_name = "?"; + const AVCodecDescriptor *desc; + + if (in_codec) { + decoder_name = in_codec->name; + desc = avcodec_descriptor_get(in_codec->id); + if (desc) + in_codec_name = desc->name; + if (!strcmp(decoder_name, in_codec_name)) + decoder_name = "native"; + } + + if (out_codec) { + encoder_name = out_codec->name; + desc = avcodec_descriptor_get(out_codec->id); + if (desc) + out_codec_name = desc->name; + if (!strcmp(encoder_name, out_codec_name)) + encoder_name = "native"; + } + + av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))", + in_codec_name, decoder_name, + out_codec_name, encoder_name); + } + av_log(NULL, AV_LOG_INFO, "\n"); + } + + if (ret) { + av_log(NULL, AV_LOG_ERROR, "%s\n", error); + return ret; + } + + atomic_store(&transcode_init_done, 1); + + return 0; +} + +/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */ +static int need_output(void) +{ + int i; + + for (i = 0; i < nb_output_streams; i++) { + OutputStream *ost = output_streams[i]; + OutputFile *of = output_files[ost->file_index]; + AVFormatContext *os = output_files[ost->file_index]->ctx; + + if (ost->finished || + (os->pb && avio_tell(os->pb) >= of->limit_filesize)) + continue; + if (ost->frame_number >= ost->max_frames) { + int j; + for (j = 0; j < of->ctx->nb_streams; j++) + close_output_stream(output_streams[of->ost_index + j]); + continue; + } + + return 1; + } + + return 0; +} + +/** + * Select the output stream to process. 
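+ *
+ * Streams are ranked by ost->st->cur_dts rescaled to AV_TIME_BASE_Q: a
+ * not-yet-initialized stream (whose inputs are not done) is returned
+ * immediately, otherwise the unfinished stream with the smallest DTS is
+ * chosen so that all outputs advance roughly in step.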
+ * + * @return selected output stream, or NULL if none available + */ +static OutputStream *choose_output(void) +{ + int i; + int64_t opts_min = INT64_MAX; + OutputStream *ost_min = NULL; + + for (i = 0; i < nb_output_streams; i++) { + OutputStream *ost = output_streams[i]; + int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN : + av_rescale_q(ost->st->cur_dts, ost->st->time_base, + AV_TIME_BASE_Q); + if (ost->st->cur_dts == AV_NOPTS_VALUE) + av_log(NULL, AV_LOG_DEBUG, + "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n", + ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished); + + if (!ost->initialized && !ost->inputs_done) + return ost; + + if (!ost->finished && opts < opts_min) { + opts_min = opts; + ost_min = ost->unavailable ? NULL : ost; + } + } + return ost_min; +} + +static void set_tty_echo(int on) +{ +#if HAVE_TERMIOS_H + struct termios tty; + if (tcgetattr(0, &tty) == 0) { + if (on) tty.c_lflag |= ECHO; + else tty.c_lflag &= ~ECHO; + tcsetattr(0, TCSANOW, &tty); + } +#endif +} + +static int check_keyboard_interaction(int64_t cur_time) +{ + int i, ret, key; + static int64_t last_time; + if (received_nb_signals) + return AVERROR_EXIT; + /* read_key() returns 0 on EOF */ + if(cur_time - last_time >= 100000 && !run_as_daemon){ + key = read_key(); + last_time = cur_time; + }else + key = -1; + if (key == 'q') + return AVERROR_EXIT; + if (key == '+') av_log_set_level(av_log_get_level()+10); + if (key == '-') av_log_set_level(av_log_get_level()-10); + if (key == 's') qp_hist ^= 1; + if (key == 'h'){ + if (do_hex_dump){ + do_hex_dump = do_pkt_dump = 0; + } else if(do_pkt_dump){ + do_hex_dump = 1; + } else + do_pkt_dump = 1; + av_log_set_level(AV_LOG_DEBUG); + } + if (key == 'c' || key == 'C'){ + char buf[4096], target[64], command[256], arg[256] = {0}; + double time; + int k, n = 0; + fprintf(stderr, "\nEnter command: |all