Skip to content

Commit

Permalink
Add audio to all streams (thanks to @PieVo)
Browse files Browse the repository at this point in the history
  • Loading branch information
roleoroleo committed Jul 25, 2020
1 parent 14c0209 commit 5a2048f
Show file tree
Hide file tree
Showing 15 changed files with 239 additions and 112 deletions.
3 changes: 2 additions & 1 deletion src/rRTSPServer/Makefile.rRTSPServer
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,8 @@ livemedia:
rRTSPServer_OBJS = src/rRTSPServer.$(OBJ) \
src/ByteStreamCBMemorySource.$(OBJ) src/H264VideoCBMemoryServerMediaSubsession.$(OBJ) \
src/CBMemoryServerMediaSubsession.$(OBJ) \
src/WAVAudioFifoSource.$(OBJ) src/WAVAudioFifoServerMediaSubsession.$(OBJ)
src/WAVAudioFifoSource.$(OBJ) src/WAVAudioFifoServerMediaSubsession.$(OBJ) \
src/DummySink.$(OBJ)

rRTSPServer$(EXE): $(rRTSPServer_OBJS) $(LOCAL_LIBS)
$(LINK)$@ $(CONSOLE_LINK_OPTS) $(rRTSPServer_OBJS) $(LIBS) -lpthread
Expand Down
29 changes: 29 additions & 0 deletions src/rRTSPServer/include/DummySink.hh
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

// DummySink: a MediaSink that receives incoming frames and discards them.
// Presumably used to keep data flowing through a source/replicator even when
// no real consumer is attached — TODO confirm against the caller in rRTSPServer.
class DummySink: public MediaSink {
public:
static DummySink* createNew(UsageEnvironment& env,
char const* streamId = NULL); // identifies the stream itself (optional)

private:
DummySink(UsageEnvironment& env, char const* streamId);
// called only by "createNew()"
virtual ~DummySink();

// Static trampoline passed to FramedSource::getNextFrame(); recovers the
// DummySink instance from "clientData" and forwards to the member overload.
static void afterGettingFrame(void* clientData, unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds);
void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime, unsigned durationInMicroseconds);

private:
// redefined virtual functions:
virtual Boolean continuePlaying();

private:
u_int8_t* fReceiveBuffer; // scratch buffer that discarded frame data is read into
char* fStreamId;          // strDup'ed copy of the stream id (may be NULL)
};

13 changes: 6 additions & 7 deletions src/rRTSPServer/include/WAVAudioFifoServerMediaSubsession.hh
Original file line number Diff line number Diff line change
Expand Up @@ -25,17 +25,15 @@ along with this library; if not, write to the Free Software Foundation, Inc.,
#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
#include "FileServerMediaSubsession.hh"
#endif
#include "StreamReplicator.hh"

class WAVAudioFifoServerMediaSubsession: public FileServerMediaSubsession{
class WAVAudioFifoServerMediaSubsession: public OnDemandServerMediaSubsession {
public:
static WAVAudioFifoServerMediaSubsession*
createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
Boolean convertToULaw = False);
// If "convertToULaw" is True, 16-bit audio streams are converted to
// 8-bit u-law audio prior to streaming.
createNew(UsageEnvironment& env, StreamReplicator* replicator, Boolean reuseFirstSource, Boolean convertToULaw);

protected:
WAVAudioFifoServerMediaSubsession(UsageEnvironment& env, char const* fileName,
WAVAudioFifoServerMediaSubsession(UsageEnvironment& env, StreamReplicator* replicator,
Boolean reuseFirstSource, Boolean convertToULaw);
// called only by createNew();
virtual ~WAVAudioFifoServerMediaSubsession();
Expand All @@ -54,7 +52,6 @@ protected: // redefined virtual functions
virtual float duration() const;

protected:
Boolean fConvertToULaw;

// The following parameters of the input stream are set after
// "createNewStreamSource" is called:
Expand All @@ -63,6 +60,8 @@ protected:
unsigned fSamplingFrequency;
unsigned fNumChannels;
float fFileDuration;
StreamReplicator* fReplicator;
Boolean fConvertToULaw;
};

#endif
13 changes: 5 additions & 8 deletions src/rRTSPServer/include/WAVAudioFifoSource.hh
Original file line number Diff line number Diff line change
Expand Up @@ -27,14 +27,11 @@ along with this library; if not, write to the Free Software Foundation, Inc.,
#include "AudioInputDevice.hh"
#endif

typedef enum {
WA_PCM = 0x01,
WA_PCMA = 0x06,
WA_PCMU = 0x07,
WA_IMA_ADPCM = 0x11,
WA_UNKNOWN
} WAV_AUDIO_FORMAT;

#define WA_PCM 0x01
#define WA_PCMA 0x06
#define WA_PCMU 0x07
#define WA_IMA_ADPCM 0x11
#define WA_UNKNOWN 0x12

class WAVAudioFifoSource: public AudioInputDevice {
public:
Expand Down
4 changes: 2 additions & 2 deletions src/rRTSPServer/include/rRTSPServer.h
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,8 @@
#define RESOLUTION_HIGH 1080
#define RESOLUTION_BOTH 1440

#define OUTPUT_BUFFER_SIZE_LOW 32768
#define OUTPUT_BUFFER_SIZE_HIGH 131072
#define OUTPUT_BUFFER_SIZE_LOW 49152
#define OUTPUT_BUFFER_SIZE_HIGH 196608

typedef struct
{
Expand Down
58 changes: 58 additions & 0 deletions src/rRTSPServer/src/DummySink.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"
#include "DummySink.hh"

// Implementation of "DummySink":

// Even though we're not going to be doing anything with the incoming data, we still need to receive it.
// Define the size of the buffer that we'll use:
#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 100000

DummySink* DummySink::createNew(UsageEnvironment& env, char const* streamId) {
  // Factory function: the constructor is private, so this is the only way
  // for client code to instantiate a DummySink.
  DummySink* sink = new DummySink(env, streamId);
  return sink;
}

DummySink::DummySink(UsageEnvironment& env, char const* streamId)
  : MediaSink(env),
    // Initializer order follows the member declaration order in DummySink.hh
    // (fReceiveBuffer, then fStreamId).
    fReceiveBuffer(new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE]),
    fStreamId(strDup(streamId)) {
  // All state is established in the initializer list above.
}

DummySink::~DummySink() {
  // Release the strDup'ed stream id and the scratch buffer.  strDup()
  // allocates with new[] (live555 convention), so delete[] is the matching
  // deallocation for both members.
  delete[] fStreamId;
  delete[] fReceiveBuffer;
}

void DummySink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime, unsigned durationInMicroseconds) {
DummySink* sink = (DummySink*)clientData;
sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
}

// Frame handler: the data already read into fReceiveBuffer is deliberately
// discarded.  Under a DEBUG build, the presentation time is logged for frames
// whose tv_sec lands on a 10-second boundary.
//
// NOTE(review): the former `#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1` (and
// its "comment out the following line" note) has been removed — nothing ever
// tested that macro; the guard below is `#ifdef DEBUG`, consistent with the
// rest of this project.  Keeping the dead macro suggested a knob that did
// nothing.
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
#ifdef DEBUG
  if ((int)presentationTime.tv_sec % 10 == 0) {
    // "%06u" of tv_usec (always < 1000000) is at most 6 chars + NUL.
    char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
    sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
    envir() << "DummySink Presentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
  }
#endif

  // Then continue, to request the next frame of data:
  continuePlaying();
}

Boolean DummySink::continuePlaying() {
  // Sanity check: with no upstream source attached there is nothing to pull.
  if (fSource == NULL) {
    return False;
  }

  // Schedule delivery of the next frame.  The static afterGettingFrame()
  // trampoline fires when data arrives; onSourceClosure() fires on EOF.
  fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
                        afterGettingFrame, this,
                        onSourceClosure, this);
  return True;
}
92 changes: 45 additions & 47 deletions src/rRTSPServer/src/WAVAudioFifoServerMediaSubsession.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,16 +25,14 @@ along with this library; if not, write to the Free Software Foundation, Inc.,
#include "SimpleRTPSink.hh"

WAVAudioFifoServerMediaSubsession* WAVAudioFifoServerMediaSubsession
::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
Boolean convertToULaw) {
return new WAVAudioFifoServerMediaSubsession(env, fileName,
reuseFirstSource, convertToULaw);
::createNew(UsageEnvironment& env, StreamReplicator* replicator, Boolean reuseFirstSource, Boolean convertToULaw) {
return new WAVAudioFifoServerMediaSubsession(env, replicator, reuseFirstSource, convertToULaw);
}

WAVAudioFifoServerMediaSubsession
::WAVAudioFifoServerMediaSubsession(UsageEnvironment& env, char const* fileName,
Boolean reuseFirstSource, Boolean convertToULaw)
: FileServerMediaSubsession(env, fileName, reuseFirstSource),
::WAVAudioFifoServerMediaSubsession(UsageEnvironment& env, StreamReplicator* replicator, Boolean reuseFirstSource, Boolean convertToULaw)
: OnDemandServerMediaSubsession(env, reuseFirstSource),
fReplicator(replicator),
fConvertToULaw(convertToULaw) {
}

Expand Down Expand Up @@ -97,51 +95,49 @@ ::setStreamSourceScale(FramedSource* inputSource, float scale) {
FramedSource* WAVAudioFifoServerMediaSubsession
::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
FramedSource* resultSource = NULL;
do {
WAVAudioFifoSource* wavSource = WAVAudioFifoSource::createNew(envir(), fFileName);
if (wavSource == NULL) break;

// Get attributes of the audio source:

fAudioFormat = wavSource->getAudioFormat();
fBitsPerSample = wavSource->bitsPerSample();
// We handle only 4,8,16,20,24 bits-per-sample audio:
if (fBitsPerSample%4 != 0 || fBitsPerSample < 4 || fBitsPerSample > 24 || fBitsPerSample == 12) {
envir() << "The input file contains " << fBitsPerSample << " bit-per-sample audio, which we don't handle\n";
break;
WAVAudioFifoSource* originalSource = NULL;
FramedFilter* previousSource = (FramedFilter*)fReplicator->inputSource();

// Iterate back into the filter chain until a source is found that
// has a sample frequency and expected to be a WAVAudioFifoSource.
for (int x = 0; x < 10; x++) {
if (((WAVAudioFifoSource*)(previousSource))->bitsPerSample() != 0) {
#ifdef DEBUG
printf("WAVAudioFifoSource found at x = %d\n", x);
#endif
originalSource = (WAVAudioFifoSource*)(previousSource);
break;
}
fSamplingFrequency = wavSource->samplingFrequency();
fNumChannels = wavSource->numChannels();
previousSource = (FramedFilter*)previousSource->inputSource();
}
#ifdef DEBUG
printf("fReplicator->inputSource() = %p\n", originalSource);
#endif
resultSource = fReplicator->createStreamReplica();
if (resultSource == NULL) {
printf("Failed to create stream replica\n");
Medium::close(resultSource);
return NULL;
}
else {
fAudioFormat = originalSource->getAudioFormat();
fBitsPerSample = originalSource->bitsPerSample();
fSamplingFrequency = originalSource->samplingFrequency();
fConvertToULaw = True;
fNumChannels = originalSource->numChannels();
unsigned bitsPerSecond = fSamplingFrequency*fBitsPerSample*fNumChannels;
#ifdef DEBUG
printf("Original source FMT: %d bps: %d freq: %d\n", fAudioFormat, fBitsPerSample, fSamplingFrequency);
#endif
fFileDuration = ~0;//(float)((8.0*originalSource->numPCMBytes())/(fSamplingFrequency*fNumChannels*fBitsPerSample));

fFileDuration = (float)((8.0*wavSource->numPCMBytes())/(fSamplingFrequency*fNumChannels*fBitsPerSample));
estBitrate = (bitsPerSecond+500)/1000; // kbps

// Add in any filter necessary to transform the data prior to streaming:
resultSource = wavSource; // by default
if (fAudioFormat == WA_PCM) {
if (fBitsPerSample == 16) {
// Note that samples in the WAV audio file are in little-endian order.
if (fConvertToULaw) {
// Add a filter that converts from raw 16-bit PCM audio to 8-bit u-law audio:
resultSource = uLawFromPCMAudioSource::createNew(envir(), wavSource, 1/*little-endian*/);
bitsPerSecond /= 2;
} else {
// Add a filter that converts from little-endian to network (big-endian) order:
resultSource = EndianSwap16::createNew(envir(), wavSource);
}
} else if (fBitsPerSample == 20 || fBitsPerSample == 24) {
// Add a filter that converts from little-endian to network (big-endian) order:
resultSource = EndianSwap24::createNew(envir(), wavSource);
}
}
if (fConvertToULaw)
estBitrate /= 2;

estBitrate = (bitsPerSecond+500)/1000; // kbps
return resultSource;
} while (0);

// An error occurred:
Medium::close(resultSource);
return NULL;
}
}

RTPSink* WAVAudioFifoServerMediaSubsession
Expand Down Expand Up @@ -200,7 +196,9 @@ ::createNewRTPSink(Groupsock* rtpGroupsock,
} else { //unknown format
break;
}

#ifdef DEBUG
printf("Create SimpleRTPSink: %s, freq: %d, channels %d\n", mimeType, fSamplingFrequency, fNumChannels);
#endif
return SimpleRTPSink::createNew(envir(), rtpGroupsock,
payloadFormatCode, fSamplingFrequency,
"audio", mimeType, fNumChannels);
Expand Down
Loading

0 comments on commit 5a2048f

Please sign in to comment.