Thanks Ross, setting the frameDuration to the number of frames per
second seemed to make things better.  I guess I'll have to make that
more dynamic later on, but the good news is I'm streaming H.264!!!

The bad news?

My file doesn't keep playing.  I modeled my test program after
testMPEG4VideoStreamer, which uses PassiveServerMediaSubsession and
continuously replays the file.

Also, my stream works in VLC and MPlayer, but not in QuickTime.
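
One guess on the QuickTime problem: my test program passes "h264" as
the sprop-parameter-sets string, but RFC 3984 says that field should be
the base64-encoded SPS and PPS NAL units, comma-separated.  VLC and
MPlayer seem to ignore it, but QuickTime probably parses it.  My
fragmenter already stashes the raw SPS/PPS NAL units, so (if I'm
reading liveMedia's Base64.hh right) building a real one would look
something like this -- untested, with sps/spsSize and pps/ppsSize
standing for the captured SPS/PPS bytes:

    char* spsBase64 = base64Encode((char const*)sps, spsSize);
    char* ppsBase64 = base64Encode((char const*)pps, ppsSize);
    char* sprop = new char[strlen(spsBase64) + strlen(ppsBase64) + 2];
    sprintf(sprop, "%s,%s", spsBase64, ppsBase64);
    delete[] spsBase64; delete[] ppsBase64;
    // ... then pass "sprop" to H264VideoRTPSink::createNew()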

My main goal of writing the framer, parser, and test program is about
90% done.  I think my next goal will be to write an
H264VideoFileServerMediaSubsession (rough sketch below)!
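
Here's roughly what I'm picturing for that, modeled on
MPEG4VideoFileServerMediaSubsession.  Just a sketch, not tested; I'm
assuming the usual FileServerMediaSubsession constructor and the two
createNew*() overrides, and reusing the same placeholder SDP parameters
as my test program:

    class H264VideoFileServerMediaSubsession: public FileServerMediaSubsession {
    public:
      static H264VideoFileServerMediaSubsession*
      createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) {
        return new H264VideoFileServerMediaSubsession(env, fileName, reuseFirstSource);
      }

    protected:
      H264VideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
                                         Boolean reuseFirstSource)
        : FileServerMediaSubsession(env, fileName, reuseFirstSource) {}

      // Wrap a byte-stream file source in our framer, once per client session:
      virtual FramedSource* createNewStreamSource(unsigned /*clientSessionId*/,
                                                  unsigned& estBitrate) {
        estBitrate = 500; // kbps; a guess
        ByteStreamFileSource* fileSource
          = ByteStreamFileSource::createNew(envir(), fFileName);
        if (fileSource == NULL) return NULL;
        return H264VideoStreamFramer::createNew(envir(), fileSource);
      }

      // Create the H.264 RTP sink (placeholder profile-level-id and sprop for now):
      virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                        unsigned char rtpPayloadTypeSeed,
                                        FramedSource* /*inputSource*/) {
        return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeSeed,
                                           0x42, "h264");
      }
    };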

If anybody wants to help figure out those two problems, I've attached
all my code.

Many thanks to Ross, Jerry, and everybody else who helped out!!



On Wed, 2008-06-25 at 20:46 -0700, Ross Finlayson wrote:
> >I have re-written my framer based off the H263plusVideoFramer instead of
> >trying to subclass the MPEGVideoStreamParser.  Now I can see that my
> >framer is parsing through the NAL units and that the H264RTPSink is
> >actually sending some packets out.  The problem is that the file is
> >getting parsed too quickly and it seems as though the ethernet port
> >can't keep up.
> 
> The problem is probably just that you're not setting 
> "fDurationInMicroseconds" before calling 
> "FramedSource::afterGetting()".
> 
> If you're streaming from a file (as opposed to streaming from a live 
> source), then it's *essential* that you set "fDurationInMicroseconds" 
> correctly, so that the code knows how long to delay after sending 
> each RTP packet.
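
(For anyone searching the archives later: the relevant lines in my
framer's continueReadProcessing() now look like this.  30 ms per frame
is hard-coded until I parse the real frame rate; the right value would
be 1000000/fps microseconds per frame.)

    frameDuration = 30; // in ms; TODO: derive from the stream itself
    fDurationInMicroseconds = (unsigned) frameDuration*1000;
    // ... fFrameSize and fPresentationTime also get set before this:
    afterGetting(this);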
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
**********/
// "liveMedia"
// Copyright (c) 1996-2007 Live Networks, Inc.  All rights reserved.
// RTP sink for H.264 video (RFC 3984)
// Implementation
#include <string.h> // for strlen()
#include <stdio.h>  // for sprintf()
#include "H264VideoRTPSink.hh"
#include "H264VideoStreamFramer.hh"

////////// H264VideoRTPSink implementation //////////

H264VideoRTPSink
::H264VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
		   unsigned char rtpPayloadFormat,
		   unsigned profile_level_id,
		   char const* sprop_parameter_sets_str)
  : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, "H264"), 
    fOurFragmenter(NULL) {
  // Set up the "a=fmtp:" SDP line for this stream:
  char const* fmtpFmt =
    "a=fmtp:%d packetization-mode=1"
    ";profile-level-id=%06X"
    ";sprop-parameter-sets=%s\r\n";
  unsigned fmtpFmtSize = strlen(fmtpFmt)
    + 3 /* max char len */
    + 8 /* max unsigned len in hex */
    + strlen(sprop_parameter_sets_str);
  char* fmtp = new char[fmtpFmtSize];
  sprintf(fmtp, fmtpFmt,
          rtpPayloadFormat,
	  profile_level_id,
          sprop_parameter_sets_str);
  fFmtpSDPLine = strDup(fmtp);
  delete[] fmtp;
}

H264VideoRTPSink::~H264VideoRTPSink() {
  delete[] fFmtpSDPLine;
  Medium::close(fOurFragmenter);
  fSource = NULL;
}

H264VideoRTPSink*
H264VideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
			    unsigned char rtpPayloadFormat,
			    unsigned profile_level_id,
			    char const* sprop_parameter_sets_str) {
  return new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat,
			      profile_level_id, sprop_parameter_sets_str);
}

Boolean H264VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
  // Our source must be an appropriate framer:
  return source.isH264VideoStreamFramer();
}

Boolean H264VideoRTPSink::continuePlaying() {
  // First, check whether we have a 'fragmenter' class set up yet.
  // If not, create it now:
  if (fOurFragmenter == NULL) {
    fOurFragmenter = new H264FUAFragmenter(envir(), fSource, OutPacketBuffer::maxSize,
					   ourMaxPacketSize() - 12/*RTP hdr size*/);
    fSource = fOurFragmenter;
  }

  // Then call the parent class's implementation:
  return MultiFramedRTPSink::continuePlaying();
}

void H264VideoRTPSink::stopPlaying() {
  // First, call the parent class's implementation, to stop our fragmenter object
  // (and its source):
  MultiFramedRTPSink::stopPlaying();

  // Then, close our 'fragmenter' object:
  Medium::close(fOurFragmenter); fOurFragmenter = NULL;
  fSource = NULL;
}

void H264VideoRTPSink::doSpecialFrameHandling(unsigned /*fragmentationOffset*/,
					      unsigned char* /*frameStart*/,
					      unsigned /*numBytesInFrame*/,
					      struct timeval frameTimestamp,
					      unsigned /*numRemainingBytes*/) {
  // Set the RTP 'M' (marker) bit iff
  // 1/ The most recently delivered fragment was the end of
  //    (or the only fragment of) an NAL unit, and
  // 2/ This NAL unit was the last NAL unit of an 'access unit' (i.e. video frame).
  if (fOurFragmenter != NULL) {
    H264VideoStreamFramer* framerSource
      = (H264VideoStreamFramer*)(fOurFragmenter->inputSource());
    // This relies on our fragmenter's source being a "H264VideoStreamFramer".
    if (fOurFragmenter->lastFragmentCompletedNALUnit()
	&& framerSource != NULL && framerSource->currentNALUnitEndsAccessUnit()) {
      setMarkerBit();
    }
  }

  setTimestamp(frameTimestamp);
}

Boolean H264VideoRTPSink
::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
				 unsigned /*numBytesInFrame*/) const {
  return False;
}

char const* H264VideoRTPSink::auxSDPLine() {
  return fFmtpSDPLine;
}


////////// H264FUAFragmenter implementation //////////

H264FUAFragmenter::H264FUAFragmenter(UsageEnvironment& env,
				     FramedSource* inputSource,
				     unsigned inputBufferMax,
				     unsigned maxOutputPacketSize)
  : FramedFilter(env, inputSource),
    fInputBufferSize(inputBufferMax+1), fMaxOutputPacketSize(maxOutputPacketSize),
    fNumValidDataBytes(1), fCurDataOffset(1), fSaveNumTruncatedBytes(0),
    fLastFragmentCompletedNALUnit(True), fSPSSize(0), fPPSSize(0),
    sentSPS(False), sentPPS(False) {
    fInputBuffer = new unsigned char[fInputBufferSize];
    fSPS = new unsigned char[fInputBufferSize];
    fPPS = new unsigned char[fInputBufferSize];
}

H264FUAFragmenter::~H264FUAFragmenter() {
  delete[] fInputBuffer;
  delete[] fSPS;
  delete[] fPPS;
  fInputSource = NULL; // so that the subsequent ~FramedFilter doesn't delete it
}

void H264FUAFragmenter::doGetNextFrame() {
  if (fNumValidDataBytes == 1) {
    // We have no NAL unit data currently in the buffer.  Read a new one:
    fInputSource->getNextFrame(&fInputBuffer[1], fInputBufferSize - 1,
			       afterGettingFrame, this,
			       FramedSource::handleClosure, this);
  } else {
    // We have NAL unit data in the buffer.  There are three cases to consider:
    // 1. There is a new NAL unit in the buffer, and it's small enough to deliver
    //    to the RTP sink (as is).
    // 2. There is a new NAL unit in the buffer, but it's too large to deliver to
    //    the RTP sink in its entirety.  Deliver the first fragment of this data,
    //    as a FU-A packet, with one extra preceding header byte.
    // 3. There is a NAL unit in the buffer, and we've already delivered some
    //    fragment(s) of this.  Deliver the next fragment of this data,
    //    as a FU-A packet, with two extra preceding header bytes.

    if (fMaxSize < fMaxOutputPacketSize) { // shouldn't happen
      envir() << "H264FUAFragmenter::doGetNextFrame(): fMaxSize ("
	      << fMaxSize << ") is smaller than expected\n";
    } else {
      fMaxSize = fMaxOutputPacketSize;
    }

    fLastFragmentCompletedNALUnit = True; // by default

    if (fCurDataOffset == 1) { // case 1 or 2
      unsigned char nalType = fInputBuffer[1] & 0x1F;
      if (fNumValidDataBytes - 1 <= fMaxSize) { // case 1
        if (nalType == 8) { // PPS: save a copy, so we can resend it before each IDR frame
          memmove(fPPS, &fInputBuffer[1], fNumValidDataBytes - 1);
          fPPSSize = fNumValidDataBytes - 1;
        } else if (nalType == 7) { // SPS: likewise save a copy
          memmove(fSPS, &fInputBuffer[1], fNumValidDataBytes - 1);
          fSPSSize = fNumValidDataBytes - 1;
        }

        if (nalType == 5 && fSPSSize > 0 && fPPSSize > 0) {
          // IDR NAL unit, and we have saved parameter sets: deliver the SPS
          // and PPS first (one per call), then the IDR NAL unit itself:
          if (!sentSPS) {
            memmove(fTo, fSPS, fSPSSize);
            fFrameSize = fSPSSize;
            sentSPS = True;
          } else if (!sentPPS) {
            memmove(fTo, fPPS, fPPSSize);
            fFrameSize = fPPSSize;
            sentPPS = True;
          } else {
            memmove(fTo, &fInputBuffer[1], fNumValidDataBytes - 1);
            fFrameSize = fNumValidDataBytes - 1;
            fCurDataOffset = fNumValidDataBytes;
            sentSPS = sentPPS = False;
          }
        } else {
          // Deliver the NAL unit as a single packet:
          memmove(fTo, &fInputBuffer[1], fNumValidDataBytes - 1);
          fFrameSize = fNumValidDataBytes - 1;
          fCurDataOffset = fNumValidDataBytes;
        }
      } else { // case 2
        if (nalType == 5 && fSPSSize > 0 && fPPSSize > 0 && !(sentSPS && sentPPS)) {
          // IDR NAL unit, and we have saved parameter sets: deliver the SPS
          // and PPS first (one per call), before fragmenting the IDR NAL unit:
          if (!sentSPS) {
            memmove(fTo, fSPS, fSPSSize);
            fFrameSize = fSPSSize;
            sentSPS = True;
          } else {
            memmove(fTo, fPPS, fPPSSize);
            fFrameSize = fPPSSize;
            sentPPS = True;
          }
        } else {
          // We need to send the NAL unit data as FU-A packets.  Deliver the first
          // packet now.  Note that we add FU indicator and FU header bytes to the front
          // of the packet (reusing the existing NAL header byte for the FU header).
          if (sentSPS && sentPPS) sentSPS = sentPPS = False; // re-arm for the next IDR
          fInputBuffer[0] = (fInputBuffer[1] & 0xE0) | 28; // FU indicator
          fInputBuffer[1] = 0x80 | (fInputBuffer[1] & 0x1F); // FU header (with S bit)
          memmove(fTo, fInputBuffer, fMaxSize);
          fFrameSize = fMaxSize;
          fCurDataOffset += fMaxSize - 1;
          fLastFragmentCompletedNALUnit = False;
        }
      }
    } else { // case 3
      // We are sending this NAL unit data as FU-A packets.  We've already sent the
      // first packet (fragment).  Now, send the next fragment.  Note that we add
      // FU indicator and FU header bytes to the front.  (We reuse these bytes that
      // we already sent for the first fragment, but clear the S bit, and add the E
      // bit if this is the last fragment.)
      fInputBuffer[fCurDataOffset-2] = fInputBuffer[0]; // FU indicator
      fInputBuffer[fCurDataOffset-1] = fInputBuffer[1]&~0x80; // FU header (no S bit)
      unsigned numBytesToSend = 2 + fNumValidDataBytes - fCurDataOffset;
      if (numBytesToSend > fMaxSize) {
	// We can't send all of the remaining data this time:
	numBytesToSend = fMaxSize;
	fLastFragmentCompletedNALUnit = False;
      } else {
	// This is the last fragment:
	fInputBuffer[fCurDataOffset-1] |= 0x40; // set the E bit in the FU header
	fNumTruncatedBytes = fSaveNumTruncatedBytes;
      }
      memmove(fTo, &fInputBuffer[fCurDataOffset-2], numBytesToSend);
      fFrameSize = numBytesToSend;
      fCurDataOffset += numBytesToSend - 2;
    }

    if (fCurDataOffset >= fNumValidDataBytes) {
      // We're done with this data.  Reset the pointers for receiving new data:
      fNumValidDataBytes = fCurDataOffset = 1;
    }

    // Complete delivery to the client:
    FramedSource::afterGetting(this);
  }
}

void H264FUAFragmenter::afterGettingFrame(void* clientData, unsigned frameSize,
					  unsigned numTruncatedBytes,
					  struct timeval presentationTime,
					  unsigned durationInMicroseconds) {
  H264FUAFragmenter* fragmenter = (H264FUAFragmenter*)clientData;
  fragmenter->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime,
				 durationInMicroseconds);
}

void H264FUAFragmenter::afterGettingFrame1(unsigned frameSize,
					   unsigned numTruncatedBytes,
					   struct timeval presentationTime,
					   unsigned durationInMicroseconds) {
  fNumValidDataBytes += frameSize;
  fSaveNumTruncatedBytes = numTruncatedBytes;
  fPresentationTime = presentationTime;
  fDurationInMicroseconds = durationInMicroseconds;

  // Deliver data to the client:
  doGetNextFrame();
}
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
**********/
// "liveMedia"
// Copyright (c) 1996-2007 Live Networks, Inc.  All rights reserved.
// Any source that feeds into a "H264VideoRTPSink" must be of this class.
// This is a virtual base class; subclasses must implement the
// "currentNALUnitEndsAccessUnit()" virtual function.
// Implementation

// #include "H264VideoStreamFramer.hh"
// 
// H264VideoStreamFramer::H264VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource)
//   : FramedFilter(env, inputSource) {
// }
// 
// H264VideoStreamFramer::~H264VideoStreamFramer() {
// }
// 
// Boolean H264VideoStreamFramer::isH264VideoStreamFramer() const {
//   return True;
// }

// ***************************** GOING FOR A RIDE YEAH

#include "H264VideoStreamFramer.hh"
#include "H264VideoStreamParser.hh"

#include <string.h>
#include <GroupsockHelper.hh>

///////////////////////////////////////////////////////////////////////////////
////////// h264VideoStreamFramer implementation //////////
//public///////////////////////////////////////////////////////////////////////
H264VideoStreamFramer* H264VideoStreamFramer::createNew(
                                                         UsageEnvironment& env,
                                                         FramedSource* inputSource)
{
   // TODO: add source-type checking here?
   return new H264VideoStreamFramer(env, inputSource);
}


///////////////////////////////////////////////////////////////////////////////
H264VideoStreamFramer::H264VideoStreamFramer(
                              UsageEnvironment& env,
                              FramedSource* inputSource,
                              Boolean createParser)
                              : FramedFilter(env, inputSource),
                                fFrameRate(0.0), // until we learn otherwise
                                fPictureCount(0),
                                fPictureEndMarker(False)
{
   // Use the current wallclock time as the base 'presentation time':
   gettimeofday(&fPresentationTimeBase, NULL);
   fParser = createParser ? new H264VideoStreamParser(this, inputSource) : NULL;
}

///////////////////////////////////////////////////////////////////////////////
H264VideoStreamFramer::~H264VideoStreamFramer()
{
   delete fParser;
}


///////////////////////////////////////////////////////////////////////////////
void H264VideoStreamFramer::doGetNextFrame()
{
  fParser->registerReadInterest(fTo, fMaxSize);
  continueReadProcessing();
}


///////////////////////////////////////////////////////////////////////////////
Boolean H264VideoStreamFramer::isH264VideoStreamFramer() const
{
  return True;
}

///////////////////////////////////////////////////////////////////////////////
Boolean H264VideoStreamFramer::currentNALUnitEndsAccessUnit() 
{
  return True;
}


///////////////////////////////////////////////////////////////////////////////
void H264VideoStreamFramer::continueReadProcessing(
                                   void* clientData,
                                   unsigned char* /*ptr*/, unsigned /*size*/,
                                   struct timeval /*presentationTime*/)
{
   H264VideoStreamFramer* framer = (H264VideoStreamFramer*)clientData;
   framer->continueReadProcessing();
}

///////////////////////////////////////////////////////////////////////////////
void H264VideoStreamFramer::continueReadProcessing()
{
   unsigned acquiredFrameSize;
   u_int64_t frameDuration;  // in ms

   acquiredFrameSize = fParser->parse();

   if (acquiredFrameSize > 0) {
      // We were able to acquire a frame from the input.
      // It has already been copied to the reader's space.
      fFrameSize = acquiredFrameSize;
//    fNumTruncatedBytes = fParser->numTruncatedBytes(); // not needed so far

      // Hard-coded for now; this should eventually be derived from the stream:
      frameDuration = 30;
      fFrameRate = frameDuration == 0 ? 0.0 : 1000./(long)frameDuration;

      // Compute "fPresentationTime":
      if (acquiredFrameSize == 5) // first frame (fragile: keys off a 5-byte NAL unit)
         fPresentationTime = fPresentationTimeBase;
      else
         fPresentationTime.tv_usec += (long) frameDuration*1000;

      while (fPresentationTime.tv_usec >= 1000000) {
         fPresentationTime.tv_usec -= 1000000;
         ++fPresentationTime.tv_sec;
      }

      // Compute "fDurationInMicroseconds":
      fDurationInMicroseconds = (unsigned int) frameDuration*1000;

      // Call our own 'after getting' function.  Because we're not a 'leaf'
      // source, we can call this directly, without risking infinite recursion.
      afterGetting(this);
   } else {
      // We were unable to parse a complete frame from the input, because:
      // - we had to read more data from the source stream, or
      // - the source stream has ended.
   }
}
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
**********/
// "liveMedia"
// Copyright (c) 1996-2007 Live Networks, Inc.  All rights reserved.
// An abstract parser for H264 video streams
// Implementation

#include "H264VideoStreamFramer.hh"
#include "H264VideoStreamParser.hh"

H264VideoStreamParser
::H264VideoStreamParser(H264VideoStreamFramer* usingSource,
			FramedSource* inputSource)
  : StreamParser(inputSource, FramedSource::handleClosure, usingSource,
		 &H264VideoStreamFramer::continueReadProcessing, usingSource),
  fUsingSource(usingSource) {
}

H264VideoStreamParser::~H264VideoStreamParser() {
}

void H264VideoStreamParser::restoreSavedParserState() {
  StreamParser::restoreSavedParserState();
  fTo = fSavedTo;
  fNumTruncatedBytes = fSavedNumTruncatedBytes;
}

void H264VideoStreamParser::setParseState(H264ParseState parseState) {
  fSavedTo = fTo;
  fSavedNumTruncatedBytes = fNumTruncatedBytes;
  fCurrentParseState = parseState;
  saveParserState();
}

unsigned H264VideoStreamParser::getParseState() {
    return fCurrentParseState;
}

void H264VideoStreamParser::registerReadInterest(unsigned char* to,
						 unsigned maxSize) {
  fStartOfFrame = fTo = fSavedTo = to;
  fLimit = to + maxSize;
  fNumTruncatedBytes = fSavedNumTruncatedBytes = 0;
}

unsigned H264VideoStreamParser::parse() {
  try {
    switch (fCurrentParseState) {
    case PARSING_START_SEQUENCE: {
      return parseStartSequence();
    }
    case PARSING_NAL_UNIT: {
      return parseNALUnit();
    }
    default: {
      return 0; // shouldn't happen
    }
    }
  } catch (int /*e*/) {
#ifdef DEBUG
    fprintf(stderr, "H264VideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
#endif
    return 0;  // the parsing got interrupted
  }
}

unsigned H264VideoStreamParser::parseStartSequence()
{
    // Scan forward until we find a start sequence (0x00000001):
    u_int32_t test = test4Bytes();
    while (test != 0x00000001)
    {
        skipBytes(1);
        test = test4Bytes();
    }
    // Note: we deliberately stay in PARSING_START_SEQUENCE, because
    // parseNALUnit() leaves the stream positioned at the next start sequence:
    setParseState(PARSING_START_SEQUENCE);

    // Skip over the start sequence, then parse the NAL unit that follows it:
    skipBytes(4);

    return parseNALUnit();
}

unsigned H264VideoStreamParser::parseNALUnit()
{
    // Save bytes until we reach the next start sequence (0x00000001)
    // or the end of the stream:
    u_int32_t test = test4Bytes();
    while (test != 0x00000001)
    {
        saveByte(get1Byte());
        test = test4Bytes();
    }

    return curFrameSize();
}

/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
**********/
// "liveMedia"
// Copyright (c) 1996-2007 Live Networks, Inc.  All rights reserved.
// An abstract parser for H264 video streams
// C++ header

#ifndef _H264_VIDEO_STREAM_PARSER_HH
#define _H264_VIDEO_STREAM_PARSER_HH

#ifndef _STREAM_PARSER_HH
#include "StreamParser.hh"
#endif
#ifndef _H264_VIDEO_STREAM_FRAMER_HH
#include "H264VideoStreamFramer.hh"
#endif

////////// H264VideoStreamParser definition //////////

enum H264ParseState {
  PARSING_START_SEQUENCE,
  PARSING_NAL_UNIT
}; 


class H264VideoStreamParser: public StreamParser {
public:
  H264VideoStreamParser(H264VideoStreamFramer* usingSource,
			FramedSource* inputSource);
  virtual ~H264VideoStreamParser();

public:
  void registerReadInterest(unsigned char* to, unsigned maxSize);

  virtual unsigned parse();
  unsigned numTruncatedBytes() const { return fNumTruncatedBytes; }
  unsigned getParseState();

protected:
  void setParseState(H264ParseState parseState);
  unsigned parseStartSequence();
  unsigned parseNALUnit();

  // Record "byte" in the current output frame:
  void saveByte(u_int8_t byte) {
    if (fTo >= fLimit) { // there's no space left
      ++fNumTruncatedBytes;
      return;
    }

    *fTo++ = byte;
  }

  void save4Bytes(u_int32_t word) {
    if (fTo+4 > fLimit) { // there's no space left
      fNumTruncatedBytes += 4;
      return;
    }

    *fTo++ = word>>24; *fTo++ = word>>16; *fTo++ = word>>8; *fTo++ = word;
  }

  // Save data until we see a sync word (0x000001xx):
  void saveToNextCode(u_int32_t& curWord) {
    saveByte(curWord>>24);
    curWord = (curWord<<8)|get1Byte();
    while ((curWord&0xFFFFFF00) != 0x00000100) {
      if ((unsigned)(curWord&0xFF) > 1) {
	// a sync word definitely doesn't begin anywhere in "curWord"
	save4Bytes(curWord);
	curWord = get4Bytes();
      } else {
	// a sync word might begin in "curWord", although not at its start
	saveByte(curWord>>24);
	unsigned char newByte = get1Byte();
	curWord = (curWord<<8)|newByte;
      }
    }
  }

  // Skip data until we see a sync word (0x000001xx):
  void skipToNextCode(u_int32_t& curWord) {
    curWord = (curWord<<8)|get1Byte();
    while ((curWord&0xFFFFFF00) != 0x00000100) {
      if ((unsigned)(curWord&0xFF) > 1) {
	// a sync word definitely doesn't begin anywhere in "curWord"
	curWord = get4Bytes();
      } else {
	// a sync word might begin in "curWord", although not at its start
	unsigned char newByte = get1Byte();
	curWord = (curWord<<8)|newByte;
      }
    }
  }

protected:
  H264VideoStreamFramer* fUsingSource;

  // state of the frame that's currently being read:
  unsigned char* fStartOfFrame;
  unsigned char* fTo;
  unsigned char* fLimit;
  unsigned fNumTruncatedBytes;
  unsigned curFrameSize() { return fTo - fStartOfFrame; }
  unsigned char* fSavedTo;
  unsigned fSavedNumTruncatedBytes;

private: // redefined virtual functions
  virtual void restoreSavedParserState();
  H264ParseState fCurrentParseState;

};

#endif
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
**********/
// "liveMedia"
// Copyright (c) 1996-2007 Live Networks, Inc.  All rights reserved.
// RTP sink for H.264 video (RFC 3984)
// C++ header

#ifndef _H264_VIDEO_RTP_SINK_HH
#define _H264_VIDEO_RTP_SINK_HH

#ifndef _VIDEO_RTP_SINK_HH
#include "VideoRTPSink.hh"
#endif
#ifndef _FRAMED_FILTER_HH
#include "FramedFilter.hh"
#endif

class H264FUAFragmenter;

class H264VideoRTPSink: public VideoRTPSink {
public:
  static H264VideoRTPSink* createNew(UsageEnvironment& env,
				     Groupsock* RTPgs,
				     unsigned char rtpPayloadFormat,
				     unsigned profile_level_id,
				     char const* sprop_parameter_sets_str);

protected:
  H264VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
		   unsigned char rtpPayloadFormat,
		   unsigned profile_level_id,
		   char const* sprop_parameter_sets_str);
	// called only by createNew()

  virtual ~H264VideoRTPSink();

private: // redefined virtual functions:
  virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
  virtual Boolean continuePlaying();
  virtual void stopPlaying();
  virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
                                      unsigned char* frameStart,
                                      unsigned numBytesInFrame,
                                      struct timeval frameTimestamp,
                                      unsigned numRemainingBytes);
  virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
						 unsigned numBytesInFrame) const;
  virtual char const* auxSDPLine();

protected:
  H264FUAFragmenter* fOurFragmenter;

private:
  char* fFmtpSDPLine;
};


////////// H264FUAFragmenter definition //////////

// Because of the idiosyncrasies of the H.264 RTP payload format, we implement
// "H264VideoRTPSink" using a separate "H264FUAFragmenter" class that delivers,
// to the "H264VideoRTPSink", only fragments that will fit within an outgoing
// RTP packet.  I.e., we implement fragmentation in this separate "H264FUAFragmenter"
// class, rather than in "H264VideoRTPSink".
// (Note: This class should be used only by "H264VideoRTPSink", or a subclass.)
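//
// For reference, the two bytes that this class prepends to each fragment
// (RFC 3984, section 5.8) are laid out as:
//   FU indicator: |F|NRI| type=28 |   (F, NRI copied from the original NAL header)
//   FU header:    |S|E|R|  type   |   (S: first fragment; E: last fragment)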

class H264FUAFragmenter: public FramedFilter {
public:
  H264FUAFragmenter(UsageEnvironment& env, FramedSource* inputSource,
		    unsigned inputBufferMax, unsigned maxOutputPacketSize);
  virtual ~H264FUAFragmenter();

  Boolean lastFragmentCompletedNALUnit() const { return fLastFragmentCompletedNALUnit; }

private: // redefined virtual functions:
  virtual void doGetNextFrame();

private:
  static void afterGettingFrame(void* clientData, unsigned frameSize,
				unsigned numTruncatedBytes,
                                struct timeval presentationTime,
                                unsigned durationInMicroseconds);
  void afterGettingFrame1(unsigned frameSize,
                          unsigned numTruncatedBytes,
                          struct timeval presentationTime,
                          unsigned durationInMicroseconds);

private:
  unsigned fInputBufferSize;
  unsigned fMaxOutputPacketSize;
  unsigned char* fInputBuffer;
  unsigned char* fSPS;
  unsigned fSPSSize;
  unsigned char* fPPS;
  unsigned fPPSSize;
  unsigned fNumValidDataBytes;
  unsigned fCurDataOffset;
  unsigned fSaveNumTruncatedBytes;
  Boolean fLastFragmentCompletedNALUnit;
  Boolean sentSPS;
  Boolean sentPPS;
};


#endif
#ifndef _H264_VIDEO_STREAM_FRAMER_HH
#define _H264_VIDEO_STREAM_FRAMER_HH

#ifndef _FRAMED_FILTER_HH
#include "FramedFilter.hh"
#endif


class H264VideoStreamFramer: public FramedFilter {
public:

  static H264VideoStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource);
  virtual Boolean currentNALUnitEndsAccessUnit();
  Boolean& pictureEndMarker() { return fPictureEndMarker; }    // a hack for implementing the RTP 'M' bit

protected:
  // Constructor called only by createNew(), or by subclass constructors
  H264VideoStreamFramer(UsageEnvironment& env,
                            FramedSource* inputSource,
                            Boolean createParser = True);
  virtual ~H264VideoStreamFramer();


public: 
  static void continueReadProcessing(void* clientData,
                     unsigned char* ptr, unsigned size,
                     struct timeval presentationTime);
  void continueReadProcessing();

private:
  virtual void doGetNextFrame();
  virtual Boolean isH264VideoStreamFramer() const;
  

protected:
  double   fFrameRate;    // in frames per second
  unsigned fPictureCount; // hack used to implement doGetNextFrame()
  Boolean  fPictureEndMarker;

private:
  class H264VideoStreamParser* fParser;
  struct timeval fPresentationTimeBase;
};

#endif
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
**********/
// Copyright (c) 1996-2007, Live Networks, Inc.  All rights reserved
// A test program that reads an H.264 video elementary stream file,
// and streams it using RTP
// main program
#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"
#include "GroupsockHelper.hh"

UsageEnvironment* env;
char const* inputFileName = "test.h264";
H264VideoStreamFramer* videoSource;
RTPSink* videoSink;

void play(); // forward

int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address.  If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNum = 18888;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
  rtpGroupsock.multicastSendOnly(); // we're a SSM source
  Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
  rtcpGroupsock.multicastSendOnly(); // we're a SSM source

  // Create an 'H.264 Video RTP' sink from the RTP 'groupsock'.
  // (Note: the profile_level_id and sprop-parameter-sets arguments here
  // are placeholders for now.)
  videoSink = H264VideoRTPSink::createNew(*env, &rtpGroupsock, 96, 0x42, "h264");

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  RTCPInstance* rtcp
  = RTCPInstance::createNew(*env, &rtcpGroupsock,
			    estimatedSessionBandwidth, CNAME,
			    videoSink, NULL /* we're a server */,
			    True /* we're a SSM source */);
  // Note: This starts RTCP running automatically

  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
		   "Session streamed by \"testMPEG4VideoStreamer\"",
					   True /*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
  rtspServer->addServerMediaSession(sms);

  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Start the streaming:
  *env << "Beginning streaming...\n";
  play();

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}

void afterPlaying(void* /*clientData*/) {
  *env << "...done reading from file\n";

  Medium::close(videoSource);
  // Note that this also closes the input file that this source read from.

  // Start playing once again:
  play();
}

void play() {
  // Open the input file as a 'byte-stream file source':
  ByteStreamFileSource* fileSource
    = ByteStreamFileSource::createNew(*env, inputFileName);
  if (fileSource == NULL) {
    *env << "Unable to open file \"" << inputFileName
	 << "\" as a byte-stream file source\n";
    exit(1);
  }
  
  FramedSource* videoES = fileSource;

  // Create a framer for the Video Elementary Stream:
  videoSource = H264VideoStreamFramer::createNew(*env, videoES);
  
  // Finally, start playing:
  *env << "Beginning to read from file...\n";
  videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
}