Hi Ross,

I have attached 
1. my Device source file Wavsource.cpp 
2. WaveStreamer .cpp( took a reference from testWavAudioStreamer.cpp) where I 
have thread to read the samples and have code for initialization and starting 
the session.

Regards
From: Ross Finlayson 
Sent: Thursday, October 24, 2013 6:23 PM
To: LIVE555 Streaming Media - development & use 
Subject: Re: [Live-devel] FrameSource:Getnextframe error while 
streamingPCMframes

  I found the problem that uLawFromPCMAudioSource  afterGettingFrame is not 
getting called when I use DeviceSource based design and triggering concept.
  i.e. 
  If I am calling FramedSource::afterGetting(this) in doGetNextFrame itself , 
it is calling afterGettingFrame  function in uLawFromPCMAudioSource   followed 
by calling afterGettingFrame   function in MultiFramedRTPSink.

  If I am calling FramedSource::afterGetting(this) in deliverFrame(which will 
called by trigger event), then it is calling only afterGettingFrame   function 
in MultiFramedRTPSink and not uLawFromPCMAudioSource   afterGettingFrame  
function.
  That's why I am getting FramedSource ::getNextFrame():attempting to read more 
than once at the same time. 

  Where I am going wrong?

I can't tell what's wrong, without seeing your code.

Please post the code for your "OnDemandServerMediaSubsession" subclass, and for 
your "DeviceSource" based class.



Ross Finlayson
Live Networks, Inc.
http://www.live555.com/ 




--------------------------------------------------------------------------------
_______________________________________________
live-devel mailing list
live-devel@lists.live555.com
http://lists.live555.com/mailman/listinfo/live-devel
#include "liveMedia.hh"
#include "GroupsockHelper.hh"

#include "BasicUsageEnvironment.hh"
#include "pthread.h"
#include "WAVSource.hh"


// To convert 16-bit samples to 8-bit u-law ("u" is the Greek letter "mu")
// encoding, before streaming, uncomment the following line:
#define CONVERT_TO_ULAW 1

UsageEnvironment* env;

void play(); // forward

pthread_t ThreadID;
TaskScheduler* scheduler;
WAVSource* wavSource;
unsigned char audioFormat;
unsigned char bitsPerSample;
unsigned samplingFrequency;
unsigned char numChannels;
unsigned bitsPerSecond;
char const* mimeType="PCMU";
 unsigned char payloadFormatCode;
uLawFromPCMAudioSource* uLawsource;

struct sessionState_t {
  FramedSource* source;
  RTPSink* sink;
  RTCPInstance* rtcpInstance;
  Groupsock* rtpGroupsock;
  Groupsock* rtcpGroupsock;
  RTSPServer* rtspServer;
} sessionState;

void triggerLive555Scheduler(void) {
        scheduler->triggerEvent(WAVSource::s_frameReceivedTrigger, 
sessionState.source);

}

/**
Thread function to read the PCM samples from teh device & trigger live555 
thread.
**/
void* StartStreaming(void* p)
{
        static bool s_bFirstInstance = true;
        // Start the streaming:
        *env << "Beginning streaming...\n";

        //play();

        s_bFirstInstance = false;

        bool m_bStreamingEnable = true;
        while(true == m_bStreamingEnable)
        {
                /* get encoded the frames from the camera */
                if(true == wavSource->doReadFromDriver())
                {
                        /* if capture frame is success, trigger the live555 
thread for initiate transfer */
                        triggerLive555Scheduler();

                }
                else
                {
                        printf("Source Capture frame failed....Go back to Wait 
state\r\n");
                        /* Disable streaming on error conditions */
                        m_bStreamingEnable = false;
                        /* Signal the main routine to re-initiate the session */
                        break;
                }
        usleep(33333);
        }
}

int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);
 // Get attributes of the audio source:
  audioFormat = WAVSource::getAudioFormat();
  printf("audioFormat:%d",audioFormat);
  bitsPerSample = WAVSource::bitsPerSample();

   samplingFrequency = WAVSource::samplingFrequency();
  numChannels = WAVSource::numChannels();
  bitsPerSecond = samplingFrequency*bitsPerSample*numChannels/2;
  *env << "Audio source parameters:\n\t" << samplingFrequency << " Hz, ";
  *env << bitsPerSample << " bits-per-sample, ";
  *env << numChannels << " channels => ";
  *env << bitsPerSecond << " bits-per-second\n";
 
      
  payloadFormatCode = 96; // by default, unless a static RTP payload type can 
be used

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = (bitsPerSecond + 500)/1000; // in 
kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address.  If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer" demo 
application,
  // or the "LIVE555 Media Server" - not this application - as a model.

  const unsigned short rtpPortNum = 2222;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  sessionState.rtpGroupsock
    = new Groupsock(*env, destinationAddress, rtpPort, ttl);
  sessionState.rtpGroupsock->multicastSendOnly(); // we're a SSM source
  sessionState.rtcpGroupsock
    = new Groupsock(*env, destinationAddress, rtcpPort, ttl);
  sessionState.rtcpGroupsock->multicastSendOnly(); // we're a SSM source
sessionState.sink
    = SimpleRTPSink::createNew(*env, sessionState.rtpGroupsock,
                               payloadFormatCode, samplingFrequency,
                               "audio", mimeType, numChannels);
 sessionState.rtcpInstance
    = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock,
                              estimatedSessionBandwidth, CNAME,
                              sessionState.sink, NULL /* we're a server */,
                              True /* we're a SSM source*/);
  // Create an appropriate audio RTP sink (using "SimpleRTPSink") from the RTP 
'groupsock':
  
  // Create and start a RTSP server to serve this stream:
  sessionState.rtspServer = RTSPServer::createNew(*env, 8554);
  if (sessionState.rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
 
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "testStream", "PCMStreamer",
           "Session streamed by \"RanjithWaveStreamer\"", True/*SSM*/);
  
sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, 
sessionState.rtcpInstance));
  sessionState.rtspServer->addServerMediaSession(sms);

  char* url = sessionState.rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Note: This starts RTCP running automatically

  
  play();

  pthread_create(&ThreadID, NULL,StartStreaming, NULL);
  
  

  env->taskScheduler().doEventLoop(); // does not return
  return 0; // only to prevent compiler warnings
}


void afterPlaying(void* clientData); // forward

// A structure to hold the state of the current session.
// It is used in the "afterPlaying()" function to clean up the session.


void play() {
  // Open the file as a 'WAV' file:
   wavSource = WAVSource::createNew(*env);
  if (wavSource == NULL) {
    *env << "Wav Source is NULL " << "\n";
    exit(1);
  }
   sessionState.source = wavSource; // by default
  if (audioFormat == WA_PCM) {
    if (bitsPerSample == 16) {
      // Note that samples in the WAV audio file are in little-endian order.
#ifdef CONVERT_TO_ULAW
      // Add a filter that converts from raw 16-bit PCM audio (in little-endian 
order) to 8-bit u-law audio:
     uLawsource = uLawFromPCMAudioSource::createNew(*env, wavSource, 
1/*little-endian*/);
   sessionState.source = uLawsource;
      if (sessionState.source == NULL) {
        *env << "Unable to create a u-law filter from the PCM audio source: " 
<< env->getResultMsg() << "\n";
        exit(1);
      }
      bitsPerSecond /= 2;
      *env << "Converting to 8-bit u-law audio for streaming => " << 
bitsPerSecond << " bits-per-second\n";
      mimeType = "PCMU";
      if (samplingFrequency == 8000 && numChannels == 1) {
        payloadFormatCode = 0; // a static RTP payload type
      printf("payloadFormatCode:0 in PCMU\n");
      }
#else
      // Add a filter that converts from little-endian to network (big-endian) 
order: 
      sessionState.source = EndianSwap16::createNew(*env, wavSource);
      if (sessionState.source == NULL) {
        *env << "Unable to create a little->bit-endian order filter from the 
PCM audio source: " << env->getResultMsg() << "\n";
        exit(1);
      }
      *env << "Converting to network byte order for streaming\n";
      mimeType = "L16";
      if (samplingFrequency == 44100 && numChannels == 2) {
        payloadFormatCode = 10; // a static RTP payload type
        printf("payloadFormatCode:10\n");
      } else if (samplingFrequency == 44100 && numChannels == 1) {
        payloadFormatCode = 11; // a static RTP payload type
printf("payloadFormatCode:11\n");
      }
#endif
    }  else { // bitsPerSample == 8 (we assume that bitsPerSample == 4 is only 
for WA_IMA_ADPCM)
      // Don't do any transformation; send the 8-bit PCM data 'as is':
      mimeType = "L8";
    }
  } else if (audioFormat == WA_PCMU) {
    mimeType = "PCMU";
    if (samplingFrequency == 8000 && numChannels == 1) {
      payloadFormatCode = 0; // a static RTP payload type    
printf("payloadFormatCode:0\n");                                                
                      
    }
  } else if (audioFormat == WA_PCMA) {
    mimeType = "PCMA";
    if (samplingFrequency == 8000 && numChannels == 1) {
      payloadFormatCode = 8; // a static RTP payload type    
printf("payloadFormatCode:8\n");                                                
                      
    } 
  } else { //unknown format                                                     
                                                   
    *env << "Unknown audio format code \"" << audioFormat << "\" in WAV file 
header\n";
    exit(1);
  }
  // Finally, start the streaming:
  *env << "Beginning streaming11111...\n";
  sessionState.sink->startPlaying(*sessionState.source, 
afterPlaying,sessionState.sink);
}


void afterPlaying(void* /*clientData*/) {
  *env << "...done streaming\n";

  // End by closing the media:
  Medium::close(sessionState.rtspServer);
  Medium::close(sessionState.rtcpInstance);
  Medium::close(sessionState.sink);
  delete sessionState.rtpGroupsock;
  Medium::close(sessionState.source);
  delete sessionState.rtcpGroupsock;

  // We're done:
  exit(0);
}




#include <GroupsockHelper.hh>
#include <FramedSource.hh>
#include <iostream>
#include "WAVSource.hh"

// Audio device setting hardcoded for testing
 unsigned char WAVSource::audioformat=0x01;
unsigned int WAVSource::frequency=44100;
unsigned int WAVSource::channels=2;
 unsigned int WAVSource::bitspersample=16;
unsigned WAVSource::fPreferredFrameSize;
using namespace std;

EventTriggerId WAVSource::s_frameReceivedTrigger = 0;
char* WAVSource::s_pu8RawFrameBuff = NULL;


 unsigned char WAVSource::getAudioFormat() {
  return audioformat;
}

 unsigned int WAVSource::bitsPerSample() {
return bitspersample;
}

 unsigned int WAVSource::samplingFrequency() {
return frequency;
}
 unsigned int WAVSource::numChannels() {
return channels;
}


unsigned int WAVSource::s_readError = 0;
/**
 * \brief createNew: call constructor - To get code in sync with Live555 methods

 * \param[in]: None

 * \return None
      <Additional Description>

 * \details

 **/
WAVSource* WAVSource::createNew(UsageEnvironment& env) {
        return new WAVSource(env);
}
/**
 * \brief Constructor: Initializes member attributes and opens the encoder 
source for receiving the frames

 * \param[in]: None

 * \return None
      <Additional Description>

 * \details

 **/
WAVSource::WAVSource(UsageEnvironment& env) : AudioInputDevice(env, 0, 0, 0, 0) 
{
        
        


printf("audioformat:%d,channels:%d,bitspersample:%d,frequency:%d\n",audioformat,channels,bitspersample,frequency);
        
       fSamplingFrequency=frequency;
       fBitsPerSample=bitspersample;
       fNumChannels=channels;
        if (0 == s_frameReceivedTrigger) {
                s_frameReceivedTrigger = 
envir().taskScheduler().createEventTrigger(deliverFrame0);
        }
        s_pu8RawFrameBuff = NULL;
   fPlayTimePerSample = 1e6/(double)frequency;
         unsigned maxSamplesPerFrame = (1400*8)/(channels*bitspersample);
         unsigned desiredSamplesPerFrame = (unsigned)(0.02*frequency);
        unsigned samplesPerFrame = desiredSamplesPerFrame < maxSamplesPerFrame 
? desiredSamplesPerFrame : maxSamplesPerFrame;
        fPreferredFrameSize = (samplesPerFrame*channels*bitspersample)/8;
        printf("fPreferredFrameSize:%d\n",fPreferredFrameSize);


        s_pu8RawFrameBuff = new char[16];//check

        if(NULL == s_pu8RawFrameBuff)
        {
                printf("s_pu8RawFrameBuff allocation failed\r\n");
        }

        if(true == OpenSource())
        {
                printf("Source Open Success\r\n");
        }
        else
        {
                printf("Source Open Failed\r\n");
        }
}
/**
 * \brief Destructor  - Deletes the array allocated for raw and encoded frames 
in constructor

 * \param[in]: None

 * \return None
      <Additional Description>

 * \details

 **/
WAVSource::~WAVSource() {
        delete []s_pu8RawFrameBuff;

}
/**
 * \brief doGetNextFrame

 * \param[in]: None

 * \return None
      <Additional Description>

 * \details

 **/
void WAVSource::doGetNextFrame () {
 printf("WAVSource::doGetNextFrame called\n");
        if (s_readError != 0) {
printf("INside and returning do get next frame\n");
                handleClosure(this);
                return;

        }
//doReadFromDriver();
//deliverFrame();
}
/**
 * \brief deliverFrame0

 * \param[in]: None

 * \return None
      <Additional Description>

 * \details

 **/
void WAVSource::deliverFrame0(void* clientData) {
        ((WAVSource*)clientData)->deliverFrame();
}


s
/**
 * \brief CaptureFrame:Captures the encoded frames from the camera device

 * \param[in]: None

 * \return bool - True - If CaptureFrame is successfull
 *                              - False - If CaptureFrame Fails
      <Additional Description>

 * \details

 **/
bool WAVSource::doReadFromDriver()
{
  // Read from audio device 
   AlsaDevice::getInstance()->Readbuf(s_pu8RawFrameBuff)
}
/**
 * \brief OpenSource - Opens the Audio device on Session 
Initialization/Re-Initialization

 * \param[in]: None

 * \return bool - True if Open Source is Successfull
 *                              - False - If Codec device open fails
      <Additional Description>

 * \details
 * Initializes source  member variables used for connection status and 
streaming <br>

 **/
bool WAVSource::OpenSource()
{
AlsaDevice::getInstance()->openDevice();
return true;
        /* Open the Audio device */     
}
/**
 * \brief CloseSource: Close the Audio device

 * \param[in]: None

 * \return None
      <Additional Description>

 * \details

 **/
void WAVSource::CloseSource()
{
AlsaDevice::getInstance()->closeDevice();
        /* Close the Audio device */
        
}
/**
 * \brief deliverFrame - Live555 subclass method, receives the encoded frame 
and send it to lower FramedSource method for streaming

 * \param[in]: None

 * \return None
      <Additional Description>

 * \details

 **/
void WAVSource::deliverFrame() {

        static unsigned int count = 0;
        count++;
        //printf("fDurationInMicroseconds %d\r\n", fDurationInMicroseconds);
        if (!isCurrentlyAwaitingData()) return;

        /* Check the encoded frame size, it truncates the bytes if encoded 
frame size is greater that max send frame size */
        
        fFrameSize = fPreferredFrameSize;
    printf("fPreferredFrameSize1:%d",fPreferredFrameSize);
    printf("fFrameSize0123:%d",fFrameSize);
        

    //printf("fFrameSize %d, SOI %x, %x, EOI %x, %x \r\n", fFrameSize, 
m_pu8EncFrameBuff[0], m_pu8EncFrameBuff[1], m_pu8EncFrameBuff[fFrameSize - 2], 
m_pu8EncFrameBuff[fFrameSize - 1]);

        printf("deliverFrame\r\n");
        gettimeofday(&fPresentationTime, NULL);
        unsigned bytesPerSample = (fNumChannels*fBitsPerSample)/8;
        if (bytesPerSample == 0) bytesPerSample = 1;
         fDurationInMicroseconds = 
(unsigned)((fPlayTimePerSample*fFrameSize)/bytesPerSample);


#ifdef PRESENTIME
        char uSecsStr[6+1]; // used to output the 'microseconds' part of the 
presentation time
        sprintf(uSecsStr, "%06u", (unsigned)fPresentationTime.tv_usec);
        //envir() << "count "<<s_count<<"\t"<<" sec" << 
(unsigned)fPresentationTime.tv_sec <<"\t" <<"usec " << uSecsStr<<"\n";

         static unsigned long currTime;
         currTime = fPresentationTime.tv_sec * 1000 + fPresentationTime.tv_usec 
/ 1000;
         static unsigned long PrevTime = 0;
         //printf("Time diff:%ld\r\n",currTime - PrevTime);
         PrevTime = currTime;
        // printf("\tCount\t %d \t Pres 
Time\t%ld\t%d\r\n",count,(unsigned)fPresentationTime.tv_sec, uSecsStr);
#endif

        /* Copy the encoded frames to internal send buffer */
        memmove(fTo, s_pu8RawFrameBuff, fFrameSize);
    printf("fFrameSize:%d\n",fFrameSize);
        

        FramedSource::afterGetting(this);
        printf("AFTER FRAME SENDING\n");

#ifdef DEBUG_TIMEDIFF_DF
        val = stop_and_get_timer_val();
        printf ("deliver frame %d ms\r\n",val);
#endif

}
Boolean WAVSource::setInputPort(int /*portIndex*/) {
  return True;
}

double WAVSource::getAverageLevel() const {
  return 0.0;
}
















_______________________________________________
live-devel mailing list
live-devel@lists.live555.com
http://lists.live555.com/mailman/listinfo/live-devel

Reply via email to