Hello,

I need help.


I would like to stream frames captured from the PC screen over RTSP.


When I create SimpleRTPSink() within the createNewRTPSink() function,
doGetNextFrame() of FramedSource is called whenever camera data is received.


However, when I create H264VideoRTPSink() within the createNewRTPSink() function, doGetNextFrame() of FramedSource is not called.


I need to create H264VideoRTPSink() to encode the video in h264 and stream it.


I've been struggling with this problem for a week.

Please help.



/********************************** main ***************************************************/

main()

{

OutPacketBuffer::maxSize = 100000; 

    cv::VideoCapture cap(0);

    if (!cap.isOpened()) {

        fprintf(stderr, "Could not open camera\n");

        return -1;

    }


    // 라이브555 환경 초기화

    TaskScheduler* scheduler = BasicTaskScheduler::createNew();

    UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);


    // Create a single groupsock to stream RTP packets


    struct sockaddr_storage destinationAddress;

    destinationAddress.ss_family = AF_INET;

    ((struct sockaddr_in&)destinationAddress).sin_addr.s_addr = chooseRandomIPv4SSMAddress(*env);

    //Groupsock rtpGroupsock(*env, 30000, IP_MULTICAST_TTL);

    Groupsock rtpGroupsock(*env, destinationAddress, 18888, 255);


    // RTSP 서버 생성

    RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);

    if (rtspServer == NULL) {

        *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";

        return -1;

    }


    // 스트리밍 세션 생성

    ServerMediaSession* sms = ServerMediaSession::createNew(*env, "ggg", "Live Stream", "Live stream from webcam");

    sms->addSubsession(H264LiveServerMediaSubsession::createNew(*env, cap));

    rtspServer->addServerMediaSession(sms);


    *env << "RTSP stream ready at rtsp://localhost:8554/ggg\n";


    // 이벤트 루프

    env->taskScheduler().doEventLoop();


    return 0;

}


/********************************** H264LiveServerMediaSubsession.h ***************************************************/


#include "H264LiveSource.h"


class H264LiveServerMediaSubsession : public OnDemandServerMediaSubsession {

public:

    static H264LiveServerMediaSubsession* createNew(UsageEnvironment& env, cv::VideoCapture& cap) {

        return new H264LiveServerMediaSubsession(env, cap);

    }


protected:

    H264LiveServerMediaSubsession(UsageEnvironment& env, cv::VideoCapture& cap)

        : OnDemandServerMediaSubsession(env, true), fCap(cap), fFrameIndex(0), fLiveSource(NULL) {

        // Initialize FFmpeg

        //avcodec_register_all();

        avformat_network_init();


        codec = avcodec_find_encoder(AV_CODEC_ID_H264);

        codecCtx = avcodec_alloc_context3(codec);

        codecCtx->bit_rate = 400000;

        codecCtx->width = 640;

        codecCtx->height = 480;

        codecCtx->time_base = { 1, 25 };

        codecCtx->framerate = { 25, 1 };

        codecCtx->gop_size = 10;

        codecCtx->max_b_frames = 1;

        codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;


        if (avcodec_open2(codecCtx, codec, NULL) < 0) {

            fprintf(stderr, "Could not open codec\n");

        }


        swsCtx = sws_getContext(codecCtx->width, codecCtx->height, AV_PIX_FMT_BGR24,

            codecCtx->width, codecCtx->height, AV_PIX_FMT_YUV420P,

            SWS_BICUBIC, NULL, NULL, NULL);


        avFrame = av_frame_alloc();

        avFrame->format = codecCtx->pix_fmt;

        avFrame->width = codecCtx->width;

        avFrame->height = codecCtx->height;

        av_frame_get_buffer(avFrame, 32);

    }


    virtual ~H264LiveServerMediaSubsession() {

        av_frame_free(&avFrame);

        sws_freeContext(swsCtx);

        avcodec_free_context(&codecCtx);

    }


    virtual FramedSource* createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {

        estBitrate = 500; // kbps

        //if (fLiveSource == NULL) 

        {

            fLiveSource = H264LiveSource::createNew(envir(), fCap, codecCtx, swsCtx, avFrame, fFrameIndex);

        }

        return fLiveSource;

    }


    //virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) {

    //    return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);

    //    //return SimpleRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, 90000, "video", "H264", 1, true, false);

    //}

    virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource) override {

        return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);

        //return SimpleRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, 90000, "video", "H264", 1, true, false);

    }


private:

    cv::VideoCapture& fCap;

    const AVCodec* codec;

    AVCodecContext* codecCtx;

    SwsContext* swsCtx;

    AVFrame* avFrame;

    unsigned fFrameIndex;

    H264LiveSource* fLiveSource;

};


/********************************** H264LiveSource.h ***************************************************/



class H264LiveSource : public FramedSource {

public:

    static H264LiveSource* createNew(UsageEnvironment& env, cv::VideoCapture& cap, AVCodecContext* codecCtx, SwsContext* swsCtx, AVFrame* avFrame, unsigned& frameIndex) {

        return new H264LiveSource(env, cap, codecCtx, swsCtx, avFrame, frameIndex);

    }


protected:

    H264LiveSource(UsageEnvironment& env, cv::VideoCapture& cap, AVCodecContext* codecCtx, SwsContext* swsCtx, AVFrame* avFrame, unsigned& frameIndex)

        : FramedSource(env), fCap(cap), fCodecCtx(codecCtx), fSwsCtx(swsCtx), fAvFrame(avFrame), fFrameIndex(frameIndex) {

        

    }


    virtual ~H264LiveSource() {}


    virtual void doGetNextFrame() {


        //cv::Mat frame;

        //fCap >> fFrame; // Capture a frame from the camera


        if (!fCap.read(fFrame)) {

        //if (fFrame.empty()) {

            handleClosure(this);

            return;

        }


        const uint8_t* srcData[1] = { fFrame.data };

        int srcLinesize[1] = { static_cast<int>(fFrame.step) };

        sws_scale(fSwsCtx, srcData, srcLinesize, 0, fFrame.rows, fAvFrame->data, fAvFrame->linesize);


        fAvFrame->pts = fFrameIndex;


        AVPacket pkt;

        av_init_packet(&pkt);

        pkt.data = NULL;

        pkt.size = 0;


        int ret = avcodec_send_frame(fCodecCtx, fAvFrame);

        if (ret < 0) {

            envir() << "Error sending frame to codec context\n";

            handleClosure(this);

            return;

        }


        ret = avcodec_receive_packet(fCodecCtx, &pkt);

        if (ret == 0) {

            if (pkt.size > fMaxSize) {

                fFrameSize = fMaxSize;

                fNumTruncatedBytes = pkt.size - fMaxSize;

            }

            else {

                fFrameSize = pkt.size;

                fNumTruncatedBytes = 0;

            }

            memmove(fTo, pkt.data, fFrameSize);

            fPresentationTime.tv_sec = fFrameIndex / fCodecCtx->time_base.den;

            fPresentationTime.tv_usec = (fFrameIndex % fCodecCtx->time_base.den) * (1000000 / fCodecCtx->time_base.den);

            ++fFrameIndex;

            FramedSource::afterGetting(this);

            av_packet_unref(&pkt);

        }

        else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {

            envir() << "Error receiving packet from codec context\n";

            handleClosure(this);

            return;

        }

        else {

            envir() << "Error receiving packet from codec context\n";

            handleClosure(this);

            return;

        }

    }


private:

    cv::VideoCapture& fCap;

    cv::Mat fFrame;

    AVCodecContext* fCodecCtx;

    SwsContext* fSwsCtx;

    AVFrame* fAvFrame;

    unsigned& fFrameIndex;

};




   

강현구

책임연구원 / S/W개발팀

Hyungu Kang

 

Senior Research Engineer / S/W R&D Team

www.autol.co.kr

T 070-4228-1071 F 031-702-5314 M 010-8584-8093 E hyungu.k...@autol.co.kr

13453 경기도 성남시 수정구 금토로 80번길 11, 판교이노베이션랩 지식산업센터 305

305, 11 Geumto-ro 80beon-gil, Sujeong-gu, Seongnam-si, Gyeonggi-do, 13453, Republic of Korea

이 메시지(첨부파일 포함)는 지정된 수신인에게만 전달될 목적으로 발송되었으며, 부정경쟁방지 및 영업비밀의 보호에 관한 법률 등 관계법령에 따라 법으로 보호되는 중요한 정보를 담고 있을 수 있습니다. 이 메시지와 첨부파일 등에 있어서, 공개, 복사, 배포 또는 기재된 내용을 근거로 한 일체의 행동 등의 2차 활용에 대해서는 메일 발신자에게 확인을 받아야 하며, 메일 발신자의 확인이 없는 2차 활용은 엄격히 금지되어 있음을 주지하시기 바랍니다. 만약 이 메시지가 잘못 전송되었을 경우, 발신인 또는 당사에 알려주시고, 본 메시지를 즉시 삭제하여 주시기 바랍니다.

This message (including attachments) contains confidential information intended for a specific individual and purpose and is protected by law. If you are not the intended recipient, you should delete this message and are hereby notified that any disclosure, copying, or distribution of this message, or the taking of any action based on it, is strictly prohibited.




_______________________________________________
live-devel mailing list
live-devel@lists.live555.com
http://lists.live555.com/mailman/listinfo/live-devel

Reply via email to