A question about how a streaming client assembles RTP packets into frames
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime, unsigned /*durationInMicroseconds*/)
{
    // We've just received a frame of data. (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
    if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
    envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
    if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
    char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
    sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
    envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
    //if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
    //    envir() << "!" << "\n"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
    //}
    if (fSubsession.rtpSource() != NULL) {
        fprintf(stderr, "RTP seq num: %d\n", fSubsession.rtpSource()->curPacketRTPSeqNum());
    }
#ifdef DEBUG_PRINT_NPT
    envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
#endif
    envir() << "\n";
#endif
    // At this point fReceiveBuffer holds the video data just received; append it to the frame buffer.
    if ((0 == strcmp(fSubsession.codecName(), "H264")) && (!isPause))
    {
        if (!fHaveWrittenFirstFrame) // executed only once, at the start of each playback
        {   // prepend the SPS/PPS parameter sets that the decoder needs
            unsigned numSPropRecords;
            SPropRecord* sPropRecords = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), numSPropRecords);
            // spydroid v6.8 or spydroid v9.1.
            for (unsigned i = 0; i < numSPropRecords; ++i)
            {
                memcpy(p_nalu_tail, start_code, sizeof(start_code));
                p_nalu_tail += sizeof(start_code);
                memcpy(p_nalu_tail, sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength);
                p_nalu_tail += sPropRecords[i].sPropLength;
            }
            delete[] sPropRecords; // parseSPropParameterSets() allocates this array with new[]
            fHaveWrittenFirstFrame = true; // mark the SPS/PPS as already written
            memcpy(p_nalu_tail, start_code, sizeof(start_code));
            p_nalu_tail += sizeof(start_code);
            memcpy(p_nalu_tail, fReceiveBuffer, frameSize);
            p_nalu_tail += frameSize;
        }
        else
        {
            if (presentationTime.tv_sec == pre_time_stamp.tv_sec && presentationTime.tv_usec == pre_time_stamp.tv_usec)
            {
                // Same presentation time as the previous NAL unit: keep appending to the current frame.
                memcpy(p_nalu_tail, start_code, sizeof(start_code));
                p_nalu_tail += sizeof(start_code);
                memcpy(p_nalu_tail, fReceiveBuffer, frameSize);
                p_nalu_tail += frameSize;
            }
            else
            {
                // The presentation time changed: the buffered frame is complete, so queue it for decoding.
                if (p_nalu_tail != nalu_buffer)
                {
                    AVPacket packet, *pkt = &packet;
                    av_new_packet(pkt, p_nalu_tail - nalu_buffer);
                    memcpy(pkt->data, nalu_buffer, p_nalu_tail - nalu_buffer);
                    framenumber_data++;
                    packet_queue_put(videoq, pkt);
                }
                // Start a new frame buffer with the NAL unit we just received.
                p_nalu_tail = nalu_buffer;
                memcpy(p_nalu_tail, start_code, sizeof(start_code));
                p_nalu_tail += sizeof(start_code);
                memcpy(p_nalu_tail, fReceiveBuffer, frameSize);
                p_nalu_tail += frameSize;
            }
        }
        pre_time_stamp = presentationTime;
    }
    // Optionally save the video data to a Received.264 file:
#ifdef SAVE_THE_STREAM_INTO_FILE
    if (NULL == fout)
    {
        envir() << "Failed to open the output file\n";
        return;
    }
    fwrite(nalu_buffer, p_nalu_tail - nalu_buffer, 1, fout);
#endif
    continuePlaying(); // keep going: request the next frame of data
}
The code above is the data-receiving part of a program I downloaded from the web. It uses live555 to receive H.264-payload RTP data over RTSP, decodes it with FFmpeg, and displays it with SDL; I have since adapted it to do real-time object tracking, with OpenCV handling the processing and display.
But when I went back to study the streaming protocols carefully, I found I couldn't quite follow this receiving code.
The part at the start that patches in the SPS and PPS I understand; the decoder needs them.
What I don't understand is the later part that makes decisions based on presentationTime. Could someone please explain it?
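My tentative reading, for what it's worth (please correct me if this is wrong): live555 hands afterGettingFrame() one complete NAL unit at a time, with RTP fragmentation already undone, and every NAL unit belonging to the same video frame (the same access unit) carries the same RTP timestamp, so live555 reports the same presentationTime for all of them. The code would then be appending NAL units to nalu_buffer while presentationTime stays the same, and flushing the buffer as one AVPacket the moment it changes. A minimal standalone sketch of that pattern, assuming this reading is right (the class and all names below are mine, not from the original program):

    #include <sys/time.h>
    #include <cstddef>
    #include <vector>

    // Hypothetical illustration: groups incoming NAL units into whole frames
    // by presentation time, mirroring (I think) what the snippet above does
    // with nalu_buffer / p_nalu_tail.
    class AccessUnitAssembler {
    public:
        // Call once per NAL unit delivered by afterGettingFrame().
        // onFrame() receives one complete Annex B frame when the timestamp changes.
        template <typename OnFrame>
        void addNalu(const unsigned char* nalu, size_t size,
                     const timeval& pts, OnFrame onFrame) {
            static const unsigned char startCode[4] = {0, 0, 0, 1};
            if (havePrev_ && (pts.tv_sec != prevPts_.tv_sec ||
                              pts.tv_usec != prevPts_.tv_usec)) {
                // New timestamp: the previously buffered access unit is complete.
                if (!buf_.empty()) onFrame(buf_.data(), buf_.size());
                buf_.clear();
            }
            buf_.insert(buf_.end(), startCode, startCode + sizeof startCode);
            buf_.insert(buf_.end(), nalu, nalu + size);
            prevPts_ = pts;
            havePrev_ = true;
        }
    private:
        std::vector<unsigned char> buf_;
        timeval prevPts_;
        bool havePrev_ = false;
    };

    // Inside afterGettingFrame() the call would look roughly like:
    //   assembler.addNalu(fReceiveBuffer, frameSize, presentationTime,
    //                     [](const unsigned char* frame, size_t n) {
    //                         /* wrap in an AVPacket and packet_queue_put() it */
    //                     });

One consequence of this approach would be that a frame is only emitted when the first NAL unit of the next frame arrives, so the final buffered frame sits in nalu_buffer until something else flushes it. Is that reading correct?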