SRS: SrsHls::on_video in Detail

1. SrsHls::on_video

/*
 * mux the video packets to ts.
 * @param shared_video, directly ptr, copy it if need to save it.
 * @param is_sps_pps, whether the video is h.264 sps/pps.
 */
int SrsHls::on_video(SrsSharedPtrMessage* shared_video, bool is_sps_pps)
{
    int ret = ERROR_SUCCESS;

    if (!hls_enabled) {
        return ret;
    }

    /* update the hls time, for hls_dispose. */
    last_update_time = srs_get_system_time_ms();

    SrsSharedPtrMessage* video = shared_video->copy();
    SrsAutoFree(SrsSharedPtrMessage, video);

    /* user can disable the sps parse to workaround when parse sps failed. */
    if (is_sps_pps) {
        /* whether sps parsing is enabled; enabled by default */
        codec->avc_parse_sps = _srs_config->get_parse_sps(_req->vhost);
    }

    sample->clear();
    /* demux the video packet in h.264 codec.
     * the packet mux in FLV/RTMP format defined in flv specification.
     * demux the video specified data(frame_type, codec_id, ...) to sample.
     * demux the h.264 specified data(avc_profile, ...) to codec from sequence header.
     * demux the h.264 NALUs to sample units. */
    if ((ret = codec->video_avc_demux(video->payload, video->size, sample))
        != ERROR_SUCCESS) {
        srs_error("hls codec demux video failed. ret=%d", ret);
        return ret;
    }

    /* ignore info frame */
    if (sample->frame_type == SrsCodecVideoAVCFrameVideoInfoFrame) {
        return ret;
    }

    if (codec->video_codec_id != SrsCodecVideoAVC) {
        return ret;
    }

    /* ignore sequence header */
    if (sample->frame_type == SrsCodecVideoAVCFrameKeyFrame
         && sample->avc_packet_type == SrsCodecVideoAVCTypeSequenceHeader) {
        return hls_cache->on_sequence_header(muxer);
    }

    /* TODO: FIXME: config the jitter of HLS. */
    if ((ret = jitter->correct(video, SrsRtmpJitterAlgorithmOFF)) != ERROR_SUCCESS) {
        srs_error("rtmp jitter correct video failed. ret=%d", ret);
        return ret;
    }

    /* the flv timestamp is in ms; the ts timebase is 90kHz (90000 ticks per second,
     * i.e. 90 ticks per ms), so convert to the 90kHz timebase by multiplying by 90 */
    int64_t dts = video->timestamp * 90;
    stream_dts = dts;
    /* write video to muxer */
    if ((ret = hls_cache->write_video(codec, muxer, dts, sample)) != ERROR_SUCCESS) {
        srs_error("hls cache write video failed. ret=%d", ret);
        return ret;
    }

    /* pithy print message. */
    hls_show_mux_log();

    return ret;
}

2. SrsAvcAacCodec::video_avc_demux

When a video message is received, this function is called first to demux the video data.

int SrsAvcAacCodec::video_avc_demux(char* data, int size, SrsCodecSample* sample)
{
    int ret = ERROR_SUCCESS;

    sample->is_video = true;

    if (!data || size <= 0) {
        srs_trace("no video present, ignore it.");
        return ret;
    }

    if ((ret = stream->initialize(data, size)) != ERROR_SUCCESS) {
        return ret;
    }

    /* video decode */
    if (!stream->require(1)) {
        ret = ERROR_HLS_DECODE_ERROR;
        srs_error("avc decode frame_type failed. ret=%d", ret);
        return ret;
    }

    /* the first byte of the Video Tag data area carries the video info */
    /* E.4.3 Video Tags, video_file_format_spec_v10_1.pdf, page 78 */
    int8_t frame_type = stream->read_1bytes();
    /* codec id: low 4 bits */
    int8_t codec_id = frame_type & 0x0f;
    /* frame type: high 4 bits */
    frame_type = (frame_type >> 4) & 0x0f;

    sample->frame_type = (SrsCodecVideoAVCFrame)frame_type;

    /* ignore info frame without error
     * @see https://github.com/ossrs/srs/issues/288#issuecomment-69863909 */
    if (sample->frame_type == SrsCodecVideoAVCFrameVideoInfoFrame) {
        srs_warn("avc igone the info frame, ret=%d", ret);
        return ret;
    }

    /* only support h.264/avc */
    if (codec_id != SrsCodecVideoAVC) {
        ret = ERROR_HLS_DECODE_ERROR;
        srs_error("avc only support video h.264/avc codec. actual=%d, ret=%d",
                  codec_id, ret);
        return ret;
    }
    video_codec_id = codec_id;

    if (!stream->require(4)) {
        ret = ERROR_HLS_DECODE_ERROR;
        srs_error("avc decode avc_packet_type failed. ret=%d", ret);
        return ret;
    }
    /* AVC packet type: 1 byte. 0: AVC sequence header, 1: AVC NALU */
    int8_t avc_packet_type = stream->read_1bytes();
    /* CTS: 3 bytes. If the AVC packet type is 1 this is the cts offset, otherwise 0.
     * cts = (pts - dts) / 90, in ms */
    int32_t composition_time = stream->read_3bytes();

    /* pts = dts + cts. */
    sample->cts = composition_time;
    sample->avc_packet_type = (SrsCodecVideoAVCType)avc_packet_type;

    if (avc_packet_type == SrsCodecVideoAVCTypeSequenceHeader) {
        /* parse the sps/pps data */
        if ((ret = avc_demux_sps_pps(stream)) != ERROR_SUCCESS) {
            return ret;
        }
    } else if (avc_packet_type == SrsCodecVideoAVCTypeNALU){
        /* detect whether the H.264 payload is packaged as AnnexB or as
         * "ISO Base Media File Format", extract the NALUs according to that
         * format, and store them in the sample_units array of sample */
        if ((ret = video_nalu_demux(stream, sample)) != ERROR_SUCCESS) {
            return ret;
        }
    } else {
        /* ignored. */
    }

    return ret;
}
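
To make the byte layout that video_avc_demux reads more concrete, here is a minimal standalone sketch (the five bytes below are hypothetical, not taken from a real stream) that decodes the frame type, codec id, AVC packet type and cts the same way the code above does:

#include <cstdint>
#include <cstdio>

int main()
{
    /* hypothetical first 5 bytes of an FLV video tag body:
     * 0x17           -> frame_type = 1 (keyframe), codec_id = 7 (AVC)
     * 0x01           -> avc_packet_type = 1 (AVC NALU)
     * 0x00 0x00 0x2A -> composition_time (cts) = 42 ms */
    uint8_t data[] = { 0x17, 0x01, 0x00, 0x00, 0x2A };

    int frame_type      = (data[0] >> 4) & 0x0f;
    int codec_id        = data[0] & 0x0f;
    int avc_packet_type = data[1];
    int cts             = (data[2] << 16) | (data[3] << 8) | data[4];

    /* prints: frame_type=1 codec_id=7 avc_packet_type=1 cts=42 */
    printf("frame_type=%d codec_id=%d avc_packet_type=%d cts=%d\n",
           frame_type, codec_id, avc_packet_type, cts);
    return 0;
}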

Next, if the received AVC packet type is 1, i.e. the payload consists of NAL units, video_nalu_demux is called to demux them.

2.1 SrsAvcAacCodec::video_nalu_demux

int SrsAvcAacCodec::video_nalu_demux(SrsStream* stream, SrsCodecSample* sample)
{
    int ret = ERROR_SUCCESS;

    /* ensure the sequence header demuxed */
    if (!is_avc_codec_ok()) {
        srs_warn("avc ignore type=%d for no sequence header. ret=%d",
                 SrsCodecVideoAVCTypeNALU, ret);
        return ret;
    }

    /* this variable is initialized to SrsAvcPayloadFormatGuess in the SrsAvcAacCodec constructor */
    /* guess for the first time. */
    if (payload_format == SrsAvcPayloadFormatGuess) {
        /* One or more NALUs (Full frames are required)
         * try "AnnexB" from H.264-AVC-ISO_IEC_14496-10.pdf, page 211. */
        if ((ret = avc_demux_annexb_format(stream, sample)) != ERROR_SUCCESS) {
            /* stop try when system error. */
            if (ret != ERROR_HLS_AVC_TRY_OTHERS) {
                srs_error("avc demux for annexb failed. ret=%d", ret);
                return ret;
            }

            /* try "ISO Base Media File Format" from
             * H.264-AVC-ISO_IEC_14496-15.pdf, page 20" */
            if ((ret = avc_demux_ibmf_format(stream, sample)) != ERROR_SUCCESS) {
                return ret;
            } else {
                payload_format = SrsAvcPayloadFormatIbmf;
                srs_info("hls guess avc payload is ibmf format.");
            }
        } else {
            payload_format = SrsAvcPayloadFormatAnnexb;
            srs_info("hls guess avc payload is annexb format.");
        }
    } else if (payload_format == SrsAvcPayloadFormatIbmf) {
        /* try "ISO Base Media File Format" from H.264-AVC-ISO_IEC_14496-15.pdf, page 20 */
        if ((ret = avc_demux_ibmf_format(stream, sample)) != ERROR_SUCCESS) {
            return ret;
        }
        srs_info("hls decode avc payload in ibmf format.");
    } else {
        /* One or more NALUs (Full frames are required)
         * try  "AnnexB" from H.264-AVC-ISO_IEC_14496-10.pdf, page 211. */
        if ((ret = avc_demux_annexb_format(stream, sample)) != ERROR_SUCCESS) {
            /* ok, we guess out the payload is annexb, but maybe changed to ibmf. */
            if (ret != ERROR_HLS_AVC_TRY_OTHERS) {
                srs_error("avc demux for annexb failed. ret=%d", ret);
                return ret;
            }

            /* try "ISO Base Media File Format" from
             * H.264-AVC-ISO_IEC_14496-15.pdf, page 20 */
            if ((ret = avc_demux_ibmf_format(stream, sample)) != ERROR_SUCCESS) {
                return ret;
            } else {
                payload_format = SrsAvcPayloadFormatIbmf;
                srs_warn("hls avc payload change from annexb to ibmf format.");
            }
        }
        srs_info("hls decode avc payload in annexb format.");
    }

    return ret;
}

SRS first assumes the current H.264 payload is in AnnexB format, and calls SrsAvcAacCodec::avc_demux_annexb_format to parse it.

2.2 SrsAvcAacCodec::avc_demux_annexb_format

int SrsAvcAacCodec::avc_demux_annexb_format(SrsStream* stream, SrsCodecSample* sample)
{
    int ret = ERROR_SUCCESS;

    /* not annexb, try others */
    if (!srs_avc_startswith_annexb(stream, NULL)) {
        return ERROR_HLS_AVC_TRY_OTHERS;
    }

    /* AnnexB
     * B.1.1 Byte stream NAL unit syntax,
     * H.264-AVC-ISO_IEC_14496-10.pdf, page 211.
     */
    while (!stream->empty()) {
        /* find start code */
        int nb_start_code = 0;
        if (!srs_avc_startswith_annexb(stream, &nb_start_code)) {
            return ret;
        }

        /* skip the start code. */
        if (nb_start_code > 0) {
            stream->skip(nb_start_code);
        }

        /* the NALU start bytes. */
        char* p = stream->data() + stream->pos();

        /* get the last matched NALU */
        while (!stream->empty()) {
            if (srs_avc_startswith_annexb(stream, NULL)) {
                break;
            }

            stream->skip(1);
        }

        /* pp now points to the start bytes of the next NALU */
        char* pp = stream->data() + stream->pos();

        /* skip the empty. */
        if (pp - p <= 0) {
            continue;
        }

        /* a NALU has been extracted; add it to the sample_units array of sample */
        /* got the NALU. */
        if ((ret = sample->add_sample_unit(p, pp - p)) != ERROR_SUCCESS) {
            srs_error("annexb add video sample failed. ret=%d", ret);
            return ret;
        }
    }

    return ret;
}

2.2.1 srs_avc_startswith_annexb

/*
 * whether stream starts with the avc NALU in "AnnexB"
 * from H.264-AVC-ISO_IEC_14496-10.pdf, page 211.
 * start code must be "N[00] 00 00 01" where N>=0
 * @param pnb_start_code, output the size of start code, must >=3.
 *       NULL to ignore.
 */
bool srs_avc_startswith_annexb(SrsStream* stream, int* pnb_start_code)
{
    char* bytes = stream->data() + stream->pos();
    char* p = bytes;

    for ( ;; ) {
        if (!stream->require(p - bytes + 3)) {
            return false;
        }

        /* not match */
        if (p[0] != (char)0x00 || p[1] != (char)0x00) {
            return false;
        }

        /* match N[00] 00 00 01, where N>=0 */
        if (p[2] == (char)0x01) {
            if (pnb_start_code) {
                *pnb_start_code = (int)(p - bytes) + 3;
            }
            return true;
        }

        p++;
    }

    return false;
}

As the code shows, when the H.264 payload is in AnnexB format, the NALUs are separated by the start codes 0x000001 (3 bytes) or 0x00000001 (4 bytes).
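
As an illustration of that splitting rule, the following standalone sketch (illustrative only, not the SRS implementation; the buffer contents are made up) walks a small AnnexB buffer and prints each NALU it finds:

#include <cstddef>
#include <cstdio>

/* return the start-code length (3 or 4) at p, or 0 if p is not a start code */
static int start_code_at(const unsigned char* p, size_t left)
{
    if (left >= 4 && p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x00 && p[3] == 0x01) return 4;
    if (left >= 3 && p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x01) return 3;
    return 0;
}

int main()
{
    /* hypothetical AnnexB payload: a 4-byte start code followed by a 2-byte NALU,
     * then a 3-byte start code followed by a 3-byte NALU */
    unsigned char buf[] = { 0x00, 0x00, 0x00, 0x01, 0x09, 0xf0,
                            0x00, 0x00, 0x01, 0x65, 0x88, 0x84 };
    size_t size = sizeof(buf);

    size_t i = 0;
    while (i < size) {
        int sc = start_code_at(buf + i, size - i);
        if (!sc) { i++; continue; }
        i += sc; /* skip the start code */

        size_t start = i;
        /* advance until the next start code or the end of the buffer */
        while (i < size && !start_code_at(buf + i, size - i)) {
            i++;
        }
        printf("NALU type=%d size=%zu\n", buf[start] & 0x1f, i - start);
    }
    return 0;
}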

2.2.2 SrsCodecSample::add_sample_unit

/*
 * add a sample unit; it's an h.264 NALU or aac raw data.
 * the sample unit directly use the ptr of packet bytes,
 * so user must never use sample unit when packet is destroyed.
 * in a word, user must clear sample before demux it.
 */
int SrsCodecSample::add_sample_unit(char* bytes, int size)
{
    int ret = ERROR_SUCCESS;

    /* the sample_units array holds at most SRS_SRS_MAX_CODEC_SAMPLE (128) entries */
    if (nb_sample_units >= SRS_SRS_MAX_CODEC_SAMPLE) {
        ret = ERROR_HLS_DECODE_ERROR;
        srs_error("hls decode samples error, "
            "exceed the max count: %d, ret=%d", SRS_SRS_MAX_CODEC_SAMPLE, ret);
        return ret;
    }

    /* take the next slot in the sample_units array to hold the extracted NALU or aac raw data */
    SrsCodecSampleUnit* sample_unit = &sample_units[nb_sample_units++];
    sample_unit->bytes = bytes;
    /* the size of this NALU or of the aac raw data */
    sample_unit->size = size;

    // for video, parse the nalu type, set the IDR flag.
    if (is_video) {
        SrsAvcNaluType nal_unit_type = (SrsAvcNaluType)(bytes[0] & 0x1f);

        if (nal_unit_type == SrsAvcNaluTypeIDR) {
            /* the current NALU is an IDR frame, so set this flag to indicate
             * that the sample_units array contains an IDR frame */
            has_idr = true;
        } else if (nal_unit_type == SrsAvcNaluTypeSPS ||
                   nal_unit_type == SrsAvcNaluTypePPS) {
            /* Whether exists SPS/PPS NALU. */
            has_sps_pps = true;
        } else if (nal_unit_type == SrsAvcNaluTypeAccessUnitDelimiter) {
            /* Whether exists AUD NALU. */
            has_aud = true;
        }

        if (first_nalu_type == SrsAvcNaluTypeReserved) {
            /* record the type of the first NALU in the sample_units array */
            first_nalu_type = nal_unit_type;
        }
    }

    return ret;
}
  • If the call to avc_demux_annexb_format above fails, i.e. the current H.264 payload is not packaged in AnnexB format (the NALUs are not separated by 0x000001 or 0x00000001), then avc_demux_ibmf_format is tried next to see whether the payload is in ISO Base Media File Format.

2.3 SrsAvcAacCodec::avc_demux_ibmf_format

/*
 * demux the avc NALU in "ISO Base Media File Format"
 * from H.264-AVC-ISO_IEC_14496-15.pdf, page 20
 */
int SrsAvcAacCodec::avc_demux_ibmf_format(SrsStream* stream, SrsCodecSample* sample)
{
    int ret = ERROR_SUCCESS;

    int PictureLength = stream->size() - stream->pos();

    /*
     * 5.3.4.2.1 Syntax, H.264-AVC-ISO_IEC_14496-15.pdf, page 16
     * 5.2.4.1 AVC decoder configuration record
     * 5.2.4.1.2 Semantics
     * The value of this field shall be one of 0, 1, or 3 corresponding to a
     * length encoded with 1, 2, or 4 bytes, respectively.
     */
    srs_assert(NAL_unit_length != 2);

    /*
     * NAL_unit_length here is the lengthSizeMinusOne field obtained from the
     * AVC decoder configuration record parsed out of the sequence header
     */

    /* 5.3.4.2.1 Syntax, H.264-AVC-ISO_IEC_14496-15.pdf, page 20 */
    for (int i = 0; i < PictureLength; ) {
        /* unsigned int((NAL_unit_length+1)*8) NALUnitLength; */
        if (!stream->require(NAL_unit_length + 1)) {
            ret = ERROR_HLS_DECODE_ERROR;
            srs_error("avc decode NALU size failed. ret=%d", ret);
            return ret;
        }
        int32_t NALUnitLength = 0;
        if (NAL_unit_length == 3) {
            NALUnitLength = stream->read_4bytes();
        } else if (NAL_unit_length == 1) {
            NALUnitLength = stream->read_2bytes();
        } else {
            NALUnitLength = stream->read_1bytes();
        }

        /* maybe stream is invalid format.
         * see: https://github.com/ossrs/srs/issues/183 */
        if (NALUnitLength < 0) {
            ret = ERROR_HLS_DECODE_ERROR;
            srs_error("maybe stream is AnnexB format. ret=%d", ret);
            return ret;
        }

        /* NALUnit */
        if (!stream->require(NALUnitLength)) {
            ret = ERROR_HLS_DECODE_ERROR;
            srs_error("avc decode NALU data failed. ret=%d", ret);
            return ret;
        }
        /* 7.3.1 NAL unit syntax, H.264-AVC-ISO_IEC_14496-10.pdf, page 44. */
        if ((ret = sample->add_sample_unit(stream->data() + stream->pos(), NALUnitLength))
            != ERROR_SUCCESS) {
            srs_error("avc add video sample failed. ret=%d", ret);
            return ret;
        }
        stream->skip(NALUnitLength);

        i += NAL_unit_length + 1 + NALUnitLength;
    }

    return ret;
}
  • As this function shows, when the H.264 payload is in ISO Base Media File Format, each NALU is prefixed by a NALUnitLength field of 1, 2 or 4 bytes instead of being separated by start codes. The prefix size is determined by the lengthSizeMinusOne value from the AVC decoder configuration record in the sequence header: 3 means NALUnitLength occupies 4 bytes, 1 means 2 bytes, and 0 means 1 byte.
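
A minimal standalone sketch of the same idea (illustrative only; it assumes a 4-byte length prefix, i.e. lengthSizeMinusOne == 3, and made-up buffer contents) shows how the length-prefixed layout is walked:

#include <cstdio>

int main()
{
    /* hypothetical IBMF payload with 4-byte NALUnitLength prefixes:
     * length=2 then NALU {0x09,0xf0}, length=3 then NALU {0x65,0x88,0x84} */
    unsigned char buf[] = { 0x00, 0x00, 0x00, 0x02, 0x09, 0xf0,
                            0x00, 0x00, 0x00, 0x03, 0x65, 0x88, 0x84 };
    int size = sizeof(buf);
    const int nalu_length_size = 4; /* = lengthSizeMinusOne + 1 */

    int i = 0;
    while (i + nalu_length_size <= size) {
        /* read the big-endian NALUnitLength */
        int len = (buf[i] << 24) | (buf[i + 1] << 16) | (buf[i + 2] << 8) | buf[i + 3];
        i += nalu_length_size;

        if (len <= 0 || i + len > size) {
            break; /* invalid or truncated NALU */
        }
        printf("NALU type=%d size=%d\n", buf[i] & 0x1f, len);
        i += len;
    }
    return 0;
}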

3. SrsHlsCache::write_video

/*
 * write video to muxer.
 */
int SrsHlsCache::write_video(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer,
    int64_t dts, SrsCodecSample* sample)
{
    int ret = ERROR_SUCCESS;

    /* write video to cache. */
    if ((ret = cache->cache_video(codec, dts, sample)) != ERROR_SUCCESS) {
        return ret;
    }

    /* when segment overflow, reap if possible */
    if (muxer->is_segment_overflow()) {
        /* do reap ts if any of:
         *     a. wait keyframe and got keyframe.
         *     b. always reap when not wait keyframe */
        if (!muxer->wait_keyframe() ||
            sample->frame_type == SrsCodecVideoAVCFrameKeyFrame) {
            /* reap the segment, which will also flush the video. */
            if ((ret = reap->segment("video", muxer, cache->video->dts))
                != ERROR_SUCCESS) {
                return ret;
            }
        }
    }

    /* flush video when got one */
    if ((ret = muxer->flush_video(cache)) != ERROR_SUCCESS) {
        srs_error("m3u8 muxer flush video failed. ret=%d", ret);
        return ret;
    }

    return ret;
}
  • In SrsHlsCache::write_video, SrsTsCache::cache_video is called first to save the NALUs already stored in sample's sample_units array into the cache.

3.1 SrsTsCache::cache_video

int SrsTsCache::cache_video(SrsAvcAacCodec* codec, int64_t dts,
    SrsCodecSample* sample)
{
    int ret = ERROR_SUCCESS;

    /* create the ts video message. */
    if (!video) {
        video = new SrsTsMessage();
        /*
         * write_pcr:
         * whether this message carries pcr info;
         * generally, the video IDR (I frame, the keyframe of h.264) carries the pcr info.
         */
        /* a keyframe indicates this message carries the pcr info */
        video->write_pcr = sample->frame_type == SrsCodecVideoAVCFrameKeyFrame;
        /*
         * start_pts:
         * the audio cache buffer start pts, to flush audio if full.
         * @remark, the pts is not the adjusted one, it's the original pts.
         */
        video->start_pts = dts;
    }

    /* for video, the flv/rtmp timestamp is the dts */
    video->dts = dts;
    /* pts = dts + cts (cts is in ms, converted to the 90kHz timebase) */
    video->pts = video->dts + sample->cts * 90;
    /* stream id: video uses 0xe0~0xef, usually 0xe0, here SrsTsPESStreamIdVideoCommon */
    video->sid = SrsTsPESStreamIdVideoCommon;

    /* write video to cache. */
    if ((ret = do_cache_avc(codec, sample)) != ERROR_SUCCESS) {
        return ret;
    }

    return ret;
}
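
A small worked example of the timebase handling above (the numbers are hypothetical): FLV/RTMP timestamps are in milliseconds and the TS timebase is 90 kHz, so SRS multiplies the timestamp by 90 for dts and adds cts * 90 for pts:

#include <cstdint>
#include <cstdio>

int main()
{
    int64_t flv_timestamp_ms = 1000; /* hypothetical RTMP timestamp: 1 second */
    int32_t cts_ms = 40;             /* hypothetical composition time offset */

    int64_t dts = flv_timestamp_ms * 90; /* 90000 ticks in the 90kHz timebase */
    int64_t pts = dts + cts_ms * 90;     /* 93600 ticks */

    printf("dts=%lld pts=%lld\n", (long long)dts, (long long)pts);
    return 0;
}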

If this SrsTsCache has not cached any video data before, i.e. video is NULL, a new SrsTsMessage object is constructed and video points to it.

3.1.1 SrsTsMessage 构造

/*
 * the media audio/video message parsed from PES packet.
 */
SrsTsMessage::SrsTsMessage(SrsTsChannel* c, SrsTsPacket* p)
{
    /*
     * channel and packet:
     * decoder only,
     * the ts message does not use them,
     * for user to get the channel and packet.
     */
    channel = c;
    packet = p;

    /*
     * dts and pts:
     * the timestamp in 90khz
     */
    dts = pts = 0;
    /*
     * sid:
     * the id of pes stream to indicates the payload codec.
     * @remark use is_audio() and is_video() to check it,
     *     and stream_number() to finger it out.
     */
    sid = (SrsTsPESStreamId)0x00;
    /*
     * continuity_counter: the chunk id.
     */
    continuity_counter = 0;
    /*
     * PES_packet_length: the size of payload, 0 indicates the length() of payload.
     */
    PES_packet_length = 0;
    /*
     * payload: the payload bytes.
     */
    payload = new SrsSimpleBuffer();
    /*
     * is_discontinuity: whether got discontinuity ts, for example,
     * sequence header changed.
     */
    is_discontinuity = false;

    /*
     * start_pts: the audio cache buffer start pts, to flush audio if full.
     * @remark the pts is not the adjusted one, it's the original pts.
     */
    start_pts = 0;
    /*
     * write_pcr:
     * whether this message with pcr info,
     * generally, the video IDR(I frame, the keyframe of h.264) carray the pcr info.
     */
    write_pcr = false;
}
  • In SrsTsCache::cache_video, after the SrsTsMessage has been constructed and its timestamps initialized, SrsTsCache::do_cache_avc is called to write the video data into SrsTsCache's video->payload.

3.1.2 SrsTsCache::do_cache_avc

int SrsTsCache::do_cache_avc(SrsAvcAacCodec* codec, SrsCodecSample* sample)
{
    int ret = ERROR_SUCCESS;

    /* whether aud inserted. */
    bool aud_inserted = false;

    /* Insert a default AUD NALU when no AUD in samples. */
    if (!sample->has_aud) {
        /*
         * the aud(access unit delimiter) before each frame.
         * 7.3.2.4 Access unit delimiter RBSP syntax
         * H.264-AVC-ISO_IEC_14496-10-2012.pdf, page 66.
         *
         * primary_pic_type u(3), the first 3 bits; primary_pic_type indicates
         *     that the slice_type values for all slices of the primary coded
         *     picture are numbers of the set listed in Table 7-5 for the given
         *     value of primary_pic_type.
         *     0, slice_type 2, 7
         *     1, slice_type 0, 2, 5, 7
         *     2, slice_type 0, 1, 2, 5, 6, 7
         *     3, slice_type 4, 9
         *     4, slice_type 3, 4, 8, 9
         *     5, slice_type 2, 4, 7, 9
         *     6, slice_type 0, 2, 3, 4, 5, 7, 8, 9
         *     7, slice_type 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
         * 7.4.2.4 Access unit delimiter RBSP semantics
         * H.264-AVC-ISO_IEC_14496-10-2012.pdf, page 102.
         *
         * slice_type specifies the coding type of the slice according to Table 7-6.
         *     0, P (P slice)
         *     1, B (B slice)
         *     2, I (I slice)
         *     3, SP (SP slice)
         *     4, SI (SI slice)
         *     5, P (P slice)
         *     6, B (B slice)
         *     7, I (I slice)
         *     8, SP (SP slice)
         *     9, SI (SI slice)
         * H.264-AVC-ISO_IEC_14496-10-2012.pdf, page 105.
         */
        static u_int8_t default_aud_nalu[] = { 0x09, 0xf0 };
        /* the H.264 payload is muxed in AnnexB format here; a 4-byte start
         * code 0x00000001 is inserted before the aud */
        srs_avc_insert_aud(video->payload, aud_inserted);
        video->payload->append((const char*)default_aud_nalu, 2);
    }

    /* apart from the AUD, which is preceded by the 4-byte start code 0x00000001,
     * every other NALU is preceded by the 3-byte start code 0x000001 */

    bool is_sps_pps_appended = false;
    /* all sample use cont nalu header, except the sps-pps before IDR frame. */
    for (int i = 0; i < sample->nb_sample_units; i++) {
        SrsCodecSampleUnit* sample_unit = &sample->sample_units[i];
        int32_t size = sample_unit->size;

        if (!sample_unit->bytes || size <= 0) {
            ret = ERROR_HLS_AVC_SAMPLE_SIZE;
            srs_error("invalid avc sample length=%d, ret=%d", size, ret);
            return ret;
        }

        /*
         * 5bits, 7.3.1 NAL unit syntax,
         * H.264-AVC-ISO_IEC_14496-10-2012.pdf, page 83.
         */
        SrsAvcNaluType nal_unit_type = (SrsAvcNaluType)(sample_unit->bytes[0] & 0x1f);

        /* insert the sps and pps before the IDR frame */
        /*
         * Insert sps/pps before IDR when there is no sps/pps in samples.
         * The sps/pps is parsed from sequence header(generally the first flv packet).
         */
        if (nal_unit_type == SrsAvcNaluTypeIDR &&
            !sample->has_sps_pps && !is_sps_pps_appended) {
            if (codec->sequenceParameterSetLength > 0) {
                /* insert the 3-byte start code 0x000001 */
                srs_avc_insert_aud(video->payload, aud_inserted);
                /* then append the sps */
                video->payload->append(codec->sequenceParameterSetNALUnit,
                                       codec->sequenceParameterSetLength);
            }
            if (codec->pictureParameterSetLength > 0) {
                srs_avc_insert_aud(video->payload, aud_inserted);
                video->payload->append(codec->pictureParameterSetNALUnit,
                                       codec->pictureParameterSetLength);
            }
            is_sps_pps_appended = true;
        }

        /* Insert the NALU to video in annexb. */
        srs_avc_insert_aud(video->payload, aud_inserted);
        video->payload->append(sample_unit->bytes, sample_unit->size);
    }

    return ret;
}

Assuming the current message is the second video message after the sps and pps were received, the code above produces the following NALU layout in video->payload:

annexb 4B header, 2B aud(nal_unit_type:9)(0x09 0xf0)(AUD)
annexb 3B header, 19B sps(nal_unit_type:7)(SPS)
annexb 3B header, 4B pps(nal_unit_type:8)(PPS)
annexb 3B header, 12B nalu(nal_unit_type:6)(SEI)
annexb 3B header, 2762B nalu(nal_unit_type:5)(IDR)
  • In SrsTsCache::do_cache_avc, the function first checks whether sample already contains an aud (access unit delimiter); if not, a default aud is inserted into video->payload first.

3.1.3 srs_avc_insert_aud

void srs_avc_insert_aud(SrsSimpleBuffer* payload, bool& aud_inserted)
{
    /*
     * mux the samples in annexb format,
     * H.264-AVC-ISO_IEC_14496-10-2012.pdf, page 324. */
    /**
     * 00 00 00 01 // header
     *     xxxxxxx // data bytes
     * 00 00 01 // continue header
     *     xxxxxx // data bytes
     *
     * nal_unit_type specifies the type of RBSP data structure contained in the NAL
     * unit as specified in Table 7-1.
     * Table 7-1 - NAL unit type codec, syntax element categories, and NAL unit
     * type classes H.264-AVC-ISO_IEC_14496-10-2012.pdf, page 83.
     *     1, Coded slice of a non-IDR picture slice_layer_without_partitioning_rbsp( )
     *     2, Coded slice data partition A slice_data_partition_a_layer_rbsp( )
     *     3, Coded slice data partition B slice_data_partition_b_layer_rbsp( )
     *     4, Coded slice data partition C slice_data_partition_c_layer_rbsp( )
     *     5, Coded slice of an IDR picture slice_layer_without_partitioning_rbsp( )
     *     6, Supplemental enhancement information (SEI) sei_rbsp( )
     *     7, Sequence parameter set seq_parameter_set_rbsp( )
     *     8, Picture parameter set pic_parameter_set_rbsp( )
     *     9, Access unit delimiter access_unit_delimiter_rbsp( )
     *     10, End of sequence end_of_seq_rbsp( )
     *     11, End of stream end_of_stream_rbsp( )
     *     12, Filler data filler_data_rbsp( )
     *     13, Sequence parameter set extension seq_parameter_set_extension_rbsp( )
     *     14, Prefix NAL unit prefix_nal_unit_rbsp( )
     *     15, Subset sequence parameter set subset_seq_parameter_set_rbsp( )
     *     19, Coded slice of an auxiliary coded picture without partitioning
     *         slice_layer_without_partitioning_rbsp( )
     *     20, Coded slice extension slice_layer_extension_rbsp( )
     * the first ts message of apple sample:
     *     annexb 4B header, 2B aud(nal_unit_type:9)(0x09 0xf0)(AUD)
     *     annexb 3B header, 19B sps(nal_unit_type:7)(SPS)
     *     annexb 3B header, 4B pps(nal_unit_type:8)(PPS)
     *     annexb 3B header, 12B nalu(nal_unit_type:6)(SEI)
     *     annexb 3B header, 21B nalu(nal_unit_type:6)(SEI)
     *     annexb 3B header, 2762B nalu(nal_unit_type:5)(IDR)
     *     annexb 3B header, 3535B nalu(nal_unit_type:5)(IDR)
     * the second ts message of apple ts sample:
     *     annexb 4B header, 2B aud(nal_unit_type:9)(0x09 0xf0)(AUD)
     *     annexb 3B header, 21B nalu(nal_unit_type:6)(SEI)
     *     annexb 3B header, 379B nalu(nal_unit_type:1)(non-IDR,P/B)
     *     annexb 3B header, 406B nalu(nal_unit_type:1)(non-IDR,P/B)
     * @remark we use the sequence of apple
     *         samples http://ossrs.net/apple-sample/bipbopall.m3u8
     */
    static u_int8_t fresh_nalu_header[] = { 0x00, 0x00, 0x00, 0x01 };
    static u_int8_t cont_nalu_header[] = { 0x00, 0x00, 0x01 };

    if (!aud_inserted) {
        aud_inserted = true;
        payload->append((const char*)fresh_nalu_header, 4);
    } else {
        payload->append((const char*)cont_nalu_header, 3);
    }
}
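
The effect of srs_avc_insert_aud can be illustrated with a minimal sketch (using std::string instead of SrsSimpleBuffer, so it is only an approximation of the real code): the first NALU appended to the payload gets the 4-byte start code, and every following NALU gets the 3-byte continuation start code:

#include <cstdio>
#include <string>

/* simplified stand-in for srs_avc_insert_aud, operating on std::string */
static void insert_header(std::string& payload, bool& aud_inserted)
{
    static const char fresh_nalu_header[] = { 0x00, 0x00, 0x00, 0x01 };
    static const char cont_nalu_header[]  = { 0x00, 0x00, 0x01 };

    if (!aud_inserted) {
        aud_inserted = true;
        payload.append(fresh_nalu_header, 4);
    } else {
        payload.append(cont_nalu_header, 3);
    }
}

int main()
{
    std::string payload;
    bool aud_inserted = false;

    insert_header(payload, aud_inserted); /* 4-byte header before the first NALU */
    payload.append("\x09\xf0", 2);        /* default AUD NALU */

    insert_header(payload, aud_inserted); /* 3-byte header before the next NALU */
    payload.append("\x65", 1);            /* first byte of a hypothetical IDR NALU */

    printf("payload size=%zu\n", payload.size()); /* 4 + 2 + 3 + 1 = 10 */
    return 0;
}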
  • Back in SrsHlsCache::write_video, after SrsTsCache::cache_video has appended all the NALUs cached in sample to cache->video->payload, SrsHlsMuxer::is_segment_overflow is called to check whether the current segment's duration has reached the duration specified by hls_fragment (10 s in this configuration). If it has (and the keyframe condition is satisfied), the segment is reaped; SrsHlsMuxer::flush_video is then called in either case.

3.2 SrsHlsMuxer::is_segment_overflow

/*
 * whether segment overflow.
 * that is whether the current segment duration>=(the segment in config)
 */
bool SrsHlsMuxer::is_segment_overflow()
{
    srs_assert(current);

    /* if the current segment is shorter than twice the minimum segment
     * duration, i.e. shorter than 2*100 = 200ms, it cannot be reaped yet */
    /* to prevent very small segment. */
    if (current->duration * 1000 < 2 * SRS_AUTO_HLS_SEGMENT_MIN_DURATION_MS) {
        return false;
    }

    /* use N% deviation, to smoother. */
    /* hls_ts_floor is disabled by default, so deviation is 0.0 */
    double deviation = hls_ts_floor ?
        SRS_HLS_FLOOR_REAP_PERCENT * deviation_ts * hls_fragment : 0.0;

    return current->duration >= hls_fragment + deviation;
}
  • This function checks whether the current segment's duration has reached the segment duration specified by hls_fragment in the configuration file; if so it returns true, indicating the segment can be reaped, otherwise it returns false.
  • Assuming the current segment's duration has not yet reached hls_fragment, SrsHlsCache::write_video goes on to call SrsHlsMuxer::flush_video.

3.3 SrsHlsMuxer::flush_video

int SrsHlsMuxer::flush_video(SrsTsCache* cache)
{
    int ret = ERROR_SUCCESS;

    /* if current is NULL, segment is not open, ignore the flush event. */
    if (!current) {
        srs_warn("flush video ignored, for segment is not open.");
        return ret;
    }

    /* make sure the cache actually contains video data, otherwise return directly */
    if (!cache->video || cache->video->payload->length() <= 0) {
        return ret;
    }

    srs_assert(current);

    /* update the duration of segment. */
    current->update_duration(cache->video->dts);

    if ((ret = current->muxer->write_video(cache->video))
        != ERROR_SUCCESS) {
        return ret;
    }

    /* write success, clear and free the msg */
    srs_freep(cache->video);

    return ret;
}
  • This function first calls SrsHlsSegment::update_duration to update the current segment's duration.

3.3.1 SrsHlsSegment::update_duration

/*
 * update the segment duration.
 * @param current_frame_dts, the dts of frame, in tbn of ts.
 */
void SrsHlsSegment::update_duration(int64_t current_frame_dts)
{
    /*
     * we use video/audio to update segment duration,
     * so when reaping a segment, some previous audio frame may
     * update the segment duration with a negative value,
     * just ignore it.
     */
    if (current_frame_dts < segment_start_dts) {
        /* for atc and timestamp jump, reset the start dts. */
        if (current_frame_dts < segment_start_dts -
            SRS_AUTO_HLS_SEGMENT_TIMESTAMP_JUMP_MS * 90) {
            srs_warn("hls timestamp jump %"PRId64"=>%"PRId64,
                     segment_start_dts, current_frame_dts);
            segment_start_dts = current_frame_dts;
        }
        return;
    }

    duration = (current_frame_dts - segment_start_dts) / 90000.0;
    srs_assert(duration >= 0);

    return;
}
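  • For example (with hypothetical numbers): if segment_start_dts is 0 and the current frame's dts is 900000 in the 90 kHz timebase, the segment duration becomes 900000 / 90000.0 = 10 seconds, i.e. the segment has just reached a typical hls_fragment of 10 s.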
  • After the segment duration has been updated, control returns to SrsHlsMuxer::flush_video, which then calls SrsTSMuxer::write_video to write the video frame into the ts.

3.3.2 SrsTSMuxer::write_video

/*
 * write a video frame to ts
 */
int SrsTSMuxer::write_video(SrsTsMessage* video)
{
    int ret = ERROR_SUCCESS;

    /* write the video frame into a PES packet */
    if ((ret = context->encode(writer, video, vcodec, acodec))
        != ERROR_SUCCESS) {
        srs_error("hls encode video failed. ret=%d", ret);
        return ret;
    }

    return ret;
}

3.3.3 SrsTsContext::encode

/*
 * write the PES packet, the video/audio stream.
 * @param msg, the video/audio msg to write to ts.
 * @param vc, the video codec, write the PAT/PMT table when changed.
 * @param ac, the audio codec, write the PAT/PMT table when changed.
 */
int SrsTsContext::encode(SrsFileWriter* writer, SrsTsMessage* msg,
    SrsCodecVideo vc, SrsCodecAudio ac)
{
    int ret = ERROR_SUCCESS;

    SrsTsStream vs, as;
    int16_t video_pid = 0, audio_pid = 0;
    switch (vc) {
        case SrsCodecVideoAVC:
            vs = SrsTsStreamVideoH264;
            video_pid = TS_VIDEO_AVC_PID;
            break;
        case SrsCodecVideoDisabled:
            vs = SrsTsStreamReserved;
            break;
        case SrsCodecVideoReserved:
        case SrsCodecVideoReserved1:
        case SrsCodecVideoReserved2:
        case SrsCodecVideoSorensonH263:
        case SrsCodecVideoScreenVideo:
        case SrsCodecVideoOn2VP6:
        case SrsCodecVideoOn2VP6WithAlphaChannel:
        case SrsCodecVideoScreenVideoVersion2:
            vs = SrsTsStreamReserved;
            break;
    }
    switch (ac) {
        case SrsCodecAudioAAC:
            as = SrsTsStreamAudioAAC;
            audio_pid = TS_AUDIO_AAC_PID;
            break;
        case SrsCodecAudioMP3:
            as = SrsTsStreamAudioMp3;
            audio_pid = TS_AUDIO_MP3_PID;
            break;
        case SrsCodecAudioDisabled:
            as = SrsTsStreamReserved;
            break;
        case SrsCodecAudioReserved1:
        case SrsCodecAudioLinearPCMPlatformEndian:
        case SrsCodecAudioADPCM:
        case SrsCodecAudioLinearPCMLittleEndian:
        case SrsCodecAudioNellymoser16kHzMono:
        case SrsCodecAudioNellymoser8kHzMono:
        case SrsCodecAudioNellymoser:
        case SrsCodecAudioReservedG711AlawLogarithmicPCM:
        case SrsCodecAudioReservedG711MuLawLogarithmicPCM:
        case SrsCodecAudioReserved:
        case SrsCodecAudioSpeex:
        case SrsCodecAudioReservedMP3_8kHz:
        case SrsCodecAudioReservedDeviceSpecificSound:
            as = SrsTsStreamReserved;
            break;
    }

    if (as == SrsTsStreamReserved && vs == SrsTsStreamReserved) {
        ret = ERROR_HLS_NO_STREAM;
        srs_error("hls: no video or audio stream, vcodec=%d, acodec=%d. ret=%d",
                  vc, ac, ret);
        return ret;
    }

    /* the first time this function is called to write an audio/video frame into
     * a PES packet, the vcodec and acodec must be written into the PAT/PMT tables */
    /* when any codec changed, write PAT/PMT table. */
    if (vcodec != vc || acodec != ac) {
        vcodec = vc;
        acodec = ac;
        if ((ret = encode_pat_pmt(writer, video_pid, vs, audio_pid, as))
            != ERROR_SUCCESS) {
            return ret;
        }
    }

    if (msg->is_audio()) {
        return encode_pes(writer, msg, audio_pid, as, vs == SrsTsStreamReserved);
    } else {
        return encode_pes(writer, msg, video_pid, vs, vs == SrsTsStreamReserved);
    }
}
  • In this function, when video/audio data is written into a PES packet for the first time (or whenever the codec changes), SrsTsContext::encode_pat_pmt is called to write the video and audio codecs (vcodec and acodec) and their stream types into the PAT/PMT tables.

3.3.4 SrsTsContext::encode_pat_pmt

  • A detailed analysis of that function can be found in: SRS之TS封装PAT和PMT
  • Back in SrsTsContext::encode, after the PAT/PMT has been written into the ts file for the first time, the current message is detected to be video, so SrsTsContext::encode_pes is called to mux the video message into ts format and write it into the ts file.

3.3.5 SrsTsContext::encode_pes

A detailed analysis of this function: SRS之SrsTsContext::encode_pes详解

Original article: https://www.cnblogs.com/jimodetiantang/p/9140251.html
