FFmpeg in Practice Series — 002

Talk is cheap. Show me the code!

Example 1: decode_video.c


#define INBUF_SIZE 4096   /* as defined in the full example */

int main(int argc, char **argv)
{
    const char *filename, *outfilename;
    const AVCodec *codec;
    AVCodecContext *c = NULL;
    int frame_count;
    FILE *f;
    AVFrame *frame;
    uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;

    if (argc <= 2) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(0);
    }
    filename    = argv[1];
    outfilename = argv[2];

    avcodec_register_all();
    av_init_packet(&avpkt);

    /* find the MPEG-1 video decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!codec)
        exit(1);

    c = avcodec_alloc_context3(codec);
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    /* open the input file */
    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    frame_count = 0;
    for (;;) {
        avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
        if (avpkt.size == 0)
            break;                       /* EOF: leave the read loop */
        avpkt.data = inbuf;
        while (avpkt.size > 0)
            if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
                exit(1);
    }

    /* flush: decoders with AV_CODEC_CAP_DELAY may still buffer frames */
    avpkt.data = NULL;
    avpkt.size = 0;
    decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);

    fclose(f);
    avcodec_free_context(&c);
    av_frame_free(&frame);

    return 0;
}

static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
                              AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
    int len, got_frame;

    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (len < 0)
        return len;                  /* decode error */
    if (got_frame)                   /* a complete picture is now in frame->data[] */
        (*frame_count)++;            /* the full example saves it as a PGM file here */
    if (pkt->data) {                 /* advance past the bytes just consumed */
        pkt->size -= len;
        pkt->data += len;
    }
    return 0;
}
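The full routine also dumps each decoded picture to disk. For reference, here is a minimal PGM writer modeled on the helper in FFmpeg's official decode_video.c example (the name pgm_save and its signature come from that example, not from the listing above):

static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     char *filename)
{
    FILE *f = fopen(filename, "w");
    int i;

    /* PGM header: binary greyscale, width height, max sample value */
    fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
    /* each row of the luma plane is `wrap` (linesize) bytes; only xsize are pixels */
    for (i = 0; i < ysize; i++)
        fwrite(buf + i * wrap, 1, xsize, f);
    fclose(f);
}

Calling it as pgm_save(frame->data[0], frame->linesize[0], frame->width, frame->height, buf) saves the luma plane of each decoded frame.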

Let's now analyze the main data structures this example involves:

1. AVPacket avpkt;


typedef struct AVPacket {
    AVBufferRef *buf;               /* refcounted buffer backing `data`, or NULL */
    int64_t pts;                    /* presentation timestamp, in stream time_base units */
    int64_t dts;                    /* decompression timestamp */
    uint8_t *data;                  /* the compressed payload */
    int   size;
    int   stream_index;
    int   flags;                    /* AV_PKT_FLAG_* (e.g. keyframe) */
    AVPacketSideData *side_data;
    int side_data_elems;
    int64_t duration;
    int64_t pos;                    ///< byte position in stream, -1 if unknown
} AVPacket;
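A packet is just a reference to a slice of compressed data plus timing metadata. A minimal sketch of the typical lifecycle in this API generation, assuming the packet comes from a demuxer (fmt_ctx and video_stream_index are hypothetical names, not from the example above):

AVPacket pkt;
av_init_packet(&pkt);                        /* reset metadata fields to defaults */
while (av_read_frame(fmt_ctx, &pkt) >= 0) {  /* demuxer fills buf/data/size/pts/dts */
    if (pkt.stream_index == video_stream_index) {
        /* hand pkt to the decoder here */
    }
    av_packet_unref(&pkt);                   /* drop our reference to the buffer */
}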

2. AVFrame *frame;


typedef struct AVFrame {
    uint8_t *data[AV_NUM_DATA_POINTERS];
    int linesize[AV_NUM_DATA_POINTERS];
    uint8_t **extended_data;
    int width, height;
    int nb_samples;
    int format;
    int key_frame;
    enum AVPictureType pict_type;
    AVRational sample_aspect_ratio;
    int64_t pts;
    int64_t pkt_dts;
    int coded_picture_number;
    int display_picture_number;
    int quality;
    void *opaque;
    int repeat_pict;
    int interlaced_frame;
    int top_field_first;
    int palette_has_changed;
    int64_t reordered_opaque;
    int sample_rate;
    uint64_t channel_layout;
    AVBufferRef *buf[AV_NUM_DATA_POINTERS];
    AVBufferRef **extended_buf;
    int        nb_extended_buf;
    AVFrameSideData **side_data;
    int            nb_side_data;
    int flags;
    enum AVColorRange color_range;
    enum AVColorPrimaries color_primaries;
    enum AVColorTransferCharacteristic color_trc;
    enum AVColorSpace colorspace;
    enum AVChromaLocation chroma_location;
    int64_t best_effort_timestamp;
    int64_t pkt_pos;
    int64_t pkt_duration;
    AVDictionary *metadata;
    int decode_error_flags;
    int channels;
    int pkt_size;
    AVBufferRef *hw_frames_ctx;
    AVBufferRef *opaque_ref;
} AVFrame;
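The fields you touch most are data[] and linesize[]: one plane pointer and one stride per plane. A sketch of walking the samples of a decoded frame, assuming frame->format == AV_PIX_FMT_YUV420P:

/* luma plane: full resolution */
for (int y = 0; y < frame->height; y++) {
    uint8_t *y_row = frame->data[0] + y * frame->linesize[0];
    /* y_row[0 .. frame->width-1] are Y samples; linesize may exceed width (padding) */
}
/* chroma planes: half resolution in both directions for 4:2:0 */
for (int y = 0; y < frame->height / 2; y++) {
    uint8_t *u_row = frame->data[1] + y * frame->linesize[1];
    uint8_t *v_row = frame->data[2] + y * frame->linesize[2];
    /* process u_row / v_row here */
}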

3. const AVCodec *codec;


codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);

AVCodec *avcodec_find_decoder(enum AVCodecID id)
{
    return find_encdec(id, 0);
}

static AVCodec *find_encdec(enum AVCodecID id, int encoder)
{
    AVCodec *p, *experimental = NULL;
    p = first_avcodec;
    id = remap_deprecated_codec_id(id);
    while (p) {
        if ((encoder ? av_codec_is_encoder(p) : av_codec_is_decoder(p)) &&
            p->id == id) {
            if (p->capabilities & AV_CODEC_CAP_EXPERIMENTAL && !experimental) {
                experimental = p;
            } else
                return p;
        }
        p = p->next;
    }
    return experimental;
}
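find_encdec() walks a global singly linked list headed by first_avcodec, which avcodec_register_all() populates at startup with every enabled codec. A simplified sketch of the registration step, modeled on the pre-4.0 avcodec_register() (the real code also initializes static codec data, and later releases use atomics):

static AVCodec *first_avcodec = NULL;

void avcodec_register(AVCodec *codec)
{
    AVCodec **p = &first_avcodec;
    while (*p)
        p = &(*p)->next;   /* walk to the tail of the list */
    *p = codec;            /* append the new codec */
    codec->next = NULL;
}

For AV_CODEC_ID_MPEG1VIDEO, the entry the search returns is ff_mpeg1video_decoder: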

AVCodec ff_mpeg1video_decoder = {
    .name                  = "mpeg1video",
    .long_name             = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_MPEG1VIDEO,
    .priv_data_size        = sizeof(Mpeg1Context),
    .init                  = mpeg_decode_init,
    .close                 = mpeg_decode_end,
    .decode                = mpeg_decode_frame,
    .capabilities          = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
                             AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
                             AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal         = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
    .flush                 = flush,
    .max_lowres            = 3,
    .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context)
};

While we are here, a quick look at its private context:

typedef struct Mpeg1Context {
    MpegEncContext mpeg_enc_ctx;
    int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
    int repeat_field;           /* true if we must repeat the field */
    AVPanScan pan_scan;         /* some temporary storage for the panscan */
    AVStereo3D stereo3d;
    int has_stereo3d;
    uint8_t *a53_caption;
    int a53_caption_size;
    uint8_t afd;
    int has_afd;
    int slice_count;
    AVRational save_aspect;
    int save_width, save_height, save_progressive_seq;
    AVRational frame_rate_ext;  /* MPEG-2 specific framerate modificator */
    int sync;                   /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
    int tmpgexs;
    int first_slice;
    int extradata_decoded;
} Mpeg1Context;

So if you want AVCodec ff_mpeg1video_decoder to do the work for you, a Mpeg1Context must exist first, because everything the decoder works with lives in that Mpeg1Context. The Mpeg1Context itself is initialized in the decoder's init callback (mpeg_decode_init), as sketched below.
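Concretely, avcodec_alloc_context3() allocates priv_data_size bytes for the context (see section 4 below), and each decoder callback recovers its private context from avctx->priv_data. A sketch of the idiom as it appears at the top of callbacks like mpeg_decode_init and mpeg_decode_frame:

static av_cold int mpeg_decode_init(AVCodecContext *avctx)
{
    Mpeg1Context *s = avctx->priv_data;  /* allocated for us: priv_data_size bytes, zeroed */
    /* ... set up defaults, the embedded MpegEncContext, etc. ... */
    return 0;
}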

4. AVCodecContext *c = NULL;


c = avcodec_alloc_context3(codec);

AVCodecContext *avcodec_alloc_context3(const AVCodec *codec)
{
    AVCodecContext *avctx = av_malloc(sizeof(AVCodecContext));

    if (!avctx)
        return NULL;

    if (init_context_defaults(avctx, codec) < 0) {
        av_free(avctx);
        return NULL;
    }

    return avctx;
}

static int init_context_defaults(AVCodecContext *s, const AVCodec *codec)
{
    int flags = 0;
    memset(s, 0, sizeof(AVCodecContext));

    s->av_class = &av_codec_context_class;
    s->codec_type = codec ? codec->type : AVMEDIA_TYPE_UNKNOWN;
    if (codec) {
        s->codec = codec;
        s->codec_id = codec->id;
    }

    if (s->codec_type == AVMEDIA_TYPE_AUDIO)
        flags = AV_OPT_FLAG_AUDIO_PARAM;
    else if (s->codec_type == AVMEDIA_TYPE_VIDEO)
        flags = AV_OPT_FLAG_VIDEO_PARAM;
    else if (s->codec_type == AVMEDIA_TYPE_SUBTITLE)
        flags = AV_OPT_FLAG_SUBTITLE_PARAM;
    av_opt_set_defaults2(s, flags, flags);

    s->time_base           = (AVRational){0,1};
    s->framerate           = (AVRational){ 0, 1 };
    s->pkt_timebase        = (AVRational){ 0, 1 };
    s->get_buffer2         = avcodec_default_get_buffer2;
    s->get_format          = avcodec_default_get_format;
    s->execute             = avcodec_default_execute;
    s->execute2            = avcodec_default_execute2;
    s->sample_aspect_ratio = (AVRational){0,1};
    s->pix_fmt             = AV_PIX_FMT_NONE;
    s->sw_pix_fmt          = AV_PIX_FMT_NONE;
    s->sample_fmt          = AV_SAMPLE_FMT_NONE;
    s->reordered_opaque    = AV_NOPTS_VALUE;

    if (codec && codec->priv_data_size) {
        if (!s->priv_data) {
            s->priv_data = av_mallocz(codec->priv_data_size);
            if (!s->priv_data)
                return AVERROR(ENOMEM);
        }
        if (codec->priv_class) {
            *(const AVClass **)s->priv_data = codec->priv_class;
            av_opt_set_defaults(s->priv_data);
        }
    }
    if (codec && codec->defaults) {
        int ret;
        const AVCodecDefault *d = codec->defaults;
        while (d->key) {
            ret = av_opt_set(s, d->key, d->value, 0);
            av_assert0(ret >= 0);
            d++;
        }
    }
    return 0;
}
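The defaults set here are plain AVOptions, so they can be inspected or overridden between allocation and avcodec_open2() (covered in the next section). A sketch — "threads" is a real generic AVCodecContext option; error handling is omitted:

c = avcodec_alloc_context3(codec);

/* option 1: set fields directly before opening */
c->thread_count = 4;

/* option 2: go through the AVOptions / AVDictionary machinery */
AVDictionary *opts = NULL;
av_dict_set(&opts, "threads", "4", 0);   /* consumed by avcodec_open2() */
if (avcodec_open2(c, codec, &opts) < 0)
    exit(1);
av_dict_free(&opts);                      /* any leftover entries were unrecognized */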

5. avcodec_open2(c, codec, NULL)


avcodec_open2() initializes further AVCodecContext members; most importantly, it initializes the decoder itself:

ret = avctx->codec->init(avctx);

// legacy decoder
AVCodec ff_mpegvideo_decoder = {
    .name           = "mpegvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .priv_data_size = sizeof(Mpeg1Context),
    .init           = mpeg_decode_init,
    .close          = mpeg_decode_end,
    .decode         = mpeg_decode_frame,
    .capabilities   = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
                      AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
                      AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
    .flush          = flush,
    .max_lowres     = 3,
};

That one is rather convoluted, so let's look at the H.264 decoder instead — the mechanism is exactly the same:

AVCodec ff_h264_decoder = {
    .name                  = "h264",
    .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_H264,
    .priv_data_size        = sizeof(H264Context),
    .init                  = ff_h264_decode_init,
    .close                 = h264_decode_end,
    .decode                = h264_decode_frame,
    .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
                             AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
                             AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
    .flush                 = flush_dpb,
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
    .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
    .priv_class            = &h264_class,
};

av_cold int ff_h264_decode_init(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int ret;

    ret = h264_init_context(avctx, h);
    if (ret < 0)
        return ret;

    ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
    if (ret != 0) {
        av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
        return AVERROR_UNKNOWN;
    }

    /* H.264 timestamps tick per field, i.e. twice per frame */
    if (avctx->ticks_per_frame == 1) {
        if (h->avctx->time_base.den < INT_MAX / 2) {
            h->avctx->time_base.den *= 2;
        } else
            h->avctx->time_base.num /= 2;
    }
    avctx->ticks_per_frame = 2;

    if (avctx->extradata_size > 0 && avctx->extradata) {
        ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
                                       &h->ps, &h->is_avc, &h->nal_length_size,
                                       avctx->err_recognition, avctx);
        if (ret < 0) {
            h264_decode_end(avctx);
            return ret;
        }
    }

    if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
        h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
        h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
    }

    avctx->internal->allocate_progress = 1;

    ff_h264_flush_change(h);

    if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
        h->enable_er = 0;
    if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
        av_log(avctx, AV_LOG_WARNING,
               "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
               "Use it at your own risk\n");
    }

    return 0;
}

/* Initialize the decoder's context -- this is crucial: everything the
   decoder consults from here on originates in this structure. */
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
{
    int i;

    h->avctx                 = avctx;
    h->cur_chroma_format_idc = -1;

    h->picture_structure     = PICT_FRAME;
    h->workaround_bugs       = avctx->workaround_bugs;
    h->flags                 = avctx->flags;
    h->poc.prev_poc_msb      = 1 << 16;
    h->recovery_frame        = -1;
    h->frame_recovered       = 0;
    h->poc.prev_frame_num    = -1;
    h->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1;
    h->sei.unregistered.x264_build = -1;

    h->next_outputed_poc = INT_MIN;
    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
        h->last_pocs[i] = INT_MIN;

    ff_h264_sei_uninit(&h->sei);

    avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;

    h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
    h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
    if (!h->slice_ctx) {
        h->nb_slice_ctx = 0;
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        h->DPB[i].f = av_frame_alloc();
        if (!h->DPB[i].f)
            return AVERROR(ENOMEM);
    }

    h->cur_pic.f = av_frame_alloc();
    if (!h->cur_pic.f)
        return AVERROR(ENOMEM);

    h->last_pic_for_ec.f = av_frame_alloc();
    if (!h->last_pic_for_ec.f)
        return AVERROR(ENOMEM);

    for (i = 0; i < h->nb_slice_ctx; i++)
        h->slice_ctx[i].h264 = h;

    return 0;
}

6. Decoding


avcodec_decode_video2() eventually dispatches to the codec's .decode callback:

ret = avctx->codec->decode(avctx, picture, got_picture_ptr, &tmp);

static int h264_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    H264Context *h     = avctx->priv_data;
    AVFrame *pict      = data;
    int buf_index;
    int ret;

    h->flags = avctx->flags;
    h->setup_finished = 0;
    h->nb_slice_ctx_queued = 0;

    ff_h264_unref_picture(h, &h->last_pic_for_ec);

    /* end of stream, output what is still in the buffers */
    if (buf_size == 0)
        return send_next_delayed_frame(h, pict, got_frame, 0);

    if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
        int side_size;
        uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
        if (is_extra(side, side_size))
            ff_h264_decode_extradata(side, side_size,
                                     &h->ps, &h->is_avc, &h->nal_length_size,
                                     avctx->err_recognition, avctx);
    }
    if (h->is_avc && buf_size >= 9 && buf[0] == 1 && buf[2] == 0 &&
        (buf[4] & 0xFC) == 0xFC && (buf[5] & 0x1F) && buf[8] == 0x67) {
        if (is_extra(buf, buf_size))
            return ff_h264_decode_extradata(buf, buf_size,
                                            &h->ps, &h->is_avc, &h->nal_length_size,
                                            avctx->err_recognition, avctx);
    }

    buf_index = decode_nal_units(h, buf, buf_size);
    if (buf_index < 0)
        return AVERROR_INVALIDDATA;

    if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
        av_assert0(buf_index <= buf_size);
        return send_next_delayed_frame(h, pict, got_frame, buf_index);
    }

    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && (!h->cur_pic_ptr || !h->has_slice)) {
        if (avctx->skip_frame >= AVDISCARD_NONREF ||
            buf_size >= 4 && !memcmp("Q264", buf, 4))
            return buf_size;
        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
        return AVERROR_INVALIDDATA;
    }

    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
        (h->mb_y >= h->mb_height && h->mb_height)) {
        if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
            return ret;

        /* Wait for second field. */
        if (h->next_output_pic) {
            ret = finalize_frame(h, pict, h->next_output_pic, got_frame);
            if (ret < 0)
                return ret;
        }
    }

    av_assert0(pict->buf[0] || !*got_frame);

    ff_h264_unref_picture(h, &h->last_pic_for_ec);

    return get_consumed_bytes(buf_index, buf_size);
}
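Note the drain convention at the top of h264_decode_frame(): an empty packet (size 0) means "end of stream, emit whatever you still buffer". On the caller side, the old API flushes a delaying decoder like this (a sketch; c and frame are as in example 1, got_frame is a local int):

AVPacket flush_pkt;
av_init_packet(&flush_pkt);
flush_pkt.data = NULL;    /* a NULL/0 packet is the drain request */
flush_pkt.size = 0;
do {
    avcodec_decode_video2(c, frame, &got_frame, &flush_pkt);
    /* consume `frame` here while got_frame is set */
} while (got_frame);

Inside the decoder, that drain path lands in send_next_delayed_frame(), shown next: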

static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame,
                                   int *got_frame, int buf_index)
{
    int ret, i, out_idx;
    H264Picture *out = h->delayed_pic[0];

    h->cur_pic_ptr = NULL;
    h->first_field = 0;

    out_idx = 0;
    for (i = 1;
         h->delayed_pic[i] &&
         !h->delayed_pic[i]->f->key_frame &&
         !h->delayed_pic[i]->mmco_reset;
         i++)
        if (h->delayed_pic[i]->poc < out->poc) {
            out     = h->delayed_pic[i];
            out_idx = i;
        }

    for (i = out_idx; h->delayed_pic[i]; i++)
        h->delayed_pic[i] = h->delayed_pic[i + 1];

    if (out) {
        out->reference &= ~DELAYED_PIC_REF;
        ret = finalize_frame(h, dst_frame, out, got_frame);
        if (ret < 0)
            return ret;
    }

    return buf_index;
}

static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
{
    int ret;

    if (((h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
         (h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
         out->recovered)) {

        if (!h->avctx->hwaccel &&
            (out->field_poc[0] == INT_MAX ||
             out->field_poc[1] == INT_MAX)
           ) {
            int p;
            AVFrame *f = out->f;
            int field = out->field_poc[0] == INT_MAX;
            uint8_t *dst_data[4];
            int linesizes[4];
            const uint8_t *src_data[4];

            av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);

            for (p = 0; p < 4; p++) {
                dst_data[p]  = f->data[p] + (field ^ 1) * f->linesize[p];
                src_data[p]  = f->data[p] +  field      * f->linesize[p];
                linesizes[p] = 2 * f->linesize[p];
            }

            av_image_copy(dst_data, linesizes, src_data, linesizes,
                          f->format, f->width, f->height >> 1);
        }

        ret = output_frame(h, dst, out);
        if (ret < 0)
            return ret;

        *got_frame = 1;

        if (CONFIG_MPEGVIDEO) {
            ff_print_debug_info2(h->avctx, dst, NULL,
                                 out->mb_type,
                                 out->qscale_table,
                                 out->motion_val,
                                 NULL,
                                 h->mb_width, h->mb_height, h->mb_stride, 1);
        }
    }

    return 0;
}

static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
{
    AVFrame *src = srcp->f;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
    int i;
    int ret;

    if (src->format == AV_PIX_FMT_VIDEOTOOLBOX && src->buf[0]->size == 1)
        return AVERROR_EXTERNAL;

    ret = av_frame_ref(dst, src);
    if (ret < 0)
        return ret;

    av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(&h->sei.frame_packing), 0);

    if (srcp->sei_recovery_frame_cnt == 0)
        dst->key_frame = 1;

    if (!srcp->crop)
        return 0;

    for (i = 0; i < desc->nb_components; i++) {
        int hshift = (i > 0) ? desc->log2_chroma_w : 0;
        int vshift = (i > 0) ? desc->log2_chroma_h : 0;
        int off    = ((srcp->crop_left >> hshift) << h->pixel_shift) +
                      (srcp->crop_top  >> vshift) * dst->linesize[i];

        dst->data[i] += off;
    }

    return 0;
}
