FFmpeg5 Beginner Tutorial 10.22: Demuxing audio/video into PCM and YUV420P

Index: series tutorial index

Previous: FFmpeg5 Beginner Tutorial 10.21: Demuxing audio/video into MP3 and H264

Building on the previous article, this one decodes the demuxed MP3 stream to PCM and the H264 stream to YUV.

Let's start with the flowchart.

[Figure: demuxing and decoding flowchart]

First, the steps common to every example: opening the input and probing the streams.

//2. Open the input file
if (avformat_open_input(&pFormatCtx, inputFile, NULL, NULL) != 0)
{
    printf("can't open input file\n");
    return -1;
}

//3. Retrieve the stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
{
    printf("can't find stream info\n");
    return -1;
}
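Both decode functions below take the index of the stream they should work on. A minimal sketch of how those indices might be obtained with av_find_best_stream (the variable names audioStreamIndex/videoStreamIndex are illustrative, not from the original source):

//Pick the "best" audio and video streams; a negative return value means none was found
int audioStreamIndex = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
int videoStreamIndex = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (audioStreamIndex < 0 || videoStreamIndex < 0)
{
    printf("can't find audio or video stream\n");
    return -1;
}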

Decoding the audio stream

int avcodec_save_audio_file(AVFormatContext *pFormatCtx, int streamIndex, char *fileName)
{
    const AVCodec *pCodec;
    AVCodecContext *pCodecCtx;
    AVCodecParameters *codecpar = pFormatCtx->streams[streamIndex]->codecpar;

    //4. Get the decoder (1): audio
    //Look up the decoder for this stream's codec
    pCodec = avcodec_find_decoder(codecpar->codec_id);
    if (!pCodec)
    {
        printf("can't find audio decoder\n");
        return -1;
    }
    //Allocate a decoding context
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx)
    {
        printf("can't allocate an audio decoding context\n");
        return -1;
    }

    //Initialize the decoder context from the stream's codec parameters
    avcodec_parameters_to_context(pCodecCtx, codecpar);

    //Without this line you get: Could not update timestamps for skipped samples
    pCodecCtx->pkt_timebase = pFormatCtx->streams[streamIndex]->time_base;

    //5. Open the decoder
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        printf("can't open codec\n");
        return -1;
    }

    // printf("--------------- File Information ----------------\n");
    // av_dump_format(pFormatCtx, 0, fileName, 0);
    // printf("-------------------------------------------------\n");

    //Compressed data
    AVPacket *packet = av_packet_alloc();
    //Decoded (raw) data
    AVFrame *frame = av_frame_alloc();

    //Resample every frame to a single output format: 16-bit, 44100 Hz PCM
    SwrContext *swrCtx = swr_alloc();
    //Resampling options -----------------------------------------------------start
    //Input sample format
    AVSampleFormat inSampleFmt = pCodecCtx->sample_fmt;
    //Output sample format
    AVSampleFormat outSampleFmt = AV_SAMPLE_FMT_S16;
    //Input sample rate
    int inSampleRate = pCodecCtx->sample_rate;
    //Output sample rate
    int outSampleRate = 44100;
    //Input channel layout
    uint64_t inChannelLayout = pCodecCtx->channel_layout;
    //Output channel layout: AV_CH_LAYOUT_MONO for mono, AV_CH_LAYOUT_STEREO for stereo
    uint64_t outChannelLayout = AV_CH_LAYOUT_MONO;

    printf("inSampleFmt = %d, inSampleRate = %d, inChannelLayout = %d, name = %s\n", inSampleFmt, inSampleRate,
            (int) inChannelLayout, pCodec->name);

    swr_alloc_set_opts(swrCtx, outChannelLayout, outSampleFmt, outSampleRate,
            inChannelLayout, inSampleFmt, inSampleRate, 0, NULL);
    swr_init(swrCtx);
    //Resampling options -----------------------------------------------------end

    //Number of output channels
    int outChannelNb = av_get_channel_layout_nb_channels(outChannelLayout);
    printf("outChannelNb = %d\n", outChannelNb);

    //Buffer for the converted PCM data (2 * 44100 bytes = 44100 s16 mono samples)
    uint8_t *outBuffer = (uint8_t*) av_malloc(2 * 44100);

    FILE *fp = fopen(fileName, "wb");

    //Seek back to the beginning of the stream
    av_seek_frame(pFormatCtx, streamIndex, 0, AVSEEK_FLAG_BACKWARD);

    //6. Read the compressed audio data one AVPacket at a time
    while (av_read_frame(pFormatCtx, packet) >= 0)
    {
        if (packet->stream_index == streamIndex)
        {
            //Decode AVPacket --> AVFrame
            int ret = avcodec_send_packet(pCodecCtx, packet);
            if (ret < 0)
            {
                printf("Decode error\n");
                break;
            }

            //A packet may produce more than one frame, so drain them all
            while (avcodec_receive_frame(pCodecCtx, frame) >= 0)
            {
                //out_count is in samples per channel; the buffer holds 44100 of them
                int convertedSamples = swr_convert(swrCtx, &outBuffer, 44100,
                        (const uint8_t**) frame->data, frame->nb_samples);
                //Size in bytes of the converted samples
                int outBufferSize = av_samples_get_buffer_size(NULL, outChannelNb, convertedSamples, outSampleFmt, 1);
                //Write to file
                fwrite(outBuffer, 1, outBufferSize, fp);
            }
        }

        av_packet_unref(packet);
    }

    fclose(fp);
    av_packet_free(&packet);
    av_frame_free(&frame);
    av_free(outBuffer);
    swr_free(&swrCtx);
    avcodec_free_context(&pCodecCtx);

    return 0;
}
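One thing the read loop above does not do is flush the decoder: when av_read_frame reaches the end of the file, a few frames can still be buffered inside the codec. A minimal sketch of draining them, reusing the names from the function above; it would slot in between the while loop and fclose(fp):

//(Addition) Send a NULL packet to put the decoder into draining mode
avcodec_send_packet(pCodecCtx, NULL);
//Collect whatever frames are still buffered and convert them as before
while (avcodec_receive_frame(pCodecCtx, frame) >= 0)
{
    int convertedSamples = swr_convert(swrCtx, &outBuffer, 44100,
            (const uint8_t**) frame->data, frame->nb_samples);
    int outBufferSize = av_samples_get_buffer_size(NULL, outChannelNb, convertedSamples, outSampleFmt, 1);
    fwrite(outBuffer, 1, outBufferSize, fp);
}

The video function below has the same limitation; its drain loop would call sws_scale and write the three planes in exactly the same way.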

Decoding the video stream

int avcodec_save_video_file(AVFormatContext *pFormatCtx, int streamIndex, char *fileName)
{
    const AVCodec *pCodec;
    AVCodecContext *pCodecCtx;
    AVCodecParameters *codecpar = pFormatCtx->streams[streamIndex]->codecpar;

    //4. Get the decoder (2): video
    //Look up the decoder for this stream's codec
    pCodec = avcodec_find_decoder(codecpar->codec_id);
    if (!pCodec)
    {
        printf("can't find video decoder\n");
        return -1;
    }
    //Allocate a decoding context
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx)
    {
        printf("can't allocate a video decoding context\n");
        return -1;
    }

    //Initialize the decoder context from the stream's codec parameters
    avcodec_parameters_to_context(pCodecCtx, codecpar);

    //Only needed for audio ("Could not update timestamps for skipped samples"), so it stays commented out here
    // pCodecCtx->pkt_timebase = pFormatCtx->streams[streamIndex]->time_base;

    //5. Open the decoder
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        printf("can't open codec\n");
        return -1;
    }

    // printf("--------------- File Information ----------------\n");
    // av_dump_format(pFormatCtx, 0, fileName, 0);
    // printf("-------------------------------------------------\n");

    //Compressed data
    AVPacket *pPacket = av_packet_alloc();
    //Decoded (raw) data: one frame in the decoder's pixel format, one in YUV420P
    AVFrame *pFrame = av_frame_alloc();
    AVFrame *pFrameYUV = av_frame_alloc();

    unsigned char *outBuffer = (unsigned char*) av_malloc(
            av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, outBuffer,
            AV_PIX_FMT_YUV420P, pCodecCtx->width,
            pCodecCtx->height, 1);

    //Conversion context: decoder pixel format --> YUV420P, same width and height
    SwsContext *pImgConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
            pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

    printf("width = %d, height = %d, name = %s\n", pCodecCtx->width, pCodecCtx->height, pCodec->name);

    FILE *fp = fopen(fileName, "wb+");

    //Seek back to the beginning of the stream
    av_seek_frame(pFormatCtx, streamIndex, 0, AVSEEK_FLAG_BACKWARD);

    //6. Read the compressed video data one AVPacket at a time
    while (av_read_frame(pFormatCtx, pPacket) >= 0)
    {
        if (pPacket->stream_index == streamIndex)
        {
            //Decode AVPacket --> AVFrame
            int ret = avcodec_send_packet(pCodecCtx, pPacket);
            if (ret < 0)
            {
                printf("Decode error\n");
                break;
            }

            //A packet may produce more than one frame, so drain them all
            while (avcodec_receive_frame(pCodecCtx, pFrame) >= 0)
            {
                sws_scale(pImgConvertCtx, pFrame->data, pFrame->linesize, 0,
                        pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);

                int y_size = pCodecCtx->width * pCodecCtx->height;
                fwrite(pFrameYUV->data[0], 1, y_size, fp); //Y
                fwrite(pFrameYUV->data[1], 1, y_size / 4, fp); //U
                fwrite(pFrameYUV->data[2], 1, y_size / 4, fp); //V
            }
        }

        av_packet_unref(pPacket);
    }

    fclose(fp);
    av_packet_free(&pPacket);
    av_frame_free(&pFrame);
    av_frame_free(&pFrameYUV);
    av_free(outBuffer);
    sws_freeContext(pImgConvertCtx);
    avcodec_free_context(&pCodecCtx);

    return 0;
}
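Putting the two functions together, here is a minimal sketch of a driving main(); the input and output file names are placeholders, and the complete, buildable version lives in the repository linked below:

extern "C" {
#include <libavformat/avformat.h>
}

//Assumes avcodec_save_audio_file and avcodec_save_video_file (above) are in the same file
int main()
{
    char inputFile[] = "input.mp4";   //placeholder input path
    AVFormatContext *pFormatCtx = avformat_alloc_context();

    if (avformat_open_input(&pFormatCtx, inputFile, NULL, NULL) != 0)
        return -1;
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1;

    int audioStreamIndex = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    int videoStreamIndex = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);

    if (audioStreamIndex >= 0)
        avcodec_save_audio_file(pFormatCtx, audioStreamIndex, (char*) "out.pcm");
    if (videoStreamIndex >= 0)
        avcodec_save_video_file(pFormatCtx, videoStreamIndex, (char*) "out.yuv");

    avformat_close_input(&pFormatCtx);
    return 0;
}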

In the end we get the *.pcm and *.yuv files.

Let's test the result.

[Figure: output of the test run]

Playback test

PCM audio playback
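The raw PCM can be checked with ffplay, for example as below; the file name is a placeholder, and the format options match what the code above produces (signed 16-bit, 44100 Hz, mono):

ffplay -f s16le -ar 44100 -ac 1 out.pcm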

[Figure: PCM audio playback result]

YUV video playback
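The raw YUV can be played the same way; since a rawvideo file has no header, the pixel format and the resolution printed by the program have to be given on the command line (1280x720 and the file name here are only examples):

ffplay -f rawvideo -pixel_format yuv420p -video_size 1280x720 out.yuv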

[Figure: YUV video playback result]

The complete code is in 10.22.video_demuxer_mp42yuvpcm in the ffmpeg_beginner repository.

Next: FFmpeg5 Beginner Tutorial 10.23: Muxing (muxer) MP3 and H264 into MP4


FFmpeg5 Beginner Tutorial 10.22: Demuxing audio/video into PCM and YUV420P
https://feater.top/ffmpeg/ffmpeg-demuxer-video-to-pcm-and-yuv420p/
Author: JackeyLea
Published: May 23, 2021