Android FFmpeg: extracting the I-frames and P-frames from a video


In real projects you may need to extract the I-frames or P-frames from a video. This post walks through how to implement that with FFmpeg; the code is as follows:

int mp4_to_yuv(const char *input_path, const char *output_path)
{
    //1. Register all components (deprecated and a no-op since FFmpeg 4.0, still required on older builds)
    av_register_all();

    //2. Open the input video file
    AVFormatContext *pFormatCtx = avformat_alloc_context();
    if (avformat_open_input(&pFormatCtx, input_path, NULL, NULL) < 0)
    {
        printf("Cannot open input file\n");
        return -1;
    }

    //3. Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
    {
        printf("Cannot find stream info\n");
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    //4. Locate the video stream
    int video_stream_index = -1;
    int i = 0;
    for (; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            video_stream_index = i;
            break;
        }
    }
    if (video_stream_index == -1)
    {
        printf("Cannot find a video stream\n");
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    //5. Get the decoder for the video stream
    AVCodecContext *pCodeCtx = pFormatCtx->streams[video_stream_index]->codec;

    AVCodec *pCodec = avcodec_find_decoder(pCodeCtx->codec_id);
    if (pCodec == NULL)
    {
        av_log(pFormatCtx, AV_LOG_ERROR, "Cannot find decoder.\n");
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    //6. Open the decoder
    if (avcodec_open2(pCodeCtx, pCodec, NULL) < 0)
    {
        printf("Cannot open codec\n");
        avformat_close_input(&pFormatCtx);
        return -1;
    }
    //7. Decode frame by frame
    int got_picture_ptr;
    // Compressed (encoded) data
    AVPacket *packet = (AVPacket *) av_malloc(sizeof(AVPacket));
    av_init_packet(packet);
    // Decoded (raw) frames
    AVFrame *frame = av_frame_alloc();
    AVFrame *yuvFrame = av_frame_alloc();

    // Output buffer for frames converted to YUV420P
    uint8_t *out_buffer = (uint8_t *) av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodeCtx->width, pCodeCtx->height));

    // Bind the buffer to yuvFrame's data/linesize pointers
    avpicture_fill((AVPicture *) yuvFrame, out_buffer, AV_PIX_FMT_YUV420P, pCodeCtx->width,
                   pCodeCtx->height);
    // SwsContext for pixel-format conversion / scaling
    struct SwsContext *sws_ctx = sws_getContext(pCodeCtx->width, pCodeCtx->height, pCodeCtx->pix_fmt,
                                                pCodeCtx->width, pCodeCtx->height, AV_PIX_FMT_YUV420P,
                                                SWS_BILINEAR, NULL, NULL, NULL);

    // Output file
    FILE *fp_yuv = fopen(output_path, "wb");
    // Read compressed packets one by one
    while (av_read_frame(pFormatCtx, packet) >= 0)
    {
        // Only handle packets that belong to the video stream
        if (packet->stream_index == video_stream_index)
        {
            avcodec_decode_video2(pCodeCtx, frame, &got_picture_ptr, packet);

            // Only inspect the frame once a complete picture has been decoded
            if (got_picture_ptr)
            {
                // Keep only I-frames and P-frames
                if ((frame->pict_type == AV_PICTURE_TYPE_I) || (frame->pict_type == AV_PICTURE_TYPE_P))
                {
                    printf("this is an I or P picture\n");
                    // frame -> yuvFrame: convert to the target YUV420P pixel format
                    sws_scale(sws_ctx, (const uint8_t *const *) frame->data, frame->linesize, 0,
                              frame->height, yuvFrame->data, yuvFrame->linesize);
                    // Size of the Y plane
                    int y_size = pCodeCtx->width * pCodeCtx->height;
                    // For YUV420P the Y:U:V plane sizes are in a 4:1:1 ratio
                    fwrite(yuvFrame->data[0], 1, y_size, fp_yuv);
                    fwrite(yuvFrame->data[1], 1, y_size / 4, fp_yuv);
                    fwrite(yuvFrame->data[2], 1, y_size / 4, fp_yuv);
                }

                // To extract B-frames instead, test for AV_PICTURE_TYPE_B here
                // and write the planes exactly as in the I/P branch above.
            }
        }
        // Release the packet whether or not it belonged to the video stream
        av_packet_unref(packet);
    }
    //8. Release resources
    fclose(fp_yuv);
    sws_freeContext(sws_ctx);
    av_free(out_buffer);
    av_free(packet);
    av_frame_free(&frame);
    av_frame_free(&yuvFrame);
    avcodec_close(pCodeCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}
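
Note: avcodec_decode_video2() used above has been deprecated since FFmpeg 3.1 in favour of the send/receive API (a fully modern version would also replace stream->codec with avcodec_parameters_to_context()). As a minimal, untested sketch only, the decoding part of the read loop could be rewritten like this, reusing the same pCodeCtx, frame, yuvFrame, sws_ctx and fp_yuv variables set up in the function above:

        // Sketch: same I/P extraction with the newer decode API (FFmpeg 3.1+).
        // One packet may yield zero or more decoded frames.
        if (packet->stream_index == video_stream_index)
        {
            if (avcodec_send_packet(pCodeCtx, packet) == 0)
            {
                while (avcodec_receive_frame(pCodeCtx, frame) == 0)
                {
                    if (frame->pict_type == AV_PICTURE_TYPE_I ||
                        frame->pict_type == AV_PICTURE_TYPE_P)
                    {
                        sws_scale(sws_ctx, (const uint8_t *const *) frame->data, frame->linesize, 0,
                                  frame->height, yuvFrame->data, yuvFrame->linesize);
                        int y_size = pCodeCtx->width * pCodeCtx->height;
                        fwrite(yuvFrame->data[0], 1, y_size, fp_yuv);
                        fwrite(yuvFrame->data[1], 1, y_size / 4, fp_yuv);
                        fwrite(yuvFrame->data[2], 1, y_size / 4, fp_yuv);
                    }
                }
            }
            // A production build would also distinguish AVERROR(EAGAIN) / AVERROR_EOF here.
        }
        av_packet_unref(packet);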




int main()
{
    
    mp4_to_yuv("/home/file/testvideo.mp4",
                "/home/file/testvideo.yuv");
    
    
    return 0;   
}
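
To try the code on a desktop Linux machine before wiring it into an Android project, a build-and-verify session might look like the following. The file name mp4_to_yuv.c is just an assumed name, and the -video_size passed to ffplay must match your source video; 1920x1080 below is only an example:

    # Build against the FFmpeg development libraries (standard pkg-config names)
    gcc mp4_to_yuv.c -o mp4_to_yuv $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil)

    # Run, then play back the raw YUV420P output to check the extracted frames
    ./mp4_to_yuv
    ffplay -f rawvideo -pixel_format yuv420p -video_size 1920x1080 /home/file/testvideo.yuv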